├── .gitattributes ├── test ├── test_helper.exs ├── support │ └── test_util.ex ├── nif │ ├── last_insert_rowid_test.exs │ ├── connection_test.exs │ ├── pragma_test.exs │ ├── read_only_db_test.exs │ ├── error_input_test.exs │ ├── transaction_test.exs │ ├── cancellation_test.exs │ ├── query_test.exs │ ├── stream_test.exs │ └── execution_test.exs ├── xqlite_test.exs ├── pragma_test.exs └── schema_introspection_test.exs ├── native └── xqlitenif │ ├── .rustfmt.toml │ ├── config.toml │ ├── Cargo.toml │ ├── src │ ├── cancel.rs │ ├── lib.rs │ ├── schema.rs │ ├── stream.rs │ └── util.rs │ └── Cargo.lock ├── .iex.exs ├── .formatter.exs ├── lib ├── xqlite │ ├── schema │ │ ├── database_info.ex │ │ ├── index_info.ex │ │ ├── schema_object_info.ex │ │ ├── types.ex │ │ ├── index_column_info.ex │ │ ├── foreign_key_info.ex │ │ └── column_info.ex │ ├── pragma_util.ex │ └── stream_resource_callbacks.ex ├── mix │ └── tasks │ │ └── test_seq.ex └── xqlite.ex ├── .gitignore ├── LICENSE.md ├── mix.exs ├── mix.lock ├── scripts └── release.sh ├── .github └── workflows │ └── ci.yml └── README.md /.gitattributes: -------------------------------------------------------------------------------- 1 | * text=auto eol=lf 2 | -------------------------------------------------------------------------------- /test/test_helper.exs: -------------------------------------------------------------------------------- 1 | ExUnit.start() 2 | -------------------------------------------------------------------------------- /native/xqlitenif/.rustfmt.toml: -------------------------------------------------------------------------------- 1 | max_width = 95 -------------------------------------------------------------------------------- /.iex.exs: -------------------------------------------------------------------------------- 1 | IEx.configure(width: 200, inspect: [charlists: :as_lists, limit: 20_000]) 2 | -------------------------------------------------------------------------------- /.formatter.exs: 
-------------------------------------------------------------------------------- 1 | [ 2 | inputs: ["{mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"], 3 | line_length: 95 4 | ] 5 | -------------------------------------------------------------------------------- /native/xqlitenif/config.toml: -------------------------------------------------------------------------------- 1 | [target.x86_64-apple-darwin] 2 | rustflags = [ 3 | "-C", "link-arg=-undefined", 4 | "-C", "link-arg=dynamic_lookup", 5 | ] 6 | -------------------------------------------------------------------------------- /native/xqlitenif/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "xqlitenif" 3 | version = "0.3.1" 4 | authors = ["Dimitar Panayotov "] 5 | edition = "2018" 6 | 7 | [lib] 8 | name = "xqlitenif" 9 | path = "src/lib.rs" 10 | crate-type = ["cdylib"] 11 | 12 | [dependencies] 13 | rusqlite = { version = "0.37.0", features = [ 14 | "bundled", 15 | "hooks", 16 | "modern_sqlite", 17 | ] } 18 | rustler = { version = "0.37.0" } 19 | 20 | [profile.release] 21 | lto = true 22 | codegen-units = 1 23 | -------------------------------------------------------------------------------- /lib/xqlite/schema/database_info.ex: -------------------------------------------------------------------------------- 1 | defmodule Xqlite.Schema.DatabaseInfo do 2 | @moduledoc """ 3 | Information about an attached database, corresponding to `PRAGMA database_list`. 4 | """ 5 | 6 | @typedoc """ 7 | Struct definition. 8 | 9 | * `:name` - The logical name of the database (e.g., "main", "temp", or attached name). 10 | * `:file` - The absolute path to the database file, or `nil` for in-memory/temporary databases. 
11 | """ 12 | @type t :: %__MODULE__{ 13 | name: String.t(), 14 | file: String.t() | nil 15 | } 16 | 17 | defstruct [ 18 | :name, 19 | :file 20 | ] 21 | end 22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # The directory Mix will write compiled artifacts to. 2 | /_build/ 3 | 4 | # If you run "mix test --cover", coverage assets end up here. 5 | /cover/ 6 | 7 | # The directory Mix downloads your dependencies sources to. 8 | /deps/ 9 | 10 | # Where third-party dependencies like ExDoc output generated docs. 11 | /doc/ 12 | 13 | # Ignore .fetch files in case you like to edit your project deps locally. 14 | /.fetch 15 | 16 | # If the VM crashes, it generates a dump, let's ignore it too. 17 | erl_crash.dump 18 | 19 | # Also ignore archive artifacts (built via "mix archive.build"). 20 | *.ez 21 | 22 | # Ignore package tarball (built via "mix hex.build"). 23 | xqlite-*.tar 24 | 25 | # Local tool versions should not be enforced on the users of the app. 26 | .tool-versions 27 | 28 | # Native compiled artifacts. 29 | /priv/native 30 | /native/xqlitenif/target 31 | 32 | # Dialyzer artifacts. 
33 | /priv/plts 34 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright 2025 Dimitar Panayotov (@dimitarvp on GitHub) 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 6 | 7 | THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 8 | -------------------------------------------------------------------------------- /lib/xqlite/schema/index_info.ex: -------------------------------------------------------------------------------- 1 | defmodule Xqlite.Schema.IndexInfo do 2 | @moduledoc """ 3 | Information about an index on a table, corresponding to `PRAGMA index_list`. 4 | """ 5 | 6 | alias Xqlite.Schema.Types 7 | 8 | @typedoc """ 9 | Struct definition. 10 | 11 | * `:name` - Name of the index. SQLite automatically generates names for indexes created by constraints (e.g., `sqlite_autoindex_tablename_1`). 12 | * `:unique` - `true` if the index enforces uniqueness, `false` otherwise. 
13 | * `:origin` - How the index was created (see `t:Types.index_origin/0`). `:primary_key` for primary key constraints, `:unique_constraint` for unique constraints, `:create_index` for `CREATE INDEX` statements. 14 | * `:partial` - `true` if the index is partial (has a `WHERE` clause), `false` otherwise. 15 | """ 16 | @type t :: %__MODULE__{ 17 | name: String.t(), 18 | unique: boolean(), 19 | origin: Types.index_origin(), 20 | partial: boolean() 21 | } 22 | 23 | defstruct [ 24 | :name, 25 | :unique, 26 | :origin, 27 | :partial 28 | ] 29 | end 30 | -------------------------------------------------------------------------------- /lib/xqlite/schema/schema_object_info.ex: -------------------------------------------------------------------------------- 1 | defmodule Xqlite.Schema.SchemaObjectInfo do 2 | @moduledoc """ 3 | Information about a schema object (table, view, etc.), corresponding to `PRAGMA table_list`. 4 | Note: `PRAGMA table_list` primarily lists tables, views, and virtual tables. 5 | """ 6 | 7 | alias Xqlite.Schema.Types 8 | 9 | @typedoc """ 10 | Struct definition. 11 | 12 | * `:schema` - Name of the schema containing the object (e.g., "main"). 13 | * `:name` - Name of the object. 14 | * `:object_type` - The type of object (see `t:Types.object_type/0`). 15 | * `:column_count` - Number of columns (meaningful for tables/views). 16 | * `:is_without_rowid` - `true` if the table was created with the `WITHOUT ROWID` optimization, `false` otherwise. This is derived from the `wr` column in `PRAGMA table_list`. 17 | * `:strict` - `true` if the table was declared using `STRICT` mode, `false` otherwise. 
18 | """ 19 | @type t :: %__MODULE__{ 20 | schema: String.t(), 21 | name: String.t(), 22 | object_type: Types.object_type(), 23 | column_count: integer(), 24 | is_without_rowid: boolean(), 25 | strict: boolean() 26 | } 27 | 28 | defstruct [ 29 | :schema, 30 | :name, 31 | :object_type, 32 | :column_count, 33 | :is_without_rowid, 34 | :strict 35 | ] 36 | end 37 | -------------------------------------------------------------------------------- /lib/mix/tasks/test_seq.ex: -------------------------------------------------------------------------------- 1 | # lib/mix/tasks/test_seq.ex 2 | defmodule Mix.Tasks.Test.Seq do 3 | use Mix.Task 4 | 5 | @shortdoc "Run tests sequentially, one file at a time" 6 | 7 | def run(args) do 8 | test_files = find_test_files() 9 | 10 | IO.puts("Found #{length(test_files)} test files") 11 | IO.puts("Running tests sequentially...") 12 | 13 | failed_files = run_test_files(test_files, args, []) 14 | 15 | if failed_files != [] do 16 | IO.puts("\nFailed files: #{Enum.join(failed_files, ", ")}") 17 | System.halt(1) 18 | else 19 | IO.puts("\n✓ All tests passed!") 20 | end 21 | end 22 | 23 | defp run_test_files([], _args, failed_files), do: failed_files 24 | 25 | defp run_test_files([file | rest], args, failed_files) do 26 | IO.puts("\n=== Running #{file} ===") 27 | 28 | # Use a different task name to avoid infinite recursion 29 | case System.cmd("mix", ["test", file] ++ args, into: IO.stream()) do 30 | {_, 0} -> 31 | IO.puts("✓ #{file} passed") 32 | run_test_files(rest, args, failed_files) 33 | 34 | {_, exit_code} -> 35 | IO.puts("✗ #{file} failed (exit code: #{exit_code})") 36 | run_test_files(rest, args, [file | failed_files]) 37 | end 38 | end 39 | 40 | defp find_test_files do 41 | Path.wildcard("test/**/*_test.exs") 42 | |> Enum.sort() 43 | end 44 | end 45 | -------------------------------------------------------------------------------- /lib/xqlite/schema/types.ex: 
-------------------------------------------------------------------------------- 1 | defmodule Xqlite.Schema.Types do 2 | @moduledoc """ 3 | Defines shared types used across schema information structs. 4 | """ 5 | 6 | @typedoc """ 7 | The type of schema object (table, view, etc.). 8 | """ 9 | @type object_type :: :table | :view | :shadow | :virtual | :sequence 10 | 11 | @typedoc """ 12 | The resolved type affinity of a column. 13 | """ 14 | @type type_affinity :: :text | :numeric | :integer | :real | :blob 15 | 16 | @typedoc """ 17 | The action to take on a foreign key constraint violation. 18 | """ 19 | @type fk_action :: :no_action | :restrict | :set_null | :set_default | :cascade 20 | 21 | @typedoc """ 22 | The foreign key matching clause type (rarely used beyond `:none`). 23 | """ 24 | @type fk_match :: :none | :simple | :partial | :full 25 | 26 | @typedoc """ 27 | The origin of an index (how it was created). 28 | """ 29 | @type index_origin :: :create_index | :unique_constraint | :primary_key_constraint 30 | 31 | @typedoc """ 32 | The sort order for a column within an index. 33 | """ 34 | @type sort_order :: :asc | :desc 35 | 36 | @typedoc """ 37 | Indicates if and how a column is hidden/generated. 38 | From PRAGMA table_xinfo: 39 | * `:normal` - A normal, visible column. 40 | * `:hidden_alias` - A hidden column (e.g., a rowid alias in a WITHOUT ROWID table). 41 | * `:virtual_generated` - A VIRTUAL generated column. 42 | * `:stored_generated` - A STORED generated column. 43 | """ 44 | @type column_hidden_kind :: :normal | :hidden_alias | :virtual_generated | :stored_generated 45 | end 46 | -------------------------------------------------------------------------------- /lib/xqlite/schema/index_column_info.ex: -------------------------------------------------------------------------------- 1 | defmodule Xqlite.Schema.IndexColumnInfo do 2 | @moduledoc """ 3 | Information about a specific column within an index, corresponding to `PRAGMA index_xinfo`. 
4 | """ 5 | 6 | alias Xqlite.Schema.Types 7 | 8 | @typedoc """ 9 | Struct definition. 10 | 11 | * `:index_column_sequence` - 0-based position of this column within the index key/definition. 12 | * `:table_column_id` - The ID (0-based index) of the column in the base table definition, corresponding to the `cid` from `PRAGMA table_info`. A value of `-1` indicates that the indexed item is an expression, not a direct table column. 13 | * `:name` - Name of the table column included in the index, or `nil` if the index is on an expression. 14 | * `:sort_order` - Sort order for this column (`:asc` or `:desc`). 15 | * `:collation` - Name of the collation sequence used for this column (e.g., "BINARY", "NOCASE", "RTRIM"). 16 | * `:is_key_column` - `true` if this column is part of the primary index key used for lookups. `false` if it's an "included" column (only stored in the index, part of covering indexes, SQLite >= 3.9.0). 17 | """ 18 | @type t :: %__MODULE__{ 19 | index_column_sequence: integer(), 20 | table_column_id: integer(), 21 | name: String.t() | nil, 22 | sort_order: Types.sort_order(), 23 | collation: String.t(), 24 | is_key_column: boolean() 25 | } 26 | 27 | defstruct [ 28 | :index_column_sequence, 29 | :table_column_id, 30 | :name, 31 | :sort_order, 32 | :collation, 33 | :is_key_column 34 | ] 35 | end 36 | -------------------------------------------------------------------------------- /lib/xqlite/schema/foreign_key_info.ex: -------------------------------------------------------------------------------- 1 | defmodule Xqlite.Schema.ForeignKeyInfo do 2 | @moduledoc """ 3 | Information about a foreign key constraint originating from a table, 4 | corresponding to `PRAGMA foreign_key_list`. 5 | """ 6 | 7 | alias Xqlite.Schema.Types 8 | 9 | @typedoc """ 10 | Struct definition. 11 | 12 | * `:id` - ID of the foreign key constraint (0-based index for the table). 13 | * `:column_sequence` - 0-based index of the column within the foreign key constraint (for compound FKs). 
14 | * `:target_table` - Name of the table referenced by the foreign key. 15 | * `:from_column` - Name of the column in the current table that is part of the foreign key. 16 | * `:to_column` - Name of the column in the target table that is referenced. Can be `nil` if the FK targets a UNIQUE constraint rather than a primary key. 17 | * `:on_update` - Action taken on update (see `t:Types.fk_action/0`). 18 | * `:on_delete` - Action taken on delete (see `t:Types.fk_action/0`). 19 | * `:match_clause` - The `MATCH` clause specified (see `t:Types.fk_match/0`). 20 | """ 21 | @type t :: %__MODULE__{ 22 | id: integer(), 23 | column_sequence: integer(), 24 | target_table: String.t(), 25 | from_column: String.t(), 26 | to_column: String.t() | nil, 27 | on_update: Types.fk_action(), 28 | on_delete: Types.fk_action(), 29 | match_clause: Types.fk_match() 30 | } 31 | 32 | defstruct [ 33 | :id, 34 | :column_sequence, 35 | :target_table, 36 | :from_column, 37 | :to_column, 38 | :on_update, 39 | :on_delete, 40 | :match_clause 41 | ] 42 | end 43 | -------------------------------------------------------------------------------- /lib/xqlite/schema/column_info.ex: -------------------------------------------------------------------------------- 1 | defmodule Xqlite.Schema.ColumnInfo do 2 | @moduledoc """ 3 | Information about a specific column in a table, corresponding to `PRAGMA table_info`. 4 | """ 5 | 6 | alias Xqlite.Schema.Types 7 | 8 | @typedoc """ 9 | Struct definition. 10 | 11 | * `:column_id` - The zero-indexed ID of the column within the table. 12 | * `:name` - Name of the column. 13 | * `:type_affinity` - The resolved data type affinity (see `t:Types.type_affinity/0`). 14 | * `:declared_type` - The original data type string exactly as declared in the `CREATE TABLE` statement (e.g., "VARCHAR(50)", "INTEGER", "BOOLEAN"). 15 | * `:nullable` - `true` if the column allows NULL values, `false` otherwise (derived from `NOT NULL` constraint). 
16 | * `:default_value` - The default value expression as a string literal (e.g., "'default'", "123", "CURRENT_TIMESTAMP"), or `nil` if no default. 17 | * `:primary_key_index` - If this column is part of the primary key, its 1-based index within the key (e.g., 1 for single PK, 1 or 2 for compound PK). `0` if not part of the primary key. 18 | * `:hidden_kind` - Indicates if and how a column is hidden/generated (see `t:Types.column_hidden_kind/0`). 19 | """ 20 | @type t :: %__MODULE__{ 21 | column_id: integer(), 22 | name: String.t(), 23 | type_affinity: Types.type_affinity(), 24 | declared_type: String.t(), 25 | nullable: boolean(), 26 | default_value: String.t() | nil, 27 | primary_key_index: non_neg_integer(), 28 | hidden_kind: Types.column_hidden_kind() 29 | } 30 | 31 | defstruct [ 32 | :column_id, 33 | :name, 34 | :type_affinity, 35 | :declared_type, 36 | :nullable, 37 | :default_value, 38 | :primary_key_index, 39 | :hidden_kind 40 | ] 41 | end 42 | -------------------------------------------------------------------------------- /native/xqlitenif/src/cancel.rs: -------------------------------------------------------------------------------- 1 | use rusqlite::Connection; 2 | use rustler::{resource_impl, Resource}; 3 | use std::fmt::Debug; 4 | use std::os::raw::c_int; 5 | use std::sync::atomic::{AtomicBool, Ordering}; 6 | use std::sync::Arc; 7 | 8 | #[derive(Debug)] 9 | pub(crate) struct XqliteCancelToken(pub(crate) Arc); 10 | 11 | #[resource_impl] 12 | impl Resource for XqliteCancelToken {} 13 | 14 | impl XqliteCancelToken { 15 | pub(crate) fn new() -> Self { 16 | XqliteCancelToken(Arc::new(AtomicBool::new(false))) 17 | } 18 | 19 | pub(crate) fn cancel(&self) { 20 | self.0.store(true, Ordering::Release); 21 | } 22 | } 23 | 24 | // --- RAII Guard for Progress Handler --- 25 | // Needs a lifetime tied to the Connection reference it holds temporarily 26 | pub(crate) struct ProgressHandlerGuard<'conn> { 27 | conn: &'conn Connection, 28 | // Store a flag to ensure 
unregister is only called if register succeeded 29 | is_registered: bool, 30 | } 31 | 32 | impl<'conn> ProgressHandlerGuard<'conn> { 33 | pub(crate) fn new( 34 | conn: &'conn Connection, 35 | token_bool: Arc, 36 | interval: i32, 37 | ) -> Result { 38 | let handler = move || -> bool { 39 | if token_bool.load(Ordering::Relaxed) { 40 | true // Return true (non-zero) to interrupt 41 | } else { 42 | false // Return false (zero) to keep going 43 | } 44 | }; 45 | 46 | // The progress_handler function itself doesn't return a Result we can use `?` on. 47 | // It doesn't typically error in a way that prevents registration if arguments are valid. 48 | // We'll assume registration works if types are correct. 49 | conn.progress_handler(interval as c_int, Some(handler)); 50 | 51 | Ok(ProgressHandlerGuard { 52 | conn, 53 | is_registered: true, 54 | }) // Assume success if we get to here 55 | } 56 | } 57 | 58 | impl Drop for ProgressHandlerGuard<'_> { 59 | fn drop(&mut self) { 60 | if self.is_registered { 61 | self.conn.progress_handler(0, None:: bool>); 62 | } 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /lib/xqlite/pragma_util.ex: -------------------------------------------------------------------------------- 1 | defmodule Xqlite.PragmaUtil do 2 | @moduledoc ~S""" 3 | A module with zero dependencies on the rest of the modules in this library. 4 | Its functions are useful for being invoked in module definitions of other modules. 
5 | """ 6 | 7 | @type spec :: keyword() 8 | @type arg_type :: :blob | :bool | :int | :list | :nothing | :real | :text 9 | @type pragma :: {atom(), spec()} 10 | @type pragma_specs :: %{required(atom()) => spec()} 11 | @type filter :: (pragma() -> boolean()) 12 | 13 | defguard is_spec(x) when is_list(x) 14 | defguard is_arg_type(x) when x in [:blob, :bool, :int, :list, :nothing, :real, :text] 15 | defguard is_pragma(x) when is_tuple(x) and is_atom(elem(x, 0)) and is_spec(elem(x, 1)) 16 | defguard is_pragma_specs(x) when is_map(x) 17 | defguard is_filter(x) when is_function(x, 1) 18 | 19 | @spec readable?(pragma()) :: boolean() 20 | def readable?({_name, spec} = pragma) when is_pragma(pragma), do: Keyword.has_key?(spec, :r) 21 | 22 | @spec readable_with_zero_args?(pragma()) :: boolean() 23 | def readable_with_zero_args?({_name, spec} = pragma) when is_pragma(pragma) do 24 | spec 25 | |> Keyword.get_values(:r) 26 | |> Enum.any?(fn pragma_return -> match?({0, _, _}, pragma_return) end) 27 | end 28 | 29 | @spec readable_with_one_arg?(pragma()) :: boolean() 30 | def readable_with_one_arg?({_name, spec} = pragma) when is_pragma(pragma) do 31 | spec 32 | |> Keyword.get_values(:r) 33 | |> Enum.any?(fn pragma_return -> match?({1, _, _, _}, pragma_return) end) 34 | end 35 | 36 | @spec writable?(pragma()) :: boolean() 37 | def writable?({_name, spec} = pragma) when is_pragma(pragma), do: Keyword.has_key?(spec, :w) 38 | 39 | @spec returns_type?(pragma(), arg_type()) :: boolean() 40 | def returns_type?({_name, spec} = pragma, type) 41 | when is_pragma(pragma) and is_arg_type(type) do 42 | Enum.any?(spec, fn 43 | {:r, {0, _, ^type}} -> true 44 | {:r, {1, _, _, ^type}} -> true 45 | {:w, {_, _, ^type}} -> true 46 | _ -> false 47 | end) 48 | end 49 | 50 | @spec of_type(pragma_specs(), arg_type()) :: [atom()] 51 | def of_type(pragma_specs, type) when is_pragma_specs(pragma_specs) and is_arg_type(type) do 52 | filter(pragma_specs, fn pragma -> returns_type?(pragma, type) end) 53 | end 
54 | 55 | @spec filter(pragma_specs(), filter()) :: [atom()] 56 | def filter(pragma_specs, func) when is_pragma_specs(pragma_specs) and is_filter(func) do 57 | pragma_specs 58 | |> Stream.filter(fn pragma -> func.(pragma) end) 59 | |> Stream.map(fn {name, _spec} -> name end) 60 | |> Enum.sort() 61 | end 62 | end 63 | -------------------------------------------------------------------------------- /native/xqlitenif/src/lib.rs: -------------------------------------------------------------------------------- 1 | rustler::atoms! { 2 | asc, 3 | atom, 4 | binary, 5 | cannot_convert_atom_to_string, 6 | cannot_convert_to_sqlite_value, 7 | cannot_execute, 8 | cannot_execute_pragma, 9 | cannot_fetch_row, 10 | cannot_open_database, 11 | cannot_prepare_statement, 12 | cascade, 13 | code, 14 | columns, 15 | constraint_check, 16 | constraint_commit_hook, 17 | constraint_datatype, 18 | constraint_foreign_key, 19 | constraint_function, 20 | constraint_not_null, 21 | constraint_pinned, 22 | constraint_primary_key, 23 | constraint_rowid, 24 | constraint_trigger, 25 | constraint_unique, 26 | constraint_violation, 27 | constraint_vtab, 28 | create_index, 29 | database_busy_or_locked, 30 | desc, 31 | done, 32 | error, 33 | execute_returned_results, 34 | expected, 35 | expected_keyword_list, 36 | expected_keyword_tuple, 37 | expected_list, 38 | float, 39 | from_sql_conversion_failure, 40 | full, 41 | function, 42 | hidden_alias, 43 | index_exists, 44 | integer, 45 | integral_value_out_of_range, 46 | internal_encoding_error, 47 | invalid_batch_size, 48 | invalid_column_index, 49 | invalid_column_name, 50 | invalid_column_type, 51 | invalid_parameter_count, 52 | invalid_parameter_name, 53 | invalid_stream_handle, 54 | list, 55 | lock_error, 56 | map, 57 | message, 58 | minimum, 59 | multiple_statements, 60 | no_action, 61 | no_such_index, 62 | no_such_table, 63 | no_value, 64 | none, 65 | normal, 66 | null_byte_in_string, 67 | num_rows, 68 | numeric, 69 | offset, 70 | operation_cancelled, 
71 | partial, 72 | pid, 73 | port, 74 | primary_key_constraint, 75 | provided, 76 | read_only_database, 77 | reference, 78 | restrict, 79 | rows, 80 | schema_changed, 81 | schema_parsing_error, 82 | sequence, 83 | set_default, 84 | set_null, 85 | shadow, 86 | simple, 87 | sql, 88 | sql_input_error, 89 | sqlite_failure, 90 | stored_generated, 91 | string, 92 | table, 93 | table_exists, 94 | text, 95 | to_sql_conversion_failure, 96 | tuple, 97 | unexpected_value, 98 | unique_constraint, 99 | unknown, 100 | unsupported_atom, 101 | unsupported_data_type, 102 | utf8_error, 103 | r#virtual, 104 | virtual_generated, 105 | view 106 | } 107 | 108 | mod cancel; 109 | mod error; 110 | mod nif; 111 | mod schema; 112 | mod stream; 113 | mod util; 114 | 115 | use rustler::{Env, Term}; 116 | 117 | fn on_load(_env: Env, _info: Term) -> bool { 118 | true 119 | } 120 | 121 | rustler::init!("Elixir.XqliteNIF", load = on_load); 122 | -------------------------------------------------------------------------------- /test/support/test_util.ex: -------------------------------------------------------------------------------- 1 | defmodule Xqlite.TestUtil do 2 | alias XqliteNIF, as: NIF 3 | 4 | # A list of: an ExUnit tag, a `describe` block prefix, and a MFA to open a connection. 5 | # This data structure is used to generate tests for different DB types. 
6 | @connection_openers [ 7 | {:memory_private, "private in-memory DB", {__MODULE__, :open_in_memory, []}}, 8 | {:file_temp, "temporary file DB", {__MODULE__, :open_temporary, []}} 9 | ] 10 | 11 | @tag_to_mfa_map Map.new(@connection_openers, fn {tag, _prefix, mfa} -> {tag, mfa} end) 12 | 13 | defp open_and_configure({mod, fun, args}) do 14 | with {:ok, conn} <- apply(mod, fun, args) do 15 | :ok = NIF.set_pragma(conn, "journal_mode", "WAL") 16 | :ok = NIF.set_pragma(conn, "journal_size_limit", 0) 17 | :ok = NIF.set_pragma(conn, "cache_size", -1000) 18 | :ok = NIF.set_pragma(conn, "foreign_keys", true) 19 | {:ok, conn} 20 | end 21 | end 22 | 23 | def open_in_memory(), do: open_and_configure({NIF, :open_in_memory, []}) 24 | def open_temporary(), do: open_and_configure({NIF, :open_temporary, []}) 25 | 26 | @doc """ 27 | Returns a list of connection opener strategies for test generation. 28 | Each element is `{ex_unit_tag, description_prefix, opener_mfa}`. 29 | """ 30 | def connection_openers(), do: @connection_openers 31 | 32 | @doc """ 33 | Finds the opener MFA tuple based on the tag present in the ExUnit context map. 34 | 35 | Raises an error if a known tag key isn't found in the context. 36 | """ 37 | def find_opener_mfa!(context) when is_map(context) do 38 | # Find the first known tag that exists as a key in the context map 39 | # Get known tags from the source map keys directly 40 | found_tag = Enum.find(Map.keys(@tag_to_mfa_map), fn tag -> Map.has_key?(context, tag) end) 41 | 42 | # Raise an error if no known tag is found in the context 43 | unless found_tag do 44 | raise """ 45 | Could not determine current test tag from context needed to find opener MFA. 46 | Expected one of #{inspect(Map.keys(@tag_to_mfa_map))} to be a key in context map. 
47 | Context: #{inspect(context)} 48 | """ 49 | end 50 | 51 | # Lookup and return the MFA using the found tag 52 | Map.fetch!(@tag_to_mfa_map, found_tag) 53 | end 54 | 55 | def normalize_test_values(values) do 56 | Enum.map(values, fn 57 | {set, expected} -> {set, expected} 58 | value -> {value, value} 59 | end) 60 | end 61 | 62 | def default_verify_values(_context, _set_val, fetched_val, expected_val) do 63 | fetched_val in List.wrap(expected_val) 64 | end 65 | 66 | def verify_is_integer(_context, _set_val, fetched_val, _expected_val), 67 | do: is_integer(fetched_val) 68 | 69 | def verify_is_atom(_context, _set_val, fetched_val, _expected_val), do: is_atom(fetched_val) 70 | def verify_is_ok_atom(_context, _set_val, fetched_val, _expected_val), do: fetched_val == :ok 71 | 72 | def verify_mmap_size_value(:memory_private, _set_val, actual, _expected_val), 73 | do: actual == :no_value 74 | 75 | def verify_mmap_size_value(:file_temp, _set_val, actual, _expected_val) do 76 | is_integer(actual) or actual == :no_value 77 | end 78 | end 79 | -------------------------------------------------------------------------------- /lib/xqlite/stream_resource_callbacks.ex: -------------------------------------------------------------------------------- 1 | defmodule Xqlite.StreamResourceCallbacks do 2 | @moduledoc false 3 | 4 | # Callbacks for implementing Xqlite.stream/4 via Stream.resource/3. 5 | # This module is not intended for direct use. 6 | 7 | alias XqliteNIF, as: NIF 8 | 9 | require Logger 10 | 11 | @type acc :: %{ 12 | handle: reference(), 13 | columns: [String.t()], 14 | batch_size: pos_integer(), 15 | original_opts: keyword() 16 | } 17 | 18 | @spec start_fun({Xqlite.conn(), String.t(), list() | keyword(), keyword()}) :: 19 | {:ok, acc()} | {:error, Xqlite.error()} 20 | def start_fun({conn, sql, params, opts}) do 21 | case NIF.stream_open(conn, sql, params, []) do 22 | {:ok, handle} -> 23 | # stream_open succeeded, now try to get columns. 
24 | case NIF.stream_get_columns(handle) do 25 | {:ok, columns} -> 26 | # Both NIF calls succeeded. Build the accumulator. 27 | batch_size = Keyword.get(opts, :batch_size, 500) 28 | 29 | acc = %{ 30 | handle: handle, 31 | columns: columns, 32 | batch_size: batch_size, 33 | original_opts: opts 34 | } 35 | 36 | {:ok, acc} 37 | 38 | {:error, _reason} = error -> 39 | # stream_get_columns failed. We MUST close the handle we just opened. 40 | NIF.stream_close(handle) 41 | error 42 | end 43 | 44 | {:error, _reason} = error -> 45 | # stream_open failed. Nothing to clean up, just return the error. 46 | error 47 | end 48 | end 49 | 50 | @spec next_fun(acc()) :: {[map()], acc()} | {:halt, acc()} 51 | def next_fun(acc) do 52 | # Fetch the next batch of rows from the NIF. 53 | case NIF.stream_fetch(acc.handle, acc.batch_size) do 54 | {:ok, %{rows: rows}} -> 55 | # Successfully fetched rows. Map them into Elixir maps. 56 | mapped_rows = map_rows_to_structs(rows, acc.columns) 57 | {mapped_rows, acc} 58 | 59 | :done -> 60 | # The stream is exhausted. Halt the stream. 61 | {:halt, acc} 62 | 63 | {:error, reason} -> 64 | # An error occurred while fetching. Log it and halt the stream. 65 | # Note: Stream.resource/3 does not propagate this error to the consumer. 66 | # Raising an exception is an alternative, but logging is safer for now. 67 | Logger.error("Error fetching from Xqlite stream: #{inspect(reason)}") 68 | {:halt, acc} 69 | end 70 | end 71 | 72 | @spec after_fun(acc()) :: any() 73 | def after_fun(acc) do 74 | # Ensure the underlying NIF stream resource is closed. 75 | # The return value of this function is ignored by Stream.resource/3, 76 | # but we can still handle a potential error case by logging it. 
77 | case NIF.stream_close(acc.handle) do 78 | :ok -> 79 | :ok 80 | 81 | {:error, reason} -> 82 | Logger.error("Error closing Xqlite stream handle: #{inspect(reason)}") 83 | # Still return :ok, as the stream is finished regardless 84 | :ok 85 | end 86 | end 87 | 88 | defp map_rows_to_structs(rows, columns) do 89 | # Convert column names (strings) to atoms once for efficiency. 90 | column_atoms = Enum.map(columns, &String.to_atom/1) 91 | 92 | Enum.map(rows, fn row_list -> 93 | # Combine atom keys with row values into a map. 94 | Map.new(Enum.zip(column_atoms, row_list)) 95 | end) 96 | end 97 | end 98 | -------------------------------------------------------------------------------- /mix.exs: -------------------------------------------------------------------------------- 1 | defmodule Xqlite.MixProject do 2 | use Mix.Project 3 | 4 | @name "Xqlite" 5 | 6 | def project do 7 | [ 8 | app: :xqlite, 9 | version: "0.3.1", 10 | elixir: "~> 1.15", 11 | name: @name, 12 | start_permanent: Mix.env() == :prod, 13 | docs: docs(), 14 | deps: deps(), 15 | compilers: Mix.compilers(), 16 | elixirc_paths: elixirc_paths(Mix.env()), 17 | 18 | # hex 19 | description: description(), 20 | package: package(), 21 | 22 | # testing 23 | test_coverage: [tool: ExCoveralls], 24 | 25 | # type checking 26 | dialyzer: dialyzer(Mix.env()) 27 | ] 28 | end 29 | 30 | defp elixirc_paths(:test), do: ["lib", "test/support"] 31 | defp elixirc_paths(_), do: ["lib"] 32 | 33 | def application do 34 | [ 35 | extra_applications: [:logger] 36 | ] 37 | end 38 | 39 | def cli do 40 | [ 41 | preferred_envs: [ 42 | coveralls: :test, 43 | "coveralls.detail": :test, 44 | "coveralls.post": :test, 45 | "coveralls.html": :test, 46 | "coveralls.circle": :test, 47 | "test.seq": :test 48 | ] 49 | ] 50 | end 51 | 52 | defp deps do 53 | [ 54 | {:rustler, "~> 0.37.1", runtime: false}, 55 | 56 | # dev / test. 
57 |       {:benchee, "~> 1.0", only: :dev, runtime: false},
58 |       {:dialyxir, "~> 1.4", only: :dev, runtime: false},
59 |       {:ex_doc, "~> 0.20", only: :dev, runtime: false},
60 |       {:excoveralls, "~> 0.11", only: :test}
61 |     ]
62 |   end
63 | 
64 |   defp docs() do
65 |     [
66 |       main: "readme",
67 |       name: "Xqlite",
68 |       source_url: "https://github.com/dimitarvp/xqlite",
69 |       source_ref: "v0.3.1",
70 |       extras: ["README.md", "LICENSE.md"],
71 |       groups_for_modules: [
72 |         "High-Level API": [
73 |           Xqlite,
74 |           Xqlite.Pragma,
75 |           Xqlite.StreamResourceCallbacks
76 |         ],
77 |         "Schema Structs": [
78 |           Xqlite.Schema.ColumnInfo,
79 |           Xqlite.Schema.DatabaseInfo,
80 |           Xqlite.Schema.ForeignKeyInfo,
81 |           Xqlite.Schema.IndexColumnInfo,
82 |           Xqlite.Schema.IndexInfo,
83 |           Xqlite.Schema.SchemaObjectInfo,
84 |           Xqlite.Schema.Types
85 |         ],
86 |         "Low-Level NIFs": [
87 |           XqliteNIF
88 |         ],
89 |         "Internal Helpers": [
90 |           Xqlite.PragmaUtil,
91 |           Xqlite.TestUtil
92 |         ]
93 |       ]
94 |     ]
95 |   end
96 | 
97 |   defp description(), do: "An Elixir SQLite database library utilising the rusqlite Rust crate"
98 | 
99 |   defp package() do
100 |     [
101 |       licenses: ["MIT"],
102 |       links: %{
103 |         "Github" => "https://github.com/dimitarvp/xqlite",
104 |         "Hexdocs" => "https://hexdocs.pm/xqlite"
105 |       },
106 |       files: [
107 |         "lib",
108 |         "native/xqlitenif/src",
109 |         "native/xqlitenif/Cargo.toml",
110 |         "native/xqlitenif/Cargo.lock",
111 |         ".formatter.exs",
112 |         "mix.exs",
113 |         "README.md",
114 |         "LICENSE.md"
115 |         # "checksum-*.exs"
116 |       ]
117 |     ]
118 |   end
119 | 
120 |   defp dialyzer(_env) do
121 |     [
122 |       # Specifies the directory where core PLTs (OTP, Elixir stdlib) are stored.
123 |       plt_core_path: "priv/plts/",
124 |       # Specifies the path to the final project PLT file, which includes dependencies.
125 |       # Using {:no_warn, ...} suppresses warnings if the file doesn't exist initially.
126 | plt_file: {:no_warn, "priv/plts/core.plt"}, 127 | plt_add_apps: [:mix] 128 | # flags: ["-Wunmatched_returns", ...], 129 | ] 130 | end 131 | end 132 | -------------------------------------------------------------------------------- /test/nif/last_insert_rowid_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Xqlite.NIF.LastInsertRowidTest do 2 | use ExUnit.Case, async: true 3 | 4 | import Xqlite.TestUtil, only: [connection_openers: 0, find_opener_mfa!: 1] 5 | alias XqliteNIF, as: NIF 6 | 7 | @table_sql "CREATE TABLE rowid_test (id INTEGER PRIMARY KEY, data TEXT);" 8 | 9 | for {type_tag, prefix, _opener_mfa_ignored_here} <- connection_openers() do 10 | describe "using #{prefix}" do 11 | @describetag type_tag 12 | 13 | setup context do 14 | {mod, fun, args} = find_opener_mfa!(context) 15 | assert {:ok, conn} = apply(mod, fun, args) 16 | assert {:ok, 0} = NIF.execute(conn, @table_sql, []) 17 | on_exit(fn -> NIF.close(conn) end) 18 | {:ok, conn: conn} 19 | end 20 | 21 | test "returns the explicit rowid", %{conn: conn} do 22 | insert_sql = "INSERT INTO rowid_test (id, data) VALUES (?1, ?2);" 23 | explicit_id = 123 24 | insert_params = [explicit_id, "explicit data"] 25 | assert {:ok, 1} = NIF.execute(conn, insert_sql, insert_params) 26 | assert {:ok, ^explicit_id} = NIF.last_insert_rowid(conn) 27 | end 28 | 29 | test "returns the auto-generated rowid", %{conn: conn} do 30 | insert_sql = "INSERT INTO rowid_test (data) VALUES (?1);" 31 | assert {:ok, 1} = NIF.execute(conn, insert_sql, ["auto data 1"]) 32 | # First auto-generated rowid is typically 1 33 | assert {:ok, 1} = NIF.last_insert_rowid(conn) 34 | assert {:ok, 1} = NIF.execute(conn, insert_sql, ["auto data 2"]) 35 | assert {:ok, 2} = NIF.last_insert_rowid(conn) 36 | end 37 | 38 | test "returns 0 if no rows inserted on connection", %{conn: conn} do 39 | assert {:ok, 0} = NIF.last_insert_rowid(conn) 40 | end 41 | 42 | test "value persists after failed 
insert", %{conn: conn} do 43 | assert {:ok, 1} = 44 | NIF.execute( 45 | conn, 46 | "INSERT INTO rowid_test (id, data) VALUES (55, 'conn1 data')", 47 | [] 48 | ) 49 | 50 | assert {:ok, 55} = NIF.last_insert_rowid(conn) 51 | # Attempt a failing insert 52 | assert {:error, _} = 53 | NIF.execute( 54 | conn, 55 | "INSERT INTO rowid_test (id, data) VALUES (55, 'duplicate id')", 56 | [] 57 | ) 58 | 59 | # Verify last_insert_rowid still returns the ID from the *last successful* insert 60 | assert {:ok, 55} = NIF.last_insert_rowid(conn) 61 | end 62 | end 63 | end 64 | 65 | describe "using separate connections" do 66 | setup do 67 | assert {:ok, conn1} = NIF.open_in_memory() 68 | assert {:ok, conn2} = NIF.open_in_memory() 69 | assert {:ok, 0} = NIF.execute(conn1, @table_sql, []) 70 | assert {:ok, 0} = NIF.execute(conn2, @table_sql, []) 71 | 72 | on_exit(fn -> 73 | NIF.close(conn1) 74 | NIF.close(conn2) 75 | end) 76 | 77 | {:ok, conn1: conn1, conn2: conn2} 78 | end 79 | 80 | test "last_insert_rowid is connection specific", %{conn1: conn1, conn2: conn2} do 81 | assert {:ok, 1} = 82 | NIF.execute( 83 | conn1, 84 | "INSERT INTO rowid_test (id, data) VALUES (55, 'conn1 data')", 85 | [] 86 | ) 87 | 88 | assert {:ok, 1} = 89 | NIF.execute( 90 | conn2, 91 | "INSERT INTO rowid_test (id, data) VALUES (77, 'conn2 data')", 92 | [] 93 | ) 94 | 95 | assert {:ok, 55} = NIF.last_insert_rowid(conn1) 96 | assert {:ok, 77} = NIF.last_insert_rowid(conn2) 97 | end 98 | end 99 | end 100 | -------------------------------------------------------------------------------- /mix.lock: -------------------------------------------------------------------------------- 1 | %{ 2 | "benchee": {:hex, :benchee, "1.5.0", "4d812c31d54b0ec0167e91278e7de3f596324a78a096fd3d0bea68bb0c513b10", [:mix], [{:deep_merge, "~> 1.0", [hex: :deep_merge, repo: "hexpm", optional: false]}, {:statistex, "~> 1.1", [hex: :statistex, repo: "hexpm", optional: false]}, {:table, "~> 0.1.0", [hex: :table, repo: "hexpm", optional: 
true]}], "hexpm", "5b075393aea81b8ae74eadd1c28b1d87e8a63696c649d8293db7c4df3eb67535"}, 3 | "deep_merge": {:hex, :deep_merge, "1.0.0", "b4aa1a0d1acac393bdf38b2291af38cb1d4a52806cf7a4906f718e1feb5ee961", [:mix], [], "hexpm", "ce708e5f094b9cd4e8f2be4f00d2f4250c4095be93f8cd6d018c753894885430"}, 4 | "dialyxir": {:hex, :dialyxir, "1.4.7", "dda948fcee52962e4b6c5b4b16b2d8fa7d50d8645bbae8b8685c3f9ecb7f5f4d", [:mix], [{:erlex, ">= 0.2.8", [hex: :erlex, repo: "hexpm", optional: false]}], "hexpm", "b34527202e6eb8cee198efec110996c25c5898f43a4094df157f8d28f27d9efe"}, 5 | "earmark_parser": {:hex, :earmark_parser, "1.4.44", "f20830dd6b5c77afe2b063777ddbbff09f9759396500cdbe7523efd58d7a339c", [:mix], [], "hexpm", "4778ac752b4701a5599215f7030989c989ffdc4f6df457c5f36938cc2d2a2750"}, 6 | "erlex": {:hex, :erlex, "0.2.8", "cd8116f20f3c0afe376d1e8d1f0ae2452337729f68be016ea544a72f767d9c12", [:mix], [], "hexpm", "9d66ff9fedf69e49dc3fd12831e12a8a37b76f8651dd21cd45fcf5561a8a7590"}, 7 | "ex_doc": {:hex, :ex_doc, "0.39.2", "da5549bbce34c5fb0811f829f9f6b7a13d5607b222631d9e989447096f295c57", [:mix], [{:earmark_parser, "~> 1.4.44", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_c, ">= 0.1.0", [hex: :makeup_c, repo: "hexpm", optional: true]}, {:makeup_elixir, "~> 0.14 or ~> 1.0", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1 or ~> 1.0", [hex: :makeup_erlang, repo: "hexpm", optional: false]}, {:makeup_html, ">= 0.1.0", [hex: :makeup_html, repo: "hexpm", optional: true]}], "hexpm", "62665526a88c207653dbcee2aac66c2c229d7c18a70ca4ffc7f74f9e01324daa"}, 8 | "excoveralls": {:hex, :excoveralls, "0.18.5", "e229d0a65982613332ec30f07940038fe451a2e5b29bce2a5022165f0c9b157e", [:mix], [{:castore, "~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "523fe8a15603f86d64852aab2abe8ddbd78e68579c8525ae765facc5eae01562"}, 9 | "jason": {:hex, :jason, "1.4.4", 
"b9226785a9aa77b6857ca22832cffa5d5011a667207eb2a0ad56adb5db443b8a", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "c5eb0cab91f094599f94d55bc63409236a8ec69a21a67814529e8d5f6cc90b3b"}, 10 | "makeup": {:hex, :makeup, "1.2.1", "e90ac1c65589ef354378def3ba19d401e739ee7ee06fb47f94c687016e3713d1", [:mix], [{:nimble_parsec, "~> 1.4", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "d36484867b0bae0fea568d10131197a4c2e47056a6fbe84922bf6ba71c8d17ce"}, 11 | "makeup_elixir": {:hex, :makeup_elixir, "1.0.1", "e928a4f984e795e41e3abd27bfc09f51db16ab8ba1aebdba2b3a575437efafc2", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.2.3 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "7284900d412a3e5cfd97fdaed4f5ed389b8f2b4cb49efc0eb3bd10e2febf9507"}, 12 | "makeup_erlang": {:hex, :makeup_erlang, "1.0.2", "03e1804074b3aa64d5fad7aa64601ed0fb395337b982d9bcf04029d68d51b6a7", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}], "hexpm", "af33ff7ef368d5893e4a267933e7744e46ce3cf1f61e2dccf53a111ed3aa3727"}, 13 | "nimble_parsec": {:hex, :nimble_parsec, "1.4.2", "8efba0122db06df95bfaa78f791344a89352ba04baedd3849593bfce4d0dc1c6", [:mix], [], "hexpm", "4b21398942dda052b403bbe1da991ccd03a053668d147d53fb8c4e0efe09c973"}, 14 | "rustler": {:hex, :rustler, "0.37.1", "721434020c7f6f8e1cdc57f44f75c490435b01de96384f8ccb96043f12e8a7e0", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "24547e9b8640cf00e6a2071acb710f3e12ce0346692e45098d84d45cdb54fd79"}, 15 | "statistex": {:hex, :statistex, "1.1.0", "7fec1eb2f580a0d2c1a05ed27396a084ab064a40cfc84246dbfb0c72a5c761e5", [:mix], [], "hexpm", "f5950ea26ad43246ba2cce54324ac394a4e7408fdcf98b8e230f503a0cba9cf5"}, 16 | } 17 | -------------------------------------------------------------------------------- /test/xqlite_test.exs: 
-------------------------------------------------------------------------------- 1 | defmodule XqliteTest do 2 | use ExUnit.Case, async: true 3 | doctest Xqlite 4 | 5 | alias Xqlite.TestUtil 6 | alias XqliteNIF, as: NIF 7 | 8 | @record_count 20 9 | 10 | # Use the multi-DB test pattern 11 | for {type_tag, prefix, _opener_mfa} <- TestUtil.connection_openers() do 12 | describe "Xqlite.stream/4 using #{prefix}" do 13 | @describetag type_tag 14 | 15 | # Setup for each connection type 16 | setup context do 17 | {mod, fun, args} = TestUtil.find_opener_mfa!(context) 18 | assert {:ok, conn} = apply(mod, fun, args) 19 | 20 | # Create and populate a test table 21 | assert :ok = 22 | NIF.execute_batch( 23 | conn, 24 | "CREATE TABLE stream_test_users (id INTEGER PRIMARY KEY, name TEXT, email TEXT);" 25 | ) 26 | 27 | for i <- 1..@record_count do 28 | assert {:ok, 1} = 29 | NIF.execute( 30 | conn, 31 | "INSERT INTO stream_test_users (id, name, email) VALUES (?1, ?2, ?3);", 32 | [i, "User #{i}", "user#{i}@example.com"] 33 | ) 34 | end 35 | 36 | on_exit(fn -> NIF.close(conn) end) 37 | {:ok, conn: conn} 38 | end 39 | 40 | test "streams all results as a list of maps", %{conn: conn} do 41 | stream = Xqlite.stream(conn, "SELECT id, name FROM stream_test_users ORDER BY id;") 42 | 43 | # Verify it's a stream 44 | assert Enumerable.impl_for(stream) != nil 45 | 46 | results = Enum.to_list(stream) 47 | 48 | assert length(results) == @record_count 49 | assert List.first(results) == %{id: 1, name: "User 1"} 50 | assert List.last(results) == %{id: @record_count, name: "User #{@record_count}"} 51 | end 52 | 53 | test "streams correctly with a small batch size", %{conn: conn} do 54 | # Batch size of 5 means it will take 4 batches to consume 20 records. 
55 | stream = 56 | Xqlite.stream(conn, "SELECT id FROM stream_test_users ORDER BY id;", [], 57 | batch_size: 5 58 | ) 59 | 60 | results = Enum.map(stream, & &1.id) 61 | 62 | assert results == Enum.to_list(1..@record_count) 63 | end 64 | 65 | test "streams an empty result set correctly", %{conn: conn} do 66 | stream = Xqlite.stream(conn, "SELECT id FROM stream_test_users WHERE id < 0;") 67 | assert Enum.to_list(stream) == [] 68 | end 69 | 70 | test "streams with positional parameters", %{conn: conn} do 71 | stream = 72 | Xqlite.stream( 73 | conn, 74 | "SELECT name FROM stream_test_users WHERE id > ?1 ORDER BY id;", 75 | [ 76 | @record_count - 2 77 | ] 78 | ) 79 | 80 | results = Enum.to_list(stream) 81 | 82 | assert results == [ 83 | %{name: "User #{@record_count - 1}"}, 84 | %{name: "User #{@record_count}"} 85 | ] 86 | end 87 | 88 | test "streams with named parameters", %{conn: conn} do 89 | stream = 90 | Xqlite.stream(conn, "SELECT name FROM stream_test_users WHERE email = :email;", 91 | email: "user3@example.com" 92 | ) 93 | 94 | results = Enum.to_list(stream) 95 | assert results == [%{name: "User 3"}] 96 | end 97 | 98 | test "returns an error tuple for invalid SQL", %{conn: conn} do 99 | # This tests the `case start_fun` logic in Xqlite.stream/4 100 | result = Xqlite.stream(conn, "SELEKT * FROM stream_test_users;") 101 | 102 | assert match?({:error, {:sqlite_failure, _, _, _}}, result) 103 | end 104 | 105 | test "stream created with empty SQL results in an empty stream", %{conn: conn} do 106 | stream = Xqlite.stream(conn, "") 107 | assert Enum.to_list(stream) == [] 108 | end 109 | end 110 | end 111 | end 112 | -------------------------------------------------------------------------------- /lib/xqlite.ex: -------------------------------------------------------------------------------- 1 | defmodule Xqlite do 2 | @moduledoc ~S""" 3 | This is the central module of this library. All SQLite operations can be performed from here. 
4 |   Note that they delegate to other modules which you can also use directly.
5 |   """
6 | 
7 |   @type conn :: reference()
8 |   @type error :: {:error, any()}
9 | 
10 |   @spec int2bool(0 | 1) :: true | false
11 |   def int2bool(0), do: false
12 |   def int2bool(1), do: true
13 | 
14 |   @doc """
15 |   Enables strict mode only for the lifetime of the given database connection.
16 | 
17 |   In strict mode, SQLite is less forgiving. For example, an attempt to insert
18 |   a string into an INTEGER column of a `STRICT` table will result in an error,
19 |   whereas in normal mode it might be coerced or stored as text.
20 |   This setting only affects tables declared with the `STRICT` keyword.
21 | 
22 |   See: [STRICT Tables](https://www.sqlite.org/stricttables.html)
23 |   """
24 |   @spec enable_strict_mode(conn()) :: :ok | error()
25 |   def enable_strict_mode(conn) do
26 |     XqliteNIF.set_pragma(conn, "strict", :on)
27 |   end
28 | 
29 |   @doc """
30 |   Disables strict mode only for the lifetime of the given database connection (SQLite's default).
31 | 
32 |   See `enable_strict_mode/1` for details.
33 |   """
34 |   @spec disable_strict_mode(conn()) :: :ok | error()
35 |   def disable_strict_mode(conn) do
36 |     XqliteNIF.set_pragma(conn, "strict", :off)
37 |   end
38 | 
39 |   @doc """
40 |   Enables foreign key constraint enforcement for the given database connection.
41 | 
42 |   By default, SQLite parses foreign key constraints but does not enforce them.
43 |   This function turns on enforcement.
44 | 
45 |   See: [SQLite PRAGMA foreign_keys](https://www.sqlite.org/pragma.html#pragma_foreign_keys)
46 |   """
47 |   @spec enable_foreign_key_enforcement(conn()) :: :ok | error()
48 |   def enable_foreign_key_enforcement(conn) do
49 |     XqliteNIF.set_pragma(conn, "foreign_keys", :on)
50 |   end
51 | 
52 |   @doc """
53 |   Disables foreign key constraint enforcement for the given database connection (default behavior).
54 | 
55 |   See `enable_foreign_key_enforcement/1` for details.
56 |   """
57 |   @spec disable_foreign_key_enforcement(conn()) :: :ok | error()
58 |   def disable_foreign_key_enforcement(conn) do
59 |     XqliteNIF.set_pragma(conn, "foreign_keys", :off)
60 |   end
61 | 
62 |   @doc """
63 |   Creates a stream that executes a query and emits rows as maps.
64 | 
65 |   This provides a high-level, idiomatic Elixir `Stream` for processing large
66 |   result sets without loading them all into memory at once. Rows are fetched
67 |   from the database in batches as the stream is consumed.
68 | 
69 |   ## Options
70 | 
71 |     * `:batch_size` (integer, default: `500`) - The maximum number of rows
72 |       to fetch from the database in a single batch.
73 | 
74 |   ## Examples
75 | 
76 |       iex> {:ok, conn} = XqliteNIF.open_in_memory()
77 |       iex> XqliteNIF.execute_batch(conn, "CREATE TABLE users(id, name); INSERT INTO users VALUES (1, 'Alice'), (2, 'Bob');")
78 |       :ok
79 |       iex> Xqlite.stream(conn, "SELECT id, name FROM users;") |> Enum.to_list()
80 |       [%{id: 1, name: "Alice"}, %{id: 2, name: "Bob"}]
81 | 
82 |   If the underlying query preparation or initial NIF stream setup fails, this
83 |   function will return an `{:error, reason}` tuple directly instead of a stream.
84 |   Errors that occur during stream consumption (e.g., database connection lost
85 |   mid-stream) will be logged and will cause the stream to halt.
86 |   """
87 |   @spec stream(conn(), String.t(), list() | keyword(), keyword()) ::
88 |           Enumerable.t() | error()
89 |   def stream(conn, sql, params \\ [], opts \\ []) do
90 |     start_fun = &Xqlite.StreamResourceCallbacks.start_fun/1
91 |     next_fun = &Xqlite.StreamResourceCallbacks.next_fun/1
92 |     after_fun = &Xqlite.StreamResourceCallbacks.after_fun/1
93 | 
94 |     # `Stream.resource/3` treats whatever its start fun returns as the accumulator,
95 |     # so a failed setup would only surface later, inside next_fun, as a bad acc.
96 |     # To honor our spec of returning {:error, reason} directly, we invoke
97 |     # start_fun ourselves first and only build the stream on success.
98 | 99 | case start_fun.({conn, sql, params, opts}) do 100 | {:ok, acc} -> 101 | # If setup is successful, build the stream resource. 102 | # The start function for Stream.resource now just returns the successful acc. 103 | Stream.resource(fn -> acc end, next_fun, after_fun) 104 | 105 | {:error, _reason} = error -> 106 | # If setup fails, return the error tuple directly. 107 | error 108 | end 109 | end 110 | end 111 | -------------------------------------------------------------------------------- /test/nif/connection_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Xqlite.NIF.ConnectionTest do 2 | use ExUnit.Case, async: true 3 | 4 | import Xqlite.TestUtil, only: [connection_openers: 0, find_opener_mfa!: 1] 5 | 6 | alias XqliteNIF, as: NIF 7 | alias Xqlite.Schema 8 | 9 | # --- Shared test code (generated via `for` loop for different DB types) --- 10 | for {type_tag, prefix, _opener_mfa_ignored_here} <- connection_openers() do 11 | describe "using #{prefix}" do 12 | @describetag type_tag 13 | 14 | # Setup uses a single helper to find the appropriate MFA based on context tag 15 | setup context do 16 | {mod, fun, args} = find_opener_mfa!(context) 17 | 18 | # Open connection 19 | assert {:ok, conn} = apply(mod, fun, args), 20 | "Failed to open connection using #{inspect({mod, fun, args})}" 21 | 22 | on_exit(fn -> NIF.close(conn) end) 23 | {:ok, conn: conn} 24 | end 25 | 26 | # --- Shared test cases applicable to all DB types follow --- 27 | # These tests inherit the simple atom tag (e.g. :memory_private or :file_temp etc.) 
28 | 29 | test "connection is usable (set/get pragma)", %{conn: conn} do 30 | assert :ok = NIF.set_pragma(conn, "cache_size", 4000) 31 | assert {:ok, 4000} = NIF.get_pragma(conn, "cache_size") 32 | end 33 | 34 | test "close returns true even when called multiple times", %{conn: conn} do 35 | assert :ok = NIF.close(conn) 36 | 37 | # Subsequent calls are no-ops on the Rust side but should still return ok via the NIF interface. 38 | assert :ok = NIF.close(conn) 39 | assert :ok = NIF.close(conn) 40 | end 41 | 42 | test "basic query execution works", %{conn: conn} do 43 | assert {:ok, %{columns: ["1"], rows: [[1]], num_rows: 1}} = 44 | NIF.query(conn, "SELECT 1;", []) 45 | end 46 | 47 | test "basic statement execution works", %{conn: conn} do 48 | sql = "CREATE TABLE conn_test_basic (id INTEGER PRIMARY KEY);" 49 | assert {:ok, 0} = NIF.execute(conn, sql, []) 50 | end 51 | end 52 | 53 | # end describe "Using #{prefix}" 54 | end 55 | 56 | # end `for` loop that generates a bunch of tests for each DB type 57 | 58 | # --- DB type-specific or other tests (outside the `for` loop) --- 59 | describe "temporary file DB" do 60 | setup do 61 | assert {:ok, conn} = NIF.open_temporary() 62 | on_exit(fn -> NIF.close(conn) end) 63 | {:ok, conn: conn} 64 | end 65 | 66 | @tag :file_temp 67 | test "schema_databases shows empty file path", %{conn: conn} do 68 | assert {:ok, [%Schema.DatabaseInfo{name: "main", file: ""}]} = 69 | NIF.schema_databases(conn) 70 | end 71 | end 72 | 73 | describe "shared memory DB" do 74 | @shared_mem_db_uri "file:shared_mem_conn_test_specific?mode=memory&cache=shared" 75 | 76 | setup do 77 | assert {:ok, conn1} = NIF.open(@shared_mem_db_uri) 78 | assert {:ok, conn2} = NIF.open(@shared_mem_db_uri) 79 | 80 | on_exit(fn -> 81 | NIF.close(conn1) 82 | NIF.close(conn2) 83 | end) 84 | 85 | {:ok, conn1: conn1, conn2: conn2} 86 | end 87 | 88 | # @tag :memory_shared 89 | test "handles reference the same underlying shared DB", %{conn1: conn1, conn2: conn2} do 90 | # 
Handles themselves are distinct ResourceArcs. 91 | refute conn1 == conn2 92 | 93 | # Check they point to the same DB using cache_size 94 | # Set cache_size via conn1, assert set success 95 | assert :ok = NIF.set_pragma(conn1, "cache_size", 5000) 96 | # Read back via conn2 using get_pragma, assert it returns the value set by conn1 97 | assert {:ok, 5000} = NIF.get_pragma(conn2, "cache_size") 98 | end 99 | end 100 | 101 | describe "open failure" do 102 | # This path is used specifically for testing open failures 103 | @invalid_db_path "file:./non_existent_dir_for_sure/read_only_db?mode=ro" 104 | # Get directory name 105 | @invalid_dir Path.dirname(@invalid_db_path) 106 | 107 | # Setup ensures the problematic path doesn't exist and registers cleanup 108 | setup do 109 | # Cleanup first in case previous run failed badly 110 | if File.exists?(@invalid_db_path), do: File.rm!(@invalid_db_path) 111 | if File.exists?(@invalid_dir), do: File.rmdir!(@invalid_dir) 112 | 113 | on_exit(fn -> 114 | if File.exists?(@invalid_db_path), do: File.rm!(@invalid_db_path) 115 | if File.exists?(@invalid_dir), do: File.rmdir!(@invalid_dir) 116 | end) 117 | 118 | :ok 119 | end 120 | 121 | # These tests don't depend on the opener type, so define once outside loop 122 | test "open/1 fails for an invalid path" do 123 | assert {:error, {:cannot_open_database, @invalid_db_path, _error_code, _reason}} = 124 | NIF.open(@invalid_db_path) 125 | end 126 | 127 | test "open_in_memory/1 fails for an invalid URI schema" do 128 | # Test attempting to open non-file/memory URI via open_in_memory 129 | assert {:error, {:cannot_open_database, "http://invalid", _error_code, _reason}} = 130 | NIF.open_in_memory("http://invalid") 131 | end 132 | end 133 | end 134 | -------------------------------------------------------------------------------- /scripts/release.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # 
=================CONFIGURATION================= 5 | # Path to your NIF manifest (Relative to Project Root) 6 | CARGO_MANIFEST="native/xqlitenif/Cargo.toml" 7 | # The actual name of the package inside Cargo.toml 8 | CRATE_NAME="xqlitenif" 9 | # =============================================== 10 | 11 | # 0. Safety & Context 🛡️ 12 | # Ensure Ctrl-C kills the whole script 13 | trap "echo '❌ Script interrupted by user'; exit 1" SIGINT SIGTERM 14 | 15 | SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd)" 16 | PROJECT_ROOT="$(dirname "$SCRIPT_DIR")" 17 | 18 | echo "📂 Context: Moving to Project Root ($PROJECT_ROOT)" 19 | pushd "$PROJECT_ROOT" >/dev/null 20 | 21 | # 1. Dependency Pre-flight Check 🛫 22 | echo "🔍 Checking system dependencies..." 23 | REQUIRED_TOOLS=("mix" "cargo" "jq" "git") 24 | ALL_GOOD=true 25 | 26 | for tool in "${REQUIRED_TOOLS[@]}"; do 27 | if ! command -v "$tool" &>/dev/null; then 28 | echo "❌ $tool is missing" 29 | ALL_GOOD=false 30 | else 31 | echo "✅ $tool" 32 | fi 33 | done 34 | 35 | if ! cargo set-version --version &>/dev/null; then 36 | echo "❌ cargo-edit (set-version) is missing. Install via: cargo install cargo-edit" 37 | ALL_GOOD=false 38 | else 39 | echo "✅ cargo-edit" 40 | fi 41 | 42 | if [ "$ALL_GOOD" = false ]; then 43 | echo "💥 Missing dependencies. Aborting." 44 | popd >/dev/null 45 | exit 1 46 | fi 47 | 48 | # 2. Git Cleanliness Check 🧹 49 | # mix_version requires a clean state. We check this early to avoid interactive prompts/failures. 50 | if [ "$(git status --porcelain)" != "" ]; then 51 | echo "❌ Error: Git working directory is dirty." 52 | echo " mix_version requires a clean state." 53 | echo " Please commit your changes (including this script!) before releasing." 54 | popd >/dev/null 55 | exit 1 56 | fi 57 | echo "✨ Git is clean." 58 | echo "" 59 | 60 | # 3. Input Validation 🛡️ 61 | TYPE=$1 62 | if [[ ! 
"$TYPE" =~ ^(patch|minor|major)$ ]]; then
63 |   echo "❌ Error: Argument must be 'patch', 'minor', or 'major'."
64 |   echo "Usage: ./scripts/release.sh <patch|minor|major>"
65 |   popd >/dev/null
66 |   exit 1
67 | fi
68 | 
69 | # 4. Define Helper Functions 🛠️
70 | 
71 | # Get Rust version via metadata (Source of Truth for Rust)
72 | get_rust_version() {
73 |   cargo metadata --format-version 1 --no-deps --manifest-path "$CARGO_MANIFEST" |
74 |     jq -r --arg name "$CRATE_NAME" '.packages[] | select(.name == $name) | .version'
75 | }
76 | 
77 | # Get Elixir version via Mix config (Source of Truth for Elixir)
78 | get_elixir_version() {
79 |   mix run --no-start --no-compile -e 'IO.puts Mix.Project.config[:version]'
80 | }
81 | 
82 | # 5. Capture Initial State 📸
83 | echo "🚀 Starting release process for bump type: $TYPE"
84 | 
85 | OLD_RUST_VER=$(get_rust_version)
86 | if [[ -z "$OLD_RUST_VER" || "$OLD_RUST_VER" == "null" ]]; then
87 |   echo "❌ FATAL: Could not detect version for crate '$CRATE_NAME'."
88 |   popd >/dev/null
89 |   exit 1
90 | fi
91 | 
92 | OLD_MIX_VER=$(get_elixir_version)
93 | PRE_BUMP_COMMIT=$(git rev-parse HEAD)
94 | 
95 | echo "📦 Current Elixir version: $OLD_MIX_VER"
96 | echo "📦 Current Rust version: $OLD_RUST_VER"
97 | 
98 | # 6. Run mix version (The Driver) 💧
99 | echo "Running mix version..."
100 | # We use --$TYPE (e.g., --patch) as per mix_version args
101 | mix version --"$TYPE"
102 | 
103 | # 7. Safety Valve: Verify Elixir Bump & Commit 🛑
104 | # We must ensure mix_version actually did its job before we touch anything else.
105 | 
106 | NEW_MIX_VER=$(get_elixir_version)
107 | POST_BUMP_COMMIT=$(git rev-parse HEAD)
108 | 
109 | # Check 1: Did the version string change?
110 | if [ "$OLD_MIX_VER" == "$NEW_MIX_VER" ]; then
111 |   echo "❌ FATAL: Elixir version did not change!"
112 |   echo "   Was: $OLD_MIX_VER"
113 |   echo "   Now: $NEW_MIX_VER"
114 |   echo "   mix_version might have failed silently."
115 |   popd >/dev/null
116 |   exit 1
117 | fi
118 | 
119 | # Check 2: Did a new commit appear?
120 | if [ "$PRE_BUMP_COMMIT" == "$POST_BUMP_COMMIT" ]; then 121 | echo "❌ FATAL: mix version did not create a new commit!" 122 | echo " We cannot proceed with amending. Aborting to protect history." 123 | popd >/dev/null 124 | exit 1 125 | fi 126 | 127 | echo "✅ mix.exs bumped to: $NEW_MIX_VER" 128 | echo "✅ New commit created: $POST_BUMP_COMMIT" 129 | 130 | # 8. Update Rust Version (The Modern Way) 🦀 131 | echo "🦀 Bumping Rust crate..." 132 | cargo set-version --manifest-path "$CARGO_MANIFEST" "$NEW_MIX_VER" 133 | 134 | # 9. Paranoid Verification of Rust Bump 🕵️‍♂️ 135 | CHECK_VER=$(get_rust_version) 136 | 137 | if [[ "$CHECK_VER" != "$NEW_MIX_VER" ]]; then 138 | echo "❌ FATAL: Cargo.toml update failed!" 139 | echo " Tried to upgrade from $OLD_RUST_VER to $NEW_MIX_VER" 140 | echo " But cargo metadata reports: $CHECK_VER" 141 | echo " Aborting before git operations." 142 | # Note: You are left with a dirty state here (Cargo.toml changed), 143 | # but your git history is safe (we haven't amended yet). 144 | popd >/dev/null 145 | exit 1 146 | fi 147 | echo "✅ Verified: Rust crate is now $CHECK_VER" 148 | 149 | # 10. Git Magic (Amend & Retag) 🪄 150 | echo "🎨 Amending git commit..." 151 | 152 | # Stage the Rust changes 153 | git add "$CARGO_MANIFEST" 154 | # Try adding lockfile if it exists (cargo set-version usually updates it) 155 | LOCK_FILE="${CARGO_MANIFEST%Cargo.toml}Cargo.lock" 156 | if [ -f "$LOCK_FILE" ]; then 157 | git add "$LOCK_FILE" 158 | fi 159 | 160 | # Amend the commit created by mix_version 161 | git commit --amend --no-edit >/dev/null 162 | 163 | # Force move the tag to the new amended commit hash 164 | TAG_NAME="v$NEW_MIX_VER" 165 | git tag -f "$TAG_NAME" >/dev/null 166 | 167 | echo "✅ Git commit amended and tag $TAG_NAME updated." 168 | echo "" 169 | echo "🎉 Release $NEW_MIX_VER successfully prepared!" 170 | echo "Next steps:" 171 | echo " 1. git push && git push --tags" 172 | echo " 2. mix hex.publish" 173 | 174 | # 11. 
Cleanup 175 | popd >/dev/null 176 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: Xqlite 2 | 3 | env: 4 | DEFAULT_ELIXIR: "1.19" 5 | DEFAULT_OTP: "28" 6 | 7 | permissions: 8 | contents: read 9 | 10 | on: 11 | push: 12 | branches: 13 | - "main" 14 | pull_request: 15 | branches: 16 | - "main" 17 | workflow_dispatch: 18 | 19 | jobs: 20 | format_and_lint: 21 | name: Format & lints 22 | runs-on: ubuntu-latest 23 | timeout-minutes: 15 24 | steps: 25 | - name: Checkout code 26 | uses: actions/checkout@v6 27 | 28 | - name: Set up Elixir 29 | id: beam 30 | uses: erlef/setup-beam@v1 31 | with: 32 | elixir-version: ${{ env.DEFAULT_ELIXIR }} 33 | otp-version: ${{ env.DEFAULT_OTP }} 34 | 35 | - name: Set up Rust toolchain 36 | uses: dtolnay/rust-toolchain@master 37 | with: 38 | toolchain: stable 39 | components: rustfmt, clippy 40 | 41 | - name: Restore dependencies cache 42 | uses: actions/cache@v4 43 | with: 44 | path: | 45 | deps 46 | _build 47 | priv/native 48 | key: v0-elixir-lint-${{ runner.os }}-otp-${{ steps.beam.outputs.otp-version }}-elixir-${{ steps.beam.outputs.elixir-version }}-mix-${{ hashFiles('**/mix.lock') }} 49 | restore-keys: | 50 | v0-elixir-lint-${{ runner.os }}-otp-${{ steps.beam.outputs.otp-version }}-elixir-${{ steps.beam.outputs.elixir-version }}-mix- 51 | 52 | - name: Restore Rust (Cargo) cache 53 | uses: Swatinem/rust-cache@v2 54 | with: 55 | workspaces: native/xqlitenif 56 | prefix-key: "v0-rust-lint-${{ runner.os }}-otp-${{ steps.beam.outputs.otp-version }}" 57 | 58 | - name: Install dependencies 59 | run: mix deps.get 60 | 61 | - name: Check Elixir formatting 62 | run: mix format --check-formatted 63 | 64 | - name: Rust formatting 65 | run: cargo fmt --manifest-path native/xqlitenif/Cargo.toml -- --check 66 | 67 | - name: Rust clippy 68 | run: cargo clippy --manifest-path 
native/xqlitenif/Cargo.toml -- -D warnings 69 | 70 | dialyzer: 71 | name: Type checks 72 | runs-on: ubuntu-latest 73 | timeout-minutes: 15 74 | needs: format_and_lint 75 | steps: 76 | - name: Checkout code 77 | uses: actions/checkout@v6 78 | 79 | - name: Set up Elixir 80 | id: beam 81 | uses: erlef/setup-beam@v1 82 | with: 83 | elixir-version: ${{ env.DEFAULT_ELIXIR }} 84 | otp-version: ${{ env.DEFAULT_OTP }} 85 | 86 | - name: Restore dependencies cache 87 | uses: actions/cache@v4 88 | with: 89 | path: | 90 | deps 91 | _build 92 | priv/native 93 | key: v0-elixir-dialyzer-${{ runner.os }}-otp-${{ steps.beam.outputs.otp-version }}-elixir-${{ steps.beam.outputs.elixir-version }}-mix-${{ hashFiles('**/mix.lock') }} 94 | restore-keys: | 95 | v0-elixir-dialyzer-${{ runner.os }}-otp-${{ steps.beam.outputs.otp-version }}-elixir-${{ steps.beam.outputs.elixir-version }}-mix- 96 | 97 | - name: Restore PLT cache 98 | uses: actions/cache@v4 99 | with: 100 | path: | 101 | priv/plts 102 | key: v0-plt-${{ runner.os }}-otp-${{ steps.beam.outputs.otp-version }}-elixir-${{ steps.beam.outputs.elixir-version }}-mix-${{ hashFiles('**/mix.lock') }} 103 | restore-keys: | 104 | v0-plt-${{ runner.os }}-otp-${{ steps.beam.outputs.otp-version }}-elixir-${{ steps.beam.outputs.elixir-version }}- 105 | 106 | - name: Install dependencies 107 | run: mix deps.get 108 | 109 | - name: Create PLT 110 | run: mix dialyzer --plt 111 | 112 | - name: Run Dialyzer 113 | run: mix dialyzer --format short 114 | 115 | test: 116 | name: Tests 117 | runs-on: ${{ matrix.os }} 118 | timeout-minutes: 15 119 | needs: format_and_lint 120 | strategy: 121 | fail-fast: false 122 | matrix: 123 | os: [ubuntu-latest, macos-latest, windows-2022] 124 | elixir: ["1.16", "1.17", "1.18", "1.19"] 125 | otp: ["26", "27", "28"] 126 | rust: ["stable"] 127 | exclude: 128 | - otp: "28" 129 | elixir: "1.18" 130 | - otp: "28" 131 | elixir: "1.17" 132 | - otp: "28" 133 | elixir: "1.16" 134 | - otp: "27" 135 | elixir: "1.16" 136 | steps: 
137 | - name: Checkout code 138 | uses: actions/checkout@v6 139 | 140 | - name: Set up Elixir 141 | id: beam 142 | uses: erlef/setup-beam@v1 143 | with: 144 | elixir-version: ${{ matrix.elixir }} 145 | otp-version: ${{ matrix.otp }} 146 | 147 | - name: Set up Rust toolchain 148 | uses: dtolnay/rust-toolchain@master 149 | with: 150 | toolchain: ${{ matrix.rust }} 151 | components: rustfmt, clippy 152 | 153 | - name: Restore Elixir dependencies cache 154 | uses: actions/cache@v4 155 | with: 156 | path: | 157 | deps 158 | _build 159 | priv/native 160 | key: v0-elixir-test-${{ runner.os }}-otp-${{ steps.beam.outputs.otp-version }}-elixir-${{ steps.beam.outputs.elixir-version }}-mix-${{ hashFiles('**/mix.lock') }} 161 | restore-keys: | 162 | v0-elixir-test-${{ runner.os }}-otp-${{ steps.beam.outputs.otp-version }}-elixir-${{ steps.beam.outputs.elixir-version }}-mix- 163 | 164 | - name: Restore Rust (Cargo) cache 165 | uses: Swatinem/rust-cache@v2 166 | with: 167 | workspaces: native/xqlitenif 168 | prefix-key: "v0-rust-test-${{ runner.os }}-otp-${{ steps.beam.outputs.otp-version }}" 169 | 170 | - name: Install Elixir dependencies 171 | run: mix deps.get 172 | 173 | - name: Compile project (includes NIF) 174 | run: mix compile 175 | 176 | - name: Run tests 177 | run: mix test.seq 178 | -------------------------------------------------------------------------------- /test/nif/pragma_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Xqlite.NIF.PragmaTest do 2 | use ExUnit.Case, async: true 3 | 4 | import Xqlite.TestUtil, only: [connection_openers: 0, find_opener_mfa!: 1] 5 | alias XqliteNIF, as: NIF 6 | 7 | # --- Shared test code (generated via `for` loop) --- 8 | for {type_tag, prefix, _opener_mfa_ignored_here} <- connection_openers() do 9 | describe "using #{prefix}" do 10 | @describetag type_tag 11 | 12 | setup context do 13 | {mod, fun, args} = find_opener_mfa!(context) 14 | assert {:ok, conn} = apply(mod, fun, 
args) 15 | on_exit(fn -> NIF.close(conn) end) 16 | {:ok, conn: conn} 17 | end 18 | 19 | # --- Shared test cases applicable to all DB types follow --- 20 | 21 | # --- get_pragma/2 Tests --- 22 | test "get_pragma/2 reads default values", %{conn: conn} do 23 | assert {:ok, 0} = NIF.get_pragma(conn, "user_version") 24 | assert {:ok, limit} = NIF.get_pragma(conn, "journal_size_limit") 25 | assert is_integer(limit) 26 | assert {:ok, mode} = NIF.get_pragma(conn, "journal_mode") 27 | assert mode in ["persist", "wal", "truncate", "memory", "delete", "off"] 28 | end 29 | 30 | test "get_pragma/2 returns :no_value for non-value pragmas", %{conn: conn} do 31 | assert :ok = NIF.execute_batch(conn, "PRAGMA optimize;") 32 | assert {:ok, :no_value} = NIF.get_pragma(conn, "optimize") 33 | end 34 | 35 | test "get_pragma/2 returns :no_value for invalid pragma name", %{conn: conn} do 36 | assert {:ok, :no_value} = NIF.get_pragma(conn, "invalid_pragma_name") 37 | end 38 | 39 | # --- set_pragma/3 Tests --- 40 | test "set_pragma/3 sets and get_pragma/2 reads integer value", %{conn: conn} do 41 | assert :ok = NIF.set_pragma(conn, "cache_size", 5000) 42 | assert {:ok, 5000} = NIF.get_pragma(conn, "cache_size") 43 | end 44 | 45 | test "set_pragma/3 sets and get_pragma/2 reads boolean ON/true", %{conn: conn} do 46 | assert :ok = NIF.set_pragma(conn, "foreign_keys", :on) 47 | assert {:ok, 1} = NIF.get_pragma(conn, "foreign_keys") 48 | assert :ok = NIF.set_pragma(conn, "foreign_keys", true) 49 | assert {:ok, 1} = NIF.get_pragma(conn, "foreign_keys") 50 | end 51 | 52 | test "set_pragma/3 sets and get_pragma/2 reads boolean OFF/false", %{conn: conn} do 53 | assert :ok = NIF.set_pragma(conn, "foreign_keys", :on) 54 | assert {:ok, 1} = NIF.get_pragma(conn, "foreign_keys") 55 | assert :ok = NIF.set_pragma(conn, "foreign_keys", :off) 56 | assert {:ok, 0} = NIF.get_pragma(conn, "foreign_keys") 57 | assert :ok = NIF.set_pragma(conn, "foreign_keys", :on) 58 | assert {:ok, 1} = NIF.get_pragma(conn, 
"foreign_keys") 59 | assert :ok = NIF.set_pragma(conn, "foreign_keys", false) 60 | assert {:ok, 0} = NIF.get_pragma(conn, "foreign_keys") 61 | end 62 | 63 | # NOTE: Test for journal_mode moved outside the loop as behavior differs 64 | 65 | test "set_pragma/3 succeeds silently for invalid pragma name", %{conn: conn} do 66 | assert :ok = NIF.set_pragma(conn, "invalid_pragma", 123) 67 | assert {:ok, :no_value} = NIF.get_pragma(conn, "invalid_pragma") 68 | end 69 | 70 | test "set_pragma/3 succeeds silently for invalid value", %{conn: conn} do 71 | # First, explicitly set the pragma to a known state (OFF/0) 72 | assert :ok = NIF.set_pragma(conn, "foreign_keys", :off) 73 | assert {:ok, 0} = NIF.get_pragma(conn, "foreign_keys") 74 | 75 | # Now, attempt to set an invalid value. This should be a no-op. 76 | assert :ok = NIF.set_pragma(conn, "foreign_keys", "invalid_string") 77 | 78 | # Verify the value has not changed from our known state. 79 | assert {:ok, 0} = NIF.get_pragma(conn, "foreign_keys") 80 | end 81 | 82 | test "set_pragma/3 returns error for unsupported Elixir type", %{conn: conn} do 83 | assert {:error, {:unsupported_data_type, :map}} = 84 | NIF.set_pragma(conn, "cache_size", %{}) 85 | end 86 | end 87 | 88 | # end describe "using #{prefix}" 89 | end 90 | 91 | # end `for` loop 92 | 93 | # --- DB type-specific tests (outside the `for` loop) --- 94 | 95 | describe "using Private In-memory DB (Specific PRAGMA tests)" do 96 | # Tag specific block 97 | @tag :memory_private 98 | setup do 99 | assert {:ok, conn} = NIF.open_in_memory() 100 | on_exit(fn -> NIF.close(conn) end) 101 | {:ok, conn: conn} 102 | end 103 | 104 | test "set_pragma/3 ignores journal_mode WAL/DELETE for :memory:", %{conn: conn} do 105 | assert {:ok, "memory"} = NIF.get_pragma(conn, "journal_mode") 106 | # Attempt to set WAL succeeds, but readback shows it remains memory 107 | assert :ok = NIF.set_pragma(conn, "journal_mode", :wal) 108 | assert {:ok, "memory"} = NIF.get_pragma(conn, "journal_mode") 
109 | # Attempt to set DELETE succeeds, but readback shows it remains memory 110 | assert :ok = NIF.set_pragma(conn, "journal_mode", "DELETE") 111 | assert {:ok, "memory"} = NIF.get_pragma(conn, "journal_mode") 112 | end 113 | end 114 | 115 | describe "using Temporary Disk DB (Specific PRAGMA tests)" do 116 | # Tag specific block 117 | @tag :file_temp 118 | setup do 119 | assert {:ok, conn} = NIF.open_temporary() 120 | on_exit(fn -> NIF.close(conn) end) 121 | {:ok, conn: conn} 122 | end 123 | 124 | test "set_pragma/3 allows setting journal_mode for temp file", %{conn: conn} do 125 | # Default might be DELETE or OFF for temp file 126 | assert {:ok, initial_mode} = NIF.get_pragma(conn, "journal_mode") 127 | assert initial_mode in ["delete", "off", "memory"] 128 | 129 | # Attempt to set WAL 130 | assert :ok = NIF.set_pragma(conn, "journal_mode", :wal) 131 | # Check actual resulting mode - might be WAL or a fallback like DELETE 132 | assert {:ok, mode_after_wal} = NIF.get_pragma(conn, "journal_mode") 133 | # Assert it's one of the expected outcomes (WAL ideally, DELETE is common fallback) 134 | assert mode_after_wal in ["wal", "delete"] 135 | 136 | # Set DELETE explicitly 137 | assert :ok = NIF.set_pragma(conn, "journal_mode", "DELETE") 138 | # Expect DELETE should always work for a file DB 139 | assert {:ok, "delete"} = NIF.get_pragma(conn, "journal_mode") 140 | end 141 | end 142 | end 143 | -------------------------------------------------------------------------------- /test/nif/read_only_db_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Xqlite.NIF.ReadOnlyDbTest do 2 | # Read-only tests involve file setup once 3 | use ExUnit.Case, async: false 4 | 5 | alias XqliteNIF, as: NIF 6 | 7 | @db_file_prefix "read_only_test_" 8 | @test_table_name "ro_test_table" 9 | @create_table_sql "CREATE TABLE #{@test_table_name} (id INTEGER PRIMARY KEY, data TEXT);" 10 | @insert_sql "INSERT INTO #{@test_table_name} (id, data) 
VALUES (1, 'sample data');" 11 | 12 | defp create_temp_db_file() do 13 | temp_db_path = 14 | Path.join( 15 | System.tmp_dir!(), 16 | @db_file_prefix <> Integer.to_string(:erlang.unique_integer([:positive])) <> ".db" 17 | ) 18 | 19 | # Clean up if exists 20 | File.rm(temp_db_path) 21 | {:ok, conn_rw} = NIF.open(temp_db_path) 22 | assert {:ok, 0} = NIF.execute(conn_rw, @create_table_sql, []) 23 | assert {:ok, 1} = NIF.execute(conn_rw, @insert_sql, []) 24 | assert :ok = NIF.close(conn_rw) 25 | temp_db_path 26 | end 27 | 28 | setup_all do 29 | db_path = create_temp_db_file() 30 | read_only_uri = "file:#{db_path}?mode=ro" 31 | {:ok, ro_conn} = NIF.open(read_only_uri) 32 | 33 | on_exit(fn -> 34 | NIF.close(ro_conn) 35 | File.rm(db_path) 36 | end) 37 | 38 | {:ok, conn: ro_conn, db_path: db_path} 39 | end 40 | 41 | # --- Read Operations on Read-Only DB --- 42 | 43 | test "query/3 (SELECT) succeeds on a read-only database", %{conn: ro_conn} do 44 | # Added column names for completeness 45 | assert {:ok, %{columns: ["id", "data"], rows: [[1, "sample data"]], num_rows: 1}} = 46 | NIF.query(ro_conn, "SELECT id, data FROM #{@test_table_name} WHERE id = 1;", []) 47 | end 48 | 49 | test "get_pragma/2 succeeds for read-only pragmas", %{conn: ro_conn} do 50 | # More specific assertion 51 | assert {:ok, "UTF-8"} = NIF.get_pragma(ro_conn, "encoding") 52 | end 53 | 54 | test "schema introspection NIFs succeed on a read-only database", %{conn: ro_conn} do 55 | # Check main db file path in schema_databases 56 | assert {:ok, [db_info | _]} = NIF.schema_databases(ro_conn) 57 | assert db_info.name == "main" 58 | # Path will be absolute 59 | assert String.ends_with?(db_info.file, ".db") 60 | 61 | # More robustly find the specific table 62 | assert {:ok, [object_info | _]} = 63 | NIF.schema_list_objects(ro_conn, "main") 64 | |> then(fn {:ok, objects} -> 65 | filtered = Enum.filter(objects, &(&1.name == @test_table_name)) 66 | {:ok, filtered} 67 | end) 68 | 69 | assert object_info.name == 
@test_table_name and object_info.object_type == :table 70 | 71 | assert {:ok, columns} = NIF.schema_columns(ro_conn, @test_table_name) 72 | # id, data 73 | assert Enum.count(columns) == 2 74 | end 75 | 76 | # --- Write Operations on Read-Only DB (Expect :read_only_database error) --- 77 | 78 | @tag :expect_read_only_error 79 | test "execute/3 (INSERT) fails with :read_only_database", %{conn: ro_conn} do 80 | sql = "INSERT INTO #{@test_table_name} (id, data) VALUES (2, 'new data');" 81 | assert {:error, {:read_only_database, _msg}} = NIF.execute(ro_conn, sql, []) 82 | end 83 | 84 | @tag :expect_read_only_error 85 | test "execute/3 (UPDATE) fails with :read_only_database", %{conn: ro_conn} do 86 | sql = "UPDATE #{@test_table_name} SET data = 'updated' WHERE id = 1;" 87 | assert {:error, {:read_only_database, _msg}} = NIF.execute(ro_conn, sql, []) 88 | end 89 | 90 | @tag :expect_read_only_error 91 | test "execute/3 (DELETE) fails with :read_only_database", %{conn: ro_conn} do 92 | sql = "DELETE FROM #{@test_table_name} WHERE id = 1;" 93 | assert {:error, {:read_only_database, _msg}} = NIF.execute(ro_conn, sql, []) 94 | end 95 | 96 | @tag :expect_read_only_error 97 | test "execute/3 (CREATE TABLE) fails with :read_only_database", %{conn: ro_conn} do 98 | sql = "CREATE TABLE new_ro_table (id INTEGER);" 99 | assert {:error, {:read_only_database, _msg}} = NIF.execute(ro_conn, sql, []) 100 | end 101 | 102 | @tag :expect_read_only_error 103 | test "execute_batch/2 with write statements fails with :read_only_database", %{conn: ro_conn} do 104 | sql_batch = "INSERT INTO #{@test_table_name} (id, data) VALUES (3, 'batch data');" 105 | assert {:error, {:read_only_database, _msg}} = NIF.execute_batch(ro_conn, sql_batch) 106 | end 107 | 108 | # Removed set_pragma test for user_version as it's not a reliable RO error trigger here 109 | 110 | @tag :expect_read_only_error 111 | test "begin/1 followed by write attempt fails with :read_only_database", %{conn: ro_conn} do 112 | # BEGIN 
DEFERRED might succeed as it does nothing until the first write. 113 | # The crucial part is that the write operation itself fails. 114 | case NIF.begin(ro_conn) do 115 | :ok -> 116 | write_attempt_result = 117 | NIF.execute( 118 | ro_conn, 119 | "UPDATE #{@test_table_name} SET data = 'ro_tx_update' WHERE id=1;", 120 | [] 121 | ) 122 | 123 | assert {:error, {:read_only_database, _msg}} = write_attempt_result 124 | # Clean up the transaction state 125 | assert :ok = NIF.rollback(ro_conn) 126 | 127 | # Some SQLite versions/configurations might make BEGIN itself fail on a mode=ro DB 128 | # if it tries to acquire even a read lock that implies eventual write capability. 129 | {:error, {:read_only_database, _msg}} -> 130 | # This is also an acceptable outcome for BEGIN on a strictly read-only DB. 131 | :ok 132 | 133 | other_error -> 134 | # If begin succeeded, we must roll back if an assertion below fails. 135 | # However, if begin itself returned an unexpected error, flunk directly. 136 | # Attempt rollback just in case 137 | NIF.rollback(ro_conn) 138 | flunk("begin/1 returned unexpected result on read-only DB: #{inspect(other_error)}") 139 | end 140 | end 141 | 142 | @tag :read_only_commit_behavior 143 | test "commit/1 on a read-only database after begin (with no writes) succeeds as no-op", %{ 144 | conn: ro_conn 145 | } do 146 | # Deferred transaction starts 147 | assert :ok = NIF.begin(ro_conn) 148 | 149 | # On a read-only DB with mode=ro, a COMMIT with no preceding write operations 150 | # is a no-op and should succeed. 151 | # Expect success for vacuous commit 152 | assert :ok = NIF.commit(ro_conn) 153 | 154 | # Verify connection is no longer in a transaction (is_autocommit would be true) 155 | # We can test this by trying to start another transaction. If it succeeds, 156 | # the previous one was properly closed. 
157 | assert :ok = NIF.begin(ro_conn) 158 | # Clean up the new transaction 159 | assert :ok = NIF.rollback(ro_conn) 160 | end 161 | end 162 | -------------------------------------------------------------------------------- /test/nif/error_input_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Xqlite.NIF.ErrorInputTest do 2 | use ExUnit.Case, async: true 3 | 4 | import Xqlite.TestUtil, only: [connection_openers: 0, find_opener_mfa!: 1] 5 | alias XqliteNIF, as: NIF 6 | 7 | @simple_table "CREATE TABLE error_input_test (id INTEGER PRIMARY KEY, data TEXT);" 8 | 9 | # --- Shared test code (generated via `for` loop) --- 10 | for {type_tag, prefix, _opener_mfa_ignored_here} <- connection_openers() do 11 | describe "using #{prefix}" do 12 | @describetag type_tag 13 | 14 | setup context do 15 | {mod, fun, args} = find_opener_mfa!(context) 16 | assert {:ok, conn} = apply(mod, fun, args) 17 | # Setup a minimal table for tests that need a valid target 18 | assert {:ok, 0} = NIF.execute(conn, @simple_table, []) 19 | on_exit(fn -> NIF.close(conn) end) 20 | {:ok, conn: conn} 21 | end 22 | 23 | # --- Input Validation Error Tests --- 24 | 25 | test "execute/3 returns :expected_list when params is not a list", %{conn: conn} do 26 | sql = "INSERT INTO error_input_test (id) VALUES (?1);" 27 | invalid_params = :not_a_list 28 | assert {:error, {:expected_list, _}} = NIF.execute(conn, sql, invalid_params) 29 | end 30 | 31 | test "query/3 returns :expected_list when params is not a list", %{conn: conn} do 32 | sql = "SELECT * FROM error_input_test WHERE id = ?1;" 33 | invalid_params = :not_a_list 34 | assert {:error, {:expected_list, _}} = NIF.query(conn, sql, invalid_params) 35 | end 36 | 37 | test "query/3 returns :expected_keyword_list when keyword list expected but invalid list provided", 38 | %{conn: conn} do 39 | # This test assumes named params detection requires a non-empty list 40 | # starting with a valid tuple format. 
Providing a list not matching 41 | # keyword format should ideally trigger this, but might trigger 42 | # :invalid_parameter_name if the first element isn't a tuple. 43 | # Let's test passing a list of atoms. 44 | sql = "SELECT * FROM error_input_test WHERE id = :id;" 45 | invalid_keyword_list = [:not_a_keyword_list] 46 | # The specific error might depend on rusqlite's internal parsing order. 47 | # It might raise invalid_parameter_name or expected_keyword_tuple/list. 48 | # Based on implementation, ExpectedKeywordList seems less likely here than 49 | # ExpectedKeywordTuple or InvalidParameterName if it attempts binding. 50 | # Let's assert for the most likely based on needing {atom, term} tuples. 51 | assert {:error, :unsupported_atom} = 52 | NIF.query(conn, sql, invalid_keyword_list) 53 | end 54 | 55 | test "query/3 returns :expected_keyword_tuple when keyword list has invalid element", %{ 56 | conn: conn 57 | } do 58 | sql = "SELECT * FROM error_input_test WHERE id = :id;" 59 | # List starts like a keyword list but contains an invalid element 60 | invalid_element_list = [{:valid, 1}, :not_a_tuple] 61 | 62 | assert {:error, {:expected_keyword_tuple, _}} = 63 | NIF.query(conn, sql, invalid_element_list) 64 | end 65 | 66 | test "execute/3 returns :unsupported_atom when parameter atom is invalid", %{conn: conn} do 67 | sql = "INSERT INTO error_input_test (data) VALUES (?1);" 68 | params = [:unsupported_atom_value] 69 | assert {:error, :unsupported_atom} = NIF.execute(conn, sql, params) 70 | end 71 | 72 | test "query/3 returns :unsupported_atom when parameter atom is invalid", %{conn: conn} do 73 | sql = "SELECT * FROM error_input_test WHERE data = ?1;" 74 | params = [:unsupported_atom_value] 75 | assert {:error, :unsupported_atom} = NIF.query(conn, sql, params) 76 | end 77 | 78 | test "execute/3 returns :multiple_statements for multi-statement SQL", %{conn: conn} do 79 | sql = "UPDATE error_input_test SET data = 'a'; SELECT * FROM error_input_test;" 80 | assert 
{:error, :multiple_statements} = NIF.execute(conn, sql, []) 81 | end 82 | 83 | test "query/3 returns :multiple_statements for multi-statement SQL", %{conn: conn} do 84 | sql = "SELECT 1; SELECT 2;" 85 | assert {:error, {:cannot_prepare_statement, _sql, _reason}} = NIF.query(conn, sql, []) 86 | end 87 | 88 | # --- DB State / Execution Error Tests --- 89 | 90 | test "execute/3 returns :no_such_index when dropping non-existent index", %{conn: conn} do 91 | sql = "DROP INDEX non_existent_index;" 92 | # Note: SQLite error messages sometimes include the type, e.g., "index" 93 | assert {:error, {:no_such_index, msg}} = NIF.execute(conn, sql, []) 94 | assert String.contains?(msg || "", "non_existent_index") 95 | end 96 | 97 | # --- Foreign Key Constraint Violation Tests --- 98 | # DDL is now included within each test that needs it. 99 | 100 | test "execute/3 returns :constraint_foreign_key on invalid INSERT", %{conn: conn} do 101 | # Setup FK tables for this specific test 102 | fk_ddl = """ 103 | PRAGMA foreign_keys = ON; 104 | CREATE TABLE fk_parent_insert (id INTEGER PRIMARY KEY); 105 | CREATE TABLE fk_child_insert ( 106 | id INTEGER PRIMARY KEY, 107 | parent_id INTEGER NOT NULL REFERENCES fk_parent_insert(id) 108 | ); 109 | INSERT INTO fk_parent_insert (id) VALUES (1); 110 | """ 111 | 112 | assert :ok = NIF.execute_batch(conn, fk_ddl) 113 | 114 | # Test the violation 115 | # parent_id 99 doesn't exist 116 | sql = "INSERT INTO fk_child_insert (id, parent_id) VALUES (10, 99);" 117 | 118 | assert {:error, {:constraint_violation, :constraint_foreign_key, _msg}} = 119 | NIF.execute(conn, sql, []) 120 | end 121 | 122 | test "execute/3 returns :constraint_foreign_key on invalid DELETE", %{conn: conn} do 123 | # Setup FK tables for this specific test (using different names to avoid conflict) 124 | fk_ddl = """ 125 | PRAGMA foreign_keys = ON; 126 | CREATE TABLE fk_parent_delete (id INTEGER PRIMARY KEY); 127 | CREATE TABLE fk_child_delete ( 128 | id INTEGER PRIMARY KEY, 129 | 
parent_id INTEGER NOT NULL REFERENCES fk_parent_delete(id) 130 | ); 131 | INSERT INTO fk_parent_delete (id) VALUES (1); 132 | INSERT INTO fk_child_delete (id, parent_id) VALUES (10, 1); 133 | """ 134 | 135 | assert :ok = NIF.execute_batch(conn, fk_ddl) 136 | 137 | # Test the violation: Try deleting the parent row referenced by the child 138 | sql = "DELETE FROM fk_parent_delete WHERE id = 1;" 139 | 140 | assert {:error, {:constraint_violation, :constraint_foreign_key, _msg}} = 141 | NIF.execute(conn, sql, []) 142 | end 143 | end 144 | end 145 | end 146 | -------------------------------------------------------------------------------- /test/nif/transaction_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Xqlite.NIF.TransactionTest do 2 | use ExUnit.Case, async: true 3 | 4 | import Xqlite.TestUtil, only: [connection_openers: 0, find_opener_mfa!: 1] 5 | alias XqliteNIF, as: NIF 6 | 7 | @simple_tx_table "CREATE TABLE tx_test (id INTEGER PRIMARY KEY, name TEXT);" 8 | 9 | @savepoint_table_setup ~S""" 10 | CREATE TABLE savepoint_test ( 11 | id INTEGER PRIMARY KEY, 12 | val TEXT NOT NULL 13 | ); 14 | INSERT INTO savepoint_test (id, val) VALUES (1, 'one'); 15 | """ 16 | 17 | # --- Helper functions specific to savepoint tests --- 18 | # Defined at module level as they are used across iterations of the loop 19 | defp query_savepoint_test_row(conn, id) do 20 | sql = "SELECT id, val FROM savepoint_test WHERE id = ?1;" 21 | NIF.query(conn, sql, [id]) 22 | end 23 | 24 | defp assert_savepoint_record_present(conn, id, expected_val) do 25 | expected_result = {:ok, %{columns: ["id", "val"], rows: [[id, expected_val]], num_rows: 1}} 26 | assert expected_result == query_savepoint_test_row(conn, id) 27 | end 28 | 29 | defp assert_savepoint_record_missing(conn, id) do 30 | expected_result = {:ok, %{columns: ["id", "val"], rows: [], num_rows: 0}} 31 | assert expected_result == query_savepoint_test_row(conn, id) 32 | end 33 | 34 | # 
--- Shared test code (generated via `for` loop) --- 35 | for {type_tag, prefix, _opener_mfa_ignored_here} <- connection_openers() do 36 | describe "using #{prefix}" do 37 | @describetag type_tag 38 | 39 | # Top-level setup for the describe block (opens connection) 40 | setup context do 41 | {mod, fun, args} = find_opener_mfa!(context) 42 | assert {:ok, conn} = apply(mod, fun, args) 43 | on_exit(fn -> NIF.close(conn) end) 44 | {:ok, conn: conn} 45 | end 46 | 47 | # --- Basic Commit/Rollback Tests --- 48 | # Setup specific table needed for these tests 49 | setup %{conn: conn} do 50 | assert {:ok, 0} = NIF.execute(conn, @simple_tx_table, []) 51 | :ok 52 | end 53 | 54 | test "insert a record and commit transaction", %{conn: conn} do 55 | assert :ok = NIF.begin(conn) 56 | 57 | assert {:ok, 1} = 58 | NIF.execute( 59 | conn, 60 | "INSERT INTO tx_test (id, name) VALUES (100, 'Committed');", 61 | [] 62 | ) 63 | 64 | assert :ok = NIF.commit(conn) 65 | 66 | assert {:ok, %{rows: [[100, "Committed"]], num_rows: 1}} = 67 | NIF.query(conn, "SELECT * FROM tx_test where id = 100;", []) 68 | end 69 | 70 | test "insert a record and rollback transaction", %{conn: conn} do 71 | assert :ok = NIF.begin(conn) 72 | 73 | assert {:ok, 1} = 74 | NIF.execute( 75 | conn, 76 | "INSERT INTO tx_test (id, name) VALUES (101, 'Rolled Back');", 77 | [] 78 | ) 79 | 80 | assert :ok = NIF.rollback(conn) 81 | 82 | assert {:ok, %{rows: [], num_rows: 0}} = 83 | NIF.query(conn, "SELECT * FROM tx_test where id = 101;", []) 84 | end 85 | 86 | test "commit without begin fails", %{conn: conn} do 87 | assert {:error, {:sqlite_failure, code, _, msg}} = NIF.commit(conn) 88 | assert code == 21 or String.contains?(msg || "", "no transaction is active") 89 | end 90 | 91 | test "rollback without begin fails", %{conn: conn} do 92 | assert {:error, {:sqlite_failure, code, _, msg}} = NIF.rollback(conn) 93 | assert code == 21 or String.contains?(msg || "", "no transaction is active") 94 | end 95 | 96 | test "begin within 
begin fails", %{conn: conn} do 97 | assert :ok = NIF.begin(conn) 98 | assert {:error, {:sqlite_failure, code, _, msg}} = NIF.begin(conn) 99 | assert code == 21 or String.contains?(msg || "", "within a transaction") 100 | # Clean up outer transaction 101 | assert :ok = NIF.rollback(conn) 102 | end 103 | 104 | # --- Savepoint Tests --- 105 | # Setup specific table needed for these tests 106 | setup %{conn: conn} do 107 | assert :ok = NIF.execute_batch(conn, @savepoint_table_setup) 108 | :ok 109 | end 110 | 111 | test "rollback_to_savepoint reverts changes", %{conn: conn} do 112 | assert_savepoint_record_present(conn, 1, "one") 113 | assert :ok = NIF.begin(conn) 114 | assert {:ok, 1} = NIF.execute(conn, "INSERT INTO savepoint_test VALUES (2, 'two')", []) 115 | assert_savepoint_record_present(conn, 2, "two") 116 | assert :ok = NIF.savepoint(conn, "sp1") 117 | 118 | assert {:ok, 1} = 119 | NIF.execute(conn, "INSERT INTO savepoint_test VALUES (3, 'three')", []) 120 | 121 | assert_savepoint_record_present(conn, 3, "three") 122 | assert :ok = NIF.rollback_to_savepoint(conn, "sp1") 123 | assert_savepoint_record_missing(conn, 3) 124 | assert_savepoint_record_present(conn, 2, "two") 125 | assert :ok = NIF.commit(conn) 126 | assert_savepoint_record_present(conn, 1, "one") 127 | assert_savepoint_record_present(conn, 2, "two") 128 | assert_savepoint_record_missing(conn, 3) 129 | end 130 | 131 | test "release_savepoint incorporates changes", %{conn: conn} do 132 | assert_savepoint_record_present(conn, 1, "one") 133 | assert :ok = NIF.begin(conn) 134 | assert {:ok, 1} = NIF.execute(conn, "INSERT INTO savepoint_test VALUES (2, 'two')", []) 135 | assert_savepoint_record_present(conn, 2, "two") 136 | assert :ok = NIF.savepoint(conn, "sp1") 137 | 138 | assert {:ok, 1} = 139 | NIF.execute(conn, "INSERT INTO savepoint_test VALUES (3, 'three')", []) 140 | 141 | assert_savepoint_record_present(conn, 3, "three") 142 | assert :ok = NIF.release_savepoint(conn, "sp1") 143 | 
assert_savepoint_record_present(conn, 3, "three") 144 | assert_savepoint_record_present(conn, 2, "two") 145 | assert :ok = NIF.commit(conn) 146 | assert_savepoint_record_present(conn, 1, "one") 147 | assert_savepoint_record_present(conn, 2, "two") 148 | assert_savepoint_record_present(conn, 3, "three") 149 | end 150 | 151 | test "rollback_to_savepoint after release fails", %{conn: conn} do 152 | assert :ok = NIF.begin(conn) 153 | assert :ok = NIF.savepoint(conn, "sp1") 154 | assert :ok = NIF.release_savepoint(conn, "sp1") 155 | 156 | assert {:error, {:sqlite_failure, code, _, msg}} = 157 | NIF.rollback_to_savepoint(conn, "sp1") 158 | 159 | assert code == 21 or String.contains?(msg || "", "no such savepoint") 160 | # Clean up main transaction 161 | assert :ok = NIF.rollback(conn) 162 | end 163 | end 164 | 165 | # end describe "using #{prefix}" 166 | end 167 | 168 | # end `for` loop 169 | end 170 | -------------------------------------------------------------------------------- /native/xqlitenif/src/schema.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | asc, binary, cascade, create_index, desc, float, full, hidden_alias, integer, no_action, 3 | none, normal, numeric, partial, primary_key_constraint, r#virtual, restrict, sequence, 4 | set_default, set_null, shadow, simple, stored_generated, table, text, unique_constraint, 5 | view, virtual_generated, 6 | }; 7 | use rustler::{Atom, NifStruct}; 8 | use std::convert::TryFrom; 9 | 10 | #[derive(Debug, Clone, NifStruct)] 11 | #[module = "Xqlite.Schema.DatabaseInfo"] 12 | pub(crate) struct DatabaseInfo { 13 | pub name: String, 14 | pub file: Option, 15 | } 16 | 17 | #[derive(Debug, Clone, NifStruct)] 18 | #[module = "Xqlite.Schema.SchemaObjectInfo"] 19 | pub(crate) struct SchemaObjectInfo { 20 | pub schema: String, 21 | pub name: String, 22 | pub object_type: Atom, 23 | pub column_count: i64, 24 | pub is_without_rowid: bool, 25 | pub strict: bool, 26 | } 27 | 28 | 
#[derive(Debug, Clone, NifStruct)] 29 | #[module = "Xqlite.Schema.ColumnInfo"] 30 | pub(crate) struct ColumnInfo { 31 | pub column_id: i64, 32 | pub name: String, 33 | pub type_affinity: Atom, 34 | pub declared_type: String, 35 | pub nullable: bool, 36 | pub default_value: Option, 37 | pub primary_key_index: u8, 38 | pub hidden_kind: Atom, 39 | } 40 | 41 | #[derive(Debug, Clone, NifStruct)] 42 | #[module = "Xqlite.Schema.ForeignKeyInfo"] 43 | pub(crate) struct ForeignKeyInfo { 44 | pub id: i64, 45 | pub column_sequence: i64, 46 | pub target_table: String, 47 | pub from_column: String, 48 | pub to_column: String, 49 | pub on_update: Atom, 50 | pub on_delete: Atom, 51 | pub match_clause: Atom, 52 | } 53 | 54 | #[derive(Debug, Clone, NifStruct)] 55 | #[module = "Xqlite.Schema.IndexInfo"] 56 | pub(crate) struct IndexInfo { 57 | pub name: String, 58 | pub unique: bool, 59 | pub origin: Atom, 60 | pub partial: bool, 61 | } 62 | 63 | #[derive(Debug, Clone, NifStruct)] 64 | #[module = "Xqlite.Schema.IndexColumnInfo"] 65 | pub(crate) struct IndexColumnInfo { 66 | pub index_column_sequence: i64, 67 | pub table_column_id: i64, 68 | pub name: Option, 69 | pub sort_order: Atom, 70 | pub collation: String, 71 | pub is_key_column: bool, 72 | } 73 | 74 | /// Maps PRAGMA table_list type string to an atom. 75 | #[inline] 76 | pub(crate) fn object_type_to_atom(s: &str) -> Result { 77 | match s { 78 | "table" => Ok(table()), 79 | "view" => Ok(view()), 80 | "shadow" => Ok(shadow()), 81 | "virtual" => Ok(r#virtual()), 82 | "sequence" => Ok(sequence()), 83 | _ => Err(s), 84 | } 85 | } 86 | 87 | /// Maps PRAGMA table_info type affinity string to an atom. 88 | pub(crate) fn type_affinity_to_atom(declared_type_str: &str) -> Result { 89 | // Convert to uppercase for case-insensitive matching of common patterns 90 | let upper_declared_type = declared_type_str.to_uppercase(); 91 | 92 | if upper_declared_type.contains("INT") { 93 | // Catches INT, INTEGER, BIGINT etc. 
94 | Ok(integer()) 95 | } else if upper_declared_type.contains("CHAR") || // VARCHAR, CHARACTER 96 | upper_declared_type.contains("CLOB") || // CLOB 97 | upper_declared_type.contains("TEXT") 98 | // TEXT 99 | { 100 | Ok(text()) 101 | } else if upper_declared_type.contains("BLOB") || 102 | upper_declared_type.is_empty() || // No type specified means BLOB affinity 103 | upper_declared_type == "ANY" 104 | // ANY type columns also get BLOB affinity if no data type is forced by content 105 | { 106 | Ok(binary()) // For 'ANY' this is a simplification; typeof() would be more accurate for content. 107 | // But for PRAGMA table_info, this is a reasonable default mapping. 108 | } else if upper_declared_type.contains("REAL") || // REAL 109 | upper_declared_type.contains("FLOA") || // FLOAT 110 | upper_declared_type.contains("DOUB") 111 | // DOUBLE 112 | { 113 | Ok(float()) 114 | } else { 115 | // Default to NUMERIC affinity for anything else. 116 | // This covers BOOLEAN, DATE, DATETIME etc. which don't have their own affinity. 117 | Ok(numeric()) 118 | } 119 | } 120 | 121 | /// Maps the integer 'hidden' value from PRAGMA table_xinfo to an atom. 122 | #[inline] 123 | pub(crate) fn hidden_int_to_atom(hidden_val: i64) -> Result { 124 | match hidden_val { 125 | 0 => Ok(normal()), 126 | 1 => Ok(hidden_alias()), 127 | 2 => Ok(virtual_generated()), 128 | 3 => Ok(stored_generated()), 129 | _ => Err(hidden_val.to_string()), 130 | } 131 | } 132 | 133 | /// Maps PRAGMA foreign_key_list action string to an atom. 134 | #[inline] 135 | pub(crate) fn fk_action_to_atom(s: &str) -> Result { 136 | match s { 137 | "NO ACTION" => Ok(no_action()), 138 | "RESTRICT" => Ok(restrict()), 139 | "SET NULL" => Ok(set_null()), 140 | "SET DEFAULT" => Ok(set_default()), 141 | "CASCADE" => Ok(cascade()), 142 | _ => Err(s), 143 | } 144 | } 145 | 146 | /// Maps PRAGMA foreign_key_list match string to an atom. 
147 | #[inline] 148 | pub(crate) fn fk_match_to_atom(s: &str) -> Result { 149 | match s { 150 | "NONE" => Ok(none()), 151 | "SIMPLE" => Ok(simple()), 152 | "PARTIAL" => Ok(partial()), 153 | "FULL" => Ok(full()), 154 | _ => Err(s), 155 | } 156 | } 157 | 158 | /// Maps PRAGMA index_list origin char to a descriptive atom. 159 | #[inline] 160 | pub(crate) fn index_origin_to_atom(s: &str) -> Result { 161 | match s { 162 | "c" => Ok(create_index()), 163 | "u" => Ok(unique_constraint()), 164 | "pk" => Ok(primary_key_constraint()), 165 | _ => Err(s), 166 | } 167 | } 168 | 169 | /// Maps PRAGMA index_xinfo sort order value (0/1) to an atom. 170 | /// Assumes the input 'val' is derived from an integer column. 171 | #[inline] 172 | pub(crate) fn sort_order_to_atom(val: i64) -> Result { 173 | match val { 174 | 0 => Ok(asc()), 175 | 1 => Ok(desc()), 176 | _ => Err(val.to_string()), 177 | } 178 | } 179 | 180 | /// Converts the 'notnull' integer flag from PRAGMA table_info to a boolean 'nullable'. 181 | /// Returns Err with the unexpected value as String if input is not 0 or 1. 182 | #[inline] 183 | pub(crate) fn notnull_to_nullable(notnull_flag: i64) -> Result { 184 | match notnull_flag { 185 | 0 => Ok(true), 186 | 1 => Ok(false), 187 | _ => Err(notnull_flag.to_string()), 188 | } 189 | } 190 | 191 | /// Converts the 'pk' integer flag from PRAGMA table_info to a u8 index. 192 | /// Returns Err with the unexpected value as String if input is negative or > 255. 
193 | #[inline] 194 | pub(crate) fn pk_value_to_index(pk_flag: i64) -> Result { 195 | u8::try_from(pk_flag).map_err(|_| pk_flag.to_string()) 196 | } 197 | -------------------------------------------------------------------------------- /test/pragma_test.exs: -------------------------------------------------------------------------------- 1 | defmodule XqlitePragmaTest do 2 | use ExUnit.Case, async: true 3 | doctest Xqlite.Pragma 4 | 5 | alias XqliteNIF, as: NIF 6 | alias Xqlite.Pragma, as: P 7 | 8 | import Xqlite.TestUtil 9 | 10 | @write_test_cases [ 11 | # Simple set/get with representative values 12 | {:application_id, [0, 12345, 98765, -1000]}, 13 | {:analysis_limit, [0, 100, -1]}, 14 | {:user_version, [0, 5, 10, -100]}, 15 | # Can only be set on a fresh DB 16 | {:page_size, [2048, 4096, 8192]}, 17 | {:busy_timeout, [0, 1000, 5000]}, 18 | # -1 means no limit 19 | {:journal_size_limit, [0, -1, 102_400]}, 20 | {:max_page_count, [1, 1_000_000]}, 21 | 22 | # All boolean PRAGMAs 23 | {:automatic_index, [true, false]}, 24 | {:cell_size_check, [true, false]}, 25 | {:checkpoint_fullfsync, [true, false]}, 26 | {:defer_foreign_keys, [true, false]}, 27 | {:foreign_keys, [true, false]}, 28 | {:fullfsync, [true, false]}, 29 | {:ignore_check_constraints, [true, false]}, 30 | {:legacy_alter_table, [true, false]}, 31 | {:query_only, [true, false]}, 32 | {:read_uncommitted, [true, false]}, 33 | {:recursive_triggers, [true, false]}, 34 | {:reverse_unordered_selects, [true, false]}, 35 | {:trusted_schema, [true, false]}, 36 | 37 | # PRAGMAs with special value mappings (test all specified values) 38 | {:synchronous, 39 | [ 40 | {"NORMAL", :normal}, 41 | {1, :normal}, 42 | {"OFF", :off}, 43 | {0, :off}, 44 | {"FULL", :full}, 45 | {2, :full}, 46 | {"EXTRA", :extra}, 47 | {3, :extra} 48 | ]}, 49 | {:temp_store, 50 | [ 51 | {"DEFAULT", :default}, 52 | {0, :default}, 53 | {"FILE", :file}, 54 | {1, :file}, 55 | {"MEMORY", :memory}, 56 | {2, :memory} 57 | ]}, 58 | {:auto_vacuum, [{0, 
:none}, {1, :full}, {2, :incremental}], &verify_is_atom/4}, 59 | {:secure_delete, [{0, false}, {1, true}, {2, :fast}]}, 60 | 61 | # PRAGMAs with platform-dependent results 62 | {:journal_mode, 63 | [ 64 | # Most common default for file DBs 65 | {"DELETE", "delete"}, 66 | {"TRUNCATE", "truncate"}, 67 | {"PERSIST", "persist"}, 68 | {"MEMORY", "memory"}, 69 | # On in-memory, WAL falls back to memory 70 | {"WAL", ~w(wal memory)}, 71 | {"OFF", "off"} 72 | ]}, 73 | {:locking_mode, [{"NORMAL", "normal"}, {"EXCLUSIVE", "exclusive"}]}, 74 | {:encoding, 75 | [ 76 | {"UTF-8", "UTF-8"}, 77 | {"UTF-16le", "UTF-16le"}, 78 | {"UTF-16be", "UTF-16be"}, 79 | # Setting UTF-16 may result in le or be 80 | {"UTF-16", ~w(UTF-16le UTF-16be)} 81 | ]}, 82 | 83 | # Advisory values 84 | # Test with a positive, negative (if applicable), and zero value 85 | {:cache_size, [0, 8, -16], &verify_is_integer/4}, 86 | {:soft_heap_limit, [0, 1024 * 1024], &verify_is_integer/4}, 87 | {:hard_heap_limit, [0, 1024 * 1024], &verify_is_integer/4}, 88 | {:threads, [0, 1, 8], &verify_is_integer/4}, 89 | {:wal_autocheckpoint, [0, 1000], &verify_is_integer/4}, 90 | {:mmap_size, [0, 256 * 1024], &verify_mmap_size_value/4} 91 | ] 92 | 93 | for {type_tag, prefix, _opener_mfa} <- connection_openers() do 94 | describe "PRAGMA tests using #{prefix}" do 95 | @describetag type_tag 96 | 97 | setup context do 98 | {mod, fun, args} = find_opener_mfa!(context) 99 | assert {:ok, db} = apply(mod, fun, args) 100 | {:ok, db: db, test_context_tag: unquote(type_tag)} 101 | end 102 | 103 | # All readable PRAGMAs with zero arguments (they only fetch values and don't modify 104 | # any DB behaviour). 105 | 106 | for name <- P.readable_with_zero_args() do 107 | test "read pragma: #{name}", %{db: db} do 108 | assert valid_get_result(P.get(db, unquote(name))) 109 | end 110 | end 111 | 112 | # Test for a readable PRAGMA that takes one argument. 
113 | test "read pragma: foreign_key_check with table name", %{db: db} do 114 | # Setup: Create tables but keep foreign keys OFF initially. 115 | assert :ok = P.put(db, :foreign_keys, false) 116 | 117 | assert :ok = 118 | NIF.execute_batch(db, """ 119 | CREATE TABLE parents(id INTEGER PRIMARY KEY); 120 | CREATE TABLE children(id INTEGER, parent_id INTEGER REFERENCES parents(id)); 121 | INSERT INTO parents (id) VALUES (1); 122 | INSERT INTO children (id, parent_id) VALUES (10, 1); 123 | """) 124 | 125 | # With FKs off, check should still pass as there are no violations yet. 126 | assert {:ok, []} = P.get(db, :foreign_key_check, "children") 127 | 128 | # Now, insert an invalid row. This will succeed because FKs are off. 129 | assert {:ok, 1} = 130 | NIF.execute(db, "INSERT INTO children (id, parent_id) VALUES (20, 99);", []) 131 | 132 | # Now, run the check. It should find the pre-existing violation. 133 | # The rowid of the new row is 2. 134 | assert {:ok, [["children", 2, "parents", 0]]} = 135 | P.get(db, :foreign_key_check, "children") 136 | end 137 | 138 | # All of the readable PRAGMAs with one arg are actually instructions that change the DB. 139 | # We are not going to test those. 140 | 141 | # All writable PRAGMAs with one arg. 142 | 143 | for {name, values_to_test, verify_fun} <- @write_test_cases, 144 | verify_fun = Macro.escape(verify_fun) do 145 | verify_fun = verify_fun || (&default_verify_values/4) 146 | 147 | # Generate a test for each value to be set for a given PRAGMA 148 | for {set_val, expected_val} <- normalize_test_values(values_to_test) do 149 | test_name_string = "write pragma: #{name} = #{inspect(set_val)}" 150 | 151 | test test_name_string, %{db: db, test_context_tag: test_context_tag} do 152 | # We have to do `unquote(name)` several times here because Elixir's 1.18 compiler 153 | # warns us that certain comparisons can never succeed. 
154 | set_val = unquote(set_val) 155 | expected_val = unquote(expected_val) 156 | verify_fun = unquote(verify_fun) 157 | 158 | # We need a clean DB for some PRAGMAs like page_size 159 | db = if unquote(name) == :page_size, do: clean_db(), else: db 160 | 161 | # The core of the test: put, then get and verify 162 | assert :ok = P.put(db, unquote(name), set_val) 163 | 164 | case P.get(db, unquote(name)) do 165 | {:ok, fetched_val} -> 166 | assert verify_fun.(test_context_tag, set_val, fetched_val, expected_val), 167 | "Set `#{inspect(set_val)}`, but fetched `#{inspect(fetched_val)}`, expected one of `#{inspect(expected_val)}`" 168 | 169 | # For write-only PRAGMAs 170 | :ok -> 171 | assert verify_fun.(test_context_tag, set_val, :ok) 172 | 173 | error -> 174 | flunk( 175 | "P.get returned an unexpected error after a successful put: `#{inspect(error)}`" 176 | ) 177 | end 178 | end 179 | end 180 | end 181 | end 182 | end 183 | 184 | defp valid_get_result({:error, _, _}), do: false 185 | defp valid_get_result({:error, _}), do: false 186 | defp valid_get_result({:ok, _}), do: true 187 | defp valid_get_result(:ok), do: true 188 | 189 | defp valid_get_result(other) do 190 | IO.puts("pragma_get_result: unknown response: `#{inspect(other)}`") 191 | end 192 | 193 | defp clean_db() do 194 | {:ok, db} = NIF.open(":memory:") 195 | db 196 | end 197 | end 198 | -------------------------------------------------------------------------------- /native/xqlitenif/Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 
3 | version = 4 4 | 5 | [[package]] 6 | name = "bitflags" 7 | version = "2.6.0" 8 | source = "registry+https://github.com/rust-lang/crates.io-index" 9 | checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" 10 | 11 | [[package]] 12 | name = "cc" 13 | version = "1.2.38" 14 | source = "registry+https://github.com/rust-lang/crates.io-index" 15 | checksum = "80f41ae168f955c12fb8960b057d70d0ca153fb83182b57d86380443527be7e9" 16 | dependencies = [ 17 | "find-msvc-tools", 18 | "shlex", 19 | ] 20 | 21 | [[package]] 22 | name = "cfg-if" 23 | version = "1.0.0" 24 | source = "registry+https://github.com/rust-lang/crates.io-index" 25 | checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" 26 | 27 | [[package]] 28 | name = "fallible-iterator" 29 | version = "0.3.0" 30 | source = "registry+https://github.com/rust-lang/crates.io-index" 31 | checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" 32 | 33 | [[package]] 34 | name = "fallible-streaming-iterator" 35 | version = "0.1.9" 36 | source = "registry+https://github.com/rust-lang/crates.io-index" 37 | checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" 38 | 39 | [[package]] 40 | name = "find-msvc-tools" 41 | version = "0.1.2" 42 | source = "registry+https://github.com/rust-lang/crates.io-index" 43 | checksum = "1ced73b1dacfc750a6db6c0a0c3a3853c8b41997e2e2c563dc90804ae6867959" 44 | 45 | [[package]] 46 | name = "foldhash" 47 | version = "0.1.5" 48 | source = "registry+https://github.com/rust-lang/crates.io-index" 49 | checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" 50 | 51 | [[package]] 52 | name = "hashbrown" 53 | version = "0.15.2" 54 | source = "registry+https://github.com/rust-lang/crates.io-index" 55 | checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" 56 | dependencies = [ 57 | "foldhash", 58 | ] 59 | 60 | [[package]] 61 | name = "hashlink" 62 | version = "0.10.0" 63 | 
source = "registry+https://github.com/rust-lang/crates.io-index" 64 | checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" 65 | dependencies = [ 66 | "hashbrown", 67 | ] 68 | 69 | [[package]] 70 | name = "heck" 71 | version = "0.5.0" 72 | source = "registry+https://github.com/rust-lang/crates.io-index" 73 | checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" 74 | 75 | [[package]] 76 | name = "inventory" 77 | version = "0.3.15" 78 | source = "registry+https://github.com/rust-lang/crates.io-index" 79 | checksum = "f958d3d68f4167080a18141e10381e7634563984a537f2a49a30fd8e53ac5767" 80 | 81 | [[package]] 82 | name = "libloading" 83 | version = "0.8.6" 84 | source = "registry+https://github.com/rust-lang/crates.io-index" 85 | checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" 86 | dependencies = [ 87 | "cfg-if", 88 | "windows-targets", 89 | ] 90 | 91 | [[package]] 92 | name = "libsqlite3-sys" 93 | version = "0.35.0" 94 | source = "registry+https://github.com/rust-lang/crates.io-index" 95 | checksum = "133c182a6a2c87864fe97778797e46c7e999672690dc9fa3ee8e241aa4a9c13f" 96 | dependencies = [ 97 | "cc", 98 | "pkg-config", 99 | "vcpkg", 100 | ] 101 | 102 | [[package]] 103 | name = "pkg-config" 104 | version = "0.3.19" 105 | source = "registry+https://github.com/rust-lang/crates.io-index" 106 | checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" 107 | 108 | [[package]] 109 | name = "proc-macro2" 110 | version = "1.0.94" 111 | source = "registry+https://github.com/rust-lang/crates.io-index" 112 | checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" 113 | dependencies = [ 114 | "unicode-ident", 115 | ] 116 | 117 | [[package]] 118 | name = "quote" 119 | version = "1.0.40" 120 | source = "registry+https://github.com/rust-lang/crates.io-index" 121 | checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" 122 | dependencies = [ 123 | 
"proc-macro2", 124 | ] 125 | 126 | [[package]] 127 | name = "regex-lite" 128 | version = "0.1.6" 129 | source = "registry+https://github.com/rust-lang/crates.io-index" 130 | checksum = "53a49587ad06b26609c52e423de037e7f57f20d53535d66e08c695f347df952a" 131 | 132 | [[package]] 133 | name = "rusqlite" 134 | version = "0.37.0" 135 | source = "registry+https://github.com/rust-lang/crates.io-index" 136 | checksum = "165ca6e57b20e1351573e3729b958bc62f0e48025386970b6e4d29e7a7e71f3f" 137 | dependencies = [ 138 | "bitflags", 139 | "fallible-iterator", 140 | "fallible-streaming-iterator", 141 | "hashlink", 142 | "libsqlite3-sys", 143 | "smallvec", 144 | ] 145 | 146 | [[package]] 147 | name = "rustler" 148 | version = "0.37.0" 149 | source = "registry+https://github.com/rust-lang/crates.io-index" 150 | checksum = "fb867bb35b291ef105abbe0a0d04bd4d7af372e023d08845698687bc254f222b" 151 | dependencies = [ 152 | "inventory", 153 | "libloading", 154 | "regex-lite", 155 | "rustler_codegen", 156 | ] 157 | 158 | [[package]] 159 | name = "rustler_codegen" 160 | version = "0.37.0" 161 | source = "registry+https://github.com/rust-lang/crates.io-index" 162 | checksum = "90993223c5ac0fb580ff966fb9477289c4e8a610a2f4639912a2639c5e7b5095" 163 | dependencies = [ 164 | "heck", 165 | "inventory", 166 | "proc-macro2", 167 | "quote", 168 | "syn", 169 | ] 170 | 171 | [[package]] 172 | name = "shlex" 173 | version = "1.3.0" 174 | source = "registry+https://github.com/rust-lang/crates.io-index" 175 | checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" 176 | 177 | [[package]] 178 | name = "smallvec" 179 | version = "1.6.1" 180 | source = "registry+https://github.com/rust-lang/crates.io-index" 181 | checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" 182 | 183 | [[package]] 184 | name = "syn" 185 | version = "2.0.104" 186 | source = "registry+https://github.com/rust-lang/crates.io-index" 187 | checksum = 
"17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" 188 | dependencies = [ 189 | "proc-macro2", 190 | "quote", 191 | "unicode-ident", 192 | ] 193 | 194 | [[package]] 195 | name = "unicode-ident" 196 | version = "1.0.12" 197 | source = "registry+https://github.com/rust-lang/crates.io-index" 198 | checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" 199 | 200 | [[package]] 201 | name = "vcpkg" 202 | version = "0.2.15" 203 | source = "registry+https://github.com/rust-lang/crates.io-index" 204 | checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" 205 | 206 | [[package]] 207 | name = "windows-targets" 208 | version = "0.52.6" 209 | source = "registry+https://github.com/rust-lang/crates.io-index" 210 | checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" 211 | dependencies = [ 212 | "windows_aarch64_gnullvm", 213 | "windows_aarch64_msvc", 214 | "windows_i686_gnu", 215 | "windows_i686_gnullvm", 216 | "windows_i686_msvc", 217 | "windows_x86_64_gnu", 218 | "windows_x86_64_gnullvm", 219 | "windows_x86_64_msvc", 220 | ] 221 | 222 | [[package]] 223 | name = "windows_aarch64_gnullvm" 224 | version = "0.52.6" 225 | source = "registry+https://github.com/rust-lang/crates.io-index" 226 | checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" 227 | 228 | [[package]] 229 | name = "windows_aarch64_msvc" 230 | version = "0.52.6" 231 | source = "registry+https://github.com/rust-lang/crates.io-index" 232 | checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" 233 | 234 | [[package]] 235 | name = "windows_i686_gnu" 236 | version = "0.52.6" 237 | source = "registry+https://github.com/rust-lang/crates.io-index" 238 | checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" 239 | 240 | [[package]] 241 | name = "windows_i686_gnullvm" 242 | version = "0.52.6" 243 | source = "registry+https://github.com/rust-lang/crates.io-index" 244 | 
checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" 245 | 246 | [[package]] 247 | name = "windows_i686_msvc" 248 | version = "0.52.6" 249 | source = "registry+https://github.com/rust-lang/crates.io-index" 250 | checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" 251 | 252 | [[package]] 253 | name = "windows_x86_64_gnu" 254 | version = "0.52.6" 255 | source = "registry+https://github.com/rust-lang/crates.io-index" 256 | checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" 257 | 258 | [[package]] 259 | name = "windows_x86_64_gnullvm" 260 | version = "0.52.6" 261 | source = "registry+https://github.com/rust-lang/crates.io-index" 262 | checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" 263 | 264 | [[package]] 265 | name = "windows_x86_64_msvc" 266 | version = "0.52.6" 267 | source = "registry+https://github.com/rust-lang/crates.io-index" 268 | checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" 269 | 270 | [[package]] 271 | name = "xqlitenif" 272 | version = "0.3.1" 273 | dependencies = [ 274 | "rusqlite", 275 | "rustler", 276 | ] 277 | -------------------------------------------------------------------------------- /test/nif/cancellation_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Xqlite.NIF.CancellationTest do 2 | use ExUnit.Case, async: true 3 | 4 | alias XqliteNIF, as: NIF 5 | import Xqlite.TestUtil, only: [connection_openers: 0, find_opener_mfa!: 1] 6 | 7 | # Use a CPU-intensive, low-memory query for predictable "slowness". 8 | @cpu_intensive_limit 5_000_000 9 | @slow_query """ 10 | WITH RECURSIVE cnt(x) AS ( 11 | SELECT 1 12 | UNION ALL 13 | SELECT x + 1 FROM cnt 14 | LIMIT #{@cpu_intensive_limit} 15 | ) 16 | SELECT SUM(x) FROM cnt; 17 | """ 18 | 19 | # Setup a table and a trigger that runs the slow query on insert. 
20 | @trigger_table_setup """ 21 | CREATE TABLE cancel_trigger_test (id INTEGER PRIMARY KEY); 22 | CREATE TEMP TRIGGER slow_insert_trigger 23 | AFTER INSERT ON cancel_trigger_test 24 | BEGIN 25 | WITH RECURSIVE cnt(x) AS ( 26 | SELECT 1 UNION ALL SELECT x+1 FROM cnt LIMIT #{@cpu_intensive_limit} 27 | ) 28 | SELECT SUM(x) FROM cnt; 29 | END; 30 | """ 31 | 32 | @batch_cancel_table "cancel_batch_test" 33 | @batch_cancel_setup "CREATE TABLE #{@batch_cancel_table} (id INTEGER PRIMARY KEY, data TEXT); INSERT INTO #{@batch_cancel_table} (id, data) VALUES (0, 'initial');" 34 | @await_timeout 5_000 35 | 36 | # Test token creation separately, doesn't need the loop/connection setup. 37 | test "create_cancel_token/0 returns a resource" do 38 | assert {:ok, token} = NIF.create_cancel_token() 39 | assert is_reference(token) 40 | end 41 | 42 | test "cancel_operation/1 is idempotent" do 43 | {:ok, token} = NIF.create_cancel_token() 44 | assert :ok = NIF.cancel_operation(token) 45 | # Calling again is safe 46 | assert :ok = NIF.cancel_operation(token) 47 | end 48 | 49 | # --- Shared test code (generated via `for` loop) --- 50 | for {type_tag, prefix, _opener_mfa_ignored_here} <- connection_openers() do 51 | describe "using #{prefix}" do 52 | @describetag type_tag 53 | 54 | # Setup for each connection type 55 | setup context do 56 | {mod, fun, args} = find_opener_mfa!(context) 57 | assert {:ok, conn} = apply(mod, fun, args) 58 | on_exit(fn -> NIF.close(conn) end) 59 | {:ok, conn: conn} 60 | end 61 | 62 | # --- Cancellation Tests --- 63 | 64 | test "query_cancellable/4 successfully cancels a running query", %{conn: conn} do 65 | assert_cancellation(conn, fn conn, token -> 66 | NIF.query_cancellable(conn, @slow_query, [], token) 67 | end) 68 | end 69 | 70 | test "query_cancellable/4 completes normally if token is not cancelled", %{conn: conn} do 71 | {:ok, token} = NIF.create_cancel_token() 72 | 73 | # Run the query cancellably, but don't trigger the token 74 | assert {:ok, %{rows: 
[[_result]]}} = 75 | NIF.query_cancellable(conn, @slow_query, [], token) 76 | end 77 | 78 | test "normal query works after a cancelled query (handler unregistered)", %{conn: conn} do 79 | # --- Part 1: Run and cancel a query using the helper --- 80 | assert_cancellation(conn, fn conn, token -> 81 | NIF.query_cancellable(conn, @slow_query, [], token) 82 | end) 83 | 84 | # --- Part 2: Run a normal, non-cancellable query on the same connection --- 85 | assert {:ok, %{columns: ["1"], rows: [[1]], num_rows: 1}} = 86 | NIF.query(conn, "SELECT 1;", []) 87 | end 88 | 89 | test "normal query works after a completed cancellable query (handler unregistered)", 90 | %{conn: conn} do 91 | {:ok, token} = NIF.create_cancel_token() 92 | 93 | assert {:ok, %{rows: [[_result]]}} = 94 | NIF.query_cancellable(conn, @slow_query, [], token) 95 | 96 | assert {:ok, %{columns: ["1"], rows: [[1]], num_rows: 1}} = 97 | NIF.query(conn, "SELECT 1;", []) 98 | end 99 | 100 | test "execute_cancellable/4 successfully cancels a triggered slow operation", 101 | %{conn: conn} do 102 | assert :ok = NIF.execute_batch(conn, @trigger_table_setup) 103 | 104 | assert_cancellation(conn, fn conn, token -> 105 | NIF.execute_cancellable( 106 | conn, 107 | "INSERT INTO cancel_trigger_test (id) VALUES (1);", 108 | [], 109 | token 110 | ) 111 | end) 112 | end 113 | 114 | test "execute_cancellable/4 completes normally if token is not cancelled", %{conn: conn} do 115 | assert :ok = NIF.execute_batch(conn, @trigger_table_setup) 116 | {:ok, token} = NIF.create_cancel_token() 117 | 118 | assert {:ok, 1} = 119 | NIF.execute_cancellable( 120 | conn, 121 | "INSERT INTO cancel_trigger_test (id) VALUES (1);", 122 | [], 123 | token 124 | ) 125 | end 126 | 127 | test "normal execute works after a cancelled execute_cancellable (handler unregistered)", 128 | %{conn: conn} do 129 | assert :ok = NIF.execute_batch(conn, @trigger_table_setup) 130 | 131 | # --- Part 1: Run and cancel an execute --- 132 | assert_cancellation(conn, fn 
conn, token -> 133 | NIF.execute_cancellable( 134 | conn, 135 | "INSERT INTO cancel_trigger_test (id) VALUES (1);", 136 | [], 137 | token 138 | ) 139 | end) 140 | 141 | # --- Part 2: Run a normal, non-cancellable execute on the same connection --- 142 | assert {:ok, 0} = NIF.execute(conn, "CREATE TABLE normal_exec_test (id INT);", []) 143 | 144 | assert {:ok, 1} = 145 | NIF.execute(conn, "INSERT INTO normal_exec_test (id) VALUES (1);", []) 146 | end 147 | 148 | test "execute_batch_cancellable/3 successfully cancels a running batch", %{conn: conn} do 149 | assert :ok = NIF.execute_batch(conn, @batch_cancel_setup) 150 | 151 | long_batch = generate_long_batch(@batch_cancel_table) 152 | 153 | assert_cancellation(conn, fn conn, token -> 154 | NIF.execute_batch_cancellable(conn, long_batch, token) 155 | end) 156 | 157 | # Add an assertion to prove the batch was cancelled *during* execution. 158 | # The 'batch_started' update should have run, but the 'batch_finished' should not have. 159 | assert {:ok, %{rows: [["batch_started"]]}} = 160 | NIF.query(conn, "SELECT data FROM #{@batch_cancel_table} WHERE id = 0;", []) 161 | end 162 | 163 | test "execute_batch_cancellable/3 completes normally if token is not cancelled", 164 | %{conn: conn} do 165 | assert :ok = NIF.execute_batch(conn, @batch_cancel_setup) 166 | {:ok, token} = NIF.create_cancel_token() 167 | 168 | # Use a much smaller batch that completes quickly 169 | short_batch = "UPDATE #{@batch_cancel_table} SET data = 'batch_update' WHERE id=0;" 170 | assert :ok = NIF.execute_batch_cancellable(conn, short_batch, token) 171 | 172 | assert {:ok, %{rows: [["batch_update"]]}} = 173 | NIF.query(conn, "SELECT data FROM #{@batch_cancel_table} WHERE id = 0;", []) 174 | end 175 | 176 | test "normal batch works after a cancelled execute_batch_cancellable (handler unregistered)", 177 | %{conn: conn} do 178 | assert :ok = NIF.execute_batch(conn, @batch_cancel_setup) 179 | long_batch = generate_long_batch(@batch_cancel_table) 180 | 
181 | # --- Part 1: Run and cancel a batch --- 182 | assert_cancellation(conn, fn conn, token -> 183 | NIF.execute_batch_cancellable(conn, long_batch, token) 184 | end) 185 | 186 | # --- Part 2: Run a normal, non-cancellable batch on the same connection --- 187 | normal_batch = "UPDATE #{@batch_cancel_table} SET data = 'normal_batch' WHERE id = 0;" 188 | assert :ok = NIF.execute_batch(conn, normal_batch) 189 | 190 | assert {:ok, %{rows: [["normal_batch"]]}} = 191 | NIF.query(conn, "SELECT data FROM #{@batch_cancel_table} WHERE id = 0;", []) 192 | end 193 | end 194 | end 195 | 196 | defp generate_long_batch(table_name) do 197 | # This batch does a quick update, then runs our reliably slow query, 198 | # then attempts another update that should not be reached if cancelled. 199 | """ 200 | UPDATE #{table_name} SET data = 'batch_started' WHERE id = 0; 201 | #{@slow_query} 202 | UPDATE #{table_name} SET data = 'batch_finished' WHERE id = 0; 203 | """ 204 | end 205 | 206 | # Helper function to assert cancellation in a deterministic way. 
207 | defp assert_cancellation(conn, nif_fun) do 208 | {:ok, token} = NIF.create_cancel_token() 209 | parent = self() 210 | 211 | task = 212 | Task.async(fn -> 213 | send(parent, {:nif_started, self()}) 214 | # The provided function is called here with the conn and token 215 | nif_fun.(conn, token) 216 | end) 217 | 218 | # Wait for the task to signal it has started the NIF call 219 | receive do 220 | {:nif_started, _task_pid} -> 221 | :ok 222 | after 223 | # Use a reasonable timeout in case the task fails to start 224 | 1000 -> flunk("Test process did not receive :nif_started message from task") 225 | end 226 | 227 | # As soon as we get the signal, we cancel 228 | assert :ok = NIF.cancel_operation(token) 229 | 230 | result = Task.await(task, @await_timeout) 231 | assert {:error, :operation_cancelled} == result 232 | end 233 | end 234 | -------------------------------------------------------------------------------- /native/xqlitenif/src/stream.rs: -------------------------------------------------------------------------------- 1 | use crate::error::XqliteError; 2 | use crate::nif::XqliteConn; 3 | use crate::util::sqlite_row_to_elixir_terms; 4 | use rusqlite::ffi; 5 | use rusqlite::types::Value; 6 | use rustler::{Env, Resource, ResourceArc, Term}; 7 | use std::os::raw::c_int; 8 | use std::sync::atomic::{AtomicPtr, Ordering}; 9 | 10 | pub(crate) struct XqliteStream { 11 | // This AtomicPtr holds the raw SQLite statement. 12 | // If it's null_mut(), the stream is considered done/closed/finalized. 13 | pub(crate) atomic_raw_stmt: AtomicPtr, 14 | 15 | // These are immutable after stream_open completes 16 | pub(crate) conn_resource_arc: ResourceArc, 17 | pub(crate) column_names: Vec, 18 | pub(crate) column_count: usize, 19 | } 20 | 21 | #[rustler::resource_impl] 22 | impl Resource for XqliteStream {} 23 | 24 | impl XqliteStream { 25 | // Helper performs the atomic swap and finalization. 26 | // Called by Drop and by stream_close NIF. 
27 | // It is pub(crate) for use by nif.rs. 28 | pub(crate) fn take_and_finalize_atomic_stmt( 29 | &self, // Takes &self to access atomic_raw_stmt and conn_resource_arc 30 | ) -> Result<(), XqliteError> { 31 | // Atomically swap the current pointer with null_mut(), getting the old pointer. 32 | // Ordering::AcqRel ensures that this operation synchronizes with other atomic 33 | // operations on other threads: acquire for the read (load of old value) 34 | // and release for the write (store of null_mut). 35 | let old_ptr = self 36 | .atomic_raw_stmt 37 | .swap(std::ptr::null_mut(), Ordering::AcqRel); 38 | 39 | if !old_ptr.is_null() { 40 | // If the old pointer was not null, it means we are responsible for finalizing it. 41 | // This is an unsafe FFI call. 42 | let result_code = unsafe { ffi::sqlite3_finalize(old_ptr) }; 43 | if result_code != ffi::SQLITE_OK { 44 | // Attempt to get a more detailed error message from the connection. 45 | let ffi_err = ffi::Error::new(result_code); 46 | let mut message = 47 | format!("Failed to finalize SQLite statement (code: {result_code})"); 48 | 49 | // Try to lock the connection to get a specific SQLite error message. 50 | // This lock is on a different Mutex (the one inside XqliteConn). 51 | if let Ok(conn_guard) = self.conn_resource_arc.0.lock() { 52 | // These FFI calls are unsafe. 53 | let specific_sqlite_msg = unsafe { 54 | let err_msg_ptr = ffi::sqlite3_errmsg(conn_guard.handle()); 55 | if !err_msg_ptr.is_null() { 56 | std::ffi::CStr::from_ptr(err_msg_ptr) 57 | .to_string_lossy() 58 | .into_owned() 59 | } else { 60 | // No specific message from SQLite, keep our formatted one. 61 | String::new() 62 | } 63 | }; 64 | if !specific_sqlite_msg.is_empty() 65 | && specific_sqlite_msg.to_lowercase() != "not an error" 66 | { 67 | message = specific_sqlite_msg; 68 | } 69 | } else { 70 | // Failed to lock the connection; append to the generic message. 
71 | message.push_str(" (additionally, failed to lock connection for specific error message)"); 72 | } 73 | 74 | let rusqlite_err = rusqlite::Error::SqliteFailure(ffi_err, Some(message)); 75 | return Err(XqliteError::from(rusqlite_err)); 76 | } 77 | } 78 | // If old_ptr was null, it was already finalized by another call or was never set. 79 | Ok(()) 80 | } 81 | } 82 | 83 | impl Drop for XqliteStream { 84 | fn drop(&mut self) { 85 | // Call the helper method to take and finalize the statement. 86 | // `&mut self` allows access to `&self.atomic_raw_stmt` and `&self.conn_resource_arc`. 87 | if let Err(e) = self.take_and_finalize_atomic_stmt() { 88 | // Errors from Drop cannot be propagated. Log to stderr. 89 | // This indicates a problem during cleanup, potentially a resource leak 90 | // if SQLite itself failed to finalize properly. 91 | eprintln!( 92 | "[xqlite] Error finalizing SQLite statement during stream resource drop: {e:?}" 93 | ); 94 | } 95 | } 96 | } 97 | 98 | // Helper to process a single sqlite3_step. 99 | // Returns Ok(Some(row_data)) if a row is fetched. 100 | // Returns Ok(None) if SQLITE_DONE is reached. 101 | // Returns Err(XqliteError) if a step or conversion error occurs. 102 | // This function does NOT modify any shared XqliteStream state (like an is_done flag). 103 | // It is unsafe because it dereferences stmt_ptr and calls unsafe FFI functions. 
104 | pub(crate) unsafe fn process_single_step<'a>( 105 | env: Env<'a>, 106 | stmt_ptr: *mut ffi::sqlite3_stmt, // Assumed to be valid and non-null by caller 107 | column_count: usize, 108 | db_handle_for_error_reporting: *mut ffi::sqlite3, // For sqlite3_errmsg 109 | ) -> Result>>, XqliteError> { 110 | let step_result = ffi::sqlite3_step(stmt_ptr); 111 | 112 | match step_result { 113 | ffi::SQLITE_ROW => { 114 | // sqlite_row_to_elixir_terms is also unsafe 115 | match sqlite_row_to_elixir_terms(env, stmt_ptr, column_count) { 116 | Ok(row_terms) => Ok(Some(row_terms)), 117 | Err(e) => Err(e), 118 | } 119 | } 120 | ffi::SQLITE_DONE => { 121 | Ok(None) // Signal DONE to the caller 122 | } 123 | err_code => { 124 | // Any other SQLite error code from sqlite3_step 125 | // Get specific error message from the connection using the provided db_handle 126 | let specific_message = { 127 | let err_msg_ptr = ffi::sqlite3_errmsg(db_handle_for_error_reporting); 128 | if err_msg_ptr.is_null() { 129 | format!("SQLite error {err_code} during step; no specific message.") 130 | } else { 131 | // This is an unsafe FFI call 132 | std::ffi::CStr::from_ptr(err_msg_ptr) 133 | .to_string_lossy() 134 | .into_owned() 135 | } 136 | }; 137 | let rusqlite_err = rusqlite::Error::SqliteFailure( 138 | ffi::Error::new(err_code), 139 | Some(specific_message), 140 | ); 141 | Err(XqliteError::from(rusqlite_err)) 142 | } 143 | } 144 | } 145 | 146 | fn bind_value_to_raw_stmt( 147 | raw_stmt_ptr: *mut ffi::sqlite3_stmt, 148 | bind_idx: c_int, 149 | value: &Value, 150 | db_handle: *mut ffi::sqlite3, 151 | ) -> Result<(), XqliteError> { 152 | let rc = unsafe { 153 | match value { 154 | Value::Null => ffi::sqlite3_bind_null(raw_stmt_ptr, bind_idx), 155 | Value::Integer(val) => ffi::sqlite3_bind_int64(raw_stmt_ptr, bind_idx, *val), 156 | Value::Real(val) => ffi::sqlite3_bind_double(raw_stmt_ptr, bind_idx, *val), 157 | Value::Text(s_val) => { 158 | let c_text = std::ffi::CString::new(s_val.as_str()) 159 | 
.map_err(|_e| XqliteError::NulErrorInString)?; 160 | ffi::sqlite3_bind_text( 161 | raw_stmt_ptr, 162 | bind_idx, 163 | c_text.as_ptr(), 164 | c_text.as_bytes().len() as c_int, 165 | ffi::SQLITE_TRANSIENT(), 166 | ) 167 | } 168 | Value::Blob(b_val) => ffi::sqlite3_bind_blob( 169 | raw_stmt_ptr, 170 | bind_idx, 171 | b_val.as_ptr() as *const std::ffi::c_void, 172 | b_val.len() as c_int, 173 | ffi::SQLITE_TRANSIENT(), 174 | ), 175 | } 176 | }; 177 | 178 | if rc != ffi::SQLITE_OK { 179 | let ffi_err = ffi::Error::new(rc); 180 | // Get specific message using db_handle if possible 181 | let message = unsafe { 182 | let err_msg_ptr = ffi::sqlite3_errmsg(db_handle); 183 | if err_msg_ptr.is_null() { 184 | format!("Parameter binding failed at index {bind_idx} (code {rc})") 185 | } else { 186 | std::ffi::CStr::from_ptr(err_msg_ptr) 187 | .to_string_lossy() 188 | .into_owned() 189 | } 190 | }; 191 | let rusqlite_err = rusqlite::Error::SqliteFailure(ffi_err, Some(message)); 192 | return Err(XqliteError::from(rusqlite_err)); 193 | } 194 | Ok(()) 195 | } 196 | 197 | pub(crate) fn bind_positional_params_ffi( 198 | raw_stmt_ptr: *mut ffi::sqlite3_stmt, 199 | params: &[Value], 200 | db_handle: *mut ffi::sqlite3, 201 | ) -> Result<(), XqliteError> { 202 | for (i, value) in params.iter().enumerate() { 203 | // SQLite bind indices are 1-based 204 | bind_value_to_raw_stmt(raw_stmt_ptr, (i + 1) as c_int, value, db_handle)?; 205 | } 206 | Ok(()) 207 | } 208 | 209 | pub(crate) fn bind_named_params_ffi( 210 | raw_stmt_ptr: *mut ffi::sqlite3_stmt, 211 | params: &[(String, Value)], 212 | db_handle: *mut ffi::sqlite3, 213 | ) -> Result<(), XqliteError> { 214 | for (name, value) in params { 215 | let c_name = std::ffi::CString::new(name.as_str()) 216 | .map_err(|_| XqliteError::InvalidParameterName(name.clone()))?; 217 | 218 | // This is an unsafe FFI call 219 | let bind_idx = 220 | unsafe { ffi::sqlite3_bind_parameter_index(raw_stmt_ptr, c_name.as_ptr()) }; 221 | 222 | if bind_idx == 0 { 223 | 
return Err(XqliteError::InvalidParameterName(name.clone())); 224 | } 225 | bind_value_to_raw_stmt(raw_stmt_ptr, bind_idx, value, db_handle)?; 226 | } 227 | Ok(()) 228 | } 229 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Xqlite 2 | 3 | [![Hex version](https://img.shields.io/hexpm/v/xqlite.svg?style=flat)](https://hex.pm/packages/xqlite) 4 | [![Build Status](https://github.com/dimitarvp/xqlite/workflows/CI/badge.svg)](https://github.com/dimitarvp/xqlite/actions) 5 | [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) 6 | 7 | Low-level, safe, and fast NIF bindings to SQLite 3 for Elixir, powered by Rust and the excellent [rusqlite](https://crates.io/crates/rusqlite) crate. 8 | 9 | This library provides direct access to core SQLite functionality. For seamless Ecto 3.x integration (including connection pooling, migrations, and Ecto types), please see the planned [xqlite_ecto3](https://github.com/dimitarvp/xqlite_ecto3) library (work in progress). 10 | 11 | **Target Audience:** Developers needing direct, performant control over SQLite operations from Elixir, potentially as a foundation for higher-level libraries, or for those not interested in Ecto integration. 12 | 13 | ## Core design & thread safety 14 | 15 | SQLite connections (`rusqlite::Connection`) are not inherently thread-safe for concurrent access ([`!Sync`](https://github.com/rusqlite/rusqlite/issues/342#issuecomment-592624109)). To safely expose connections to the concurrent Elixir environment, `xqlite` wraps each `rusqlite::Connection` within an `Arc>` managed by a `ResourceArc`. 16 | 17 | - **Safety:** This ensures that only one Elixir process can access a specific SQLite connection handle at any given moment, preventing data races and ensuring compatibility with Rustler's `Resource` requirements (`Sync`). 
18 | - **Handles:** NIF functions return opaque, thread-safe resource handles representing individual SQLite connections. 19 | - **Pooling:** This NIF layer **does not** implement connection pooling. Managing a pool of connections (e.g., using `DBConnection`) is the responsibility of the calling Elixir code or higher-level libraries like the planned `xqlite_ecto3`. 20 | 21 | This library prioritizes compatibility with **modern SQLite versions** (>= 3.35.0 recommended). While it may work on older versions, explicit support or workarounds for outdated SQLite features are not a primary goal. **Notably, retrieving primary key values automatically after insertion into `WITHOUT ROWID` tables is only reliably supported via the `RETURNING` clause (available since SQLite 3.35.0). Using `WITHOUT ROWID` tables on older SQLite versions may require you to supply primary key values explicitly within your application, as `last_insert_rowid/1` cannot be used for these tables.** 22 | 23 | ## Current capabilities 24 | 25 | The library provides two primary modules: `Xqlite` for a higher-level Elixir API, and `XqliteNIF` for direct, low-level access. 26 | 27 | ### High-level API (`Xqlite` and `Xqlite.Pragma` modules) 28 | 29 | - **`Xqlite.stream/4`**: Creates an Elixir `Stream` to lazily fetch rows from a query. Rows are returned as maps with atom keys. 30 | - **PRAGMA Helpers**: `Xqlite.Pragma.get/4` and `Xqlite.Pragma.put/3` provide a structured interface for interacting with SQLite PRAGMAs. 31 | - **Convenience Helpers**: `Xqlite.enable_foreign_key_enforcement/1`, `Xqlite.enable_strict_mode/1`, etc. 32 | 33 | ### Low-level NIF API (`XqliteNIF` module) 34 | 35 | - **Connection management:** 36 | - `open(path :: String.t())`: Opens a file-based database. 37 | - `open_in_memory()` or `open_in_memory(uri :: String.t())`: Opens an in-memory database. 38 | - `open_temporary()`: Opens a private, temporary on-disk database. 39 | - `close(conn)`: Closes the connection. 
40 | 41 | - **Query execution:** 42 | - `query(conn, sql :: String.t(), params :: list() | keyword())`: Executes `SELECT` or other row-returning statements. 43 | - `query_cancellable(conn, sql :: String.t(), params :: list() | keyword(), cancel_token)`: Cancellable version. 44 | - Returns `{:ok, %{columns: [String.t()], rows: [[term()]], num_rows: non_neg_integer()}}` or `{:error, reason}`. 45 | 46 | - **Statement execution:** 47 | - `execute(conn, sql :: String.t(), params :: list())`: Executes non-row-returning statements (e.g., `INSERT`, `UPDATE`, `DDL`). 48 | - `execute_cancellable(conn, sql :: String.t(), params :: list(), cancel_token)`: Cancellable version. 49 | - `execute_batch(conn, sql_batch :: String.t())`: Executes multiple SQL statements. Returns `:ok` on success. 50 | - `execute_batch_cancellable(conn, sql_batch :: String.t(), cancel_token)`: Cancellable version. Returns `:ok` on success. 51 | - `execute` variants return `{:ok, affected_rows :: non_neg_integer()}`. 52 | - `execute_batch` variants return `:ok` on success or `{:error, reason}`. 53 | 54 | - **Streaming primitives:** 55 | - `stream_open(conn, sql, params, opts)`: Prepares a query and returns a stream handle. 56 | - `stream_get_columns(stream_handle)`: Retrieves column names from the prepared stream. 57 | - `stream_fetch(stream_handle, batch_size)`: Fetches a batch of rows from the stream. 58 | - `stream_close(stream_handle)`: Closes the stream and finalizes the statement. 59 | 60 | - **Operation cancellation:** 61 | - `create_cancel_token()`: Creates a token for signalling cancellation. Returns `{:ok, token_resource}`. 62 | - `cancel_operation(cancel_token)`: Signals an operation associated with the token to cancel. Returns `:ok` on success. 63 | 64 | - **PRAGMA handling:** 65 | - `get_pragma(conn, pragma_name :: String.t())`: Reads a PRAGMA value. 66 | - `set_pragma(conn, pragma_name :: String.t(), value :: term())`: Sets a PRAGMA value. Returns `:ok` on success. 
67 | 68 | - **Transaction control:** 69 | - `begin(conn)`, `commit(conn)`, `rollback(conn)` 70 | - `savepoint(conn, name)`, `release_savepoint(conn, name)`, `rollback_to_savepoint(conn, name)` 71 | - All return `:ok` on success or `{:error, reason}`. 72 | 73 | - **Inserted row ID:** 74 | - `last_insert_rowid(conn)`: Retrieves the `rowid` of the most recent `INSERT`. 75 | 76 | - **Schema introspection:** 77 | - `schema_databases(conn)` 78 | - `schema_list_objects(conn, schema \\ nil)` (Returns `Xqlite.Schema.SchemaObjectInfo` with `:is_without_rowid` flag) 79 | - `schema_columns(conn, table_name)` (Returns `Xqlite.Schema.ColumnInfo` with `:hidden_kind` flag) 80 | - `schema_foreign_keys(conn, table_name)` 81 | - `schema_indexes(conn, table_name)` 82 | - `schema_index_columns(conn, index_name)` 83 | - `get_create_sql(conn, object_name)` 84 | 85 | - **Error handling:** 86 | - Functions return `{:ok, result}`, `:ok` (for simple success), or `{:error, reason}`. 87 | - `reason` is a structured tuple (e.g., `{:sqlite_failure, code, extended_code, message}`, `{:operation_cancelled}`). 88 | 89 | ## Known limitations and caveats 90 | 91 | - **`last_insert_rowid/1`:** 92 | - Reflects the state of the specific connection handle. Avoid sharing handles for concurrent `INSERT`s outside a proper pooling mechanism. 93 | - Does not work for `WITHOUT ROWID` tables. Use `INSERT ... RETURNING`. 94 | - **Operation Cancellation Performance:** The current cancellation mechanism uses SQLite's progress handler with a frequent check interval. This ensures testability but introduces overhead to cancellable operations. This will be benchmarked and potentially optimized in the future. 95 | - **Generated Column `default_value` (Schema Introspection):** `Xqlite.Schema.ColumnInfo.default_value` will be `nil` for generated columns when using `XqliteNIF.schema_columns/2`. The generation expression is not directly available in the `dflt_value` column of `PRAGMA table_xinfo`. 
To get the full expression, parse the output of `XqliteNIF.get_create_sql/2`. 96 | - **Invalid UTF-8 in TEXT Columns with SQL Functions:** Applying certain SQL text functions (e.g., `UPPER()`, `LOWER()`) to `TEXT` columns containing byte sequences that are not valid UTF-8 may cause the underlying SQLite C library to panic, leading to a NIF crash. Ensure data stored in `TEXT` columns intended for such processing is valid UTF-8, or avoid these functions on potentially corrupt data. 97 | - **User-Defined Functions (UDFs):** Support for UDFs is of very low priority due to its significant implementation complexity and is not currently planned. 98 | 99 | ## Basic usage examples 100 | 101 | ```elixir 102 | # --- Opening a connection --- 103 | {:ok, conn} = XqliteNIF.open("my_database.db") 104 | 105 | # --- Using Xqlite helpers --- 106 | :ok = Xqlite.enable_foreign_key_enforcement(conn) 107 | :ok = Xqlite.enable_strict_mode(conn) 108 | 109 | # --- Executing a query (SELECT) --- 110 | sql_select = "SELECT id, name FROM users WHERE id = ?1;" 111 | params_select = [1] 112 | IO.inspect(XqliteNIF.query(conn, sql_select, params_select), label: "Query Result") 113 | 114 | # --- Executing a cancellable query --- 115 | # (Assume `slow_query_sql` is a long-running SQL. See test/nif/cancellation_test.exs for examples) 116 | {:ok, cancel_token} = XqliteNIF.create_cancel_token() 117 | long_query_task = Task.async(fn -> 118 | XqliteNIF.query_cancellable(conn, slow_query_sql, [], cancel_token) 119 | end) 120 | Process.sleep(100) 121 | :ok = XqliteNIF.cancel_operation(cancel_token) 122 | IO.inspect(Task.await(long_query_task, 5000), label: "Cancelled Query Result") 123 | 124 | # --- Querying Schema Information --- 125 | {:ok, columns} = XqliteNIF.schema_columns(conn, "users") 126 | IO.inspect(columns, label: "Columns for 'users' table") 127 | 128 | # --- Using a transaction --- 129 | case XqliteNIF.begin(conn) do 130 | :ok -> 131 | # ... perform operations ... 
132 | case XqliteNIF.execute(conn, "UPDATE accounts SET balance = 0 WHERE id = 1", []) do 133 | {:ok, _affected_rows} -> 134 | :ok = XqliteNIF.commit(conn) 135 | IO.puts("Transaction committed.") 136 | {:error, reason_update} -> 137 | IO.inspect(reason_update, label: "Update failed, rolling back") 138 | :ok = XqliteNIF.rollback(conn) 139 | end 140 | {:error, reason_begin} -> 141 | IO.inspect(reason_begin, label: "Failed to begin transaction") 142 | end 143 | ``` 144 | 145 | ## Roadmap 146 | 147 | The following features are planned for the **`xqlite`** library: 148 | 149 | 1. **Implement Extension Loading:** Add `load_extension/2` NIF. 150 | 2. **Implement Online Backup API:** Add NIFs for SQLite's Online Backup API. 151 | 3. **Implement Session Extension:** Add NIFs for SQLite's Session Extension. 152 | 4. **(Lower Priority)** Implement Incremental Blob I/O. 153 | 5. **(Optional)** Add SQLCipher Support (build feature). 154 | 6. **(Lowest Priority / Tentative)** User-Defined Functions (UDFs). 155 | 156 | The **`xqlite_ecto3`** library (separate project) will provide: 157 | 158 | - Full Ecto 3.x adapter implementation. 159 | - `DBConnection` integration. 160 | - Type handling, migrations, structure dump/load. 161 | 162 | ## Future considerations (post core roadmap) 163 | 164 | - Benchmark cancellation progress handler overhead. 165 | - Report `UPPER(invalid_utf8)` panic behavior observed with SQLite to relevant projects if appropriate. 166 | 167 | ## Installation 168 | 169 | This package is not yet published on hex.pm. To use it, add this to your list of dependencies in `mix.exs`: 170 | 171 | ```elixir 172 | def deps do 173 | [ 174 | {:xqlite, github: "dimitarvp/xqlite"} 175 | ] 176 | end 177 | ``` 178 | 179 | Ensure you have a compatible Rust toolchain installed. 180 | 181 | ## Contributing 182 | 183 | Contributions are welcome! Please feel free to open issues or submit pull requests. 
184 | 185 | ## License 186 | 187 | This project is licensed under the terms of the MIT license. See the [`LICENSE.md`](LICENSE.md) file for details. 188 | -------------------------------------------------------------------------------- /test/nif/query_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Xqlite.NIF.QueryTest do 2 | use ExUnit.Case, async: true 3 | 4 | import Xqlite.TestUtil, only: [connection_openers: 0, find_opener_mfa!: 1] 5 | 6 | alias XqliteNIF, as: NIF 7 | 8 | @query_test_table_sql """ 9 | CREATE TABLE query_test ( 10 | id INTEGER PRIMARY KEY, 11 | name TEXT, 12 | age INTEGER, 13 | score REAL, 14 | is_active INTEGER, -- boolean 0/1 15 | utf8_text TEXT, -- Standard text column 16 | arbitrary_blob BLOB -- Column specifically for binary data 17 | ); 18 | """ 19 | 20 | @query_test_insert_sql """ 21 | INSERT INTO query_test (id, name, age, score, is_active, utf8_text, arbitrary_blob) 22 | VALUES 23 | (1, 'Alice', 30, 95.5, 1, 'First row', x'FF00FF'), 24 | (2, 'Bob', NULL, 80.0, 0, 'Second row', x'C0AF8F'), 25 | (3, 'Charlie', 35, NULL, 1, NULL, x'ED9FBFED'), 26 | (4, 'Diana', 28, 77.7, NULL,'Fourth row', NULL), 27 | (5, 'Eve', 40, 88.8, 1, 'Fifth row', x'FE8080'), 28 | (6, 'Frank', 22, 91.2, 0, 'Sixth row', x'F0808080'), 29 | (7, 'Grace', 50, 70.1, 1, 'Seventh row',x'FF'); 30 | """ 31 | 32 | # --- Shared test code (generated via `for` loop for different DB types) --- 33 | for {type_tag, prefix, _opener_mfa_ignored_here} <- connection_openers() do 34 | describe "using #{prefix}" do 35 | @describetag type_tag 36 | 37 | # Setup uses a single helper to find the appropriate MFA based on context tag 38 | setup context do 39 | {mod, fun, args} = find_opener_mfa!(context) 40 | 41 | assert {:ok, conn} = apply(mod, fun, args), 42 | "Failed to open connection for tag :#{context[:describetag]}" 43 | 44 | # Create and populate table for query tests 45 | assert {:ok, 0} = NIF.execute(conn, @query_test_table_sql, 
[]) 46 | assert {:ok, 7} = NIF.execute(conn, @query_test_insert_sql, []) 47 | on_exit(fn -> NIF.close(conn) end) 48 | {:ok, conn: conn} 49 | end 50 | 51 | # --- Shared test cases applicable to all DB types follow --- 52 | 53 | test "query/3 fetches all records correctly, including blobs and nulls", %{conn: conn} do 54 | expected_rows = [ 55 | [1, "Alice", 30, 95.5, 1, "First row", <<255, 0, 255>>], 56 | [2, "Bob", nil, 80.0, 0, "Second row", <<192, 175, 143>>], 57 | [3, "Charlie", 35, nil, 1, nil, <<237, 159, 191, 237>>], 58 | [4, "Diana", 28, 77.7, nil, "Fourth row", nil], 59 | [5, "Eve", 40, 88.8, 1, "Fifth row", <<254, 128, 128>>], 60 | [6, "Frank", 22, 91.2, 0, "Sixth row", <<240, 128, 128, 128>>], 61 | [7, "Grace", 50, 70.1, 1, "Seventh row", <<255>>] 62 | ] 63 | 64 | expected_columns = [ 65 | "id", 66 | "name", 67 | "age", 68 | "score", 69 | "is_active", 70 | "utf8_text", 71 | "arbitrary_blob" 72 | ] 73 | 74 | assert {:ok, %{columns: actual_columns, rows: actual_rows, num_rows: actual_num_rows}} = 75 | NIF.query(conn, "SELECT * FROM query_test ORDER BY id;", []) 76 | 77 | assert actual_columns == expected_columns 78 | assert actual_num_rows == 7 79 | assert actual_rows == expected_rows 80 | end 81 | 82 | test "query/3 fetches records with positional parameter filters", %{conn: conn} do 83 | sql = "SELECT id, name FROM query_test WHERE age > ?1 AND is_active = ?2 ORDER BY id;" 84 | params = [29, 1] 85 | expected_rows = [[1, "Alice"], [3, "Charlie"], [5, "Eve"], [7, "Grace"]] 86 | 87 | assert {:ok, %{columns: ["id", "name"], rows: expected_rows, num_rows: 4}} == 88 | NIF.query(conn, sql, params) 89 | end 90 | 91 | test "query/3 fetches records with named parameter filters", %{conn: conn} do 92 | sql = 93 | "SELECT id, name FROM query_test WHERE age > :min_age AND arbitrary_blob = :blob ORDER BY id;" 94 | 95 | blob_param = <<0xED, 0x9F, 0xBF, 0xED>> 96 | params = [min_age: 30, blob: blob_param] 97 | expected_rows = [[3, "Charlie"]] 98 | 99 | assert {:ok, 
%{columns: ["id", "name"], rows: expected_rows, num_rows: 1}} == 100 | NIF.query(conn, sql, params) 101 | end 102 | 103 | test "query/3 handles various parameter data types (named params)", %{conn: conn} do 104 | sql = """ 105 | SELECT id FROM query_test 106 | WHERE age = :age 107 | OR score = :score 108 | OR (is_active = :active AND is_active IS NOT NULL) 109 | OR name = :name 110 | OR arbitrary_blob = :data 111 | ORDER BY id; 112 | """ 113 | 114 | blob_param = <<0xDD>> 115 | params = [age: 30, score: 80.0, active: false, name: "Diana", data: blob_param] 116 | expected_rows = [[1], [2], [4], [6]] 117 | 118 | assert {:ok, %{columns: ["id"], rows: expected_rows, num_rows: 4}} == 119 | NIF.query(conn, sql, params) 120 | end 121 | 122 | test "query/3 handles nil parameter correctly (positional)", %{conn: conn} do 123 | sql = "SELECT id FROM query_test WHERE arbitrary_blob IS ?1;" 124 | params = [nil] 125 | expected_rows = [[4]] 126 | 127 | assert {:ok, %{columns: ["id"], rows: expected_rows, num_rows: 1}} == 128 | NIF.query(conn, sql, params) 129 | end 130 | 131 | test "query/3 returns correct structure for query with no results", %{conn: conn} do 132 | sql = "SELECT id FROM query_test WHERE name = ?1;" 133 | params = ["NonExistent"] 134 | assert {:ok, %{columns: ["id"], rows: [], num_rows: 0}} == NIF.query(conn, sql, params) 135 | end 136 | 137 | # Test INSERT ... RETURNING via query/3 138 | test "query/3 executes INSERT ... 
RETURNING and returns inserted ID", %{conn: conn} do 139 | sql = "INSERT INTO query_test (name, utf8_text) VALUES (?1, ?2) RETURNING id;" 140 | params = ["New Guy", "Inserted via RETURNING"] 141 | 142 | assert {:ok, %{columns: ["id"], rows: [[inserted_id]], num_rows: 1}} = 143 | NIF.query(conn, sql, params) 144 | 145 | assert inserted_id == 8 146 | 147 | # Verify insertion separately 148 | assert {:ok, %{rows: [[8, "New Guy", "Inserted via RETURNING"]], num_rows: 1}} = 149 | NIF.query( 150 | conn, 151 | "SELECT id, name, utf8_text FROM query_test WHERE id = 8;", 152 | [] 153 | ) 154 | end 155 | 156 | test "query/3 executes INSERT ... RETURNING multiple columns", %{conn: conn} do 157 | sql = 158 | "INSERT INTO query_test (name, age, score, arbitrary_blob) VALUES (?1, ?2, ?3, ?4) RETURNING id, name, is_active, arbitrary_blob;" 159 | 160 | blob_param = <<0xEE>> 161 | params = ["Multi Return", 50, 100.0, blob_param] 162 | 163 | assert {:ok, 164 | %{ 165 | columns: ["id", "name", "is_active", "arbitrary_blob"], 166 | rows: [[inserted_id, "Multi Return", nil, ^blob_param]], 167 | num_rows: 1 168 | }} = 169 | NIF.query(conn, sql, params) 170 | 171 | {:ok, %{rows: [[max_id]]}} = NIF.query(conn, "SELECT MAX(id) FROM query_test;", []) 172 | assert inserted_id == max_id 173 | end 174 | 175 | # --- Error Cases for query/3 --- 176 | 177 | test "query/3 returns error for invalid SQL syntax", %{conn: conn} do 178 | assert {:error, {:cannot_prepare_statement, "SELEC * FROM query_test;", _reason}} = 179 | NIF.query(conn, "SELEC * FROM query_test;", []) 180 | end 181 | 182 | test "query/3 returns error for incorrect parameter count (positional)", %{conn: conn} do 183 | sql = "SELECT id FROM query_test WHERE age = ?1 AND name = ?2;" 184 | 185 | assert {:error, {:invalid_parameter_count, %{expected: 2, provided: 1}}} = 186 | NIF.query(conn, sql, [30]) 187 | 188 | assert {:error, {:invalid_parameter_count, %{expected: 2, provided: 3}}} = 189 | NIF.query(conn, sql, [30, "Alice", 
"Extra"]) 190 | end 191 | 192 | test "query/3 returns success for missing named parameter (unexpected)", %{conn: conn} do 193 | sql = "SELECT id FROM query_test WHERE age = :age AND name = :name;" 194 | # NOTE: Unexpectedly succeeds, possibly rusqlite treats unbound named params as NULL. 195 | assert {:ok, %{columns: ["id"], rows: [], num_rows: 0}} == 196 | NIF.query(conn, sql, age: 30) 197 | end 198 | 199 | test "query/3 returns error for invalid parameter name (named)", %{conn: conn} do 200 | sql = "SELECT id FROM query_test WHERE age = :age AND name = :name;" 201 | 202 | assert {:error, {:invalid_parameter_name, ":nombre"}} = 203 | NIF.query(conn, sql, age: 30, nombre: "Alice") 204 | end 205 | 206 | test "query/3 parameter type interactions (named vs positional)", %{conn: conn} do 207 | # SQL with both positional and named placeholders 208 | sql_mixed = "SELECT id FROM query_test WHERE age = ?1 AND name = :name;" 209 | 210 | # --- Case 1: Using NAMED parameters with MIXED SQL --- 211 | # This correctly fails because rusqlite tries to bind :age and :name, 212 | # but the SQL also contains "?1", leading to parameter name/index mismatches. 213 | named_params = [age: 30, name: "Alice"] 214 | 215 | assert {:error, {:invalid_parameter_name, ":age"}} = 216 | NIF.query(conn, sql_mixed, named_params) 217 | 218 | # --- Case 2: Using POSITIONAL parameters with MIXED SQL --- 219 | # NOTE: Unexpected Behavior: This query SUCCEEDS instead of failing due to 220 | # the mixed/invalid placeholders (?1 and :name). It appears that when 221 | # *positional* parameters are provided, rusqlite/SQLite successfully binds 222 | # the parameter to the positional placeholder (?1) and effectively IGNORES 223 | # the unbound named placeholder (:name) condition in the WHERE clause. 224 | # This behavior was confirmed consistent across :memory: and temporary file DBs. 
225 | # age = 30 should match Alice (ID 1) 226 | positional_params = [30, "Alice"] 227 | expected_rows = [[1]] 228 | 229 | assert {:ok, %{columns: ["id"], rows: expected_rows, num_rows: 1}} == 230 | NIF.query(conn, sql_mixed, positional_params) 231 | 232 | # --- Case 3: Control - Using ONLY named placeholders with NAMED params --- 233 | # This should work correctly. 234 | sql_named_only = "SELECT id FROM query_test WHERE age = :age AND name = :name;" 235 | 236 | assert {:ok, %{columns: ["id"], rows: [[1]], num_rows: 1}} == 237 | NIF.query(conn, sql_named_only, age: 30, name: "Alice") 238 | 239 | # --- Case 4: Control - Using ONLY positional placeholders with POSITIONAL params --- 240 | # This should work correctly. 241 | sql_pos_only = "SELECT id FROM query_test WHERE age = ?1 AND name = ?2;" 242 | 243 | assert {:ok, %{columns: ["id"], rows: [[1]], num_rows: 1}} == 244 | NIF.query(conn, sql_pos_only, [30, "Alice"]) 245 | end 246 | 247 | test "query/3 returns error for invalid parameter type (unsupported)", %{conn: conn} do 248 | sql = "SELECT id FROM query_test WHERE age = ?1;" 249 | 250 | assert {:error, {:unsupported_data_type, :map}} = 251 | NIF.query(conn, sql, [%{invalid: :map}]) 252 | end 253 | 254 | test "query/3 returns error for NoSuchTable on SELECT", %{conn: conn} do 255 | # Try selecting from a table that doesn't exist 256 | sql = "SELECT * FROM non_existent_table;" 257 | # This fails during prepare, not execution 258 | assert {:error, {:cannot_prepare_statement, ^sql, reason}} = NIF.query(conn, sql, []) 259 | # Verify the reason message confirms the underlying issue 260 | assert String.contains?(reason || "", "no such table: non_existent_table") 261 | end 262 | end 263 | 264 | # end describe "using #{prefix}" 265 | end 266 | 267 | # end `for` loop 268 | 269 | # --- DB type-specific or other tests (outside the `for` loop) --- 270 | # None currently identified specifically for query logic 271 | end 272 | 
-------------------------------------------------------------------------------- /test/nif/stream_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Xqlite.NIF.StreamTest do 2 | use ExUnit.Case, async: true 3 | 4 | import Xqlite.TestUtil, only: [connection_openers: 0, find_opener_mfa!: 1] 5 | alias XqliteNIF, as: NIF 6 | 7 | for {type_tag, prefix, _opener_mfa_ignored_here} <- connection_openers() do 8 | describe "#{prefix} streaming" do 9 | @describetag type_tag 10 | 11 | setup context do 12 | {mod, fun, args} = find_opener_mfa!(context) 13 | assert {:ok, conn} = apply(mod, fun, args) 14 | 15 | assert {:ok, 0} = 16 | NIF.execute( 17 | conn, 18 | "CREATE TABLE stream_items (id INTEGER PRIMARY KEY, name TEXT, price REAL);", 19 | [] 20 | ) 21 | 22 | for i <- 1..12 do 23 | assert {:ok, 1} = 24 | NIF.execute( 25 | conn, 26 | "INSERT INTO stream_items (id, name, price) VALUES (?1, ?2, ?3);", 27 | [i, "Item #{i}", i + 0.50] 28 | ) 29 | end 30 | 31 | on_exit(fn -> 32 | NIF.close(conn) 33 | end) 34 | 35 | {:ok, conn: conn} 36 | end 37 | 38 | # --- stream_open/4 Tests --- 39 | test "stream_open/4 with valid SQL returns a handle and correct columns", %{conn: conn} do 40 | sql = "SELECT id, name, price FROM stream_items;" 41 | {:ok, stream_handle} = NIF.stream_open(conn, sql, [], []) 42 | assert is_reference(stream_handle) 43 | assert {:ok, ["id", "name", "price"]} == NIF.stream_get_columns(stream_handle) 44 | assert :ok == NIF.stream_close(stream_handle) 45 | end 46 | 47 | test "stream_open/4 with positional parameters returns a handle and correct columns", %{ 48 | conn: conn 49 | } do 50 | sql = "SELECT name, price FROM stream_items WHERE id = ?1;" 51 | params = [1] 52 | {:ok, stream_handle} = NIF.stream_open(conn, sql, params, []) 53 | assert is_reference(stream_handle) 54 | assert {:ok, ["name", "price"]} == NIF.stream_get_columns(stream_handle) 55 | assert :ok == NIF.stream_close(stream_handle) 56 | end 57 | 58 | test 
"stream_open/4 with named parameters returns a handle and correct columns", %{ 59 | conn: conn 60 | } do 61 | sql = "SELECT id FROM stream_items WHERE name = :item_name;" 62 | params = [item_name: "Item 2"] 63 | {:ok, stream_handle} = NIF.stream_open(conn, sql, params, []) 64 | assert is_reference(stream_handle) 65 | assert {:ok, ["id"]} == NIF.stream_get_columns(stream_handle) 66 | assert :ok == NIF.stream_close(stream_handle) 67 | end 68 | 69 | test "stream_open/4 with invalid SQL (syntax error) returns an error", %{conn: conn} do 70 | sql = "SELEKT id FROM stream_items;" 71 | assert {:error, error_details} = NIF.stream_open(conn, sql, [], []) 72 | 73 | assert match?({:sqlite_failure, _p_code, _e_code, _msg_str}, error_details) 74 | {:sqlite_failure, _, _, msg_str} = error_details 75 | assert is_binary(msg_str) 76 | assert String.contains?(msg_str, "syntax error") 77 | assert String.contains?(msg_str, "SELEKT") 78 | end 79 | 80 | test "stream_open/4 with SQL for non-existent table returns an error", %{conn: conn} do 81 | sql = "SELECT id FROM non_existent_table_for_stream;" 82 | assert {:error, error_details} = NIF.stream_open(conn, sql, [], []) 83 | 84 | assert match?({:no_such_table, _msg}, error_details) 85 | {:no_such_table, msg_str} = error_details 86 | assert is_binary(msg_str) 87 | assert String.contains?(msg_str, "no such table") 88 | end 89 | 90 | test "stream_open/4 with empty SQL string returns handle, empty columns, and is done", %{ 91 | conn: conn 92 | } do 93 | sql = "" 94 | {:ok, stream_handle} = NIF.stream_open(conn, sql, [], []) 95 | assert is_reference(stream_handle) 96 | assert {:ok, []} == NIF.stream_get_columns(stream_handle) 97 | assert :done == NIF.stream_fetch(stream_handle, 1) 98 | assert :ok == NIF.stream_close(stream_handle) 99 | end 100 | 101 | test "stream_open/4 with comments-only SQL returns handle, empty columns, and is done", 102 | %{conn: conn} do 103 | sql = "-- This is just a comment;" 104 | {:ok, stream_handle} = 
NIF.stream_open(conn, sql, [], []) 105 | assert is_reference(stream_handle) 106 | assert {:ok, []} == NIF.stream_get_columns(stream_handle) 107 | assert :done == NIF.stream_fetch(stream_handle, 1) 108 | assert :ok == NIF.stream_close(stream_handle) 109 | end 110 | 111 | test "stream_open/4 with invalid parameter name (named params) returns an error", %{ 112 | conn: conn 113 | } do 114 | sql = "SELECT id FROM stream_items WHERE name = :name;" 115 | params = [name: "Item 1", unexpected_param_name: "foo"] 116 | 117 | assert {:error, {:invalid_parameter_name, ":unexpected_param_name"}} == 118 | NIF.stream_open(conn, sql, params, []) 119 | end 120 | 121 | test "stream_open/4 with too few positional params returns handle and correct columns", 122 | %{conn: conn} do 123 | sql = "SELECT id, name FROM stream_items WHERE id = ?1 AND name = ?2;" 124 | params = [1] 125 | {:ok, stream_handle} = NIF.stream_open(conn, sql, params, []) 126 | assert is_reference(stream_handle) 127 | assert {:ok, ["id", "name"]} == NIF.stream_get_columns(stream_handle) 128 | assert :ok == NIF.stream_close(stream_handle) 129 | end 130 | 131 | test "stream_open/4 with too many positional params returns an error", %{conn: conn} do 132 | sql = "SELECT id FROM stream_items WHERE id = ?1;" 133 | params = [1, "extra_param"] 134 | 135 | assert {:error, {:sqlite_failure, _, 25, _msg}} = 136 | NIF.stream_open(conn, sql, params, []) 137 | end 138 | 139 | # --- stream_fetch/2 Tests --- 140 | test "stream_fetch/2 retrieves all rows in a single large batch", %{conn: conn} do 141 | sql = "SELECT id, name, price FROM stream_items ORDER BY id;" 142 | {:ok, stream_handle} = NIF.stream_open(conn, sql, [], []) 143 | 144 | expected_rows = for i <- 1..12, do: [i, "Item #{i}", i + 0.50] 145 | assert {:ok, %{rows: actual_rows}} = NIF.stream_fetch(stream_handle, 20) 146 | assert actual_rows == expected_rows 147 | 148 | assert :done == NIF.stream_fetch(stream_handle, 1) 149 | assert :ok == NIF.stream_close(stream_handle) 150 
| end 151 | 152 | test "stream_fetch/2 retrieves all rows in multiple smaller batches", %{conn: conn} do 153 | sql = "SELECT id FROM stream_items ORDER BY id;" 154 | {:ok, stream_handle} = NIF.stream_open(conn, sql, [], []) 155 | 156 | assert {:ok, %{rows: [[1], [2], [3], [4], [5]]}} == NIF.stream_fetch(stream_handle, 5) 157 | assert {:ok, %{rows: [[6], [7], [8], [9], [10]]}} == NIF.stream_fetch(stream_handle, 5) 158 | assert {:ok, %{rows: [[11], [12]]}} == NIF.stream_fetch(stream_handle, 5) 159 | assert :done == NIF.stream_fetch(stream_handle, 1) 160 | assert :ok == NIF.stream_close(stream_handle) 161 | end 162 | 163 | test "stream_fetch/2 with invalid batch_size (0) returns an error", %{conn: conn} do 164 | sql = "SELECT id FROM stream_items LIMIT 2;" 165 | {:ok, stream_handle} = NIF.stream_open(conn, sql, [], []) 166 | 167 | assert {:error, {:invalid_batch_size, %{provided: {:integer, 0}, minimum: 1}}} == 168 | NIF.stream_fetch(stream_handle, 0) 169 | 170 | # Stream should still be usable with valid batch size 171 | assert {:ok, %{rows: [[1], [2]]}} == NIF.stream_fetch(stream_handle, 2) 172 | assert :done == NIF.stream_fetch(stream_handle, 1) 173 | assert :ok == NIF.stream_close(stream_handle) 174 | end 175 | 176 | test "stream_fetch/2 with invalid batch_size (negative integer) returns an error", %{ 177 | conn: conn 178 | } do 179 | sql = "SELECT id FROM stream_items LIMIT 1;" 180 | {:ok, stream_handle} = NIF.stream_open(conn, sql, [], []) 181 | 182 | assert {:error, {:invalid_batch_size, %{provided: {:integer, -5}, minimum: 1}}} == 183 | NIF.stream_fetch(stream_handle, -5) 184 | 185 | assert :ok == NIF.stream_close(stream_handle) 186 | end 187 | 188 | test "stream_fetch/2 with invalid batch_size (non-integer atom) returns an error", %{ 189 | conn: conn 190 | } do 191 | sql = "SELECT id FROM stream_items LIMIT 1;" 192 | {:ok, stream_handle} = NIF.stream_open(conn, sql, [], []) 193 | 194 | assert {:error, 195 | {:invalid_batch_size, %{provided: {:atom, 
:not_an_integer}, minimum: 1}}} == 196 | NIF.stream_fetch(stream_handle, :not_an_integer) 197 | 198 | assert :ok == NIF.stream_close(stream_handle) 199 | end 200 | 201 | test "stream_fetch/2 with invalid batch_size (non-integer string) returns an error", %{ 202 | conn: conn 203 | } do 204 | sql = "SELECT id FROM stream_items LIMIT 1;" 205 | {:ok, stream_handle} = NIF.stream_open(conn, sql, [], []) 206 | 207 | assert {:error, {:invalid_batch_size, %{provided: {:string, "invalid"}, minimum: 1}}} == 208 | NIF.stream_fetch(stream_handle, "invalid") 209 | 210 | assert :ok == NIF.stream_close(stream_handle) 211 | end 212 | 213 | test "stream_fetch/2 on an empty result set immediately returns :done", %{conn: conn} do 214 | sql = "SELECT id FROM stream_items WHERE id = 999;" 215 | {:ok, stream_handle} = NIF.stream_open(conn, sql, [], []) 216 | assert :done == NIF.stream_fetch(stream_handle, 5) 217 | assert :ok == NIF.stream_close(stream_handle) 218 | end 219 | 220 | test "stream_fetch/2 after :done signal consistently returns :done", %{conn: conn} do 221 | sql = "SELECT id FROM stream_items LIMIT 1;" 222 | {:ok, stream_handle} = NIF.stream_open(conn, sql, [], []) 223 | assert {:ok, %{rows: [[1]]}} == NIF.stream_fetch(stream_handle, 1) 224 | assert :done == NIF.stream_fetch(stream_handle, 1) 225 | assert :done == NIF.stream_fetch(stream_handle, 1) 226 | assert :ok == NIF.stream_close(stream_handle) 227 | end 228 | 229 | test "stream_fetch/2 after stream_close returns :done", %{conn: conn} do 230 | sql = "SELECT id FROM stream_items;" 231 | {:ok, stream_handle} = NIF.stream_open(conn, sql, [], []) 232 | assert :ok == NIF.stream_close(stream_handle) 233 | assert :done == NIF.stream_fetch(stream_handle, 1) 234 | end 235 | 236 | test "stream_fetch/2 with too few positional params results in :done (due to NULL comparison)", 237 | %{conn: conn} do 238 | sql = "SELECT id, name FROM stream_items WHERE id = ?1 AND name = ?2;" 239 | params = [1] 240 | {:ok, stream_handle} = 
NIF.stream_open(conn, sql, params, []) 241 | assert :done == NIF.stream_fetch(stream_handle, 1) 242 | assert :ok == NIF.stream_close(stream_handle) 243 | end 244 | 245 | # --- stream_close/1 Tests --- 246 | test "stream_close/1 successfully closes an open stream (re-verify)", %{conn: conn} do 247 | sql = "SELECT id FROM stream_items;" 248 | {:ok, stream_handle} = NIF.stream_open(conn, sql, [], []) 249 | assert :ok == NIF.stream_close(stream_handle) 250 | end 251 | 252 | test "stream_close/1 is idempotent (re-verify)", %{conn: conn} do 253 | sql = "SELECT id FROM stream_items;" 254 | {:ok, stream_handle} = NIF.stream_open(conn, sql, [], []) 255 | :ok = NIF.stream_close(stream_handle) 256 | assert :ok == NIF.stream_close(stream_handle) 257 | end 258 | 259 | test "stream_get_columns/1 still returns columns after stream_close/1 (re-verify)", %{ 260 | conn: conn 261 | } do 262 | sql = "SELECT name FROM stream_items;" 263 | expected_columns = ["name"] 264 | {:ok, stream_handle} = NIF.stream_open(conn, sql, [], []) 265 | # Pin here is fine if expected_columns is defined once 266 | assert {:ok, expected_columns} == NIF.stream_get_columns(stream_handle) 267 | 268 | assert :ok == NIF.stream_close(stream_handle) 269 | # Pin here is fine 270 | assert {:ok, expected_columns} == NIF.stream_get_columns(stream_handle) 271 | end 272 | 273 | test "stream_close/1 on an invalid handle type returns an error (re-verify)", %{ 274 | conn: conn 275 | } do 276 | assert {:error, {:invalid_stream_handle, _reason}} = NIF.stream_close(conn) 277 | end 278 | 279 | test "stream_close/1 on a dummy reference returns an error (re-verify)", %{conn: _conn} do 280 | dummy_ref = make_ref() 281 | assert {:error, {:invalid_stream_handle, _reason}} = NIF.stream_close(dummy_ref) 282 | end 283 | end 284 | end 285 | 286 | # --- Isolated Test Case (Updated for new batch_size contract) --- 287 | test "isolated: stream_fetch behavior with exhaustion and invalid batch_size" do 288 | assert {:ok, conn} = 
NIF.open_in_memory() 289 | assert {:ok, 0} = NIF.execute(conn, "CREATE TABLE iso_items (id INTEGER PRIMARY KEY);", []) 290 | assert {:ok, 1} = NIF.execute(conn, "INSERT INTO iso_items (id) VALUES (1);", []) 291 | assert {:ok, 1} = NIF.execute(conn, "INSERT INTO iso_items (id) VALUES (2);", []) 292 | 293 | sql = "SELECT id FROM iso_items ORDER BY id LIMIT 2;" 294 | {:ok, stream_handle} = NIF.stream_open(conn, sql, [], []) 295 | 296 | assert {:error, {:invalid_batch_size, %{provided: {:integer, 0}, minimum: 1}}} == 297 | NIF.stream_fetch(stream_handle, 0), 298 | "Fetch (batch 0 should error)" 299 | 300 | assert {:error, {:invalid_batch_size, %{provided: {:integer, -1}, minimum: 1}}} == 301 | NIF.stream_fetch(stream_handle, -1), 302 | "Fetch (batch -1 should error)" 303 | 304 | assert {:ok, %{rows: [[1], [2]]}} == NIF.stream_fetch(stream_handle, 2), 305 | "Fetch (batch 2 - consume all)" 306 | 307 | assert :done == NIF.stream_fetch(stream_handle, 1), "Fetch (batch 1 - after done)" 308 | 309 | assert :ok == NIF.stream_close(stream_handle) 310 | assert :ok == NIF.close(conn) 311 | end 312 | end 313 | -------------------------------------------------------------------------------- /test/nif/execution_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Xqlite.NIF.ExecutionTest do 2 | use ExUnit.Case, async: true 3 | 4 | import Xqlite.TestUtil, only: [connection_openers: 0, find_opener_mfa!: 1] 5 | 6 | alias XqliteNIF, as: NIF 7 | 8 | # Standard column definitions for reusable test table setup 9 | @exec_test_columns_sql """ 10 | ( 11 | id INTEGER PRIMARY KEY, 12 | name TEXT, 13 | val_int INTEGER, 14 | val_real REAL, 15 | val_blob BLOB, 16 | val_bool INTEGER -- Storing bools as 0/1 17 | ) 18 | """ 19 | 20 | # Creates a table with the standard test columns but allows specifying the name. 
21 | defp setup_named_table(conn, table_name \\ "exec_test") do 22 | create_sql = "CREATE TABLE #{table_name} #{@exec_test_columns_sql};" 23 | # Pattern match ensures execute returns success, otherwise test fails here. 24 | {:ok, 0} = NIF.execute(conn, create_sql, []) 25 | conn 26 | end 27 | 28 | # --- Shared test code (generated via `for` loop for different DB types) --- 29 | for {type_tag, prefix, _opener_mfa_ignored_here} <- connection_openers() do 30 | describe "using #{prefix}" do 31 | @describetag type_tag 32 | 33 | # Setup uses a single helper to find the appropriate MFA based on context tag 34 | setup context do 35 | {mod, fun, args} = find_opener_mfa!(context) 36 | 37 | # Open connection 38 | assert {:ok, conn} = apply(mod, fun, args), 39 | "Failed to open connection for tag :#{context[:describetag]}" 40 | 41 | on_exit(fn -> NIF.close(conn) end) 42 | {:ok, conn: conn} 43 | end 44 | 45 | # --- Shared test cases applicable to all DB types follow --- 46 | # These tests inherit the simple atom tag (e.g. :memory_private or :file_temp etc.) 
47 | 48 | test "execute/3 creates a table successfully", %{conn: conn} do 49 | sql = "CREATE TABLE simple_create (id INTEGER PRIMARY KEY);" 50 | # DDL usually returns 0 affected rows 51 | assert {:ok, 0} = NIF.execute(conn, sql, []) 52 | 53 | # Verify table exists by querying PRAGMA table_list 54 | assert {:ok, %{rows: [[_, "simple_create", _, _, _, _]], num_rows: 1}} = 55 | NIF.query(conn, "PRAGMA table_list;", []) 56 | |> then(fn {:ok, res} -> 57 | filtered_rows = 58 | Enum.filter(res.rows, fn [_schema, name, _, _, _, _] -> 59 | name == "simple_create" 60 | end) 61 | 62 | {:ok, %{res | rows: filtered_rows, num_rows: Enum.count(filtered_rows)}} 63 | end) 64 | end 65 | 66 | test "execute/3 inserts data with various parameter types", %{conn: conn} do 67 | setup_named_table(conn) 68 | 69 | sql = """ 70 | INSERT INTO exec_test (id, name, val_int, val_real, val_blob, val_bool) 71 | VALUES (?1, ?2, ?3, ?4, ?5, ?6); 72 | """ 73 | 74 | blob_data = <<1, 2, 3, 4, 5>> 75 | params = [1, "Test Name", 123, 99.9, blob_data, true] 76 | 77 | # INSERT affects 1 row 78 | assert {:ok, 1} = NIF.execute(conn, sql, params) 79 | 80 | # Verify insertion using query 81 | assert {:ok, %{rows: [[1, "Test Name", 123, 99.9, ^blob_data, 1]], num_rows: 1}} = 82 | NIF.query(conn, "SELECT * FROM exec_test WHERE id = 1;", []) 83 | end 84 | 85 | test "execute/3 inserts data with nil values", %{conn: conn} do 86 | setup_named_table(conn) 87 | 88 | sql = """ 89 | INSERT INTO exec_test (id, name, val_int, val_real, val_blob, val_bool) 90 | VALUES (?1, ?2, ?3, ?4, ?5, ?6); 91 | """ 92 | 93 | # Use Elixir nil 94 | params = [2, nil, nil, nil, nil, nil] 95 | 96 | assert {:ok, 1} = NIF.execute(conn, sql, params) 97 | # Verify nil values were inserted correctly 98 | assert {:ok, %{rows: [[2, nil, nil, nil, nil, nil]], num_rows: 1}} = 99 | NIF.query(conn, "SELECT * FROM exec_test WHERE id = 2;", []) 100 | end 101 | 102 | test "execute/3 handles boolean false parameter", %{conn: conn} do 103 | 
setup_named_table(conn) 104 | 105 | sql = """ 106 | INSERT INTO exec_test (id, name, val_bool) VALUES (?1, ?2, ?3); 107 | """ 108 | 109 | # Use Elixir false 110 | params = [3, "Bool False Test", false] 111 | 112 | assert {:ok, 1} = NIF.execute(conn, sql, params) 113 | # Verify boolean false was stored as integer 0 114 | assert {:ok, %{rows: [[3, "Bool False Test", 0]], num_rows: 1}} = 115 | NIF.query(conn, "SELECT id, name, val_bool FROM exec_test WHERE id = 3;", []) 116 | end 117 | 118 | test "execute/3 updates data", %{conn: conn} do 119 | setup_named_table(conn) 120 | # Insert initial row 121 | {:ok, 1} = 122 | NIF.execute( 123 | conn, 124 | "INSERT INTO exec_test (id, name, val_int) VALUES (1, 'Initial', 10);", 125 | [] 126 | ) 127 | 128 | update_sql = "UPDATE exec_test SET name = ?1, val_int = ?2 WHERE id = ?3;" 129 | update_params = ["Updated Name", 20, 1] 130 | 131 | assert {:ok, 1} = NIF.execute(conn, update_sql, update_params) 132 | 133 | # Verify update by querying the row 134 | assert {:ok, %{rows: [[1, "Updated Name", 20]], num_rows: 1}} = 135 | NIF.query(conn, "SELECT id, name, val_int FROM exec_test WHERE id = 1;", []) 136 | end 137 | 138 | test "execute/3 deletes data", %{conn: conn} do 139 | setup_named_table(conn) 140 | 141 | # Insert initial row 142 | {:ok, 1} = 143 | NIF.execute( 144 | conn, 145 | "INSERT INTO exec_test (id, name, val_int) VALUES (1, 'To Delete', 30);", 146 | [] 147 | ) 148 | 149 | # Verify it exists before delete 150 | assert {:ok, %{num_rows: 1}} = 151 | NIF.query(conn, "SELECT id FROM exec_test WHERE id = 1;", []) 152 | 153 | delete_sql = "DELETE FROM exec_test WHERE id = ?1;" 154 | delete_params = [1] 155 | 156 | # DELETE affects 1 row 157 | assert {:ok, 1} = NIF.execute(conn, delete_sql, delete_params) 158 | 159 | # Verify deletion by querying again 160 | assert {:ok, %{rows: [], num_rows: 0}} = 161 | NIF.query(conn, "SELECT id FROM exec_test WHERE id = 1;", []) 162 | end 163 | 164 | test "execute_batch/2 creates table and 
inserts data", %{conn: conn} do 165 | create_and_insert_sql = """ 166 | CREATE TABLE batch_exec_test ( id INTEGER PRIMARY KEY, label TEXT ); 167 | INSERT INTO batch_exec_test (id, label) VALUES (1, 'Batch Label 1'); 168 | INSERT INTO batch_exec_test (id, label) VALUES (2, 'Batch Label 2'); 169 | """ 170 | 171 | assert :ok = NIF.execute_batch(conn, create_and_insert_sql) 172 | 173 | assert {:ok, %{rows: [[1, "Batch Label 1"], [2, "Batch Label 2"]], num_rows: 2}} = 174 | NIF.query(conn, "SELECT * FROM batch_exec_test ORDER BY id;", []) 175 | end 176 | 177 | test "execute_batch/2 handles empty string", %{conn: conn} do 178 | assert :ok = NIF.execute_batch(conn, "") 179 | end 180 | 181 | test "execute_batch/2 handles string with only whitespace/comments", %{conn: conn} do 182 | assert :ok = NIF.execute_batch(conn, " -- comment \n ; \t ") 183 | end 184 | 185 | test "execute_batch/2 returns error on invalid SQL in batch", %{conn: conn} do 186 | bad_sql = """ 187 | CREATE TABLE ok_table (id INT); 188 | INSERT INTO ok_table VALUES (1); 189 | SELECT * FROM non_existent_table; -- This SELECT fails at runtime 190 | INSERT INTO ok_table VALUES (2); -- This won't run 191 | """ 192 | 193 | # Expect :no_such_table error from the SELECT statement 194 | assert {:error, {:no_such_table, msg}} = NIF.execute_batch(conn, bad_sql) 195 | assert String.contains?(msg || "", "no such table: non_existent_table") 196 | 197 | # Verify statements before the error might have executed 198 | assert {:ok, %{rows: [[1]], num_rows: 1}} = 199 | NIF.query(conn, "SELECT * FROM ok_table;", []) 200 | end 201 | 202 | test "execute/3 returns error for invalid SQL syntax", %{conn: conn} do 203 | assert {:error, {:sql_input_error, %{message: msg}}} = 204 | NIF.execute(conn, "CREATE TABLET bad (id INT);", []) 205 | 206 | assert String.contains?(msg, "syntax error") 207 | assert String.contains?(msg, "TABLET") 208 | end 209 | 210 | test "execute/3 returns error for NoSuchTable on INSERT", %{conn: conn} do 211 | 
# Try inserting into a table that doesn't exist 212 | sql = "INSERT INTO non_existent_table (col) VALUES (1);" 213 | assert {:error, {:no_such_table, msg}} = NIF.execute(conn, sql, []) 214 | assert String.contains?(msg || "", "no such table: non_existent_table") 215 | end 216 | 217 | test "execute/3 returns error for TableExists", %{conn: conn} do 218 | # Use distinct name 219 | table_name = "already_exists_test_exec" 220 | # Create the table first 221 | setup_named_table(conn, table_name) 222 | # Try creating it again 223 | create_sql = "CREATE TABLE #{table_name} (id INT);" 224 | assert {:error, {:table_exists, msg}} = NIF.execute(conn, create_sql, []) 225 | assert String.contains?(msg || "", "table #{table_name} already exists") 226 | end 227 | 228 | test "execute/3 returns error for IndexExists", %{conn: conn} do 229 | # Use distinct name 230 | table_name = "index_exists_test_exec" 231 | # Use distinct name 232 | index_name = "idx_exists_test_exec" 233 | setup_named_table(conn, table_name) 234 | # Create the index first 235 | create_index_sql = "CREATE INDEX #{index_name} ON #{table_name}(name);" 236 | assert {:ok, 0} = NIF.execute(conn, create_index_sql, []) 237 | # Try creating it again 238 | assert {:error, {:index_exists, msg}} = NIF.execute(conn, create_index_sql, []) 239 | assert String.contains?(msg || "", "index #{index_name} already exists") 240 | end 241 | 242 | test "execute/3 returns error for constraint violation (UNIQUE)", %{conn: conn} do 243 | # Use setup helper with a specific table name for isolation 244 | # Use distinct name from other tests 245 | table_name = "unique_test_exec" 246 | setup_named_table(conn, table_name) 247 | # Add unique index 248 | {:ok, 0} = 249 | NIF.execute( 250 | conn, 251 | "CREATE UNIQUE INDEX idx_unique_name_exec ON #{table_name}(name);", 252 | [] 253 | ) 254 | 255 | # Insert first row successfully 256 | assert {:ok, 1} = 257 | NIF.execute( 258 | conn, 259 | "INSERT INTO #{table_name} (id, name) VALUES (1, 
'UniqueName');", 260 | [] 261 | ) 262 | 263 | # Attempt to insert duplicate name, expect constraint violation 264 | assert {:error, {:constraint_violation, :constraint_unique, _msg}} = 265 | NIF.execute( 266 | conn, 267 | "INSERT INTO #{table_name} (id, name) VALUES (2, 'UniqueName');", 268 | [] 269 | ) 270 | end 271 | 272 | test "execute/3 returns error for constraint violation (NOT NULL)", %{conn: conn} do 273 | table_name = "notnull_test_exec" 274 | 275 | create_notnull_sql = 276 | "CREATE TABLE #{table_name} (id INTEGER PRIMARY KEY, name TEXT NOT NULL);" 277 | 278 | assert {:ok, 0} = NIF.execute(conn, create_notnull_sql, []) 279 | # Attempt to insert NULL into the NOT NULL column 280 | assert {:error, {:constraint_violation, :constraint_not_null, _msg}} = 281 | NIF.execute( 282 | conn, 283 | "INSERT INTO #{table_name} (id, name) VALUES (1, NULL);", 284 | [] 285 | ) 286 | end 287 | 288 | test "execute/3 returns error for constraint violation (CHECK)", %{conn: conn} do 289 | # Use distinct name 290 | table_name = "check_test_exec" 291 | create_sql = "CREATE TABLE #{table_name} (id INT, val INT CHECK(val > 10));" 292 | assert {:ok, 0} = NIF.execute(conn, create_sql, []) 293 | assert {:ok, 1} = NIF.execute(conn, "INSERT INTO #{table_name} VALUES (1, 15);", []) 294 | # Attempt to insert an invalid row violating the CHECK 295 | assert {:error, {:constraint_violation, :constraint_check, _msg}} = 296 | NIF.execute(conn, "INSERT INTO #{table_name} VALUES (2, 5);", []) 297 | end 298 | 299 | test "execute/3 returns error for incorrect parameter count", %{conn: conn} do 300 | # Use setup helper with default table name 301 | setup_named_table(conn) 302 | sql = "INSERT INTO exec_test (id, name) VALUES (?1, ?2);" 303 | # Provide 1 param where 2 are expected 304 | assert {:error, {:invalid_parameter_count, %{expected: 2, provided: 1}}} = 305 | NIF.execute(conn, sql, [1]) 306 | 307 | # Provide 3 params where 2 are expected 308 | assert {:error, {:invalid_parameter_count, 
%{expected: 2, provided: 3}}} = 309 | NIF.execute(conn, sql, [1, "Name", 999]) 310 | end 311 | 312 | test "execute/3 returns error for invalid parameter type (unsupported)", %{conn: conn} do 313 | # Use setup helper with default table name 314 | setup_named_table(conn) 315 | sql = "INSERT INTO exec_test (id, name) VALUES (?1, ?2);" 316 | # Pass a map, which is not a supported type for direct binding 317 | assert {:error, {:unsupported_data_type, :map}} = 318 | NIF.execute(conn, sql, [1, %{invalid: :map}]) 319 | end 320 | 321 | test "execute/3 successfully stores and query/3 retrieves strings with NUL bytes", %{ 322 | conn: conn 323 | } do 324 | # Use setup helper with default table name 325 | setup_named_table(conn) 326 | 327 | nul_string = "String with\0embedded NUL" 328 | sql_insert = "INSERT INTO exec_test (id, name) VALUES (?1, ?2);" 329 | insert_params = [10, nul_string] 330 | 331 | # Assert that inserting the NUL-containing string succeeds 332 | assert {:ok, 1} = NIF.execute(conn, sql_insert, insert_params) 333 | 334 | # Verify that the stored string, when retrieved, contains the NUL byte 335 | sql_select = "SELECT name FROM exec_test WHERE id = ?1;" 336 | select_params = [10] 337 | 338 | # Assert the query succeeds and the retrieved string matches the original 339 | assert {:ok, %{columns: ["name"], rows: [[^nul_string]], num_rows: 1}} = 340 | NIF.query(conn, sql_select, select_params) 341 | 342 | # Double-check byte size to be sure NUL wasn't truncated 343 | assert byte_size(nul_string) == 24 344 | 345 | assert {:ok, %{rows: [[retrieved_string]]}} = 346 | NIF.query(conn, sql_select, select_params) 347 | 348 | assert byte_size(retrieved_string) == 24 349 | end 350 | 351 | test "execute/3 returns error when trying to execute non-query with RETURNING via execute", 352 | %{conn: conn} do 353 | # Use setup helper with default table name 354 | setup_named_table(conn) 355 | 356 | sql = "INSERT INTO exec_test (name, val_int) VALUES (?1, ?2) RETURNING id;" 357 | 
params = ["Should Fail", 99] 358 | assert {:error, :execute_returned_results} = NIF.execute(conn, sql, params) 359 | end 360 | end 361 | 362 | # end describe "using #{prefix}" 363 | end 364 | 365 | # end `for` loop 366 | 367 | # --- DB type-specific or other tests (outside the `for` loop) --- 368 | # None currently identified for execute/execute_batch 369 | end 370 | -------------------------------------------------------------------------------- /native/xqlitenif/src/util.rs: -------------------------------------------------------------------------------- 1 | use crate::error::XqliteError; 2 | use crate::nif::XqliteConn; 3 | use rusqlite::ffi; 4 | use rusqlite::{types::Value, Connection, Rows}; 5 | use rustler::{ 6 | resource_impl, 7 | types::{ 8 | atom::{error, false_, nil, ok, true_}, 9 | binary::OwnedBinary, 10 | }, 11 | Atom, Binary, Encoder, Env, Error as RustlerError, ListIterator, Resource, ResourceArc, 12 | Term, TermType, 13 | }; 14 | use std::fmt::Debug; 15 | use std::ops::DerefMut; 16 | use std::vec::Vec; 17 | 18 | #[derive(Debug)] 19 | pub(crate) struct BlobResource(pub(crate) Vec); 20 | #[resource_impl] 21 | impl Resource for BlobResource {} 22 | 23 | pub(crate) fn encode_val(env: Env<'_>, val: rusqlite::types::Value) -> Term<'_> { 24 | match val { 25 | Value::Null => nil().encode(env), 26 | Value::Integer(i) => i.encode(env), 27 | Value::Real(f) => f.encode(env), 28 | Value::Text(s) => s.encode(env), 29 | Value::Blob(owned_vec) => { 30 | let resource = ResourceArc::new(BlobResource(owned_vec)); 31 | resource 32 | .make_binary(env, |wrapper: &BlobResource| &wrapper.0) 33 | .encode(env) 34 | } 35 | } 36 | } 37 | 38 | pub(crate) fn term_to_tagged_elixir_value<'a>(env: Env<'a>, term: Term<'a>) -> Term<'a> { 39 | match term.get_type() { 40 | TermType::Atom => (crate::atom(), term).encode(env), // e.g., {:atom, :foo} 41 | TermType::Binary => { 42 | if let Ok(_s_val) = term.decode::() { 43 | // If it's a valid Elixir string, tag as :string and pass original 
term 44 | (crate::string(), term).encode(env) // e.g., {:string, "hello"} 45 | } else { 46 | // Otherwise, tag as :binary and pass original term 47 | (crate::binary(), term).encode(env) // e.g., {:binary, <<1,2,3>>} 48 | } 49 | } 50 | TermType::Integer => (crate::integer(), term).encode(env), // e.g., {:integer, 123} 51 | TermType::Float => (crate::float(), term).encode(env), // e.g., {:float, 1.23} 52 | TermType::List => (crate::list(), term).encode(env), // e.g., {:list, [1,2]} 53 | TermType::Map => (crate::map(), term).encode(env), // e.g., {:map, %{a: 1}} 54 | TermType::Fun => (crate::function(), term).encode(env), // e.g., {:function, &fun/0} (opaque) 55 | TermType::Pid => (crate::pid(), term).encode(env), // e.g., {:pid, #Pid<...>} (opaque) 56 | TermType::Port => (crate::port(), term).encode(env), // e.g., {:port, #Port<...>} (opaque) 57 | TermType::Ref => (crate::reference(), term).encode(env), // e.g., {:reference, #Reference<...>} (opaque) 58 | TermType::Tuple => (crate::tuple(), term).encode(env), // e.g., {:tuple, {1,2}} 59 | TermType::Unknown => { 60 | (crate::unknown(), format!("Unknown TermType: {term:?}")).encode(env) 61 | } 62 | } 63 | } 64 | 65 | #[inline] 66 | pub(crate) fn singular_ok_or_error_tuple<'a>( 67 | env: Env<'a>, 68 | operation_result: Result<(), XqliteError>, 69 | ) -> Term<'a> { 70 | match operation_result { 71 | // Returns only `:ok` to Elixir 72 | Ok(()) => ok().encode(env), 73 | // Returns `{:error, err}` to Elixir 74 | Err(err) => (error(), err).encode(env), 75 | } 76 | } 77 | 78 | pub(crate) fn process_rows<'a, 'rows>( 79 | env: Env<'a>, 80 | mut rows: Rows<'rows>, 81 | column_count: usize, 82 | ) -> Result>>, XqliteError> { 83 | let mut results: Vec>> = Vec::new(); 84 | 85 | loop { 86 | let row_option_result = rows.next(); 87 | 88 | match row_option_result { 89 | Ok(Some(row)) => { 90 | let mut row_values: Vec> = Vec::with_capacity(column_count); 91 | for i in 0..column_count { 92 | match row.get::(i) { 93 | Ok(val) => { 94 | 
                            let term = encode_val(env, val);
                            row_values.push(term);
                        }
                        Err(e) => {
                            // Check specifically for interruption *during column fetch*.
                            // NOTE(review): comparing the Display string "interrupted" is
                            // brittle across rusqlite versions; prefer matching the
                            // SQLITE_INTERRUPT error code if exposed — confirm.
                            if e.to_string() == "interrupted" {
                                return Err(XqliteError::OperationCancelled);
                            }
                            // Check specifically for Utf8Error
                            if let rusqlite::Error::Utf8Error(utf8_err) = e {
                                return Err(XqliteError::Utf8Error {
                                    reason: utf8_err.to_string(),
                                });
                            }
                            // Otherwise, map to CannotFetchRow
                            return Err(XqliteError::CannotFetchRow(format!(
                                "Error getting value for column {i}: {e}"
                            )));
                        }
                    };
                }
                results.push(row_values);
            }
            Ok(None) => {
                break; // End of rows
            }
            Err(e) => {
                // Check specifically for interruption *during row iteration*.
                // NOTE(review): same brittle string comparison as the column-fetch
                // path above — keep the two in sync if the detection changes.
                if e.to_string() == "interrupted" {
                    return Err(XqliteError::OperationCancelled);
                }
                // Check specifically for Utf8Error during iteration
                if let rusqlite::Error::Utf8Error(utf8_err) = e {
                    return Err(XqliteError::Utf8Error {
                        reason: utf8_err.to_string(),
                    });
                }
                // Otherwise, map other iteration errors to CannotFetchRow
                return Err(XqliteError::CannotFetchRow(format!(
                    "Error advancing row iterator: {e}"
                )));
            }
        }
    }
    Ok(results)
}

/// Converts an Elixir term into a `rusqlite::types::Value` for parameter
/// binding: `nil` -> NULL, `true`/`false` -> INTEGER 1/0, integers and
/// floats pass through, binaries become TEXT when they decode as a valid
/// Elixir string and BLOB otherwise; any other atom yields
/// `UnsupportedAtom` and any other term type `UnsupportedDataType`.
fn elixir_term_to_rusqlite_value<'a>(
    env: Env<'a>,
    term: Term<'a>,
) -> Result<Value, XqliteError> {
    // Shared constructor for decode failures, capturing the offending term.
    let make_convert_error = |term: Term<'a>, err: RustlerError| -> XqliteError {
        XqliteError::CannotConvertToSqliteValue {
            value_str: format!("{term:?}"),
            reason: format!("{err:?}"),
        }
    };
    let term_type = term.get_type();
    match term_type {
        TermType::Atom => {
            if term == nil().to_term(env) {
                Ok(Value::Null)
            } else if term == true_().to_term(env) {
                Ok(Value::Integer(1))
            } else if term == false_().to_term(env) {
                Ok(Value::Integer(0))
            } else {
Err(XqliteError::UnsupportedAtom { 162 | atom_value: term 163 | .atom_to_string() 164 | .unwrap_or_else(|_| format!("{term:?}")), 165 | }) 166 | } 167 | } 168 | TermType::Integer => term 169 | .decode::() 170 | .map(Value::Integer) 171 | .map_err(|e| make_convert_error(term, e)), 172 | TermType::Float => term 173 | .decode::() 174 | .map(Value::Real) 175 | .map_err(|e| make_convert_error(term, e)), 176 | TermType::Binary => match term.decode::() { 177 | Ok(s) => Ok(Value::Text(s)), 178 | Err(_string_decode_err) => match term.decode::() { 179 | Ok(bin) => Ok(Value::Blob(bin.as_slice().to_vec())), 180 | Err(binary_decode_err) => Err(make_convert_error(term, binary_decode_err)), 181 | }, 182 | }, 183 | _ => Err(XqliteError::UnsupportedDataType { term_type }), 184 | } 185 | } 186 | 187 | pub(crate) fn decode_exec_keyword_params<'a>( 188 | env: Env<'a>, 189 | list_term: Term<'a>, 190 | ) -> Result, XqliteError> { 191 | let iter: ListIterator<'a> = 192 | list_term 193 | .decode() 194 | .map_err(|_| XqliteError::ExpectedKeywordList { 195 | value_str: format!("{list_term:?}"), 196 | })?; 197 | let mut params: Vec<(String, Value)> = Vec::new(); 198 | for term_item in iter { 199 | let (key_atom, value_term): (Atom, Term<'a>) = 200 | term_item 201 | .decode() 202 | .map_err(|_| XqliteError::ExpectedKeywordTuple { 203 | value_str: format!("{term_item:?}"), 204 | })?; 205 | let mut key_string: String = key_atom 206 | .to_term(env) 207 | .atom_to_string() 208 | .map_err(|e| XqliteError::CannotConvertAtomToString(format!("{e:?}")))?; 209 | key_string.insert(0, ':'); 210 | let rusqlite_value = elixir_term_to_rusqlite_value(env, value_term)?; 211 | params.push((key_string, rusqlite_value)); 212 | } 213 | Ok(params) 214 | } 215 | 216 | pub(crate) fn decode_plain_list_params<'a>( 217 | env: Env<'a>, 218 | list_term: Term<'a>, 219 | ) -> Result, XqliteError> { 220 | let iter: ListIterator<'a> = 221 | list_term.decode().map_err(|_| XqliteError::ExpectedList { 222 | value_str: 
format!("{list_term:?}"), 223 | })?; 224 | let mut values = Vec::new(); 225 | for term in iter { 226 | values.push(elixir_term_to_rusqlite_value(env, term)?); 227 | } 228 | Ok(values) 229 | } 230 | 231 | pub(crate) fn format_term_for_pragma<'a>( 232 | env: Env<'a>, 233 | term: Term<'a>, 234 | ) -> Result { 235 | let term_type = term.get_type(); 236 | match term_type { 237 | TermType::Atom => { 238 | if term == nil().to_term(env) { 239 | Ok("NULL".to_string()) 240 | } else if term == true_().to_term(env) { 241 | Ok("ON".to_string()) 242 | } else if term == false_().to_term(env) { 243 | Ok("OFF".to_string()) 244 | } else { 245 | term.atom_to_string() 246 | .map_err(|e| XqliteError::CannotConvertAtomToString(format!("{e:?}"))) 247 | } 248 | } 249 | TermType::Integer => term.decode::().map(|i| i.to_string()).map_err(|e| { 250 | XqliteError::CannotConvertToSqliteValue { 251 | value_str: format!("{term:?}"), 252 | reason: format!("{e:?}"), 253 | } 254 | }), 255 | // Floats are usually not set via PRAGMA, but handle just in case 256 | TermType::Float => term.decode::().map(|f| f.to_string()).map_err(|e| { 257 | XqliteError::CannotConvertToSqliteValue { 258 | value_str: format!("{term:?}"), 259 | reason: format!("{e:?}"), 260 | } 261 | }), 262 | // Binaries interpreted as Strings, need single quotes 263 | TermType::Binary => term 264 | .decode::() 265 | .map(|s| format!("'{}'", s.replace('\'', "''"))) 266 | .map_err(|e| XqliteError::CannotConvertToSqliteValue { 267 | value_str: format!("{term:?}"), 268 | reason: format!("Failed to decode binary as string for PRAGMA: {e:?}"), 269 | }), 270 | _ => Err(XqliteError::UnsupportedDataType { term_type }), 271 | } 272 | } 273 | 274 | pub(crate) fn is_keyword<'a>(list_term: Term<'a>) -> bool { 275 | match list_term.decode::>() { 276 | Ok(mut iter) => match iter.next() { 277 | Some(first_el) => first_el.decode::<(Atom, Term<'a>)>().is_ok(), 278 | None => false, 279 | }, 280 | Err(_) => false, 281 | } 282 | } 283 | 284 | #[inline] 285 
/// Quotes a name by wrapping it in single quotes, doubling any embedded
/// single quotes.
/// NOTE(review): the SQL-standard quoting for *identifiers* is double
/// quotes (`"name"`); single quotes denote string literals, and SQLite only
/// accepts single-quoted tokens in identifier positions as a compatibility
/// quirk. Consider switching to `"` quoting — confirm against call sites.
pub(crate) fn quote_identifier(name: &str) -> String {
    format!("'{}'", name.replace('\'', "''"))
}

/// Quotes a savepoint name; identical escaping to `quote_identifier`.
#[inline]
pub(crate) fn quote_savepoint_name(name: &str) -> String {
    format!("'{}'", name.replace('\'', "''"))
}

/// Decodes the current row of a raw SQLite statement into Elixir terms,
/// one per column, dispatching on `sqlite3_column_type`.
///
/// # Safety
///
/// This function dereferences the raw `stmt_ptr` and calls FFI functions
/// that are inherently unsafe. The caller (stream_fetch) must ensure
/// `stmt_ptr` is valid and points to a statement that has been
/// successfully stepped to SQLITE_ROW, and that `column_count` does not
/// exceed the statement's actual column count.
pub(crate) unsafe fn sqlite_row_to_elixir_terms(
    env: Env<'_>,
    stmt_ptr: *mut ffi::sqlite3_stmt,
    column_count: usize,
) -> Result<Vec<Term<'_>>, XqliteError> {
    let mut row_values = Vec::with_capacity(column_count);
    for i in 0..column_count {
        let col_idx = i as std::os::raw::c_int;
        let col_type = ffi::sqlite3_column_type(stmt_ptr, col_idx);
        let term = match col_type {
            ffi::SQLITE_INTEGER => {
                let val = ffi::sqlite3_column_int64(stmt_ptr, col_idx);
                val.encode(env)
            }
            ffi::SQLITE_FLOAT => {
                let val = ffi::sqlite3_column_double(stmt_ptr, col_idx);
                val.encode(env)
            }
            ffi::SQLITE_TEXT => {
                // A null text pointer for a TEXT-typed column indicates an
                // internal inconsistency rather than an SQL NULL (NULL columns
                // report SQLITE_NULL), so surface it as an encoding error.
                let s_ptr = ffi::sqlite3_column_text(stmt_ptr, col_idx);
                if s_ptr.is_null() {
                    return Err(XqliteError::InternalEncodingError {
                        context: format!(
                            "SQLite TEXT column pointer was null for column index {i}"
                        ),
                    });
                }
                let len = ffi::sqlite3_column_bytes(stmt_ptr, col_idx);
                let text_slice = std::slice::from_raw_parts(s_ptr, len as usize);
                match std::str::from_utf8(text_slice) {
                    Ok(s) => s.encode(env),
                    Err(utf8_err) => {
                        return Err(XqliteError::Utf8Error {
                            reason: format!(
                                "Invalid UTF-8 sequence in TEXT column index {i}: {utf8_err}"
                            ),
                        });
                    }
                }
            }
            ffi::SQLITE_BLOB => {
                let b_ptr =
ffi::sqlite3_column_blob(stmt_ptr, col_idx); 340 | let len = ffi::sqlite3_column_bytes(stmt_ptr, col_idx) as usize; 341 | if b_ptr.is_null() { 342 | if len == 0 { 343 | let empty_bin = OwnedBinary::new(0).ok_or_else(|| { 344 | XqliteError::InternalEncodingError { 345 | context: "Failed to allocate 0-byte OwnedBinary".to_string(), 346 | } 347 | })?; 348 | // For an empty OwnedBinary, no copy is needed after creation. 349 | empty_bin.release(env).encode(env) 350 | } else { 351 | return Err(XqliteError::InternalEncodingError { 352 | context: format!("SQLite BLOB column pointer was null for non-empty blob (column index {i})"), 353 | }); 354 | } 355 | } else { 356 | let data_slice = std::slice::from_raw_parts(b_ptr as *const u8, len); 357 | let mut bin = OwnedBinary::new(len).ok_or_else(|| { 358 | XqliteError::InternalEncodingError { 359 | context: format!( 360 | "Failed to allocate {len}-byte OwnedBinary for blob" 361 | ), 362 | } 363 | })?; 364 | // Use deref_mut to get &mut [u8] to copy into. 
365 | bin.deref_mut().copy_from_slice(data_slice); 366 | bin.release(env).encode(env) 367 | } 368 | } 369 | ffi::SQLITE_NULL => nil().encode(env), // Corrected 370 | _ => { 371 | return Err(XqliteError::InternalEncodingError { 372 | context: format!( 373 | "Unknown SQLite column type: {col_type} for column index {i}" 374 | ), 375 | }); 376 | } 377 | }; 378 | row_values.push(term); 379 | } 380 | Ok(row_values) 381 | } 382 | 383 | pub(crate) fn with_conn( 384 | handle: &ResourceArc, 385 | func: F, 386 | ) -> Result 387 | where 388 | F: FnOnce(&Connection) -> Result, 389 | { 390 | let conn_guard = handle 391 | .0 392 | .lock() 393 | .map_err(|e| XqliteError::LockError(e.to_string()))?; 394 | func(&conn_guard) 395 | } 396 | -------------------------------------------------------------------------------- /test/schema_introspection_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Xqlite.SchemaIntrospectionTest do 2 | use ExUnit.Case, async: true 3 | 4 | import Xqlite.TestUtil, only: [connection_openers: 0, find_opener_mfa!: 1] 5 | alias XqliteNIF, as: NIF 6 | alias Xqlite.Schema 7 | 8 | @schema_ddl ~S""" 9 | CREATE TABLE categories ( cat_id INTEGER PRIMARY KEY, name TEXT UNIQUE NOT NULL, description TEXT ); 10 | CREATE TABLE users ( user_id INTEGER PRIMARY KEY, category_id INTEGER REFERENCES categories(cat_id) ON DELETE SET NULL ON UPDATE CASCADE, full_name TEXT NOT NULL, email TEXT UNIQUE, balance REAL DEFAULT 0.0, config BLOB ); 11 | CREATE INDEX idx_users_email_desc ON users(email DESC); 12 | CREATE INDEX idx_users_name_lower ON users(LOWER(full_name)); 13 | CREATE TABLE items ( sku TEXT PRIMARY KEY, description TEXT, value REAL CHECK(value > 0) ) WITHOUT ROWID; 14 | CREATE TABLE user_items ( user_id INTEGER NOT NULL REFERENCES users(user_id) ON DELETE CASCADE, item_sku TEXT NOT NULL REFERENCES items(sku), quantity INTEGER DEFAULT 1, PRIMARY KEY (user_id, item_sku) ); 15 | CREATE VIEW person_view AS SELECT 
user_id, full_name FROM users;
CREATE TRIGGER item_value_trigger AFTER UPDATE ON items BEGIN UPDATE items SET description = 'Updated' WHERE item_id = NEW.item_id; END;
INSERT INTO categories (cat_id, name) VALUES (10, 'Electronics'), (20, 'Books');
INSERT INTO users (user_id, category_id, full_name, email, balance) VALUES (1, 10, 'Alice Alpha', 'alice@example.com', 100.50), (2, 20, 'Bob Beta', 'bob@example.com', 0.0);
INSERT INTO items (sku, description, value) VALUES ('ITEM001', 'Laptop', 1200.00), ('ITEM002', 'Guide Book', 25.50);
INSERT INTO user_items (user_id, item_sku, quantity) VALUES (1, 'ITEM002', 2);
"""

  # --- Helper Functions ---

  # Canonical ordering for result lists whose NIF-returned order is unspecified.
  defp sort_by_name(list), do: Enum.sort_by(list, & &1.name)
  defp sort_by_id_seq(list), do: Enum.sort_by(list, &{&1.id, &1.column_sequence})

  # --- Shared test code ---
  # Runs the same schema-introspection suite once per connection opener
  # (e.g. in-memory vs. file-backed), tagged so individual opener types can be
  # selected with `mix test --only <type_tag>`.
  for {type_tag, prefix, _opener_mfa_ignored_here} <- connection_openers() do
    describe "using #{prefix}" do
      @describetag type_tag

      setup context do
        {mod, fun, args} = find_opener_mfa!(context)

        # NOTE: `@describetag type_tag` merges the tag under its own name into
        # the context; the describe label itself is available as `context.describe`.
        # (`context[:describetag]` is not a context key and was always nil.)
        assert {:ok, conn} = apply(mod, fun, args),
               "Failed opening for #{inspect(context.describe)}"

        assert :ok = NIF.execute_batch(conn, @schema_ddl)
        on_exit(fn -> NIF.close(conn) end)
        {:ok, conn: conn}
      end

      # --- Shared test cases ---

      test "schema_list_objects lists user tables and views", %{conn: conn} do
        # Expected user-created objects in "main"; internal sqlite_* objects
        # are filtered out of the actual results before comparison.
        expected_objects =
          [
            %Schema.SchemaObjectInfo{
              schema: "main",
              name: "categories",
              object_type: :table,
              column_count: 3,
              is_without_rowid: false,
              strict: false
            },
            %Schema.SchemaObjectInfo{
              schema: "main",
              name: "items",
              object_type: :table,
              column_count: 3,
              is_without_rowid: true,
              strict: false
            },
            %Schema.SchemaObjectInfo{
              schema: "main",
              name: "person_view",
              object_type: :view,
              column_count: 2,
              is_without_rowid: false,
              strict: false
            },
            %Schema.SchemaObjectInfo{
              schema: "main",
              name: "user_items",
              object_type: :table,
              column_count: 3,
              is_without_rowid: false,
              strict: false
            },
            %Schema.SchemaObjectInfo{
              schema: "main",
              name: "users",
              object_type: :table,
              column_count: 6,
              is_without_rowid: false,
              strict: false
            }
          ]
          |> sort_by_name()

        assert {:ok, actual_objects_unsorted} = NIF.schema_list_objects(conn, "main")

        actual_objects_sorted =
          actual_objects_unsorted
          |> Enum.filter(fn obj -> obj.name not in ["sqlite_schema"] end)
          |> sort_by_name()

        assert actual_objects_sorted == expected_objects
      end

      test "schema_columns returns info for 'users' table", %{conn: conn} do
        # `primary_key_index` is 1-based position within the PK (0 = not part of PK).
        expected_columns = [
          %Schema.ColumnInfo{
            column_id: 0,
            name: "user_id",
            type_affinity: :integer,
            declared_type: "INTEGER",
            nullable: true,
            default_value: nil,
            primary_key_index: 1,
            hidden_kind: :normal
          },
          %Schema.ColumnInfo{
            column_id: 1,
            name: "category_id",
            type_affinity: :integer,
            declared_type: "INTEGER",
            nullable: true,
            default_value: nil,
            primary_key_index: 0,
            hidden_kind: :normal
          },
          %Schema.ColumnInfo{
            column_id: 2,
            name: "full_name",
            type_affinity: :text,
            declared_type: "TEXT",
            nullable: false,
            default_value: nil,
            primary_key_index: 0,
            hidden_kind: :normal
          },
          %Schema.ColumnInfo{
            column_id: 3,
            name: "email",
            type_affinity: :text,
            declared_type: "TEXT",
            nullable: true,
            default_value: nil,
            primary_key_index: 0,
            hidden_kind: :normal
          },
          %Schema.ColumnInfo{
            column_id: 4,
            name: "balance",
            type_affinity: :float,
            declared_type: "REAL",
            nullable: true,
            default_value: "0.0",
            primary_key_index: 0,
            hidden_kind: :normal
          },
          %Schema.ColumnInfo{
            column_id: 5,
            name: "config",
            type_affinity: :binary,
            declared_type: "BLOB",
            nullable: true,
            default_value: nil,
            primary_key_index: 0,
            hidden_kind: :normal
          }
        ]

        assert {:ok, expected_columns} == NIF.schema_columns(conn, "users")
      end

      test "schema_columns returns info for WITHOUT ROWID table ('items')", %{conn: conn} do
        expected_columns = [
          %Schema.ColumnInfo{
            column_id: 0,
            name: "sku",
            type_affinity: :text,
            declared_type: "TEXT",
            nullable: false,
            default_value: nil,
            primary_key_index: 1,
            hidden_kind: :normal
          },
          %Schema.ColumnInfo{
            column_id: 1,
            name: "description",
            type_affinity: :text,
            declared_type: "TEXT",
            nullable: true,
            default_value: nil,
            primary_key_index: 0,
            hidden_kind: :normal
          },
          # nullable: true for value column (CHECK > 0 doesn't imply NOT NULL)
          %Schema.ColumnInfo{
            column_id: 2,
            name: "value",
            type_affinity: :float,
            declared_type: "REAL",
            nullable: true,
            default_value: nil,
            primary_key_index: 0,
            hidden_kind: :normal
          }
        ]

        assert {:ok, expected_columns} == NIF.schema_columns(conn, "items")
      end

      test "schema_foreign_keys returns info for join table ('user_items')", %{conn: conn} do
        expected_fks = [
          # ID 0 actually references items
          %Schema.ForeignKeyInfo{
            id: 0,
            column_sequence: 0,
            target_table: "items",
            from_column: "item_sku",
            to_column: "sku",
            on_update: :no_action,
            on_delete: :no_action,
            match_clause: :none
          },
          # ID 1 actually references users
          %Schema.ForeignKeyInfo{
            id: 1,
            column_sequence: 0,
            target_table: "users",
            from_column: "user_id",
            to_column: "user_id",
            on_update: :no_action,
            on_delete: :cascade,
            match_clause: :none
          }
        ]

        # Expected list above is already in {id, seq} order; sort the actual
        # results the same way before comparing.
        assert {:ok, actual_fks} = NIF.schema_foreign_keys(conn, "user_items")
        assert sort_by_id_seq(actual_fks) == expected_fks
      end

      test "schema_indexes returns info including implicit and explicit", %{conn: conn} do
        # Check 'users' table indexes
        expected_users_indexes =
          [
            # UNIQUE(email)
            %Schema.IndexInfo{
              name: "sqlite_autoindex_users_1",
              unique: true,
              origin: :unique_constraint,
              partial: false
            },
            %Schema.IndexInfo{
              name: "idx_users_email_desc",
              unique: false,
              origin: :create_index,
              partial: false
            },
            %Schema.IndexInfo{
              name: "idx_users_name_lower",
              unique: false,
              origin: :create_index,
              partial: false
            }
          ]
          |> sort_by_name()

        assert {:ok, actual_users_idx} = NIF.schema_indexes(conn, "users")
        assert sort_by_name(actual_users_idx) == expected_users_indexes

        # Check 'items' table indexes (WITHOUT ROWID PK)
        expected_items_indexes = [
          %Schema.IndexInfo{
            name: "sqlite_autoindex_items_1",
            unique: true,
            origin: :primary_key_constraint,
            partial: false
          }
        ]

        assert {:ok, ^expected_items_indexes} = NIF.schema_indexes(conn, "items")

        # Check 'user_items' table indexes (Compound PK)
        expected_user_items_indexes = [
          %Schema.IndexInfo{
            name: "sqlite_autoindex_user_items_1",
            unique: true,
            origin: :primary_key_constraint,
            partial: false
          }
        ]

        assert {:ok, ^expected_user_items_indexes} =
                 NIF.schema_indexes(conn, "user_items")
      end

      test "schema_index_columns returns info for various index types", %{conn: conn} do
        # Explicit DESC index on users(email); the trailing entry with
        # table_column_id -1 is the implicit rowid column of the index.
        expected_desc = [
          %Schema.IndexColumnInfo{
            index_column_sequence: 0,
            table_column_id: 3,
            name: "email",
            sort_order: :desc,
            collation: "BINARY",
            is_key_column: true
          },
          %Schema.IndexColumnInfo{
            index_column_sequence: 1,
            table_column_id: -1,
            name: nil,
            sort_order: :asc,
            collation: "BINARY",
            is_key_column: false
          }
        ]

        assert {:ok, ^expected_desc} = NIF.schema_index_columns(conn, "idx_users_email_desc")

        # Compound PK index on user_items(user_id, item_sku)
        expected_compound = [
          %Schema.IndexColumnInfo{
            index_column_sequence: 0,
            table_column_id: 0,
            name: "user_id",
            sort_order: :asc,
            collation: "BINARY",
            is_key_column: true
          },
          %Schema.IndexColumnInfo{
            index_column_sequence: 1,
            table_column_id: 1,
            name: "item_sku",
            sort_order: :asc,
            collation: "BINARY",
            is_key_column: true
          },
          %Schema.IndexColumnInfo{
            index_column_sequence: 2,
            table_column_id: -1,
            name: nil,
            sort_order: :asc,
            collation: "BINARY",
            is_key_column: false
          }
        ]

        assert {:ok, ^expected_compound} =
                 NIF.schema_index_columns(conn, "sqlite_autoindex_user_items_1")

        # Index on expression users(LOWER(full_name))
        expected_expr = [
          # table_column_id for expression is -2
          %Schema.IndexColumnInfo{
            index_column_sequence: 0,
            table_column_id: -2,
            name: nil,
            sort_order: :asc,
            collation: "BINARY",
            is_key_column: true
          },
          %Schema.IndexColumnInfo{
            index_column_sequence: 1,
            table_column_id: -1,
            name: nil,
            sort_order: :asc,
            collation: "BINARY",
            is_key_column: false
          }
        ]

        assert {:ok, ^expected_expr} = NIF.schema_index_columns(conn, "idx_users_name_lower")
      end

      test "get_create_sql returns original SQL for various objects", %{conn: conn} do
        # Table
        assert {:ok, sql_users} = NIF.get_create_sql(conn, "users")
        assert is_binary(sql_users) and String.starts_with?(sql_users, "CREATE TABLE users")
        # Index
        assert {:ok, sql_idx} = NIF.get_create_sql(conn, "idx_users_email_desc")

        assert is_binary(sql_idx) and
                 String.contains?(sql_idx, "CREATE INDEX idx_users_email_desc")

        # View
        assert {:ok, sql_view} = NIF.get_create_sql(conn, "person_view")
        assert is_binary(sql_view) and String.starts_with?(sql_view, "CREATE VIEW")
        # Trigger
        assert {:ok, sql_trigger} = NIF.get_create_sql(conn, "item_value_trigger")
        assert is_binary(sql_trigger) and String.starts_with?(sql_trigger, "CREATE TRIGGER")
      end

      # --- Tests for "Not Found" cases ---
      # Introspection of a missing object is not an error; it yields an empty
      # list (or nil for get_create_sql).

      test "schema_columns returns empty list for non-existent table", %{conn: conn} do
        assert {:ok, []} == NIF.schema_columns(conn, "non_existent_table")
      end

      test "schema_foreign_keys returns empty list for non-existent table", %{conn: conn} do
        assert {:ok, []} == NIF.schema_foreign_keys(conn, "non_existent_table")
      end

      test "schema_indexes returns empty list for non-existent table", %{conn: conn} do
        assert {:ok, []} == NIF.schema_indexes(conn, "non_existent_table")
      end

      test "schema_index_columns returns empty list for non-existent index", %{conn: conn} do
        assert {:ok, []} == NIF.schema_index_columns(conn, "non_existent_index")
      end

      test "get_create_sql returns nil for non-existent object", %{conn: conn} do
        assert {:ok, nil} == NIF.get_create_sql(conn, "non_existent_object")
      end

      test "schema_columns handles various declared types and resolves correct affinity", %{
        conn: conn
      } do
        # This DDL tests SQLite's type affinity rules for columns with:
        # 1. No declared type ('c_no_type'): Should default to BLOB affinity.
        #    PRAGMA table_xinfo reports its 'type' as an empty string.
        # 2. A common keyword not having specific affinity rules ('c_boolean_keyword BOOLEAN'):
        #    Should default to NUMERIC affinity.
        # 3. Another common keyword without specific affinity ('c_datetime_keyword DATETIME'):
        #    Should default to NUMERIC affinity.
        # 4. A completely custom/unrecognized type name ('c_funky_type "VERY STRANGE NAME"'):
        #    Should also default to NUMERIC affinity.
        ddl = """
        CREATE TABLE type_affinity_examples (
          c_no_type,
          c_boolean_keyword BOOLEAN,
          c_datetime_keyword DATETIME,
          c_funky_type "VERY STRANGE NAME"
        );
        """

        # DDL execution can return non-zero for "rows affected"
        assert {:ok, _} = NIF.execute(conn, ddl, [])

        assert {:ok, columns_info} = NIF.schema_columns(conn, "type_affinity_examples")

        no_type_col = Enum.find(columns_info, &(&1.name == "c_no_type"))
        boolean_col = Enum.find(columns_info, &(&1.name == "c_boolean_keyword"))
        datetime_col = Enum.find(columns_info, &(&1.name == "c_datetime_keyword"))
        funky_col = Enum.find(columns_info, &(&1.name == "c_funky_type"))

        refute is_nil(no_type_col)
        assert no_type_col.declared_type == ""
        assert no_type_col.type_affinity == :binary

        refute is_nil(boolean_col)
        assert boolean_col.declared_type == "BOOLEAN"
        assert boolean_col.type_affinity == :numeric

        refute is_nil(datetime_col)
        assert datetime_col.declared_type == "DATETIME"
        assert datetime_col.type_affinity == :numeric

        refute is_nil(funky_col)
        assert funky_col.declared_type == "VERY STRANGE NAME"
        assert funky_col.type_affinity == :numeric
      end
    end

    # end describe "using #{prefix}"
  end

  # end `for` loop

  # --- DB type-specific or other tests (outside the `for` loop) ---
  # None currently identified for schema introspection
end