├── data ├── db │ ├── fts.db │ ├── borked.db │ ├── count.db │ ├── float.db │ ├── rtree.db │ ├── sakila.db │ ├── zeropk.db │ ├── boolean.db │ ├── chinook.db │ ├── decimal.db │ ├── issue44.db │ ├── issue47.db │ ├── norowid.db │ ├── emptytype.db │ ├── longvarchar.db │ ├── negativepk.db │ └── mixed_data_numeric.db └── sql │ ├── tpch-export.duckdb │ ├── bench.sql │ ├── tpch-create.sqlite │ └── sakila-examples.sql ├── .gitignore ├── .gitmodules ├── test └── sql │ ├── scanner │ ├── longvarchar.test │ ├── float.test │ ├── decimal.test │ ├── count_star.test │ ├── emptytype.test │ ├── issue44.test │ ├── mixed_numeric.test │ ├── borked.test │ ├── tpch.test │ └── chinook.test │ └── storage │ ├── attach_checkpoint.test │ ├── fts.test │ ├── attach_zero_negative_pk.test │ ├── attach_norowid.test │ ├── issue47.test │ ├── attach_rowid.test │ ├── issue53.test │ ├── rtree.test │ ├── attach_create_if_exists.test │ ├── attach_concurrent_clients.test │ ├── attach_explain.test │ ├── attach_options.test │ ├── attach_use.test │ ├── attach_detach.test │ ├── attach_big.test_slow │ ├── attach_database_list.test │ ├── attach_in_memory.test │ ├── attach_drop.test │ ├── attach_mixed_numeric.test │ ├── attach_delete_issue.test │ ├── attach_simple.test │ ├── attach_prefix.test │ ├── attach_read_only.test │ ├── attach_on_conflict.test │ ├── attach_describe.test │ ├── attach_defaults.test │ ├── attach_keywords.test │ ├── attach_views.test │ ├── attach_catalog.test │ ├── attach_types.test │ ├── attach_create_index.test │ ├── attach_schema_functions.test │ ├── attach_delete.test │ ├── attach_alter.test │ ├── attach_update.test │ ├── attach_transactions.test │ └── attach_constraints.test ├── src ├── sqlite │ └── CMakeLists.txt ├── CMakeLists.txt ├── storage │ ├── CMakeLists.txt │ ├── sqlite_index_entry.cpp │ ├── sqlite_transaction_manager.cpp │ ├── sqlite_index.cpp │ ├── sqlite_table_entry.cpp │ ├── sqlite_catalog.cpp │ ├── sqlite_delete.cpp │ ├── sqlite_transaction.cpp │ ├── sqlite_update.cpp │ ├── 
sqlite_insert.cpp │ └── sqlite_schema_entry.cpp ├── include │ ├── sqlite_storage.hpp │ ├── storage │ │ ├── sqlite_options.hpp │ │ ├── sqlite_index_entry.hpp │ │ ├── sqlite_index.hpp │ │ ├── sqlite_table_entry.hpp │ │ ├── sqlite_transaction_manager.hpp │ │ ├── sqlite_transaction.hpp │ │ ├── sqlite_delete.hpp │ │ ├── sqlite_update.hpp │ │ ├── sqlite_insert.hpp │ │ ├── sqlite_schema_entry.hpp │ │ └── sqlite_catalog.hpp │ ├── sqlite_scanner_extension.hpp │ ├── sqlite_utils.hpp │ ├── sqlite_scanner.hpp │ ├── sqlite_db.hpp │ └── sqlite_stmt.hpp ├── sqlite_extension.cpp ├── sqlite_storage.cpp ├── sqlite_utils.cpp ├── sqlite_stmt.cpp ├── sqlite_db.cpp └── sqlite_scanner.cpp ├── CMakeLists.txt ├── LICENSE ├── .github ├── workflows │ ├── MainDistributionPipeline.yml │ ├── HighPriorityIssues.yml │ └── _extension_distribution.yml └── ISSUE_TEMPLATE │ └── bug_report.yml ├── Makefile └── README.md /data/db/fts.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asg017/duckdb_sqlite_scanner/main/data/db/fts.db -------------------------------------------------------------------------------- /data/db/borked.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asg017/duckdb_sqlite_scanner/main/data/db/borked.db -------------------------------------------------------------------------------- /data/db/count.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asg017/duckdb_sqlite_scanner/main/data/db/count.db -------------------------------------------------------------------------------- /data/db/float.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asg017/duckdb_sqlite_scanner/main/data/db/float.db -------------------------------------------------------------------------------- /data/db/rtree.db: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/asg017/duckdb_sqlite_scanner/main/data/db/rtree.db -------------------------------------------------------------------------------- /data/db/sakila.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asg017/duckdb_sqlite_scanner/main/data/db/sakila.db -------------------------------------------------------------------------------- /data/db/zeropk.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asg017/duckdb_sqlite_scanner/main/data/db/zeropk.db -------------------------------------------------------------------------------- /data/db/boolean.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asg017/duckdb_sqlite_scanner/main/data/db/boolean.db -------------------------------------------------------------------------------- /data/db/chinook.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asg017/duckdb_sqlite_scanner/main/data/db/chinook.db -------------------------------------------------------------------------------- /data/db/decimal.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asg017/duckdb_sqlite_scanner/main/data/db/decimal.db -------------------------------------------------------------------------------- /data/db/issue44.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asg017/duckdb_sqlite_scanner/main/data/db/issue44.db -------------------------------------------------------------------------------- /data/db/issue47.db: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/asg017/duckdb_sqlite_scanner/main/data/db/issue47.db -------------------------------------------------------------------------------- /data/db/norowid.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asg017/duckdb_sqlite_scanner/main/data/db/norowid.db -------------------------------------------------------------------------------- /data/db/emptytype.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asg017/duckdb_sqlite_scanner/main/data/db/emptytype.db -------------------------------------------------------------------------------- /data/db/longvarchar.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asg017/duckdb_sqlite_scanner/main/data/db/longvarchar.db -------------------------------------------------------------------------------- /data/db/negativepk.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/asg017/duckdb_sqlite_scanner/main/data/db/negativepk.db -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .idea 3 | build 4 | cmake-build-debug 5 | sqlite/build 6 | tpch.db 7 | .clang-format 8 | *.tbl -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "duckdb"] 2 | path = duckdb 3 | url = https://github.com/duckdb/duckdb 4 | branch = main 5 | -------------------------------------------------------------------------------- /data/db/mixed_data_numeric.db: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/asg017/duckdb_sqlite_scanner/main/data/db/mixed_data_numeric.db -------------------------------------------------------------------------------- /test/sql/scanner/longvarchar.test: -------------------------------------------------------------------------------- 1 | # name: 2 | # description: 3 | # group: [sqlite_scanner] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | CALL sqlite_attach('data/db/longvarchar.db'); 9 | 10 | query I 11 | select * from tbl; 12 | ---- 13 | abc 14 | def 15 | -------------------------------------------------------------------------------- /test/sql/storage/attach_checkpoint.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_checkpoint.test 2 | # description: 3 | # group: [sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH '__TEST_DIR__/attach_checkpoint.db' AS s (TYPE SQLITE) 9 | 10 | statement ok 11 | CHECKPOINT s -------------------------------------------------------------------------------- /test/sql/scanner/float.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/scanner/float.test 2 | # description: Test float 3 | # group: [sqlite_scanner] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | CALL sqlite_attach('data/db/float.db'); 9 | 10 | query I 11 | select * from my; 12 | ---- 13 | 10.34 14 | 0.042 15 | NULL 16 | 42.0 17 | -------------------------------------------------------------------------------- /test/sql/storage/fts.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/fts.test 2 | # description: Test loading an FTS table 3 | # group: [sqlite_scanner] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH 'data/db/fts.db' AS sqlite (TYPE SQLITE) 9 | 10 | query III 11 | SELECT * FROM sqlite.email 12 | ---- 13 | test@gmail.com Title Body 14 | 
-------------------------------------------------------------------------------- /src/sqlite/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_library( 2 | sqlite_lib OBJECT 3 | sqlite3.c) 4 | set(ALL_OBJECT_FILES 5 | ${ALL_OBJECT_FILES} $ 6 | PARENT_SCOPE) 7 | 8 | target_compile_definitions( 9 | sqlite_lib 10 | PUBLIC SQLITE_ENABLE_FTS5 SQLITE_ENABLE_FTS4 SQLITE_ENABLE_FTS3_PARENTHESIS 11 | SQLITE_ENABLE_RTREE) -------------------------------------------------------------------------------- /test/sql/scanner/decimal.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/scanner/decimal.test 2 | # description: Test decimal 3 | # group: [sqlite_scanner] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | CALL sqlite_attach('data/db/decimal.db'); 9 | 10 | query IIIII 11 | select * from a; 12 | ---- 13 | NULL NULL NULL NULL NULL 14 | 1 0.1 1.2 123.45 12345678.90000 15 | -------------------------------------------------------------------------------- /test/sql/scanner/count_star.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/scanner/count_star.test 2 | # description: Test count star 3 | # group: [sqlite_scanner] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | CALL sqlite_attach('data/db/count.db'); 9 | 10 | query I 11 | select count(*) from empty_tbl; 12 | ---- 13 | 0 14 | 15 | query I 16 | select count(*) from mysql_databases; 17 | ---- 18 | 10 19 | -------------------------------------------------------------------------------- /test/sql/scanner/emptytype.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/scanner/emptytype.test 2 | # description: Test emptytype 3 | # group: [sqlite_scanner] 4 | 5 | require sqlite_scanner 6 | 7 | # load from a database with a column that has no type defined 8 | statement ok 9 | CALL 
sqlite_attach('data/db/emptytype.db'); 10 | 11 | query I 12 | select * from tbl; 13 | ---- 14 | 42 15 | hello 16 | NULL 17 | 0.5 18 | -------------------------------------------------------------------------------- /src/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | include_directories(include) 2 | 3 | add_subdirectory(sqlite) 4 | add_subdirectory(storage) 5 | 6 | add_library( 7 | sqlite_ext_library OBJECT 8 | sqlite_db.cpp sqlite_extension.cpp sqlite_scanner.cpp sqlite_stmt.cpp 9 | sqlite_storage.cpp sqlite_utils.cpp) 10 | set(ALL_OBJECT_FILES 11 | ${ALL_OBJECT_FILES} $ 12 | PARENT_SCOPE) 13 | -------------------------------------------------------------------------------- /test/sql/storage/attach_zero_negative_pk.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_zero_negative_pk.test 2 | # description: 3 | # group: [sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH 'data/db/zeropk.db' AS s 9 | 10 | query II 11 | FROM s.tbl 12 | ---- 13 | 0 NULL 14 | 15 | statement ok 16 | ATTACH 'data/db/negativepk.db' AS s2 17 | 18 | query II 19 | FROM s2.tbl 20 | ---- 21 | -1 NULL 22 | -------------------------------------------------------------------------------- /test/sql/storage/attach_norowid.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_norowid.test 2 | # description: 3 | # group: [sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH 'data/db/norowid.db' AS s (TYPE SQLITE, READ_ONLY) 9 | 10 | query II 11 | SELECT * FROM s.wordcount 12 | ---- 13 | hello 10 14 | world 5 15 | 16 | query II 17 | SELECT * FROM s.wordcount WHERE word='world' 18 | ---- 19 | world 5 20 | -------------------------------------------------------------------------------- /test/sql/storage/issue47.test: 
-------------------------------------------------------------------------------- 1 | # name: test/sql/storage/issue47.test 2 | # description: Test issue #47 - Mismatch type error despite sqlite_all_Varchar 3 | # group: [sqlite_scanner] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | SET GLOBAL sqlite_all_varchar=true; 9 | 10 | statement ok 11 | ATTACH 'data/db/issue47.db' as mydb (TYPE sqlite); 12 | 13 | query I 14 | SELECT * FROM mydb.table1; 15 | ---- 16 | 1598313600 17 | -------------------------------------------------------------------------------- /data/sql/tpch-export.duckdb: -------------------------------------------------------------------------------- 1 | CALL dbgen(sf=0.1); 2 | COPY nation TO 'nation.tbl' (HEADER FALSE); 3 | COPY region TO 'region.tbl' (HEADER FALSE); 4 | COPY part TO 'part.tbl' (HEADER FALSE); 5 | COPY supplier TO 'supplier.tbl' (HEADER FALSE); 6 | COPY partsupp TO 'partsupp.tbl' (HEADER FALSE); 7 | COPY customer TO 'customer.tbl' (HEADER FALSE); 8 | COPY orders TO 'orders.tbl' (HEADER FALSE); 9 | COPY lineitem TO 'lineitem.tbl' (HEADER FALSE); 10 | -------------------------------------------------------------------------------- /src/storage/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_library( 2 | sqlite_ext_storage OBJECT 3 | sqlite_catalog.cpp 4 | sqlite_delete.cpp 5 | sqlite_index.cpp 6 | sqlite_index_entry.cpp 7 | sqlite_insert.cpp 8 | sqlite_table_entry.cpp 9 | sqlite_schema_entry.cpp 10 | sqlite_transaction.cpp 11 | sqlite_transaction_manager.cpp 12 | sqlite_update.cpp) 13 | set(ALL_OBJECT_FILES 14 | ${ALL_OBJECT_FILES} $ 15 | PARENT_SCOPE) 16 | -------------------------------------------------------------------------------- /test/sql/storage/attach_rowid.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_rowid.test 2 | # description: 3 | # group: [sqlite_storage] 4 | 5 | require 
sqlite_scanner 6 | 7 | statement ok 8 | ATTACH '__TEST_DIR__/attach_rowid.db' AS s1 (TYPE SQLITE) 9 | 10 | statement ok 11 | CREATE TABLE s1.test(i INT); 12 | 13 | statement ok 14 | INSERT INTO s1.test VALUES (42), (84), (100); 15 | 16 | query II 17 | SELECT rowid, * FROM s1.test 18 | ---- 19 | 1 42 20 | 2 84 21 | 3 100 22 | -------------------------------------------------------------------------------- /test/sql/storage/issue53.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/issue53.test 2 | # description: Test issue #53 - BOOLEAN data type is not supported 3 | # group: [sqlite_scanner] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH 'data/db/boolean.db' AS sqlite (TYPE SQLITE) 9 | 10 | query II 11 | SELECT name, type FROM pragma_table_info('sqlite.entry') 12 | ---- 13 | id BIGINT 14 | is_active BIGINT 15 | 16 | query II 17 | SELECT * FROM sqlite.entry 18 | ---- 19 | 44 0 20 | 55 1 21 | -------------------------------------------------------------------------------- /test/sql/storage/rtree.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/rtree.test 2 | # description: Test loading an RTree table 3 | # group: [sqlite_scanner] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH 'data/db/rtree.db' AS sqlite (TYPE SQLITE) 9 | 10 | query IIIII 11 | SELECT * FROM sqlite.demo_index 12 | ---- 13 | 28215 -80.7812271118164 -80.60470581054688 35.20881271362305 35.297367095947266 14 | 28216 -80.95728302001953 -80.84059143066406 35.23591995239258 35.367828369140625 15 | -------------------------------------------------------------------------------- /test/sql/storage/attach_create_if_exists.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_create_if_exists.test 2 | # description: 3 | # group: [sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | 
statement ok 8 | ATTACH '__TEST_DIR__/attach_create_if_exists.db' AS s1 (TYPE SQLITE) 9 | 10 | statement ok 11 | CREATE TABLE s1.test(i INTEGER); 12 | 13 | statement ok 14 | CREATE TABLE IF NOT EXISTS s1.test(i INTEGER); 15 | 16 | statement ok 17 | CREATE OR REPLACE TABLE s1.test(j INTEGER); 18 | 19 | statement ok 20 | SELECT j FROM s1.test 21 | 22 | -------------------------------------------------------------------------------- /src/include/sqlite_storage.hpp: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // DuckDB 3 | // 4 | // sqlite_storage.hpp 5 | // 6 | // 7 | //===----------------------------------------------------------------------===// 8 | 9 | #pragma once 10 | 11 | #include "duckdb/storage/storage_extension.hpp" 12 | 13 | namespace duckdb { 14 | 15 | class SQLiteStorageExtension : public StorageExtension { 16 | public: 17 | SQLiteStorageExtension(); 18 | }; 19 | 20 | } // namespace duckdb 21 | -------------------------------------------------------------------------------- /test/sql/scanner/issue44.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/scanner/issue44.test 2 | # description: Test issue #44 3 | # group: [sqlite_scanner] 4 | 5 | require sqlite_scanner 6 | 7 | query IIIIIII 8 | SELECT * FROM sqlite_scan('data/db/issue44.db', 'clusters_and_visits') 9 | ---- 10 | 11 | query IIIIIII 12 | SELECT * FROM sqlite_scan('data/db/issue44.db', 'clusters') 13 | ---- 14 | 15 | statement ok 16 | ATTACH 'data/db/issue44.db' AS test_db 17 | 18 | query IIIIIII 19 | SELECT * FROM test_db.clusters_and_visits 20 | ---- 21 | 22 | query IIIIIII 23 | SELECT * FROM test_db.clusters 24 | ---- 25 | -------------------------------------------------------------------------------- /test/sql/storage/attach_concurrent_clients.test: 
-------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_concurrent_clients.test 2 | # description: 3 | # group: [sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH '__TEST_DIR__/attach_concurrentclients.db' AS s1 (TYPE SQLITE) 9 | 10 | statement ok 11 | CREATE TABLE s1.tbl(i INTEGER); 12 | 13 | # just run a bunch of queries - these may or may not work but they at least should not crash 14 | concurrentloop i 0 10 15 | 16 | loop k 0 10 17 | 18 | statement maybe 19 | INSERT INTO s1.tbl VALUES (${i} + ${k}) 20 | ---- 21 | database is locked 22 | 23 | endloop 24 | 25 | endloop 26 | -------------------------------------------------------------------------------- /test/sql/storage/attach_explain.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_explain.test 2 | # description: 3 | # group: [sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH '__TEST_DIR__/attach_explain.db' AS s1 (TYPE SQLITE) 9 | 10 | statement ok 11 | EXPLAIN CREATE TABLE s1.test(i INTEGER); 12 | 13 | statement ok 14 | CREATE TABLE s1.test(i INTEGER); 15 | 16 | statement ok 17 | EXPLAIN INSERT INTO s1.test VALUES (3) 18 | 19 | statement ok 20 | INSERT INTO s1.test VALUES (3) 21 | 22 | statement ok 23 | EXPLAIN SELECT * FROM s1.test 24 | 25 | statement ok 26 | EXPLAIN CREATE TABLE s1.test AS SELECT 42 27 | -------------------------------------------------------------------------------- /test/sql/storage/attach_options.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_options.test 2 | # description: 3 | # group: [sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | statement error 8 | ATTACH ':memory:' AS mem (TYPE SQLITE, BUSY_TIMEOUT 'hello') 9 | ---- 10 | Could not convert string 11 | 12 | statement error 13 | ATTACH ':memory:' AS mem (TYPE 
SQLITE, BUSY_TIMEOUT 99999999999) 14 | ---- 15 | busy_timeout out of range 16 | 17 | statement ok 18 | ATTACH ':memory:' AS mem (TYPE SQLITE, BUSY_TIMEOUT 0) 19 | 20 | statement ok 21 | DETACH mem 22 | 23 | statement ok 24 | ATTACH ':memory:' AS mem (TYPE SQLITE, JOURNAL_MODE 'WAL') 25 | -------------------------------------------------------------------------------- /src/storage/sqlite_index_entry.cpp: -------------------------------------------------------------------------------- 1 | #include "storage/sqlite_index_entry.hpp" 2 | #include "duckdb/catalog/catalog_entry/schema_catalog_entry.hpp" 3 | 4 | namespace duckdb { 5 | 6 | SQLiteIndexEntry::SQLiteIndexEntry(Catalog &catalog, SchemaCatalogEntry &schema, CreateIndexInfo &info, 7 | string table_name_p) 8 | : IndexCatalogEntry(catalog, schema, info), table_name(std::move(table_name_p)) { 9 | } 10 | 11 | string SQLiteIndexEntry::GetSchemaName() const { 12 | return schema.name; 13 | } 14 | 15 | string SQLiteIndexEntry::GetTableName() const { 16 | return table_name; 17 | } 18 | 19 | } // namespace duckdb 20 | -------------------------------------------------------------------------------- /test/sql/storage/attach_use.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_use.test 2 | # description: 3 | # group: [sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH '__TEST_DIR__/attach_use.db' AS s (TYPE SQLITE) 9 | 10 | statement ok 11 | USE s; 12 | 13 | statement ok 14 | CREATE TABLE t(column1 INTEGER); 15 | 16 | statement ok 17 | INSERT INTO t VALUES (0), (1), (2), (3), (4), (5), (6), (7), (8), (9); 18 | 19 | statement ok 20 | CREATE TABLE big AS SELECT cast(t1.column1 as int) c FROM t t1, t t2, t t3; 21 | 22 | query I 23 | SELECT COUNT(*) FROM big; 24 | ---- 25 | 1000 26 | 27 | query I 28 | SELECT SUM(c) FROM big; 29 | ---- 30 | 4500 -------------------------------------------------------------------------------- 
/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8.12) 2 | set(TARGET_NAME sqlite_scanner) 3 | set(EXTENSION_NAME ${TARGET_NAME}_extension) 4 | project(${TARGET_NAME}) 5 | 6 | include_directories(src/sqlite) 7 | 8 | add_subdirectory(src) 9 | set(EXTENSION_OBJECT_FILES ${ALL_OBJECT_FILES}) 10 | add_library(${EXTENSION_NAME} STATIC ${EXTENSION_OBJECT_FILES}) 11 | 12 | set(PARAMETERS "-warnings") 13 | build_loadable_extension(${TARGET_NAME} ${PARAMETERS} ${EXTENSION_OBJECT_FILES}) 14 | 15 | install( 16 | TARGETS ${EXTENSION_NAME} 17 | EXPORT "${DUCKDB_EXPORT_SET}" 18 | LIBRARY DESTINATION "${INSTALL_LIB_DIR}" 19 | ARCHIVE DESTINATION "${INSTALL_LIB_DIR}") 20 | -------------------------------------------------------------------------------- /test/sql/scanner/mixed_numeric.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/scanner/mixed_numeric.test 2 | # description: Test mixed type numeric columns 3 | # group: [sqlite_scanner] 4 | 5 | require sqlite_scanner 6 | 7 | # load from a database with a numeric column that has mixed types 8 | 9 | # Invalid type in column "a": expected float or integer, found "hello" of type "text" instead. 
10 | statement error 11 | select * from sqlite_scan('data/db/mixed_data_numeric.db', 'tbl'); 12 | ---- 13 | hello 14 | 15 | statement ok 16 | SET sqlite_all_varchar=true 17 | 18 | query I 19 | select * from sqlite_scan('data/db/mixed_data_numeric.db', 'tbl'); 20 | ---- 21 | 42 22 | hello 23 | NULL 24 | -------------------------------------------------------------------------------- /test/sql/scanner/borked.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/scanner/borked.test 2 | # description: Test with borked database 3 | # group: [sqlite_scanner] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | CALL sqlite_attach('data/db/borked.db'); 9 | 10 | # this has a string in an integer column 11 | statement error 12 | select * from a; 13 | ---- 14 | Mismatch Type Error: Invalid type in column "i": column was declared as integer, found "Hello" of type "text" instead. 15 | 16 | # this has a string in a double column 17 | statement error 18 | select * from b; 19 | ---- 20 | Mismatch Type Error: Invalid type in column "i": expected float or integer, found "hello" of type "text" instead. 
21 | -------------------------------------------------------------------------------- /data/sql/bench.sql: -------------------------------------------------------------------------------- 1 | LOAD 'build/release/sqlite_scanner.duckdb_extension'; 2 | CALL sqlite_attach('lineitem-sf10.db'); 3 | 4 | select 5 | l_returnflag, 6 | l_linestatus, 7 | sum(l_quantity) as sum_qty, 8 | sum(l_extendedprice) as sum_base_price, 9 | sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, 10 | sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, 11 | avg(l_quantity) as avg_qty, 12 | avg(l_extendedprice) as avg_price, 13 | avg(l_discount) as avg_disc, 14 | count(*) as count_order 15 | from 16 | lineitem 17 | where 18 | l_shipdate <= '1998-09-02' 19 | group by 20 | l_returnflag, 21 | l_linestatus 22 | order by 23 | l_returnflag, 24 | l_linestatus; -------------------------------------------------------------------------------- /src/include/storage/sqlite_options.hpp: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // DuckDB 3 | // 4 | // storage/sqlite_options.hpp 5 | // 6 | // 7 | //===----------------------------------------------------------------------===// 8 | 9 | #pragma once 10 | 11 | #include "duckdb/common/common.hpp" 12 | #include "duckdb/common/enums/access_mode.hpp" 13 | 14 | namespace duckdb { 15 | 16 | struct SQLiteOpenOptions { 17 | // access mode 18 | AccessMode access_mode = AccessMode::READ_WRITE; 19 | // busy time-out in ms 20 | idx_t busy_timeout = 5000; 21 | // journal mode 22 | string journal_mode; 23 | }; 24 | 25 | 26 | } // namespace duckdb 27 | -------------------------------------------------------------------------------- /test/sql/storage/attach_detach.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_detach.test 2 | # description: 3 | # group: 
[sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH '__TEST_DIR__/attach_detach.db' AS s1 (TYPE SQLITE) 9 | 10 | statement ok 11 | CREATE TABLE s1.test(i INTEGER); 12 | 13 | statement ok 14 | INSERT INTO s1.test VALUES (1), (2), (3), (NULL); 15 | 16 | statement ok 17 | DETACH s1 18 | 19 | statement error 20 | SELECT * FROM s1.test 21 | ---- 22 | Catalog Error: Table with name test does not exist! 23 | 24 | statement ok 25 | ATTACH '__TEST_DIR__/attach_detach.db' AS s1 (TYPE SQLITE) 26 | 27 | query I 28 | SELECT * FROM s1.test 29 | ---- 30 | 1 31 | 2 32 | 3 33 | NULL 34 | -------------------------------------------------------------------------------- /test/sql/storage/attach_big.test_slow: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_big.test_slow 2 | # description: 3 | # group: [sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH '__TEST_DIR__/attach_big.db' AS s (TYPE SQLITE) 9 | 10 | statement ok 11 | USE s; 12 | 13 | statement ok 14 | CREATE TABLE t(column1 INTEGER); 15 | 16 | statement ok 17 | INSERT INTO t VALUES (0), (1), (2), (3), (4), (5), (6), (7), (8), (9); 18 | 19 | query I 20 | CREATE TABLE big AS SELECT cast(t1.column1 as int) c FROM t t1, t t2, t t3, t t4, t t5, t t6, t t7; 21 | ---- 22 | 10000000 23 | 24 | query I 25 | SELECT COUNT(*) FROM big; 26 | ---- 27 | 10000000 28 | 29 | query I 30 | SELECT SUM(c) FROM big; 31 | ---- 32 | 45000000 -------------------------------------------------------------------------------- /test/sql/storage/attach_database_list.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_database_list.test 2 | # description: 3 | # group: [sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH '__TEST_DIR__/attach_dblist1.db' AS s1 (TYPE SQLITE) 9 | 10 | statement ok 11 | ATTACH '__TEST_DIR__/attach_dblist2.db' AS 
s2 (TYPE SQLITE) 12 | 13 | statement ok 14 | SELECT * FROM duckdb_databases(); 15 | 16 | query I 17 | SELECT COUNT(*) FROM duckdb_databases() WHERE database_name IN ('s1', 's2'); 18 | ---- 19 | 2 20 | 21 | query II 22 | SELECT name, split(split(file, '/')[-1], '\')[-1] FROM pragma_database_list WHERE Name IN ('s1', 's2') ORDER BY name; 23 | ---- 24 | s1 attach_dblist1.db 25 | s2 attach_dblist2.db 26 | -------------------------------------------------------------------------------- /test/sql/storage/attach_in_memory.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_in_memory.test 2 | # description: 3 | # group: [sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH ':memory:' AS mem (TYPE SQLITE) 9 | 10 | statement ok 11 | CREATE TABLE mem.integers(i INTEGER) 12 | 13 | statement ok 14 | INSERT INTO mem.integers VALUES (42) 15 | 16 | query I 17 | SELECT * FROM mem.integers 18 | ---- 19 | 42 20 | 21 | statement ok 22 | BEGIN 23 | 24 | statement ok 25 | INSERT INTO mem.integers FROM range(3) 26 | 27 | query I 28 | SELECT * FROM mem.integers 29 | ---- 30 | 42 31 | 0 32 | 1 33 | 2 34 | 35 | statement ok 36 | ROLLBACK 37 | 38 | query I 39 | SELECT * FROM mem.integers 40 | ---- 41 | 42 42 | -------------------------------------------------------------------------------- /test/sql/storage/attach_drop.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_drop.test 2 | # description: 3 | # group: [sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH '__TEST_DIR__/attach_drop.db' AS simple (TYPE SQLITE) 9 | 10 | statement ok 11 | CREATE TABLE simple.test(i INTEGER); 12 | 13 | statement ok 14 | SELECT * FROM simple.test; 15 | 16 | statement ok 17 | DROP TABLE simple.test 18 | 19 | # verify the drop was successful 20 | statement error 21 | SELECT * FROM simple.test; 22 | ---- 23 | Catalog 
Error: Table with name test does not exist! 24 | 25 | statement error 26 | DROP TABLE simple.testx 27 | ---- 28 | Table with name testx does not exist 29 | 30 | statement ok 31 | DROP TABLE IF EXISTS simple.testx -------------------------------------------------------------------------------- /test/sql/storage/attach_mixed_numeric.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_mixed_numeric.test 2 | # description: 3 | # group: [sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | # load from a database with a numeric column that has mixed types 8 | statement ok 9 | ATTACH 'data/db/mixed_data_numeric.db' as mydb (TYPE sqlite); 10 | 11 | statement error 12 | select * from mydb.tbl; 13 | ---- 14 | Invalid type in column "a" 15 | 16 | statement ok 17 | DETACH mydb; 18 | 19 | statement ok 20 | SET sqlite_all_varchar=true 21 | 22 | # attaching using the all_varchar setting 23 | statement ok 24 | ATTACH 'data/db/mixed_data_numeric.db' as mydb (TYPE sqlite); 25 | 26 | query I 27 | select * from mydb.tbl; 28 | ---- 29 | 42 30 | hello 31 | NULL 32 | -------------------------------------------------------------------------------- /src/include/sqlite_scanner_extension.hpp: -------------------------------------------------------------------------------- 1 | #ifndef DUCKDB_BUILD_LOADABLE_EXTENSION 2 | #define DUCKDB_BUILD_LOADABLE_EXTENSION 3 | #endif 4 | #include "duckdb.hpp" 5 | 6 | #include "duckdb/catalog/catalog.hpp" 7 | #include "duckdb/parser/parsed_data/create_table_function_info.hpp" 8 | 9 | using namespace duckdb; 10 | 11 | class SqliteScannerExtension : public Extension { 12 | public: 13 | std::string Name() override { 14 | return "sqlite_scanner"; 15 | } 16 | void Load(DuckDB &db) override; 17 | }; 18 | 19 | extern "C" { 20 | DUCKDB_EXTENSION_API void sqlite_scanner_init(duckdb::DatabaseInstance &db); 21 | DUCKDB_EXTENSION_API const char *sqlite_scanner_version(); 22 | DUCKDB_EXTENSION_API 
void sqlite_scanner_storage_init(DBConfig &config); 23 | } -------------------------------------------------------------------------------- /src/include/storage/sqlite_index_entry.hpp: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // DuckDB 3 | // 4 | // storage/sqlite_index_entry.hpp 5 | // 6 | // 7 | //===----------------------------------------------------------------------===// 8 | 9 | #pragma once 10 | 11 | #include "duckdb/catalog/catalog_entry/index_catalog_entry.hpp" 12 | 13 | namespace duckdb { 14 | 15 | class SQLiteIndexEntry : public IndexCatalogEntry { 16 | public: 17 | SQLiteIndexEntry(Catalog &catalog, SchemaCatalogEntry &schema, CreateIndexInfo &info, string table_name); 18 | 19 | string table_name; 20 | 21 | public: 22 | string GetSchemaName() const override; 23 | string GetTableName() const override; 24 | }; 25 | 26 | } // namespace duckdb 27 | -------------------------------------------------------------------------------- /test/sql/storage/attach_delete_issue.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_delete_issue.test 2 | # description: 3 | # group: [sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH '__TEST_DIR__/sqlite_db.db' AS sqlite_db (TYPE SQLITE) 9 | 10 | statement ok 11 | CREATE TABLE sqlite_db.tbl(id INTEGER, name VARCHAR); 12 | 13 | statement ok 14 | INSERT INTO sqlite_db.tbl VALUES (42, 'DuckDB'); 15 | 16 | statement ok 17 | INSERT INTO sqlite_db.tbl VALUES (42, 'DuckDB'); 18 | 19 | statement ok 20 | INSERT INTO sqlite_db.tbl VALUES (42, 'DuckDB'); 21 | 22 | query II 23 | SELECT * FROM sqlite_db.tbl; 24 | ---- 25 | 42 DuckDB 26 | 42 DuckDB 27 | 42 DuckDB 28 | 29 | statement ok 30 | DELETE FROM sqlite_db.tbl WHERE id=42; 31 | 32 | query II 33 | SELECT * FROM sqlite_db.tbl; 34 | ---- 35 | 
-------------------------------------------------------------------------------- /test/sql/storage/attach_simple.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_simple.test 2 | # description: 3 | # group: [sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH '__TEST_DIR__/attach_simple.db' AS simple (TYPE SQLITE) 9 | 10 | statement ok 11 | CREATE TABLE simple.test(i INTEGER); 12 | 13 | query I 14 | INSERT INTO simple.test VALUES (42); 15 | ---- 16 | 1 17 | 18 | query I 19 | SELECT * FROM simple.test 20 | ---- 21 | 42 22 | 23 | # insert into a non-existent table 24 | statement error 25 | INSERT INTO tst VALUES (84) 26 | ---- 27 | simple.test 28 | 29 | statement error 30 | INSERT INTO tst VALUES (84) 31 | ---- 32 | test 33 | 34 | # create table as 35 | statement ok 36 | CREATE TABLE simple.test2 AS SELECT 84 37 | 38 | query I 39 | SELECT * FROM simple.test2 40 | ---- 41 | 84 42 | -------------------------------------------------------------------------------- /test/sql/storage/attach_prefix.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_prefix.test 2 | # description: 3 | # group: [sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH 'sqlite:__TEST_DIR__/attach_replacement.db' AS s 9 | 10 | statement ok 11 | CREATE TABLE s.integers(i INTEGER) 12 | 13 | statement ok 14 | INSERT INTO s.integers VALUES (42) 15 | 16 | statement ok 17 | DETACH s 18 | 19 | # attach automatically finds out this is a sqlite file 20 | statement ok 21 | ATTACH '__TEST_DIR__/attach_replacement.db' AS s 22 | 23 | query I 24 | SELECT * FROM s.integers 25 | ---- 26 | 42 27 | 28 | statement ok 29 | DETACH s 30 | 31 | statement ok 32 | ATTACH '__TEST_DIR__/attach_replacement.db' AS s (TYPE sqlite) 33 | 34 | query I 35 | SELECT * FROM s.integers 36 | ---- 37 | 42 38 | 39 | statement ok 40 | DETACH s 41 
| -------------------------------------------------------------------------------- /src/include/sqlite_utils.hpp: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // DuckDB 3 | // 4 | // sqlite_utils.hpp 5 | // 6 | // 7 | //===----------------------------------------------------------------------===// 8 | 9 | #pragma once 10 | 11 | #include "duckdb.hpp" 12 | #include "sqlite3.h" 13 | 14 | namespace duckdb { 15 | 16 | class SQLiteUtils { 17 | public: 18 | static void Check(int rc, sqlite3 *db); 19 | static string TypeToString(int sqlite_type); 20 | static LogicalType TypeToLogicalType(const string &sqlite_type); 21 | static string SanitizeString(const string &table_name); 22 | static string SanitizeIdentifier(const string &table_name); 23 | static LogicalType ToSQLiteType(const LogicalType &input); 24 | string ToSQLiteTypeAlias(const LogicalType &input); 25 | }; 26 | 27 | } // namespace duckdb 28 | -------------------------------------------------------------------------------- /test/sql/storage/attach_read_only.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_read_only.test 2 | # description: 3 | # group: [sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH 'data/db/chinook.db' (TYPE SQLITE, READ_ONLY) 9 | 10 | query III 11 | SELECT * FROM chinook.albums LIMIT 10; 12 | ---- 13 | 1 For Those About To Rock We Salute You 1 14 | 2 Balls to the Wall 2 15 | 3 Restless and Wild 2 16 | 4 Let There Be Rock 1 17 | 5 Big Ones 3 18 | 6 Jagged Little Pill 4 19 | 7 Facelift 5 20 | 8 Warner 25 Anos 6 21 | 9 Plays Metallica By Four Cellos 7 22 | 10 Audioslave 8 23 | 24 | statement error 25 | SELECT * FROM chinook.albumz 26 | ---- 27 | albums 28 | 29 | statement error 30 | CREATE TABLE chinook.test(i INTEGER) 31 | ---- 32 | read-only mode 33 | 34 | statement error 
35 | INSERT INTO chinook.albums VALUES (NULL, NULL, NULL); 36 | ---- 37 | read-only mode 38 | -------------------------------------------------------------------------------- /test/sql/storage/attach_on_conflict.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_on_conflict.test 2 | # description: 3 | # group: [sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH '__TEST_DIR__/attach_on_conflict.db' AS s (TYPE SQLITE) 9 | 10 | statement ok 11 | CREATE TABLE s.tbl(i INTEGER PRIMARY KEY) 12 | 13 | statement ok 14 | INSERT INTO s.tbl VALUES (1), (2), (3) 15 | 16 | statement error 17 | INSERT INTO s.tbl VALUES (1) 18 | ---- 19 | UNIQUE constraint failed 20 | 21 | statement error 22 | INSERT OR IGNORE INTO s.tbl VALUES (1) 23 | ---- 24 | ON CONFLICT clause not yet supported for insertion into SQLite table 25 | 26 | # INSERT OR IGNORE in a table without primary key constraints 27 | statement ok 28 | CREATE TABLE s.tbl2(i INTEGER) 29 | 30 | statement error 31 | INSERT OR REPLACE INTO s.tbl2 VALUES (1) 32 | ---- 33 | There are no UNIQUE/PRIMARY KEY Indexes 34 | -------------------------------------------------------------------------------- /src/include/sqlite_scanner.hpp: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // DuckDB 3 | // 4 | // sqlite_scanner.hpp 5 | // 6 | // 7 | //===----------------------------------------------------------------------===// 8 | 9 | #pragma once 10 | 11 | #include "duckdb.hpp" 12 | 13 | namespace duckdb { 14 | class SQLiteDB; 15 | 16 | struct SqliteBindData : public TableFunctionData { 17 | string file_name; 18 | string table_name; 19 | 20 | vector names; 21 | vector types; 22 | 23 | idx_t max_rowid = 0; 24 | bool all_varchar = false; 25 | 26 | idx_t rows_per_group = 122880; 27 | SQLiteDB *global_db; 28 | }; 29 | 30 | 
class SqliteScanFunction : public TableFunction { 31 | public: 32 | SqliteScanFunction(); 33 | }; 34 | 35 | class SqliteAttachFunction : public TableFunction { 36 | public: 37 | SqliteAttachFunction(); 38 | }; 39 | 40 | } // namespace duckdb 41 | -------------------------------------------------------------------------------- /test/sql/storage/attach_describe.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_describe.test 2 | # description: 3 | # group: [sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH '__TEST_DIR__/attach_describe.db' AS s1 (TYPE SQLITE) 9 | 10 | statement ok 11 | CREATE TABLE s1.test(i BIGINT PRIMARY KEY, j BIGINT DEFAULT 42); 12 | 13 | query IIIIII 14 | DESCRIBE s1.test 15 | ---- 16 | i BIGINT YES PRI NULL NULL 17 | j BIGINT YES NULL 42 NULL 18 | 19 | query ITTTTT 20 | PRAGMA table_info('s1.test'); 21 | ---- 22 | 0 i BIGINT 0 NULL 1 23 | 1 j BIGINT 0 42 0 24 | 25 | # more complex default 26 | statement ok 27 | CREATE TABLE s1.test2(s VARCHAR DEFAULT 'hello, '||'world'); 28 | 29 | query ITTTTT 30 | PRAGMA table_info('s1.test2'); 31 | ---- 32 | 0 s VARCHAR false ('hello, ' || 'world') false 33 | 34 | # DEFAULT in INSERT 35 | statement ok 36 | INSERT INTO s1.test2 VALUES (DEFAULT); 37 | 38 | query I 39 | SELECT * FROM s1.test2 40 | ---- 41 | hello, world 42 | -------------------------------------------------------------------------------- /test/sql/scanner/tpch.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/scanner/tpch.test 2 | # description: test tpch database 3 | # group: [sqlite_scanner] 4 | 5 | require sqlite_scanner 6 | 7 | require-env SQLITE_TPCH_GENERATED 8 | 9 | statement ok 10 | CALL sqlite_attach('data/db/tpch.db'); 11 | 12 | require tpch 13 | 14 | loop i 1 9 15 | 16 | query I 17 | PRAGMA tpch(${i}) 18 | ---- 19 | :duckdb/extension/tpch/dbgen/answers/sf0.1/q0${i}.csv 20 | 21 | 
endloop 22 | 23 | 24 | loop i 10 15 25 | 26 | query I 27 | PRAGMA tpch(${i}) 28 | ---- 29 | :duckdb/extension/tpch/dbgen/answers/sf0.1/q${i}.csv 30 | 31 | endloop 32 | 33 | loop i 16 23 34 | 35 | query I 36 | PRAGMA tpch(${i}) 37 | ---- 38 | :duckdb/extension/tpch/dbgen/answers/sf0.1/q${i}.csv 39 | 40 | endloop 41 | 42 | # run Q15 single-threaded: it is not deterministic when run on doubles with multi-threading enabled 43 | statement ok 44 | SET threads=1 45 | 46 | query I 47 | PRAGMA tpch(15) 48 | ---- 49 | :duckdb/extension/tpch/dbgen/answers/sf0.1/q15.csv 50 | -------------------------------------------------------------------------------- /test/sql/storage/attach_defaults.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_defaults.test 2 | # description: 3 | # group: [sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH '__TEST_DIR__/attach_defaults.db' AS s1 (TYPE SQLITE) 9 | 10 | statement ok 11 | CREATE TABLE s1.test(i INTEGER DEFAULT 42, j INTEGER); 12 | 13 | statement ok 14 | INSERT INTO s1.test (i) VALUES (3) 15 | 16 | statement ok 17 | INSERT INTO s1.test (j) VALUES (84) 18 | 19 | query II 20 | SELECT * FROM s1.test 21 | ---- 22 | 3 NULL 23 | 42 84 24 | 25 | statement ok 26 | INSERT INTO s1.test (j, i) VALUES (1, 10) 27 | 28 | statement ok 29 | INSERT INTO s1.test (i, j) VALUES (100, 1000) 30 | 31 | query II 32 | SELECT * FROM s1.test 33 | ---- 34 | 3 NULL 35 | 42 84 36 | 10 1 37 | 100 1000 38 | 39 | statement error 40 | INSERT INTO s1.test (zzz) VALUES (3) 41 | ---- 42 | does not have a column with name "zzz" 43 | 44 | statement error 45 | INSERT INTO s1.test (j, j, j) VALUES (1, 2, 3) 46 | ---- 47 | Duplicate column name "j" 48 | -------------------------------------------------------------------------------- /src/include/storage/sqlite_index.hpp: -------------------------------------------------------------------------------- 1 | 
//===----------------------------------------------------------------------===// 2 | // DuckDB 3 | // 4 | // storage/sqlite_index.hpp 5 | // 6 | // 7 | //===----------------------------------------------------------------------===// 8 | 9 | #pragma once 10 | 11 | #include "duckdb/execution/physical_operator.hpp" 12 | #include "duckdb/parser/parsed_data/create_index_info.hpp" 13 | 14 | namespace duckdb { 15 | 16 | //! PhysicalCreateSequence represents a CREATE SEQUENCE command 17 | class SQLiteCreateIndex : public PhysicalOperator { 18 | public: 19 | explicit SQLiteCreateIndex(unique_ptr info, TableCatalogEntry &table); 20 | 21 | unique_ptr info; 22 | TableCatalogEntry &table; 23 | 24 | public: 25 | // Source interface 26 | SourceResultType GetData(ExecutionContext &context, DataChunk &chunk, OperatorSourceInput &input) const override; 27 | 28 | bool IsSource() const override { 29 | return true; 30 | } 31 | }; 32 | 33 | } // namespace duckdb 34 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2018-2024 Stichting DuckDB Foundation 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 6 | 7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 8 | -------------------------------------------------------------------------------- /src/include/storage/sqlite_table_entry.hpp: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // DuckDB 3 | // 4 | // storage/sqlite_table_entry.hpp 5 | // 6 | // 7 | //===----------------------------------------------------------------------===// 8 | 9 | #pragma once 10 | 11 | #include "duckdb/catalog/catalog_entry/table_catalog_entry.hpp" 12 | 13 | namespace duckdb { 14 | 15 | class SQLiteTableEntry : public TableCatalogEntry { 16 | public: 17 | SQLiteTableEntry(Catalog &catalog, SchemaCatalogEntry &schema, CreateTableInfo &info, bool all_varchar); 18 | 19 | bool all_varchar; 20 | 21 | public: 22 | unique_ptr GetStatistics(ClientContext &context, column_t column_id) override; 23 | 24 | TableFunction GetScanFunction(ClientContext &context, unique_ptr &bind_data) override; 25 | 26 | TableStorageInfo GetStorageInfo(ClientContext &context) override; 27 | 28 | void BindUpdateConstraints(Binder &binder, LogicalGet &get, LogicalProjection &proj, LogicalUpdate &update, 29 | ClientContext &context) override; 30 | }; 31 | 32 | } // namespace duckdb 33 | -------------------------------------------------------------------------------- /test/sql/storage/attach_keywords.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_keywords.test 2 | # description: 3 | # group: [sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH '__TEST_DIR__/attach_keywords.db' AS s (TYPE SQLITE) 9 | 10 | statement ok 11 | CREATE TABLE 
s."TaBlE"("TABLE" INTEGER); 12 | 13 | statement ok 14 | INSERT INTO s.table VALUES (42); 15 | 16 | query I 17 | SELECT "TABLE" FROM s."TaBlE" 18 | ---- 19 | 42 20 | 21 | query I 22 | SELECT "table" FROM s."table" 23 | ---- 24 | 42 25 | 26 | statement ok 27 | CREATE TABLE s."this 'name' contains ""escaped quotes"""("this 'name' contains ""escaped quotes""" INTEGER); 28 | 29 | statement ok 30 | INSERT INTO s."this 'name' contains ""escaped quotes""" VALUES (84); 31 | 32 | query I 33 | SELECT "this 'name' contains ""escaped quotes""" FROM s."this 'name' contains ""escaped quotes""" 34 | ---- 35 | 84 36 | 37 | statement ok 38 | ALTER TABLE s."this 'name' contains ""escaped quotes""" DROP COLUMN IF EXISTS "hello""world" 39 | 40 | statement error 41 | ALTER TABLE s."this 'name' contains ""escaped quotes""" DROP COLUMN "this 'name' contains ""escaped quotes""" 42 | ---- 43 | no other columns exist 44 | -------------------------------------------------------------------------------- /.github/workflows/MainDistributionPipeline.yml: -------------------------------------------------------------------------------- 1 | # 2 | # This workflow calls the main distribution pipeline from DuckDB to build, test and (optionally) release the extension 3 | # 4 | name: Main Extension Distribution Pipeline 5 | on: 6 | push: 7 | pull_request: 8 | workflow_dispatch: 9 | 10 | concurrency: 11 | group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref || '' }}-${{ github.base_ref || '' }}-${{ github.ref != 'refs/heads/main' || github.sha }} 12 | cancel-in-progress: true 13 | 14 | jobs: 15 | duckdb-stable-build: 16 | name: Build extension binaries 17 | uses: ./.github/workflows/_extension_distribution.yml 18 | with: 19 | duckdb_version: main 20 | extension_name: sqlite_scanner 21 | 22 | duckdb-stable-deploy: 23 | name: Deploy extension binaries 24 | needs: duckdb-stable-build 25 | uses: duckdb/extension-ci-tools/.github/workflows/_extension_deploy.yml@main 26 | secrets: inherit 27 | 
with: 28 | duckdb_version: main 29 | extension_name: sqlite_scanner 30 | deploy_latest: ${{ startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' }} 31 | deploy_versioned: ${{ startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' }} 32 | -------------------------------------------------------------------------------- /src/include/storage/sqlite_transaction_manager.hpp: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // DuckDB 3 | // 4 | // storage/sqlite_transaction_manager.hpp 5 | // 6 | // 7 | //===----------------------------------------------------------------------===// 8 | 9 | #pragma once 10 | 11 | #include "duckdb/transaction/transaction_manager.hpp" 12 | #include "storage/sqlite_catalog.hpp" 13 | #include "storage/sqlite_transaction.hpp" 14 | #include "duckdb/common/reference_map.hpp" 15 | 16 | namespace duckdb { 17 | 18 | class SQLiteTransactionManager : public TransactionManager { 19 | public: 20 | SQLiteTransactionManager(AttachedDatabase &db_p, SQLiteCatalog &sqlite_catalog); 21 | 22 | Transaction &StartTransaction(ClientContext &context) override; 23 | ErrorData CommitTransaction(ClientContext &context, Transaction &transaction) override; 24 | void RollbackTransaction(Transaction &transaction) override; 25 | 26 | void Checkpoint(ClientContext &context, bool force = false) override; 27 | 28 | private: 29 | SQLiteCatalog &sqlite_catalog; 30 | mutex transaction_lock; 31 | reference_map_t> transactions; 32 | }; 33 | 34 | } // namespace duckdb 35 | -------------------------------------------------------------------------------- /test/sql/storage/attach_views.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_views.test 2 | # description: 3 | # group: [sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH 
'__TEST_DIR__/attach_views.db' AS s (TYPE SQLITE) 9 | 10 | statement ok 11 | USE s; 12 | 13 | statement ok 14 | CREATE VIEW v1 AS SELECT 42; 15 | 16 | query I 17 | SELECT * FROM v1 18 | ---- 19 | 42 20 | 21 | statement error 22 | CREATE VIEW v1 AS SELECT 84; 23 | ---- 24 | view v1 already exists 25 | 26 | statement ok 27 | CREATE VIEW IF NOT EXISTS v1 AS SELECT 84; 28 | 29 | statement ok 30 | CREATE OR REPLACE VIEW v1 AS SELECT 84; 31 | 32 | query I 33 | SELECT * FROM v1 34 | ---- 35 | 84 36 | 37 | statement ok 38 | DROP VIEW v1 39 | 40 | statement error 41 | SELECT * FROM v1 42 | ---- 43 | Table with name v1 does not exist 44 | 45 | statement error 46 | DROP VIEW v1 47 | ---- 48 | View with name v1 does not exist 49 | 50 | statement ok 51 | CREATE VIEW v1(a) AS SELECT 99; 52 | 53 | query I 54 | SELECT a FROM v1 55 | ---- 56 | 99 57 | 58 | # special names 59 | statement ok 60 | CREATE VIEW "table "" table '' table"("column "" column '' column") AS SELECT 3 61 | 62 | query I 63 | SELECT "column "" column '' column" FROM "table "" table '' table" 64 | ---- 65 | 3 66 | -------------------------------------------------------------------------------- /src/include/storage/sqlite_transaction.hpp: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // DuckDB 3 | // 4 | // storage/sqlite_transaction.hpp 5 | // 6 | // 7 | //===----------------------------------------------------------------------===// 8 | 9 | #pragma once 10 | 11 | #include "duckdb/transaction/transaction.hpp" 12 | #include "duckdb/common/case_insensitive_map.hpp" 13 | #include "sqlite_db.hpp" 14 | 15 | namespace duckdb { 16 | class SQLiteCatalog; 17 | class SQLiteTableEntry; 18 | 19 | class SQLiteTransaction : public Transaction { 20 | public: 21 | SQLiteTransaction(SQLiteCatalog &sqlite_catalog, TransactionManager &manager, ClientContext &context); 22 | ~SQLiteTransaction() override; 23 | 
24 | void Start(); 25 | void Commit(); 26 | void Rollback(); 27 | 28 | SQLiteDB &GetDB(); 29 | optional_ptr GetCatalogEntry(const string &table_name); 30 | void DropEntry(CatalogType type, const string &table_name, bool cascade); 31 | void ClearTableEntry(const string &table_name); 32 | 33 | static SQLiteTransaction &Get(ClientContext &context, Catalog &catalog); 34 | 35 | private: 36 | SQLiteCatalog &sqlite_catalog; 37 | SQLiteDB *db; 38 | SQLiteDB owned_db; 39 | case_insensitive_map_t> catalog_entries; 40 | }; 41 | 42 | } // namespace duckdb 43 | -------------------------------------------------------------------------------- /src/include/storage/sqlite_delete.hpp: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // DuckDB 3 | // 4 | // storage/sqlite_delete.hpp 5 | // 6 | // 7 | //===----------------------------------------------------------------------===// 8 | 9 | #pragma once 10 | 11 | #include "duckdb/execution/physical_operator.hpp" 12 | 13 | namespace duckdb { 14 | 15 | class SQLiteDelete : public PhysicalOperator { 16 | public: 17 | SQLiteDelete(LogicalOperator &op, TableCatalogEntry &table, idx_t row_id_index); 18 | 19 | //! 
The table to delete from 20 | TableCatalogEntry &table; 21 | idx_t row_id_index; 22 | 23 | public: 24 | // Source interface 25 | SourceResultType GetData(ExecutionContext &context, DataChunk &chunk, OperatorSourceInput &input) const override; 26 | 27 | bool IsSource() const override { 28 | return true; 29 | } 30 | 31 | public: 32 | // Sink interface 33 | unique_ptr GetGlobalSinkState(ClientContext &context) const override; 34 | SinkResultType Sink(ExecutionContext &context, DataChunk &chunk, OperatorSinkInput &input) const override; 35 | 36 | bool IsSink() const override { 37 | return true; 38 | } 39 | 40 | bool ParallelSink() const override { 41 | return false; 42 | } 43 | 44 | string GetName() const override; 45 | string ParamsToString() const override; 46 | }; 47 | 48 | } // namespace duckdb 49 | -------------------------------------------------------------------------------- /test/sql/storage/attach_catalog.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_catalog.test 2 | # description: 3 | # group: [sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH '__TEST_DIR__/attach_catalog.db' AS s (TYPE SQLITE) 9 | 10 | # schemas 11 | statement error 12 | CREATE SCHEMA s.s1 13 | ---- 14 | SQLite databases do not support creating new schemas 15 | 16 | statement error 17 | DROP SCHEMA s.s1 18 | ---- 19 | SQLite databases do not support dropping schemas 20 | 21 | # macros 22 | statement error 23 | CREATE MACRO s.plus1(a) AS a+1 24 | ---- 25 | SQLite databases do not support creating functions 26 | 27 | statement error 28 | DROP MACRO s.plus1 29 | ---- 30 | does not exist 31 | 32 | statement error 33 | SELECT s.plus1(42); 34 | ---- 35 | does not exist 36 | 37 | statement error 38 | SELECT s.plus1(42) over (); 39 | ---- 40 | does not exist 41 | 42 | # sequences 43 | statement error 44 | CREATE SEQUENCE s.seq 45 | ---- 46 | SQLite databases do not support creating sequences 47 
| 48 | statement error 49 | SELECT nextval('s.seq') 50 | ---- 51 | does not exist 52 | 53 | statement error 54 | DROP SEQUENCE s.seq 55 | ---- 56 | does not exist 57 | 58 | statement ok 59 | DROP SEQUENCE IF EXISTS s.seq 60 | 61 | # types 62 | statement error 63 | CREATE TYPE s.my_type as BLOB; 64 | ---- 65 | SQLite databases do not support creating types 66 | 67 | statement error 68 | SELECT 'a'::"s.my_type" 69 | ---- 70 | does not exist 71 | -------------------------------------------------------------------------------- /test/sql/storage/attach_types.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_types.test 2 | # description: 3 | # group: [sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH '__TEST_DIR__/attach_types.db' AS s (TYPE SQLITE) 9 | 10 | statement ok 11 | CREATE TABLE s.types(i INTEGER, j BIGINT, k DOUBLE, l VARCHAR); 12 | 13 | statement ok 14 | INSERT INTO s.types VALUES (42, 84, 0.5, 'hello world this is my string'); 15 | 16 | statement ok 17 | INSERT INTO s.types VALUES (NULL, NULL, NULL, NULL); 18 | 19 | statement ok 20 | SELECT * FROM s.types 21 | 22 | #halt 23 | 24 | query IIII 25 | SELECT * FROM s.types 26 | ---- 27 | 42 84 0.5 hello world this is my string 28 | NULL NULL NULL NULL 29 | 30 | # test all types 31 | statement ok 32 | CREATE TABLE s.all_types AS SELECT * FROM test_all_types(); 33 | 34 | query IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII nosort r1 35 | SELECT COLUMNS(* EXCLUDE (float, double))::VARCHAR FROM s.all_types; 36 | ---- 37 | 38 | statement ok 39 | CREATE TEMPORARY TABLE all_types_modified AS SELECT * EXCLUDE (float, double) REPLACE (bool::INT AS bool) FROM test_all_types() 40 | 41 | query IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII nosort r1 42 | SELECT COLUMNS(*)::VARCHAR FROM all_types_modified; 43 | ---- 44 | 45 | statement ok 46 | UPDATE s.all_types SET timestamp=TIMESTAMP '2022-01-01 01:02:03'; 47 | 48 | query I 49 
| SELECT timestamp FROM s.all_types 50 | ---- 51 | 2022-01-01 01:02:03 52 | 2022-01-01 01:02:03 53 | 2022-01-01 01:02:03 54 | -------------------------------------------------------------------------------- /src/include/storage/sqlite_update.hpp: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // DuckDB 3 | // 4 | // storage/sqlite_update.hpp 5 | // 6 | // 7 | //===----------------------------------------------------------------------===// 8 | 9 | #pragma once 10 | 11 | #include "duckdb/execution/physical_operator.hpp" 12 | #include "duckdb/common/index_vector.hpp" 13 | 14 | namespace duckdb { 15 | 16 | class SQLiteUpdate : public PhysicalOperator { 17 | public: 18 | SQLiteUpdate(LogicalOperator &op, TableCatalogEntry &table, vector columns); 19 | 20 | //! The table to delete from 21 | TableCatalogEntry &table; 22 | //! The set of columns to update 23 | vector columns; 24 | 25 | public: 26 | // Source interface 27 | SourceResultType GetData(ExecutionContext &context, DataChunk &chunk, OperatorSourceInput &input) const override; 28 | 29 | bool IsSource() const override { 30 | return true; 31 | } 32 | 33 | public: 34 | // Sink interface 35 | unique_ptr GetGlobalSinkState(ClientContext &context) const override; 36 | SinkResultType Sink(ExecutionContext &context, DataChunk &chunk, OperatorSinkInput &input) const override; 37 | 38 | bool IsSink() const override { 39 | return true; 40 | } 41 | 42 | bool ParallelSink() const override { 43 | return false; 44 | } 45 | 46 | string GetName() const override; 47 | string ParamsToString() const override; 48 | }; 49 | 50 | } // namespace duckdb 51 | -------------------------------------------------------------------------------- /src/sqlite_extension.cpp: -------------------------------------------------------------------------------- 1 | #ifndef DUCKDB_BUILD_LOADABLE_EXTENSION 2 | #define 
DUCKDB_BUILD_LOADABLE_EXTENSION 3 | #endif 4 | #include "duckdb.hpp" 5 | 6 | #include "sqlite_scanner.hpp" 7 | #include "sqlite_storage.hpp" 8 | #include "sqlite_scanner_extension.hpp" 9 | 10 | #include "duckdb/catalog/catalog.hpp" 11 | #include "duckdb/main/extension_util.hpp" 12 | #include "duckdb/parser/parsed_data/create_table_function_info.hpp" 13 | 14 | using namespace duckdb; 15 | 16 | extern "C" { 17 | 18 | static void LoadInternal(DatabaseInstance &db) { 19 | SqliteScanFunction sqlite_fun; 20 | ExtensionUtil::RegisterFunction(db, sqlite_fun); 21 | 22 | SqliteAttachFunction attach_func; 23 | ExtensionUtil::RegisterFunction(db, attach_func); 24 | 25 | auto &config = DBConfig::GetConfig(db); 26 | config.AddExtensionOption("sqlite_all_varchar", "Load all SQLite columns as VARCHAR columns", LogicalType::BOOLEAN); 27 | 28 | config.storage_extensions["sqlite_scanner"] = make_uniq(); 29 | } 30 | 31 | void SqliteScannerExtension::Load(DuckDB &db) { 32 | LoadInternal(*db.instance); 33 | } 34 | 35 | DUCKDB_EXTENSION_API void sqlite_scanner_init(duckdb::DatabaseInstance &db) { 36 | LoadInternal(db); 37 | } 38 | 39 | DUCKDB_EXTENSION_API const char *sqlite_scanner_version() { 40 | return DuckDB::LibraryVersion(); 41 | } 42 | 43 | DUCKDB_EXTENSION_API void sqlite_scanner_storage_init(DBConfig &config) { 44 | config.storage_extensions["sqlite_scanner"] = make_uniq(); 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /src/storage/sqlite_transaction_manager.cpp: -------------------------------------------------------------------------------- 1 | #include "storage/sqlite_transaction_manager.hpp" 2 | #include "duckdb/main/attached_database.hpp" 3 | 4 | namespace duckdb { 5 | 6 | SQLiteTransactionManager::SQLiteTransactionManager(AttachedDatabase &db_p, SQLiteCatalog &sqlite_catalog) 7 | : TransactionManager(db_p), sqlite_catalog(sqlite_catalog) { 8 | } 9 | 10 | Transaction 
&SQLiteTransactionManager::StartTransaction(ClientContext &context) { 11 | auto transaction = make_uniq(sqlite_catalog, *this, context); 12 | transaction->Start(); 13 | auto &result = *transaction; 14 | lock_guard l(transaction_lock); 15 | transactions[result] = std::move(transaction); 16 | return result; 17 | } 18 | 19 | ErrorData SQLiteTransactionManager::CommitTransaction(ClientContext &context, Transaction &transaction) { 20 | auto &sqlite_transaction = transaction.Cast(); 21 | sqlite_transaction.Commit(); 22 | lock_guard l(transaction_lock); 23 | transactions.erase(transaction); 24 | return ErrorData(); 25 | } 26 | 27 | void SQLiteTransactionManager::RollbackTransaction(Transaction &transaction) { 28 | auto &sqlite_transaction = transaction.Cast(); 29 | sqlite_transaction.Rollback(); 30 | lock_guard l(transaction_lock); 31 | transactions.erase(transaction); 32 | } 33 | 34 | void SQLiteTransactionManager::Checkpoint(ClientContext &context, bool force) { 35 | auto &transaction = SQLiteTransaction::Get(context, db.GetCatalog()); 36 | auto &db = transaction.GetDB(); 37 | db.Execute("PRAGMA wal_checkpoint"); 38 | } 39 | 40 | } // namespace duckdb 41 | -------------------------------------------------------------------------------- /.github/workflows/HighPriorityIssues.yml: -------------------------------------------------------------------------------- 1 | name: Create Internal issue when the "High Priority" label is applied 2 | on: 3 | issues: 4 | types: 5 | - labeled 6 | 7 | env: 8 | GH_TOKEN: ${{ secrets.DUCKDBLABS_BOT_TOKEN }} 9 | # an event triggering this workflow is either an issue or a pull request, 10 | # hence only one of the numbers will be filled in the TITLE_PREFIX 11 | TITLE_PREFIX: "[sqlite_scanner/#${{ github.event.issue.number }}]" 12 | PUBLIC_ISSUE_TITLE: ${{ github.event.issue.title }} 13 | 14 | jobs: 15 | create_or_label_issue: 16 | if: github.event.label.name == 'High Priority' 17 | runs-on: ubuntu-latest 18 | steps: 19 | - name: Get mirror 
issue number 20 | run: | 21 | gh issue list --repo duckdblabs/duckdb-internal --search "${TITLE_PREFIX}" --json title,number --jq ".[] | select(.title | startswith(\"$TITLE_PREFIX\")).number" > mirror_issue_number.txt 22 | echo "MIRROR_ISSUE_NUMBER=$(cat mirror_issue_number.txt)" >> $GITHUB_ENV 23 | 24 | - name: Print whether mirror issue exists 25 | run: | 26 | if [ "$MIRROR_ISSUE_NUMBER" == "" ]; then 27 | echo "Mirror issue with title prefix '$TITLE_PREFIX' does not exist yet" 28 | else 29 | echo "Mirror issue with title prefix '$TITLE_PREFIX' exists with number $MIRROR_ISSUE_NUMBER" 30 | fi 31 | 32 | - name: Create or label issue 33 | run: | 34 | if [ "$MIRROR_ISSUE_NUMBER" == "" ]; then 35 | gh issue create --repo duckdblabs/duckdb-internal --label "extension" --label "High Priority" --title "$TITLE_PREFIX - $PUBLIC_ISSUE_TITLE" --body "See https://github.com/duckdb/sqlite_scanner/issues/${{ github.event.issue.number }}" 36 | fi 37 | -------------------------------------------------------------------------------- /test/sql/storage/attach_create_index.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_create_index.test 2 | # description: 3 | # group: [sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH '__TEST_DIR__/attach_create_index.db' AS s (TYPE SQLITE) 9 | 10 | statement ok 11 | CREATE TABLE s.test(i INTEGER); 12 | 13 | statement ok 14 | INSERT INTO s.test VALUES (1), (2), (3); 15 | 16 | # TODO: fix needing to specify the schema here 17 | statement ok 18 | CREATE INDEX i_index ON s.main.test(i); 19 | 20 | query I 21 | SELECT * FROM s.test WHERE i=2 22 | ---- 23 | 2 24 | 25 | statement error 26 | DROP INDEX i_index; 27 | ---- 28 | s.i_index 29 | 30 | statement ok 31 | DROP INDEX s.i_index; 32 | 33 | statement error 34 | DROP INDEX s.i_index; 35 | ---- 36 | Index with name i_index does not exist 37 | 38 | statement ok 39 | DROP INDEX IF EXISTS s.i_index; 40 | 
41 | statement ok 42 | DROP TABLE s.test; 43 | 44 | # multi-dimensional index 45 | statement ok 46 | CREATE TABLE s.test(i INTEGER, j INTEGER); 47 | 48 | statement ok 49 | INSERT INTO s.test VALUES (1, 10), (2, 20), (3, 30); 50 | 51 | statement ok 52 | CREATE INDEX i_index ON s.main.test(i, j); 53 | 54 | query II 55 | SELECT * FROM s.test WHERE i=2 AND j=20 56 | ---- 57 | 2 20 58 | 59 | statement ok 60 | DROP TABLE s.test CASCADE 61 | 62 | # index with a function 63 | statement ok 64 | CREATE TABLE s.test(s VARCHAR); 65 | 66 | statement ok 67 | INSERT INTO s.test VALUES ('HELLO'), ('hello') 68 | 69 | statement error 70 | CREATE UNIQUE INDEX i_index ON s.main.test(LOWER(s)) 71 | ---- 72 | UNIQUE constraint failed 73 | 74 | statement ok 75 | CREATE INDEX i_index ON s.main.test(LOWER(s)) 76 | 77 | query I 78 | SELECT * FROM s.test WHERE LOWER(s)='hello' 79 | ---- 80 | HELLO 81 | hello 82 | -------------------------------------------------------------------------------- /test/sql/storage/attach_schema_functions.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_schema_functions.test 2 | # description: 3 | # group: [sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH '__TEST_DIR__/attach_schema_functions.db' AS s (TYPE SQLITE) 9 | 10 | statement ok 11 | CREATE TABLE s.integers(i BIGINT PRIMARY KEY, j BIGINT); 12 | 13 | statement ok 14 | INSERT INTO s.integers VALUES (1, 1), (2, 2), (3, 3); 15 | 16 | # TODO: fix needing to specify the schema here 17 | statement ok 18 | CREATE INDEX i_index ON s.main.integers(j) 19 | 20 | statement ok 21 | CREATE VIEW s.v1 AS SELECT 42 AS a; 22 | 23 | query IIII 24 | SELECT * EXCLUDE (sql) FROM sqlite_master ORDER BY 1, 2 25 | ---- 26 | index i_index integers 0 27 | index sqlite_autoindex_integers_1 integers 0 28 | table integers integers 0 29 | view v1 v1 0 30 | 31 | query IIII 32 | SELECT database_name, table_name, has_primary_key, 
estimated_size FROM duckdb_tables() 33 | ---- 34 | s integers true 3 35 | 36 | statement ok 37 | SELECT * FROM duckdb_schemas() 38 | 39 | query II 40 | SELECT database_name, view_name FROM duckdb_views 41 | ---- 42 | s v1 43 | 44 | query III 45 | SELECT database_name, index_name, table_name FROM duckdb_indexes 46 | ---- 47 | s sqlite_autoindex_integers_1 integers 48 | s i_index integers 49 | 50 | query III 51 | SELECT database_name, table_name, column_name FROM duckdb_columns 52 | ---- 53 | s integers i 54 | s integers j 55 | 56 | statement ok 57 | SELECT * FROM duckdb_constraints() 58 | 59 | statement ok 60 | SELECT * FROM duckdb_functions() 61 | 62 | statement ok 63 | PRAGMA database_size 64 | 65 | statement ok 66 | USE s 67 | 68 | statement ok 69 | PRAGMA table_info('integers') 70 | 71 | statement ok 72 | PRAGMA storage_info('integers') 73 | -------------------------------------------------------------------------------- /test/sql/storage/attach_delete.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_delete.test 2 | # description: 3 | # group: [sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH '__TEST_DIR__/attach_delete.db' AS s1 (TYPE SQLITE) 9 | 10 | statement ok 11 | CREATE TABLE s1.test(i INTEGER); 12 | 13 | statement ok 14 | INSERT INTO s1.test VALUES (1), (2), (3), (NULL); 15 | 16 | query II 17 | SELECT rowid, * FROM s1.test 18 | ---- 19 | 1 1 20 | 2 2 21 | 3 3 22 | 4 NULL 23 | 24 | # simple delete 25 | query I 26 | DELETE FROM s1.test WHERE i=2 27 | ---- 28 | 1 29 | 30 | query II 31 | SELECT rowid, * FROM s1.test 32 | ---- 33 | 1 1 34 | 3 3 35 | 4 NULL 36 | 37 | # no matches 38 | query I 39 | DELETE FROM s1.test WHERE i=999 40 | ---- 41 | 0 42 | 43 | query II 44 | SELECT rowid, * FROM s1.test 45 | ---- 46 | 1 1 47 | 3 3 48 | 4 NULL 49 | 50 | # delete without parameters 51 | query I 52 | DELETE FROM s1.test 53 | ---- 54 | 3 55 | 56 | query I 57 | SELECT 
* FROM s1.test 58 | 59 | query I 60 | DELETE FROM s1.test 61 | ---- 62 | 0 63 | 64 | # RETURNING statement 65 | statement error 66 | DELETE FROM s1.test RETURNING *; 67 | ---- 68 | not yet supported 69 | 70 | statement ok 71 | INSERT INTO s1.test VALUES (1), (2), (3), (NULL); 72 | 73 | statement ok 74 | CREATE TABLE duckdb_table AS SELECT 1 i UNION ALL SELECT 3 UNION ALL SELECT 1 75 | 76 | # DELETE with join on another table 77 | statement ok 78 | DELETE FROM s1.test USING duckdb_table WHERE test.i = duckdb_table.i 79 | 80 | query I 81 | SELECT * FROM s1.test 82 | ---- 83 | 2 84 | NULL 85 | 86 | # DELETE with subquery referring to itself 87 | query I 88 | DELETE FROM s1.test WHERE i=(SELECT MIN(i) FROM s1.test) 89 | ---- 90 | 1 91 | 92 | query I 93 | SELECT * FROM s1.test 94 | ---- 95 | NULL 96 | -------------------------------------------------------------------------------- /src/sqlite_storage.cpp: -------------------------------------------------------------------------------- 1 | #include "duckdb.hpp" 2 | 3 | #include "sqlite3.h" 4 | #include "sqlite_utils.hpp" 5 | #include "sqlite_storage.hpp" 6 | #include "storage/sqlite_catalog.hpp" 7 | #include "storage/sqlite_transaction_manager.hpp" 8 | #include "duckdb/parser/parsed_data/attach_info.hpp" 9 | #include "duckdb/transaction/transaction_manager.hpp" 10 | #include "duckdb/catalog/catalog_entry/schema_catalog_entry.hpp" 11 | #include "duckdb/catalog/catalog_entry/table_catalog_entry.hpp" 12 | 13 | namespace duckdb { 14 | 15 | static unique_ptr SQLiteAttach(StorageExtensionInfo *storage_info, ClientContext &context, 16 | AttachedDatabase &db, const string &name, AttachInfo &info, 17 | AccessMode access_mode) { 18 | SQLiteOpenOptions options; 19 | options.access_mode = access_mode; 20 | for(auto &entry : info.options) { 21 | if (StringUtil::CIEquals(entry.first, "busy_timeout")) { 22 | options.busy_timeout = entry.second.GetValue(); 23 | } else if (StringUtil::CIEquals(entry.first, "journal_mode")) { 24 | 
options.journal_mode = entry.second.ToString(); 25 | } 26 | } 27 | return make_uniq(db, info.path, std::move(options)); 28 | } 29 | 30 | static unique_ptr SQLiteCreateTransactionManager(StorageExtensionInfo *storage_info, 31 | AttachedDatabase &db, Catalog &catalog) { 32 | auto &sqlite_catalog = catalog.Cast(); 33 | return make_uniq(db, sqlite_catalog); 34 | } 35 | 36 | SQLiteStorageExtension::SQLiteStorageExtension() { 37 | attach = SQLiteAttach; 38 | create_transaction_manager = SQLiteCreateTransactionManager; 39 | } 40 | 41 | } // namespace duckdb 42 | -------------------------------------------------------------------------------- /src/include/storage/sqlite_insert.hpp: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // DuckDB 3 | // 4 | // storage/sqlite_insert.hpp 5 | // 6 | // 7 | //===----------------------------------------------------------------------===// 8 | 9 | #pragma once 10 | 11 | #include "duckdb/execution/physical_operator.hpp" 12 | #include "duckdb/common/index_vector.hpp" 13 | 14 | namespace duckdb { 15 | 16 | class SQLiteInsert : public PhysicalOperator { 17 | public: 18 | //! INSERT INTO 19 | SQLiteInsert(LogicalOperator &op, TableCatalogEntry &table, physical_index_vector_t column_index_map); 20 | //! CREATE TABLE AS 21 | SQLiteInsert(LogicalOperator &op, SchemaCatalogEntry &schema, unique_ptr info); 22 | 23 | //! The table to insert into 24 | optional_ptr table; 25 | //! Table schema, in case of CREATE TABLE AS 26 | optional_ptr schema; 27 | //! Create table info, in case of CREATE TABLE AS 28 | unique_ptr info; 29 | //! 
column_index_map 30 | physical_index_vector_t column_index_map; 31 | 32 | public: 33 | // Source interface 34 | SourceResultType GetData(ExecutionContext &context, DataChunk &chunk, OperatorSourceInput &input) const override; 35 | 36 | bool IsSource() const override { 37 | return true; 38 | } 39 | 40 | public: 41 | // Sink interface 42 | unique_ptr GetGlobalSinkState(ClientContext &context) const override; 43 | SinkResultType Sink(ExecutionContext &context, DataChunk &chunk, OperatorSinkInput &input) const override; 44 | 45 | bool IsSink() const override { 46 | return true; 47 | } 48 | 49 | bool ParallelSink() const override { 50 | return false; 51 | } 52 | 53 | string GetName() const override; 54 | string ParamsToString() const override; 55 | }; 56 | 57 | } // namespace duckdb 58 | -------------------------------------------------------------------------------- /src/include/sqlite_db.hpp: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // DuckDB 3 | // 4 | // sqlite_db.hpp 5 | // 6 | // 7 | //===----------------------------------------------------------------------===// 8 | 9 | #pragma once 10 | 11 | #include "sqlite_utils.hpp" 12 | #include "storage/sqlite_options.hpp" 13 | 14 | namespace duckdb { 15 | class SQLiteStatement; 16 | struct IndexInfo; 17 | 18 | class SQLiteDB { 19 | public: 20 | SQLiteDB(); 21 | SQLiteDB(sqlite3 *db); 22 | ~SQLiteDB(); 23 | // disable copy constructors 24 | SQLiteDB(const SQLiteDB &other) = delete; 25 | SQLiteDB &operator=(const SQLiteDB &) = delete; 26 | //! 
enable move constructors 27 | SQLiteDB(SQLiteDB &&other) noexcept; 28 | SQLiteDB &operator=(SQLiteDB &&) noexcept; 29 | 30 | sqlite3 *db; 31 | 32 | public: 33 | static SQLiteDB Open(const string &path, const SQLiteOpenOptions &options, bool is_shared = false); 34 | bool TryPrepare(const string &query, SQLiteStatement &result); 35 | SQLiteStatement Prepare(const string &query); 36 | void Execute(const string &query); 37 | vector GetTables(); 38 | 39 | vector GetEntries(string entry_type); 40 | CatalogType GetEntryType(const string &name); 41 | void GetTableInfo(const string &table_name, ColumnList &columns, vector> &constraints, 42 | bool all_varchar); 43 | void GetViewInfo(const string &view_name, string &sql); 44 | void GetIndexInfo(const string &index_name, string &sql, string &table_name); 45 | idx_t RunPragma(string pragma_name); 46 | //! Gets the max row id of a table, returns false if the table does not have a 47 | //! rowid column 48 | bool GetMaxRowId(const string &table_name, idx_t &row_id); 49 | bool ColumnExists(const string &table_name, const string &column_name); 50 | vector GetIndexInfo(const string &table_name); 51 | 52 | bool IsOpen(); 53 | void Close(); 54 | }; 55 | 56 | } // namespace duckdb 57 | -------------------------------------------------------------------------------- /test/sql/storage/attach_alter.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_alter.test 2 | # description: 3 | # group: [sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH '__TEST_DIR__/attach_alter.db' AS s1 (TYPE SQLITE) 9 | 10 | # add column 11 | statement ok 12 | CREATE TABLE s1.test(i INTEGER); 13 | 14 | statement ok 15 | INSERT INTO s1.test VALUES (42); 16 | 17 | statement ok 18 | ALTER TABLE s1.test ADD COLUMN j INTEGER; 19 | 20 | query II 21 | SELECT i, j FROM s1.test 22 | ---- 23 | 42 NULL 24 | 25 | # duplicate column in rename 26 | statement error 27 | ALTER 
TABLE s1.test RENAME j TO i 28 | ---- 29 | duplicate column name 30 | 31 | # rename column 32 | statement ok 33 | ALTER TABLE s1.test RENAME j TO k 34 | 35 | query II 36 | SELECT i, k FROM s1.test 37 | ---- 38 | 42 NULL 39 | 40 | # drop column 41 | statement ok 42 | ALTER TABLE s1.test DROP COLUMN k 43 | 44 | query I 45 | SELECT * FROM s1.test 46 | ---- 47 | 42 48 | 49 | # rename table 50 | statement ok 51 | ALTER TABLE s1.test RENAME TO test2 52 | 53 | query I 54 | SELECT * FROM s1.test2 55 | ---- 56 | 42 57 | 58 | # non-existent table 59 | statement error 60 | ALTER TABLE s1.bla ADD COLUMN j INTEGER 61 | ---- 62 | does not exist 63 | 64 | # if exists 65 | statement ok 66 | ALTER TABLE IF EXISTS s1.bla ADD COLUMN j INTEGER 67 | 68 | # drop column if not exists 69 | statement error 70 | ALTER TABLE s1.test2 DROP COLUMN z 71 | ---- 72 | no such column 73 | 74 | statement ok 75 | ALTER TABLE s1.test2 DROP COLUMN IF EXISTS z 76 | 77 | # add column if exists 78 | statement error 79 | ALTER TABLE s1.test2 ADD COLUMN i INTEGER 80 | ---- 81 | duplicate column name 82 | 83 | statement ok 84 | ALTER TABLE s1.test2 ADD COLUMN IF NOT EXISTS i INTEGER 85 | 86 | # unsupported alter table type 87 | statement error 88 | ALTER TABLE s1.test2 ALTER COLUMN i DROP NOT NULL 89 | ---- 90 | Unsupported ALTER TABLE type 91 | 92 | # rename column does not exist 93 | statement error 94 | ALTER TABLE s1.test2 RENAME zz TO i 95 | ---- 96 | no such column 97 | -------------------------------------------------------------------------------- /test/sql/storage/attach_update.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_update.test 2 | # description: 3 | # group: [sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH '__TEST_DIR__/attach_update.db' AS s1 (TYPE SQLITE) 9 | 10 | statement ok 11 | CREATE TABLE s1.test(i INTEGER); 12 | 13 | statement ok 14 | INSERT INTO s1.test VALUES (1), (2), (3), 
(NULL); 15 | 16 | # global update 17 | statement ok 18 | UPDATE s1.test SET i = i + 1; 19 | 20 | query II 21 | SELECT rowid, * FROM s1.test 22 | ---- 23 | 1 2 24 | 2 3 25 | 3 4 26 | 4 NULL 27 | 28 | # update with WHERE statement 29 | statement ok 30 | UPDATE s1.test SET i = i + 100 WHERE i = 3 31 | 32 | query II 33 | SELECT rowid, * FROM s1.test 34 | ---- 35 | 1 2 36 | 2 103 37 | 3 4 38 | 4 NULL 39 | 40 | # update with NULL value 41 | statement ok 42 | UPDATE s1.test SET i = NULL WHERE i = 2 43 | 44 | query II 45 | SELECT rowid, * FROM s1.test 46 | ---- 47 | 1 NULL 48 | 2 103 49 | 3 4 50 | 4 NULL 51 | 52 | # update with DEFAULT clause 53 | statement error 54 | UPDATE s1.test SET i = DEFAULT WHERE i = 4 55 | ---- 56 | not yet supported 57 | 58 | # multi column update in different orders 59 | statement ok 60 | CREATE OR REPLACE TABLE s1.test(i INTEGER PRIMARY KEY, j INTEGER, k INTEGER); 61 | 62 | query I 63 | INSERT INTO s1.test VALUES (1, 10, 100), (2, NULL, 200), (3, 30, NULL), (4, 40, 400); 64 | ---- 65 | 4 66 | 67 | query III 68 | SELECT * FROM s1.test 69 | ---- 70 | 1 10 100 71 | 2 NULL 200 72 | 3 30 NULL 73 | 4 40 400 74 | 75 | statement ok 76 | UPDATE s1.test SET k=990 + i, i=i, j=99 WHERE i=2 OR i=4 77 | 78 | query III 79 | SELECT * FROM s1.test 80 | ---- 81 | 1 10 100 82 | 2 99 992 83 | 3 30 NULL 84 | 4 99 994 85 | 86 | # duplicates in SET statements 87 | statement error 88 | UPDATE s1.test SET j=k, j=i 89 | ---- 90 | Multiple assignments to same column 91 | 92 | # RETURNING statement 93 | statement error 94 | UPDATE s1.test SET i=42 RETURNING *; 95 | ---- 96 | not yet supported 97 | 98 | # UPDATE with join on another table 99 | # UPDATE with subquery referring -------------------------------------------------------------------------------- /test/sql/storage/attach_transactions.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_transactions.test 2 | # description: 3 | # group: 
[sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH '__TEST_DIR__/attach_transactions.db' AS s (TYPE SQLITE) 9 | 10 | 11 | # roll back create table 12 | statement ok 13 | BEGIN 14 | 15 | statement ok 16 | CREATE TABLE s.test(i INTEGER); 17 | 18 | statement ok 19 | ROLLBACK 20 | 21 | statement error 22 | SELECT * FROM s.test 23 | ---- 24 | Table with name test does not exist 25 | 26 | statement ok 27 | CREATE TABLE s.test(i INTEGER); 28 | 29 | # roll back insert 30 | statement ok 31 | BEGIN 32 | 33 | statement ok 34 | INSERT INTO s.test VALUES (42) 35 | 36 | query I 37 | SELECT * FROM s.test 38 | ---- 39 | 42 40 | 41 | statement ok 42 | ROLLBACK 43 | 44 | query I 45 | SELECT * FROM s.test 46 | ---- 47 | 48 | # commit insert 49 | statement ok 50 | BEGIN 51 | 52 | statement ok 53 | INSERT INTO s.test VALUES (1), (2), (3) 54 | 55 | statement ok 56 | COMMIT 57 | 58 | query I 59 | SELECT * FROM s.test 60 | ---- 61 | 1 62 | 2 63 | 3 64 | 65 | # rollback delete 66 | statement ok 67 | BEGIN 68 | 69 | statement ok 70 | DELETE FROM s.test WHERE i=2 71 | 72 | query I 73 | SELECT * FROM s.test 74 | ---- 75 | 1 76 | 3 77 | 78 | statement ok 79 | ROLLBACK 80 | 81 | query I 82 | SELECT * FROM s.test 83 | ---- 84 | 1 85 | 2 86 | 3 87 | 88 | # rollback update 89 | statement ok 90 | BEGIN 91 | 92 | statement ok 93 | UPDATE s.test SET i=i+100 94 | 95 | query I 96 | SELECT * FROM s.test 97 | ---- 98 | 101 99 | 102 100 | 103 101 | 102 | statement ok 103 | ROLLBACK 104 | 105 | query I 106 | SELECT * FROM s.test 107 | ---- 108 | 1 109 | 2 110 | 3 111 | 112 | # rollback alter table 113 | statement ok 114 | BEGIN 115 | 116 | statement ok 117 | ALTER TABLE s.test ADD COLUMN b INTEGER 118 | 119 | query II 120 | SELECT * FROM s.test 121 | ---- 122 | 1 NULL 123 | 2 NULL 124 | 3 NULL 125 | 126 | statement ok 127 | UPDATE s.test SET b=i+100 WHERE i!=2 128 | 129 | query II 130 | SELECT * FROM s.test 131 | ---- 132 | 1 101 133 | 2 NULL 134 | 3 103 135 | 136 | statement 
ok 137 | ROLLBACK 138 | 139 | query I 140 | SELECT * FROM s.test 141 | ---- 142 | 1 143 | 2 144 | 3 145 | -------------------------------------------------------------------------------- /test/sql/storage/attach_constraints.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/storage/attach_constraints.test 2 | # description: 3 | # group: [sqlite_storage] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | ATTACH '__TEST_DIR__/attach_constraints.db' AS s1 (TYPE SQLITE) 9 | 10 | # FIXME: 11 | #statement ok 12 | #PRAGMA enable_verification 13 | 14 | # primary keys 15 | statement ok 16 | CREATE TABLE s1.test(i INTEGER PRIMARY KEY); 17 | 18 | statement ok 19 | INSERT INTO s1.test VALUES (42); 20 | 21 | statement error 22 | INSERT INTO s1.test VALUES (42); 23 | ---- 24 | UNIQUE constraint 25 | 26 | # SQLite accepts NULL values in primary key tables 27 | statement ok 28 | INSERT INTO s1.test VALUES (NULL); 29 | 30 | query I 31 | SELECT * FROM s1.test 32 | ---- 33 | 42 34 | NULL 35 | 36 | # unique constraint 37 | statement ok 38 | CREATE TABLE s1.test2(i INTEGER UNIQUE); 39 | 40 | statement ok 41 | INSERT INTO s1.test2 VALUES (42); 42 | 43 | statement error 44 | INSERT INTO s1.test2 VALUES (42); 45 | ---- 46 | UNIQUE constraint failed 47 | 48 | statement ok 49 | INSERT INTO s1.test2 VALUES (NULL); 50 | 51 | query I 52 | SELECT * FROM s1.test2 53 | ---- 54 | 42 55 | NULL 56 | 57 | # compound primary key/ constraints 58 | statement ok 59 | CREATE TABLE s1.test3(i INTEGER, j INTEGER, PRIMARY KEY(i, j)); 60 | 61 | statement ok 62 | INSERT INTO s1.test3 VALUES (1, 1); 63 | 64 | statement ok 65 | INSERT INTO s1.test3 VALUES (1, 2); 66 | 67 | statement error 68 | INSERT INTO s1.test3 VALUES (1, 1); 69 | ---- 70 | UNIQUE constraint failed 71 | 72 | query II 73 | SELECT * FROM s1.test3 74 | ---- 75 | 1 1 76 | 1 2 77 | 78 | # check constraints 79 | statement ok 80 | CREATE TABLE s1.test4(i INTEGER CHECK(i < 100)); 81 | 82 | 
statement ok 83 | INSERT INTO s1.test4 VALUES (42); 84 | 85 | statement error 86 | INSERT INTO s1.test4 VALUES (142); 87 | ---- 88 | CHECK constraint failed 89 | 90 | query I 91 | SELECT * FROM s1.test4 92 | ---- 93 | 42 94 | 95 | # NOT NULL constraints 96 | statement ok 97 | CREATE TABLE s1.test5(i INTEGER NOT NULL); 98 | 99 | statement ok 100 | INSERT INTO s1.test5 VALUES (42); 101 | 102 | statement error 103 | INSERT INTO s1.test5 VALUES (NULL); 104 | ---- 105 | NOT NULL constraint 106 | 107 | query I 108 | SELECT * FROM s1.test5 109 | ---- 110 | 42 111 | -------------------------------------------------------------------------------- /src/include/sqlite_stmt.hpp: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // DuckDB 3 | // 4 | // sqlite_utils.hpp 5 | // 6 | // 7 | //===----------------------------------------------------------------------===// 8 | 9 | #pragma once 10 | 11 | #include "sqlite_utils.hpp" 12 | 13 | #include 14 | 15 | namespace duckdb { 16 | struct SqliteBindData; 17 | 18 | class SQLiteStatement { 19 | public: 20 | SQLiteStatement(); 21 | SQLiteStatement(sqlite3 *db, sqlite3_stmt *stmt); 22 | ~SQLiteStatement(); 23 | // disable copy constructors 24 | SQLiteStatement(const SQLiteStatement &other) = delete; 25 | SQLiteStatement &operator=(const SQLiteStatement &) = delete; 26 | //! 
enable move constructors 27 | SQLiteStatement(SQLiteStatement &&other) noexcept; 28 | SQLiteStatement &operator=(SQLiteStatement &&) noexcept; 29 | 30 | sqlite3 *db; 31 | sqlite3_stmt *stmt; 32 | 33 | public: 34 | int Step(); 35 | template 36 | T GetValue(idx_t col) { 37 | throw InternalException("Unsupported type for SQLiteStatement::GetValue"); 38 | } 39 | template 40 | void Bind(idx_t col, T value) { 41 | throw InternalException("Unsupported type for SQLiteStatement::Bind"); 42 | } 43 | void BindText(idx_t col, const string_t &value); 44 | void BindBlob(idx_t col, const string_t &value); 45 | void BindValue(Vector &col, idx_t c, idx_t r); 46 | int GetType(idx_t col); 47 | bool IsOpen(); 48 | void Close(); 49 | void CheckTypeMatches(const SqliteBindData &bind_data, sqlite3_value *val, int sqlite_column_type, 50 | int expected_type, idx_t col_idx); 51 | void CheckTypeIsFloatOrInteger(sqlite3_value *val, int sqlite_column_type, idx_t col_idx); 52 | void Reset(); 53 | }; 54 | 55 | template <> 56 | string SQLiteStatement::GetValue(idx_t col); 57 | template <> 58 | int SQLiteStatement::GetValue(idx_t col); 59 | template <> 60 | int64_t SQLiteStatement::GetValue(idx_t col); 61 | template <> 62 | sqlite3_value *SQLiteStatement::GetValue(idx_t col); 63 | 64 | template <> 65 | void SQLiteStatement::Bind(idx_t col, int32_t value); 66 | template <> 67 | void SQLiteStatement::Bind(idx_t col, int64_t value); 68 | template <> 69 | void SQLiteStatement::Bind(idx_t col, double value); 70 | template <> 71 | void SQLiteStatement::Bind(idx_t col, std::nullptr_t value); 72 | 73 | } // namespace duckdb 74 | -------------------------------------------------------------------------------- /src/storage/sqlite_index.cpp: -------------------------------------------------------------------------------- 1 | #include "storage/sqlite_catalog.hpp" 2 | #include "storage/sqlite_index.hpp" 3 | #include "duckdb/parser/statement/create_statement.hpp" 4 | #include 
"duckdb/planner/operator/logical_extension_operator.hpp" 5 | #include "duckdb/catalog/catalog_entry/table_catalog_entry.hpp" 6 | 7 | namespace duckdb { 8 | 9 | SQLiteCreateIndex::SQLiteCreateIndex(unique_ptr info, TableCatalogEntry &table) 10 | : PhysicalOperator(PhysicalOperatorType::EXTENSION, {LogicalType::BIGINT}, 1), info(std::move(info)), table(table) { 11 | } 12 | 13 | //===--------------------------------------------------------------------===// 14 | // Source 15 | //===--------------------------------------------------------------------===// 16 | SourceResultType SQLiteCreateIndex::GetData(ExecutionContext &context, DataChunk &chunk, 17 | OperatorSourceInput &input) const { 18 | auto &catalog = table.catalog; 19 | auto &schema = catalog.GetSchema(context.client, info->schema); 20 | auto transaction = schema.GetCatalogTransaction(context.client); 21 | schema.CreateIndex(transaction, *info, table); 22 | 23 | return SourceResultType::FINISHED; 24 | } 25 | 26 | //===--------------------------------------------------------------------===// 27 | // Logical Operator 28 | //===--------------------------------------------------------------------===// 29 | class LogicalSQLiteCreateIndex : public LogicalExtensionOperator { 30 | public: 31 | LogicalSQLiteCreateIndex(unique_ptr info_p, TableCatalogEntry &table) 32 | : info(std::move(info_p)), table(table) { 33 | } 34 | 35 | unique_ptr info; 36 | TableCatalogEntry &table; 37 | 38 | unique_ptr CreatePlan(ClientContext &context, PhysicalPlanGenerator &generator) override { 39 | return make_uniq(std::move(info), table); 40 | } 41 | 42 | void Serialize(Serializer &writer) const override { 43 | throw InternalException("Cannot serialize SQLite Create index"); 44 | } 45 | 46 | void ResolveTypes() override { 47 | types = {LogicalType::BIGINT}; 48 | } 49 | }; 50 | 51 | unique_ptr SQLiteCatalog::BindCreateIndex(Binder &binder, CreateStatement &stmt, 52 | TableCatalogEntry &table, unique_ptr plan) { 53 | return 
make_uniq(unique_ptr_cast(std::move(stmt.info)), 54 | table); 55 | } 56 | 57 | } // namespace duckdb 58 | -------------------------------------------------------------------------------- /src/storage/sqlite_table_entry.cpp: -------------------------------------------------------------------------------- 1 | #include "storage/sqlite_catalog.hpp" 2 | #include "storage/sqlite_table_entry.hpp" 3 | #include "storage/sqlite_transaction.hpp" 4 | #include "duckdb/storage/statistics/base_statistics.hpp" 5 | #include "sqlite_scanner.hpp" 6 | #include "duckdb/storage/table_storage_info.hpp" 7 | 8 | namespace duckdb { 9 | 10 | SQLiteTableEntry::SQLiteTableEntry(Catalog &catalog, SchemaCatalogEntry &schema, CreateTableInfo &info, 11 | bool all_varchar) 12 | : TableCatalogEntry(catalog, schema, info), all_varchar(all_varchar) { 13 | } 14 | 15 | unique_ptr SQLiteTableEntry::GetStatistics(ClientContext &context, column_t column_id) { 16 | return nullptr; 17 | } 18 | 19 | void SQLiteTableEntry::BindUpdateConstraints(Binder &, LogicalGet &, LogicalProjection &, LogicalUpdate &, 20 | ClientContext &) { 21 | } 22 | 23 | TableFunction SQLiteTableEntry::GetScanFunction(ClientContext &context, unique_ptr &bind_data) { 24 | auto result = make_uniq(); 25 | for (auto &col : columns.Logical()) { 26 | result->names.push_back(col.GetName()); 27 | result->types.push_back(col.GetType()); 28 | } 29 | auto &sqlite_catalog = catalog.Cast(); 30 | result->file_name = sqlite_catalog.path; 31 | result->table_name = name; 32 | result->all_varchar = all_varchar; 33 | 34 | auto &transaction = Transaction::Get(context, catalog).Cast(); 35 | auto &db = transaction.GetDB(); 36 | 37 | if (!db.GetMaxRowId(name, result->max_rowid)) { 38 | result->max_rowid = idx_t(-1); 39 | result->rows_per_group = idx_t(-1); 40 | } 41 | if (!transaction.IsReadOnly() || sqlite_catalog.InMemory()) { 42 | // for in-memory databases or if we have transaction-local changes we can 43 | // only do a single-threaded scan set up the 
transaction's connection object 44 | // as the global db 45 | result->global_db = &db; 46 | result->rows_per_group = idx_t(-1); 47 | } 48 | 49 | bind_data = std::move(result); 50 | return SqliteScanFunction(); 51 | } 52 | 53 | TableStorageInfo SQLiteTableEntry::GetStorageInfo(ClientContext &context) { 54 | auto &transaction = Transaction::Get(context, catalog).Cast(); 55 | auto &db = transaction.GetDB(); 56 | TableStorageInfo result; 57 | 58 | idx_t cardinality; 59 | if (!db.GetMaxRowId(name, cardinality)) { 60 | // table has no rowid column - GetMaxRowId leaves "cardinality" unset, so fall back to a rough default estimate 61 | cardinality = 10000; 62 | } 63 | result.cardinality = cardinality; 64 | 65 | result.index_info = db.GetIndexInfo(name); 66 | return result; 67 | } 68 | 69 | } // namespace duckdb 70 | -------------------------------------------------------------------------------- /src/include/storage/sqlite_schema_entry.hpp: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // DuckDB 3 | // 4 | // storage/sqlite_schema_entry.hpp 5 | // 6 | // 7 | //===----------------------------------------------------------------------===// 8 | 9 | #pragma once 10 | 11 | #include "duckdb/catalog/catalog_entry/schema_catalog_entry.hpp" 12 | 13 | namespace duckdb { 14 | class SQLiteTransaction; 15 | 16 | class SQLiteSchemaEntry : public SchemaCatalogEntry { 17 | public: 18 | SQLiteSchemaEntry(Catalog &catalog, CreateSchemaInfo &info); 19 | 20 | public: 21 | optional_ptr CreateTable(CatalogTransaction transaction, BoundCreateTableInfo &info) override; 22 | optional_ptr CreateFunction(CatalogTransaction transaction, CreateFunctionInfo &info) override; 23 | optional_ptr CreateIndex(CatalogTransaction transaction, CreateIndexInfo &info, 24 | TableCatalogEntry &table) override; 25 | optional_ptr CreateView(CatalogTransaction transaction, CreateViewInfo &info) override; 26 | optional_ptr CreateSequence(CatalogTransaction transaction, CreateSequenceInfo
&info) override; 27 | optional_ptr CreateTableFunction(CatalogTransaction transaction, 28 | CreateTableFunctionInfo &info) override; 29 | optional_ptr CreateCopyFunction(CatalogTransaction transaction, 30 | CreateCopyFunctionInfo &info) override; 31 | optional_ptr CreatePragmaFunction(CatalogTransaction transaction, 32 | CreatePragmaFunctionInfo &info) override; 33 | optional_ptr CreateCollation(CatalogTransaction transaction, CreateCollationInfo &info) override; 34 | optional_ptr CreateType(CatalogTransaction transaction, CreateTypeInfo &info) override; 35 | void Alter(CatalogTransaction transaction, AlterInfo &info) override; 36 | void Scan(ClientContext &context, CatalogType type, const std::function &callback) override; 37 | void Scan(CatalogType type, const std::function &callback) override; 38 | void DropEntry(ClientContext &context, DropInfo &info) override; 39 | optional_ptr GetEntry(CatalogTransaction transaction, CatalogType type, const string &name) override; 40 | 41 | private: 42 | void AlterTable(SQLiteTransaction &transaction, RenameTableInfo &info); 43 | void AlterTable(SQLiteTransaction &transaction, RenameColumnInfo &info); 44 | void AlterTable(SQLiteTransaction &transaction, AddColumnInfo &info); 45 | void AlterTable(SQLiteTransaction &transaction, RemoveColumnInfo &info); 46 | 47 | void TryDropEntry(ClientContext &context, CatalogType catalog_type, const string &name); 48 | }; 49 | 50 | } // namespace duckdb 51 | -------------------------------------------------------------------------------- /src/include/storage/sqlite_catalog.hpp: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // DuckDB 3 | // 4 | // storage/sqlite_catalog.hpp 5 | // 6 | // 7 | //===----------------------------------------------------------------------===// 8 | 9 | #pragma once 10 | 11 | #include "duckdb/catalog/catalog.hpp" 12 | #include 
//! Catalog implementation for an attached SQLite database.
//! SQLite exposes exactly one schema ("main"); schema creation/removal is rejected.
class SQLiteCatalog : public Catalog {
public:
	explicit SQLiteCatalog(AttachedDatabase &db_p, const string &path, SQLiteOpenOptions options);
	~SQLiteCatalog();

	//! Path of the attached SQLite database file, or ":memory:" for an in-memory database
	string path;
	//! Options the database was attached with (e.g. read-only mode)
	SQLiteOpenOptions options;

public:
	void Initialize(bool load_builtin) override;
	string GetCatalogType() override {
		return "sqlite";
	}

	optional_ptr<CatalogEntry> CreateSchema(CatalogTransaction transaction, CreateSchemaInfo &info) override;

	void ScanSchemas(ClientContext &context, std::function<void(SchemaCatalogEntry &)> callback) override;

	optional_ptr<SchemaCatalogEntry> GetSchema(CatalogTransaction transaction, const string &schema_name,
	                                           OnEntryNotFound if_not_found,
	                                           QueryErrorContext error_context = QueryErrorContext()) override;

	//! Returns the single "main" schema of the SQLite database
	SQLiteSchemaEntry &GetMainSchema() {
		return *main_schema;
	}

	unique_ptr<PhysicalOperator> PlanInsert(ClientContext &context, LogicalInsert &op,
	                                        unique_ptr<PhysicalOperator> plan) override;
	unique_ptr<PhysicalOperator> PlanCreateTableAs(ClientContext &context, LogicalCreateTable &op,
	                                               unique_ptr<PhysicalOperator> plan) override;
	unique_ptr<PhysicalOperator> PlanDelete(ClientContext &context, LogicalDelete &op,
	                                        unique_ptr<PhysicalOperator> plan) override;
	unique_ptr<PhysicalOperator> PlanUpdate(ClientContext &context, LogicalUpdate &op,
	                                        unique_ptr<PhysicalOperator> plan) override;
	unique_ptr<LogicalOperator> BindCreateIndex(Binder &binder, CreateStatement &stmt, TableCatalogEntry &table,
	                                            unique_ptr<LogicalOperator> plan) override;

	DatabaseSize GetDatabaseSize(ClientContext &context) override;

	//! Whether or not this is an in-memory SQLite database
	bool InMemory() override;
	string GetDBPath() override;

	//! Returns a reference to the in-memory database (if any); only one
	//! transaction may hold it at a time - pair with ReleaseInMemoryDatabase()
	SQLiteDB *GetInMemoryDatabase();
	//! Release the in-memory database (if there is any)
	void ReleaseInMemoryDatabase();

private:
	void DropSchema(ClientContext &context, DropInfo &info) override;

private:
	//! The single schema exposed by this catalog
	unique_ptr<SQLiteSchemaEntry> main_schema;
	//! Whether or not the database is in-memory
	bool in_memory;
	//! In-memory database - if any
	SQLiteDB in_memory_db;
	//! The lock maintaining access to the in-memory database
	mutex in_memory_lock;
	//! Whether or not there is any active transaction on the in-memory database
	bool active_in_memory;
};
iOS 35 | description: Please include operating system version and architecture (eg, aarch64, x86, x64, etc) 36 | validations: 37 | required: true 38 | - type: input 39 | attributes: 40 | label: "SQLite Version:" 41 | placeholder: e.g. 3.39.4 42 | validations: 43 | required: true 44 | - type: input 45 | attributes: 46 | label: "DuckDB Version:" 47 | placeholder: e.g. 22 48 | validations: 49 | required: true 50 | - type: input 51 | attributes: 52 | label: "DuckDB Client:" 53 | placeholder: e.g. Python 54 | validations: 55 | required: true 56 | 57 | - type: markdown 58 | attributes: 59 | value: "# Identity Disclosure:" 60 | - type: input 61 | attributes: 62 | label: "Full Name:" 63 | placeholder: e.g. John Doe 64 | validations: 65 | required: true 66 | - type: input 67 | attributes: 68 | label: "Affiliation:" 69 | placeholder: e.g. Oracle 70 | validations: 71 | required: true 72 | 73 | - type: markdown 74 | attributes: 75 | value: | 76 | If the above is not given and is not obvious from your GitHub profile page, we might close your issue without further review. Please refer to the [reasoning behind this rule](https://berthub.eu/articles/posts/anonymous-help/) if you have questions. 77 | 78 | # Before Submitting 79 | 80 | - type: checkboxes 81 | attributes: 82 | label: Have you tried this on the latest `main` branch? 83 | description: | 84 | * **Python**: `pip install duckdb --upgrade --pre` 85 | * **R**: `install.packages('duckdb', repos=c('https://duckdb.r-universe.dev', 'https://cloud.r-project.org'))` 86 | * **Other Platforms**: You can find links to binaries [here](https://duckdb.org/docs/installation/) or compile from source. 87 | 88 | options: 89 | - label: I agree 90 | required: true 91 | 92 | - type: checkboxes 93 | attributes: 94 | label: Have you tried the steps to reproduce? Do they include all relevant data and configuration? Does the issue you report still appear there? 
95 | options: 96 | - label: I agree 97 | required: true 98 | -------------------------------------------------------------------------------- /src/storage/sqlite_catalog.cpp: -------------------------------------------------------------------------------- 1 | #include "storage/sqlite_catalog.hpp" 2 | #include "duckdb/parser/parsed_data/create_schema_info.hpp" 3 | #include "duckdb/storage/database_size.hpp" 4 | #include "sqlite_db.hpp" 5 | #include "storage/sqlite_schema_entry.hpp" 6 | #include "storage/sqlite_transaction.hpp" 7 | #include "duckdb/common/exception/transaction_exception.hpp" 8 | 9 | namespace duckdb { 10 | 11 | SQLiteCatalog::SQLiteCatalog(AttachedDatabase &db_p, const string &path, SQLiteOpenOptions options_p) 12 | : Catalog(db_p), path(path), options(std::move(options_p)), in_memory(path == ":memory:"), active_in_memory(false) { 13 | if (InMemory()) { 14 | in_memory_db = SQLiteDB::Open(path, options, true); 15 | } 16 | } 17 | 18 | SQLiteCatalog::~SQLiteCatalog() { 19 | } 20 | 21 | void SQLiteCatalog::Initialize(bool load_builtin) { 22 | CreateSchemaInfo info; 23 | main_schema = make_uniq(*this, info); 24 | } 25 | 26 | optional_ptr SQLiteCatalog::CreateSchema(CatalogTransaction transaction, CreateSchemaInfo &info) { 27 | throw BinderException("SQLite databases do not support creating new schemas"); 28 | } 29 | 30 | void SQLiteCatalog::ScanSchemas(ClientContext &context, std::function callback) { 31 | callback(*main_schema); 32 | } 33 | 34 | optional_ptr SQLiteCatalog::GetSchema(CatalogTransaction transaction, const string &schema_name, 35 | OnEntryNotFound if_not_found, 36 | QueryErrorContext error_context) { 37 | if (schema_name == DEFAULT_SCHEMA || schema_name == INVALID_SCHEMA) { 38 | return main_schema.get(); 39 | } 40 | if (if_not_found == OnEntryNotFound::RETURN_NULL) { 41 | return nullptr; 42 | } 43 | throw BinderException("SQLite databases only have a single schema - \"%s\"", DEFAULT_SCHEMA); 44 | } 45 | 46 | bool SQLiteCatalog::InMemory() { 
//! Hands out the shared connection to the in-memory SQLite database.
//! The caller takes exclusive ownership until ReleaseInMemoryDatabase() is
//! called - only a single transaction may use the connection at a time.
SQLiteDB *SQLiteCatalog::GetInMemoryDatabase() {
	if (!InMemory()) {
		throw InternalException("GetInMemoryDatabase() called on a non-in-memory database");
	}
	lock_guard<mutex> l(in_memory_lock);
	if (active_in_memory) {
		// a transaction already holds the connection - reject rather than
		// interleave two transactions on the same SQLite handle
		throw TransactionException("Only a single transaction can be active on an "
		                           "in-memory SQLite database at a time");
	}
	active_in_memory = true;
	return &in_memory_db;
}
//! Maps a declared SQLite column type onto a DuckDB LogicalType, following
//! SQLite's type affinity rules: https://www.sqlite.org/datatype3.html
//! NOTE(review): every match below is lowercase - this assumes the caller
//! passes in a lowercased declared type string; confirm at the call sites.
//! The ORDER of the checks is significant and mirrors the affinity rules:
//! e.g. "varchar" must hit the "char" rule, "datetime" must hit "time".
LogicalType SQLiteUtils::TypeToLogicalType(const string &sqlite_type) {
	// type affinity rules are taken from here:
	// https://www.sqlite.org/datatype3.html

	// If the declared type contains the string "INT" then it is assigned INTEGER
	// affinity.
	if (StringUtil::Contains(sqlite_type, "int")) {
		return LogicalType::BIGINT;
	}

	// boolean - also mapped onto BIGINT
	if (StringUtil::Contains(sqlite_type, "bool")) {
		return LogicalType::BIGINT;
	}

	// If the declared type of the column contains any of the strings "CHAR",
	// "CLOB", or "TEXT" then that column has TEXT affinity. Notice that the type
	// VARCHAR contains the string "CHAR" and is thus assigned TEXT affinity.
	if (StringUtil::Contains(sqlite_type, "char") || StringUtil::Contains(sqlite_type, "clob") ||
	    StringUtil::Contains(sqlite_type, "text")) {
		return LogicalType::VARCHAR;
	}

	// If the declared type for a column contains the string "BLOB" or if no type
	// is specified then the column has affinity BLOB.
	if (StringUtil::Contains(sqlite_type, "blob") || sqlite_type.empty()) {
		return LogicalType::BLOB;
	}

	// If the declared type for a column contains any of the strings "REAL",
	// "FLOA", or "DOUB" then the column has REAL affinity.
	if (StringUtil::Contains(sqlite_type, "real") || StringUtil::Contains(sqlite_type, "floa") ||
	    StringUtil::Contains(sqlite_type, "doub")) {
		return LogicalType::DOUBLE;
	}
	// Otherwise, the affinity is NUMERIC.
	// now numeric sounds simple, but it is rather complex:
	// A column with NUMERIC affinity may contain values using all five storage
	// classes.
	// ...
	// we add some more extra rules to try to be somewhat sane

	// an exact "date" declaration becomes a DATE column
	if (sqlite_type == "date") {
		return LogicalType::DATE;
	}

	// datetime, timestamp
	if (StringUtil::Contains(sqlite_type, "time")) {
		return LogicalType::TIMESTAMP;
	}

	// decimal, numeric - approximated as DOUBLE
	if (StringUtil::Contains(sqlite_type, "dec") || StringUtil::Contains(sqlite_type, "num")) {
		return LogicalType::DOUBLE;
	}

	// alright, give up and fallback to varchar
	return LogicalType::VARCHAR;
}
KeywordHelper::WriteOptionallyQuoted(table_name); 32 | result += " WHERE rowid = ?"; 33 | return result; 34 | } 35 | 36 | unique_ptr SQLiteDelete::GetGlobalSinkState(ClientContext &context) const { 37 | auto &sqlite_table = table.Cast(); 38 | 39 | auto &transaction = SQLiteTransaction::Get(context, sqlite_table.catalog); 40 | auto result = make_uniq(sqlite_table); 41 | result->statement = transaction.GetDB().Prepare(GetDeleteSQL(sqlite_table.name)); 42 | return std::move(result); 43 | } 44 | 45 | //===--------------------------------------------------------------------===// 46 | // Sink 47 | //===--------------------------------------------------------------------===// 48 | SinkResultType SQLiteDelete::Sink(ExecutionContext &context, DataChunk &chunk, OperatorSinkInput &input) const { 49 | auto &gstate = input.global_state.Cast(); 50 | 51 | chunk.Flatten(); 52 | auto &row_identifiers = chunk.data[row_id_index]; 53 | auto row_data = FlatVector::GetData(row_identifiers); 54 | for (idx_t i = 0; i < chunk.size(); i++) { 55 | gstate.statement.Bind(0, row_data[i]); 56 | gstate.statement.Step(); 57 | gstate.statement.Reset(); 58 | } 59 | gstate.delete_count += chunk.size(); 60 | return SinkResultType::NEED_MORE_INPUT; 61 | } 62 | 63 | //===--------------------------------------------------------------------===// 64 | // GetData 65 | //===--------------------------------------------------------------------===// 66 | SourceResultType SQLiteDelete::GetData(ExecutionContext &context, DataChunk &chunk, OperatorSourceInput &input) const { 67 | auto &insert_gstate = sink_state->Cast(); 68 | chunk.SetCardinality(1); 69 | chunk.SetValue(0, 0, Value::BIGINT(insert_gstate.delete_count)); 70 | 71 | return SourceResultType::FINISHED; 72 | } 73 | 74 | //===--------------------------------------------------------------------===// 75 | // Helpers 76 | //===--------------------------------------------------------------------===// 77 | string SQLiteDelete::GetName() const { 78 | return 
"DELETE"; 79 | } 80 | 81 | string SQLiteDelete::ParamsToString() const { 82 | return table.name; 83 | } 84 | 85 | //===--------------------------------------------------------------------===// 86 | // Plan 87 | //===--------------------------------------------------------------------===// 88 | unique_ptr SQLiteCatalog::PlanDelete(ClientContext &context, LogicalDelete &op, 89 | unique_ptr plan) { 90 | if (op.return_chunk) { 91 | throw BinderException("RETURNING clause not yet supported for deletion of a SQLite table"); 92 | } 93 | auto &bound_ref = op.expressions[0]->Cast(); 94 | auto insert = make_uniq(op, op.table, bound_ref.index); 95 | insert->children.push_back(std::move(plan)); 96 | return std::move(insert); 97 | } 98 | 99 | } // namespace duckdb 100 | -------------------------------------------------------------------------------- /data/sql/tpch-create.sqlite: -------------------------------------------------------------------------------- 1 | CREATE TABLE NATION ( N_NATIONKEY INTEGER NOT NULL, 2 | N_NAME CHAR(25) NOT NULL, 3 | N_REGIONKEY INTEGER NOT NULL, 4 | N_COMMENT VARCHAR(152)); 5 | 6 | CREATE TABLE REGION ( R_REGIONKEY INTEGER NOT NULL, 7 | R_NAME CHAR(25) NOT NULL, 8 | R_COMMENT VARCHAR(152)); 9 | 10 | CREATE TABLE PART ( P_PARTKEY INTEGER NOT NULL, 11 | P_NAME VARCHAR(55) NOT NULL, 12 | P_MFGR CHAR(25) NOT NULL, 13 | P_BRAND CHAR(10) NOT NULL, 14 | P_TYPE VARCHAR(25) NOT NULL, 15 | P_SIZE INTEGER NOT NULL, 16 | P_CONTAINER CHAR(10) NOT NULL, 17 | P_RETAILPRICE DECIMAL(15,2) NOT NULL, 18 | P_COMMENT VARCHAR(23) NOT NULL ); 19 | 20 | CREATE TABLE SUPPLIER ( S_SUPPKEY INTEGER NOT NULL, 21 | S_NAME CHAR(25) NOT NULL, 22 | S_ADDRESS VARCHAR(40) NOT NULL, 23 | S_NATIONKEY INTEGER NOT NULL, 24 | S_PHONE CHAR(15) NOT NULL, 25 | S_ACCTBAL DECIMAL(15,2) NOT NULL, 26 | S_COMMENT VARCHAR(101) NOT NULL); 27 | 28 | CREATE TABLE PARTSUPP ( PS_PARTKEY INTEGER NOT NULL, 29 | PS_SUPPKEY INTEGER NOT NULL, 30 | PS_AVAILQTY INTEGER NOT NULL, 31 | PS_SUPPLYCOST 
DECIMAL(15,2) NOT NULL, 32 | PS_COMMENT VARCHAR(199) NOT NULL ); 33 | 34 | CREATE TABLE CUSTOMER ( C_CUSTKEY INTEGER NOT NULL, 35 | C_NAME VARCHAR(25) NOT NULL, 36 | C_ADDRESS VARCHAR(40) NOT NULL, 37 | C_NATIONKEY INTEGER NOT NULL, 38 | C_PHONE CHAR(15) NOT NULL, 39 | C_ACCTBAL DECIMAL(15,2) NOT NULL, 40 | C_MKTSEGMENT CHAR(10) NOT NULL, 41 | C_COMMENT VARCHAR(117) NOT NULL); 42 | 43 | CREATE TABLE ORDERS ( O_ORDERKEY INTEGER NOT NULL, 44 | O_CUSTKEY INTEGER NOT NULL, 45 | O_ORDERSTATUS CHAR(1) NOT NULL, 46 | O_TOTALPRICE DECIMAL(15,2) NOT NULL, 47 | O_ORDERDATE DATE NOT NULL, 48 | O_ORDERPRIORITY CHAR(15) NOT NULL, 49 | O_CLERK CHAR(15) NOT NULL, 50 | O_SHIPPRIORITY INTEGER NOT NULL, 51 | O_COMMENT VARCHAR(79) NOT NULL); 52 | 53 | CREATE TABLE LINEITEM ( L_ORDERKEY INTEGER NOT NULL, 54 | L_PARTKEY INTEGER NOT NULL, 55 | L_SUPPKEY INTEGER NOT NULL, 56 | L_LINENUMBER INTEGER NOT NULL, 57 | L_QUANTITY INTEGER NOT NULL, 58 | L_EXTENDEDPRICE DECIMAL(15,2) NOT NULL, 59 | L_DISCOUNT DECIMAL(15,2) NOT NULL, 60 | L_TAX DECIMAL(15,2) NOT NULL, 61 | L_RETURNFLAG CHAR(1) NOT NULL, 62 | L_LINESTATUS CHAR(1) NOT NULL, 63 | L_SHIPDATE DATE NOT NULL, 64 | L_COMMITDATE DATE NOT NULL, 65 | L_RECEIPTDATE DATE NOT NULL, 66 | L_SHIPINSTRUCT CHAR(25) NOT NULL, 67 | L_SHIPMODE CHAR(10) NOT NULL, 68 | L_COMMENT VARCHAR(44) NOT NULL); 69 | 70 | .headers off 71 | .sep , 72 | .import nation.tbl nation 73 | .import region.tbl region 74 | .import part.tbl part 75 | .import supplier.tbl supplier 76 | .import partsupp.tbl partsupp 77 | .import customer.tbl customer 78 | .import orders.tbl orders 79 | .import lineitem.tbl lineitem 80 | -------------------------------------------------------------------------------- /src/storage/sqlite_transaction.cpp: -------------------------------------------------------------------------------- 1 | #include "storage/sqlite_transaction.hpp" 2 | #include "storage/sqlite_catalog.hpp" 3 | #include "storage/sqlite_index_entry.hpp" 4 | #include 
//! Looks up a catalog entry (table, view or index) by name within this
//! transaction. Entries are cached per-transaction in catalog_entries; on a
//! cache miss the entry is reconstructed by querying the SQLite database.
//! Returns nullptr if no entry with that name exists.
optional_ptr<CatalogEntry> SQLiteTransaction::GetCatalogEntry(const string &entry_name) {
	auto entry = catalog_entries.find(entry_name);
	if (entry != catalog_entries.end()) {
		return entry->second.get();
	}
	// catalog entry not found - look up table in main SQLite database
	auto type = db->GetEntryType(entry_name);
	if (type == CatalogType::INVALID) {
		// no table or view found
		return nullptr;
	}
	unique_ptr<CatalogEntry> result;
	switch (type) {
	case CatalogType::TABLE_ENTRY: {
		CreateTableInfo info(sqlite_catalog.GetMainSchema(), entry_name);
		// the sqlite_all_varchar setting forces every column to be scanned as
		// VARCHAR instead of its inferred type
		bool all_varchar = false;
		Value sqlite_all_varchar;
		if (context.lock()->TryGetCurrentSetting("sqlite_all_varchar", sqlite_all_varchar)) {
			all_varchar = BooleanValue::Get(sqlite_all_varchar);
		}
		db->GetTableInfo(entry_name, info.columns, info.constraints, all_varchar);
		D_ASSERT(!info.columns.empty());

		result = make_uniq<SQLiteTableEntry>(sqlite_catalog, sqlite_catalog.GetMainSchema(), info, all_varchar);
		break;
	}
	case CatalogType::VIEW_ENTRY: {
		// reconstruct the view entry by re-parsing the stored CREATE VIEW SQL
		string sql;
		db->GetViewInfo(entry_name, sql);

		auto view_info = CreateViewInfo::FromCreateView(*context.lock(), sql);
		view_info->internal = false;
		result = make_uniq<ViewCatalogEntry>(sqlite_catalog, sqlite_catalog.GetMainSchema(), *view_info);
		break;
	}
	case CatalogType::INDEX_ENTRY: {
		CreateIndexInfo info;
		info.index_name = entry_name;

		// fetch both the index SQL and the table the index belongs to
		string table_name;
		string sql;
		db->GetIndexInfo(entry_name, sql, table_name);

		auto index_entry =
		    make_uniq<SQLiteIndexEntry>(sqlite_catalog, sqlite_catalog.GetMainSchema(), info, std::move(table_name));
		index_entry->sql = std::move(sql);
		result = std::move(index_entry);
		break;
	}
	default:
		throw InternalException("Unrecognized catalog entry type");
	}
	// cache the entry for the remainder of this transaction; the cache owns it
	// and we hand back a raw pointer into the cache
	auto result_ptr = result.get();
	catalog_entries[entry_name] = std::move(result);
	return result_ptr;
}
break; 121 | default: 122 | throw InternalException("Unsupported type for drop"); 123 | } 124 | result += KeywordHelper::WriteOptionallyQuoted(table_name); 125 | return result; 126 | } 127 | 128 | void SQLiteTransaction::DropEntry(CatalogType type, const string &table_name, bool cascade) { 129 | catalog_entries.erase(table_name); 130 | db->Execute(GetDropSQL(type, table_name, cascade)); 131 | } 132 | 133 | } // namespace duckdb 134 | -------------------------------------------------------------------------------- /src/storage/sqlite_update.cpp: -------------------------------------------------------------------------------- 1 | #include "storage/sqlite_update.hpp" 2 | #include "storage/sqlite_table_entry.hpp" 3 | #include "duckdb/planner/operator/logical_update.hpp" 4 | #include "storage/sqlite_catalog.hpp" 5 | #include "storage/sqlite_transaction.hpp" 6 | #include "sqlite_db.hpp" 7 | #include "sqlite_stmt.hpp" 8 | 9 | namespace duckdb { 10 | 11 | SQLiteUpdate::SQLiteUpdate(LogicalOperator &op, TableCatalogEntry &table, vector columns_p) 12 | : PhysicalOperator(PhysicalOperatorType::EXTENSION, op.types, 1), table(table), columns(std::move(columns_p)) { 13 | } 14 | 15 | //===--------------------------------------------------------------------===// 16 | // States 17 | //===--------------------------------------------------------------------===// 18 | class SQLiteUpdateGlobalState : public GlobalSinkState { 19 | public: 20 | explicit SQLiteUpdateGlobalState(SQLiteTableEntry &table) : table(table), update_count(0) { 21 | } 22 | 23 | SQLiteTableEntry &table; 24 | SQLiteStatement statement; 25 | idx_t update_count; 26 | }; 27 | 28 | string GetUpdateSQL(SQLiteTableEntry &table, const vector &index) { 29 | string result; 30 | result = "UPDATE " + KeywordHelper::WriteOptionallyQuoted(table.name); 31 | result += " SET "; 32 | for (idx_t i = 0; i < index.size(); i++) { 33 | if (i > 0) { 34 | result += ", "; 35 | } 36 | auto &col = 
//! Streams updated rows into SQLite via the prepared UPDATE statement.
//! Chunk layout: data[0..n-2] hold the new SET values, data[n-1] holds the
//! rowid of each row to update; the statement's parameters follow the same
//! order, with the trailing parameter being the "WHERE rowid = ?" slot.
SinkResultType SQLiteUpdate::Sink(ExecutionContext &context, DataChunk &chunk, OperatorSinkInput &input) const {
	auto &gstate = input.global_state.Cast<SQLiteUpdateGlobalState>();

	chunk.Flatten();
	// the last vector of the chunk carries the row identifiers
	auto &row_identifiers = chunk.data[chunk.ColumnCount() - 1];
	auto row_data = FlatVector::GetData<row_t>(row_identifiers);
	auto &stmt = gstate.statement;
	auto update_columns = chunk.ColumnCount() - 1;
	for (idx_t r = 0; r < chunk.size(); r++) {
		// bind the SET values (parameters 0 .. update_columns-1)
		for (idx_t c = 0; c < update_columns; c++) {
			auto &col = chunk.data[c];
			stmt.BindValue(col, c, r);
		}
		// bind the row identifier (the final parameter), then execute and reset
		stmt.Bind(update_columns, row_data[r]);
		stmt.Step();
		stmt.Reset();
	}
	gstate.update_count += chunk.size();
	return SinkResultType::NEED_MORE_INPUT;
}
chunk.SetValue(0, 0, Value::BIGINT(insert_gstate.update_count)); 86 | 87 | return SourceResultType::FINISHED; 88 | } 89 | 90 | //===--------------------------------------------------------------------===// 91 | // Helpers 92 | //===--------------------------------------------------------------------===// 93 | string SQLiteUpdate::GetName() const { 94 | return "UPDATE"; 95 | } 96 | 97 | string SQLiteUpdate::ParamsToString() const { 98 | return table.name; 99 | } 100 | 101 | //===--------------------------------------------------------------------===// 102 | // Plan 103 | //===--------------------------------------------------------------------===// 104 | unique_ptr SQLiteCatalog::PlanUpdate(ClientContext &context, LogicalUpdate &op, 105 | unique_ptr plan) { 106 | if (op.return_chunk) { 107 | throw BinderException("RETURNING clause not yet supported for updates of a SQLite table"); 108 | } 109 | for (auto &expr : op.expressions) { 110 | if (expr->type == ExpressionType::VALUE_DEFAULT) { 111 | throw BinderException("SET DEFAULT is not yet supported for updates of a SQLite table"); 112 | } 113 | } 114 | auto insert = make_uniq(op, op.table, std::move(op.columns)); 115 | insert->children.push_back(std::move(plan)); 116 | return std::move(insert); 117 | } 118 | 119 | } // namespace duckdb 120 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: all clean format debug release duckdb_debug duckdb_release pull update wasm_mvp wasm_eh wasm_threads 2 | 3 | all: release 4 | 5 | MKFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST))) 6 | PROJ_DIR := $(dir $(MKFILE_PATH)) 7 | 8 | TEST_PATH="/test/unittest" 9 | DUCKDB_PATH="/duckdb" 10 | 11 | DUCKDB_SRCDIR ?= "./duckdb/" 12 | 13 | # For non-MinGW windows the path is slightly different 14 | ifeq ($(OS),Windows_NT) 15 | ifneq ($(CXX),g++) 16 | TEST_PATH="/test/Release/unittest.exe" 17 | 
DUCKDB_PATH="/Release/duckdb.exe" 18 | endif 19 | endif 20 | 21 | #### OSX config 22 | OSX_BUILD_FLAG= 23 | ifneq (${OSX_BUILD_ARCH}, "") 24 | OSX_BUILD_FLAG=-DOSX_BUILD_ARCH=${OSX_BUILD_ARCH} 25 | endif 26 | 27 | #### VCPKG config 28 | VCPKG_TOOLCHAIN_PATH?= 29 | ifneq ("${VCPKG_TOOLCHAIN_PATH}", "") 30 | TOOLCHAIN_FLAGS:=${TOOLCHAIN_FLAGS} -DVCPKG_MANIFEST_DIR='${PROJ_DIR}' -DVCPKG_BUILD=1 -DCMAKE_TOOLCHAIN_FILE='${VCPKG_TOOLCHAIN_PATH}' 31 | endif 32 | ifneq ("${VCPKG_TARGET_TRIPLET}", "") 33 | TOOLCHAIN_FLAGS:=${TOOLCHAIN_FLAGS} -DVCPKG_TARGET_TRIPLET='${VCPKG_TARGET_TRIPLET}' 34 | endif 35 | 36 | #### Enable Ninja as generator 37 | ifeq ($(GEN),ninja) 38 | GENERATOR=-G "Ninja" -DFORCE_COLORED_OUTPUT=1 39 | endif 40 | 41 | EXT_NAME=sqlite_scanner 42 | 43 | #### Configuration for this extension 44 | EXTENSION_NAME=SQLITE_SCANNER 45 | EXTENSION_FLAGS=\ 46 | -DDUCKDB_EXTENSION_NAMES="${EXT_NAME}" \ 47 | -DDUCKDB_EXTENSION_${EXTENSION_NAME}_PATH="$(PROJ_DIR)" \ 48 | -DDUCKDB_EXTENSION_${EXTENSION_NAME}_SHOULD_LINK=0 \ 49 | -DDUCKDB_EXTENSION_${EXTENSION_NAME}_LOAD_TESTS=1 \ 50 | -DDUCKDB_EXTENSION_${EXTENSION_NAME}_INCLUDE_PATH="$(PROJ_DIR)src/include" \ 51 | -DDUCKDB_EXTENSION_${EXTENSION_NAME}_TEST_PATH=$(PROJ_DIR)test \ 52 | 53 | BUILD_FLAGS=-DEXTENSION_STATIC_BUILD=1 -DBUILD_EXTENSIONS="tpch" ${OSX_BUILD_FLAG} -DDUCKDB_EXPLICIT_PLATFORM='${DUCKDB_PLATFORM}' 54 | 55 | ifeq ($(DUCKDB_PLATFORM_RTOOLS),1) 56 | BUILD_FLAGS:=${BUILD_FLAGS} -DCMAKE_CXX_FLAGS="-DDUCKDB_PLATFORM_RTOOLS=1" 57 | endif 58 | 59 | CLIENT_FLAGS := 60 | pull: 61 | git submodule init 62 | git submodule update --recursive --remote 63 | 64 | clean: 65 | rm -rf build 66 | cd duckdb && make clean 67 | 68 | # Main build 69 | debug: 70 | mkdir -p build/debug && \ 71 | cmake $(GENERATOR) $(FORCE_COLOR) $(EXTENSION_FLAGS) ${CLIENT_FLAGS} -DEXTENSION_STATIC_BUILD=1 -DCMAKE_BUILD_TYPE=Debug ${BUILD_FLAGS} -S ./duckdb/ -B build/debug && \ 72 | cmake --build build/debug --config Debug 73 | 74 | release: 75 
| mkdir -p build/release && \ 76 | cmake $(GENERATOR) $(FORCE_COLOR) $(EXTENSION_FLAGS) ${CLIENT_FLAGS} -DEXTENSION_STATIC_BUILD=1 -DCMAKE_BUILD_TYPE=Release ${BUILD_FLAGS} -S ./duckdb/ -B build/release && \ 77 | cmake --build build/release --config Release 78 | 79 | data/db/tpch.db: release 80 | command -v sqlite3 || (command -v brew && brew install sqlite) || (command -v choco && choco install sqlite -y) || (command -v apt-get && apt-get install -y sqlite3) || echo "no sqlite3" 81 | ./build/release/$(DUCKDB_PATH) < data/sql/tpch-export.duckdb || tree ./build/release || echo "neither tree not duck" 82 | sqlite3 data/db/tpch.db < data/sql/tpch-create.sqlite 83 | 84 | # Main tests 85 | test: test_release 86 | test_release: release data/db/tpch.db 87 | SQLITE_TPCH_GENERATED=1 ./build/release/$(TEST_PATH) "$(PROJ_DIR)test/*" 88 | test_debug: debug data/db/tpch.db 89 | SQLITE_TPCH_GENERATED=1 ./build/debug/$(TEST_PATH) "$(PROJ_DIR)test/*" 90 | 91 | format: 92 | cp duckdb/.clang-format . 93 | find src/ -iname *.hpp -o -iname *.cpp | xargs clang-format --sort-includes=0 -style=file -i 94 | cmake-format -i CMakeLists.txt 95 | rm .clang-format 96 | 97 | update: 98 | git submodule update --remote --merge 99 | 100 | VCPKG_EMSDK_FLAGS=-DVCPKG_CHAINLOAD_TOOLCHAIN_FILE=$(EMSDK)/upstream/emscripten/cmake/Modules/Platform/Emscripten.cmake 101 | WASM_COMPILE_TIME_COMMON_FLAGS=-DWASM_LOADABLE_EXTENSIONS=1 -DBUILD_EXTENSIONS_ONLY=1 -DSKIP_EXTENSIONS="parquet;json" $(VCPKG_EMSDK_FLAGS) 102 | WASM_CXX_MVP_FLAGS= 103 | WASM_CXX_EH_FLAGS=$(WASM_CXX_MVP_FLAGS) -fwasm-exceptions -DWEBDB_FAST_EXCEPTIONS=1 104 | WASM_CXX_THREADS_FLAGS=$(WASM_COMPILE_TIME_EH_FLAGS) -DWITH_WASM_THREADS=1 -DWITH_WASM_SIMD=1 -DWITH_WASM_BULK_MEMORY=1 -pthread 105 | WASM_LINK_TIME_FLAGS=-O3 -sSIDE_MODULE=2 -sEXPORTED_FUNCTIONS="_${EXT_NAME}_version,_${EXT_NAME}_init" 106 | 107 | wasm_mvp: 108 | mkdir -p build/wasm_mvp 109 | emcmake cmake $(GENERATOR) $(EXTENSION_FLAGS) $(WASM_COMPILE_TIME_COMMON_FLAGS) 
-Bbuild/wasm_mvp -DCMAKE_CXX_FLAGS="$(WASM_CXX_MVP_FLAGS)" -S $(DUCKDB_SRCDIR) -DDUCKDB_EXPLICIT_PLATFORM=wasm_mvp -DDUCKDB_CUSTOM_PLATFORM=wasm_mvp 110 | emmake make -j8 -Cbuild/wasm_mvp 111 | 112 | wasm_eh: 113 | mkdir -p build/wasm_eh 114 | emcmake cmake $(GENERATOR) $(EXTENSION_FLAGS) $(WASM_COMPILE_TIME_COMMON_FLAGS) -Bbuild/wasm_eh -DCMAKE_CXX_FLAGS="$(WASM_CXX_EH_FLAGS)" -S $(DUCKDB_SRCDIR) -DDUCKDB_EXPLICIT_PLATFORM=wasm_eh -DDUCKDB_CUSTOM_PLATFORM=wasm_eh 115 | emmake make -j8 -Cbuild/wasm_eh 116 | 117 | wasm_threads: 118 | mkdir -p ./build/wasm_threads 119 | emcmake cmake $(GENERATOR) $(EXTENSION_FLAGS) $(WASM_COMPILE_TIME_COMMON_FLAGS) -Bbuild/wasm_threads -DCMAKE_CXX_FLAGS="$(WASM_CXX_THREADS_FLAGS)" -S $(DUCKDB_SRCDIR) -DDUCKDB_EXPLICIT_PLATFORM=wasm_threads -DDUCKDB_CUSTOM_PLATFORM=wasm_threads 120 | emmake make -j8 -Cbuild/wasm_threads 121 | -------------------------------------------------------------------------------- /src/sqlite_stmt.cpp: -------------------------------------------------------------------------------- 1 | #include "sqlite_stmt.hpp" 2 | #include "sqlite_db.hpp" 3 | #include "sqlite_scanner.hpp" 4 | 5 | namespace duckdb { 6 | 7 | SQLiteStatement::SQLiteStatement() : db(nullptr), stmt(nullptr) { 8 | } 9 | 10 | SQLiteStatement::SQLiteStatement(sqlite3 *db, sqlite3_stmt *stmt) : db(db), stmt(stmt) { 11 | D_ASSERT(db); 12 | } 13 | 14 | SQLiteStatement::~SQLiteStatement() { 15 | Close(); 16 | } 17 | 18 | SQLiteStatement::SQLiteStatement(SQLiteStatement &&other) noexcept { 19 | std::swap(db, other.db); 20 | std::swap(stmt, other.stmt); 21 | } 22 | 23 | SQLiteStatement &SQLiteStatement::operator=(SQLiteStatement &&other) noexcept { 24 | std::swap(db, other.db); 25 | std::swap(stmt, other.stmt); 26 | return *this; 27 | } 28 | 29 | int SQLiteStatement::Step() { 30 | D_ASSERT(db); 31 | D_ASSERT(stmt); 32 | auto rc = sqlite3_step(stmt); 33 | if (rc == SQLITE_ROW) { 34 | return true; 35 | } 36 | if (rc == SQLITE_DONE) { 37 | return false; 38 | 
} 39 | throw std::runtime_error(string(sqlite3_errmsg(db))); 40 | } 41 | int SQLiteStatement::GetType(idx_t col) { 42 | D_ASSERT(stmt); 43 | return sqlite3_column_type(stmt, col); 44 | } 45 | 46 | bool SQLiteStatement::IsOpen() { 47 | return stmt; 48 | } 49 | 50 | void SQLiteStatement::Close() { 51 | if (!IsOpen()) { 52 | return; 53 | } 54 | sqlite3_finalize(stmt); 55 | db = nullptr; 56 | stmt = nullptr; 57 | } 58 | 59 | void SQLiteStatement::CheckTypeMatches(const SqliteBindData &bind_data, sqlite3_value *val, int sqlite_column_type, 60 | int expected_type, idx_t col_idx) { 61 | D_ASSERT(stmt); 62 | if (bind_data.all_varchar) { 63 | // no type check required 64 | return; 65 | } 66 | if (sqlite_column_type != expected_type) { 67 | auto column_name = string(sqlite3_column_name(stmt, int(col_idx))); 68 | auto value_as_text = string((char *)sqlite3_value_text(val)); 69 | auto message = "Invalid type in column \"" + column_name + "\": column was declared as " + 70 | SQLiteUtils::TypeToString(expected_type) + ", found \"" + value_as_text + "\" of type \"" + 71 | SQLiteUtils::TypeToString(sqlite_column_type) + "\" instead."; 72 | throw Exception(ExceptionType::MISMATCH_TYPE, message); 73 | } 74 | } 75 | 76 | void SQLiteStatement::CheckTypeIsFloatOrInteger(sqlite3_value *val, int sqlite_column_type, idx_t col_idx) { 77 | if (sqlite_column_type != SQLITE_FLOAT && sqlite_column_type != SQLITE_INTEGER) { 78 | auto column_name = string(sqlite3_column_name(stmt, int(col_idx))); 79 | auto value_as_text = string((const char *)sqlite3_value_text(val)); 80 | auto message = "Invalid type in column \"" + column_name + "\": expected float or integer, found \"" + 81 | value_as_text + "\" of type \"" + SQLiteUtils::TypeToString(sqlite_column_type) + "\" instead."; 82 | throw Exception(ExceptionType::MISMATCH_TYPE, message); 83 | } 84 | } 85 | 86 | void SQLiteStatement::Reset() { 87 | SQLiteUtils::Check(sqlite3_reset(stmt), db); 88 | } 89 | 90 | template <> 91 | string 
SQLiteStatement::GetValue(idx_t col) { 92 | D_ASSERT(stmt); 93 | auto ptr = sqlite3_column_text(stmt, col); 94 | if (!ptr) { 95 | return string(); 96 | } 97 | return string((char *)ptr); 98 | } 99 | 100 | template <> 101 | int SQLiteStatement::GetValue(idx_t col) { 102 | D_ASSERT(stmt); 103 | return sqlite3_column_int(stmt, col); 104 | } 105 | 106 | template <> 107 | int64_t SQLiteStatement::GetValue(idx_t col) { 108 | D_ASSERT(stmt); 109 | return sqlite3_column_int64(stmt, col); 110 | } 111 | 112 | template <> 113 | sqlite3_value *SQLiteStatement::GetValue(idx_t col) { 114 | D_ASSERT(stmt); 115 | return sqlite3_column_value(stmt, col); 116 | } 117 | 118 | template <> 119 | void SQLiteStatement::Bind(idx_t col, int32_t value) { 120 | SQLiteUtils::Check(sqlite3_bind_int(stmt, col + 1, value), db); 121 | } 122 | 123 | template <> 124 | void SQLiteStatement::Bind(idx_t col, int64_t value) { 125 | SQLiteUtils::Check(sqlite3_bind_int64(stmt, col + 1, value), db); 126 | } 127 | 128 | template <> 129 | void SQLiteStatement::Bind(idx_t col, double value) { 130 | SQLiteUtils::Check(sqlite3_bind_double(stmt, col + 1, value), db); 131 | } 132 | 133 | void SQLiteStatement::BindBlob(idx_t col, const string_t &value) { 134 | SQLiteUtils::Check(sqlite3_bind_blob(stmt, col + 1, value.GetDataUnsafe(), value.GetSize(), nullptr), db); 135 | } 136 | 137 | void SQLiteStatement::BindText(idx_t col, const string_t &value) { 138 | SQLiteUtils::Check(sqlite3_bind_text(stmt, col + 1, value.GetDataUnsafe(), value.GetSize(), nullptr), db); 139 | } 140 | 141 | template <> 142 | void SQLiteStatement::Bind(idx_t col, std::nullptr_t value) { 143 | SQLiteUtils::Check(sqlite3_bind_null(stmt, col + 1), db); 144 | } 145 | 146 | void SQLiteStatement::BindValue(Vector &col, idx_t c, idx_t r) { 147 | auto &mask = FlatVector::Validity(col); 148 | if (!mask.RowIsValid(r)) { 149 | Bind(c, nullptr); 150 | } else { 151 | switch (col.GetType().id()) { 152 | case LogicalTypeId::BIGINT: 153 | Bind(c, 
FlatVector::GetData<int64_t>(col)[r]); /* GetData<T> template args restored on this line and below; they were stripped by extraction and the calls are non-deducible */ 154 | break; 155 | case LogicalTypeId::DOUBLE: 156 | Bind(c, FlatVector::GetData<double>(col)[r]); 157 | break; 158 | case LogicalTypeId::BLOB: 159 | BindBlob(c, FlatVector::GetData<string_t>(col)[r]); 160 | break; 161 | case LogicalTypeId::VARCHAR: 162 | BindText(c, FlatVector::GetData<string_t>(col)[r]); 163 | break; 164 | default: 165 | throw InternalException("Unsupported type \"%s\" for SQLite::BindValue", col.GetType()); 166 | } 167 | } 168 | } 169 | 170 | } // namespace duckdb 171 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # DuckDB SQLite extension 2 | 3 | The SQLite extension allows DuckDB to directly read and write data from a SQLite database file. The data can be queried directly from the underlying SQLite tables. Data can be loaded from SQLite tables into DuckDB tables, or vice versa. 4 | 5 | ## Reading Data from SQLite 6 | 7 | To make a SQLite file accessible to DuckDB, use the `ATTACH` command, for example with the bundled `sakila.db` file: 8 | 9 | ```sql 10 | ATTACH 'data/db/sakila.db' AS sakila; 11 | USE sakila; 12 | ``` 13 | 14 | The tables in the file can be read as if they were normal DuckDB tables, but the underlying data is read directly from the SQLite tables in the file at query time. 15 | 16 | ```sql 17 | SHOW TABLES; 18 | ``` 19 | 20 | You can query the tables using SQL, e.g.
using the example queries from sakila-examples.sql 21 | 22 | ```sql 23 | SELECT cat.name category_name, 24 | Sum(Ifnull(pay.amount, 0)) revenue 25 | FROM category cat 26 | LEFT JOIN film_category flm_cat 27 | ON cat.category_id = flm_cat.category_id 28 | LEFT JOIN film fil 29 | ON flm_cat.film_id = fil.film_id 30 | LEFT JOIN inventory inv 31 | ON fil.film_id = inv.film_id 32 | LEFT JOIN rental ren 33 | ON inv.inventory_id = ren.inventory_id 34 | LEFT JOIN payment pay 35 | ON ren.rental_id = pay.rental_id 36 | GROUP BY cat.name 37 | ORDER BY revenue DESC 38 | LIMIT 5; 39 | ``` 40 | 41 | ## Opening SQLite Databases Directly 42 | 43 | SQLite databases can also be opened directly and can be used transparently instead of a DuckDB database file. In any client, when connecting, a path to a SQLite database file can be provided and the SQLite database will be opened instead. 44 | 45 | For example, with the shell: 46 | 47 | ```sql 48 | $ > duckdb data/db/sakila.db 49 | v0.9.1 401c8061c6 50 | D SHOW tables; 51 | ┌────────────────────────┐ 52 | │ name │ 53 | │ varchar │ 54 | ├────────────────────────┤ 55 | │ actor │ 56 | │ address │ 57 | │ category │ 58 | │ city │ 59 | │ country │ 60 | │ customer │ 61 | │ customer_list │ 62 | │ film │ 63 | │ film_actor │ 64 | │ film_category │ 65 | │ film_list │ 66 | │ film_text │ 67 | │ inventory │ 68 | │ language │ 69 | │ payment │ 70 | │ rental │ 71 | │ sales_by_film_category │ 72 | │ sales_by_store │ 73 | │ staff │ 74 | │ staff_list │ 75 | │ store │ 76 | ├────────────────────────┤ 77 | │ 21 rows │ 78 | └────────────────────────┘ 79 | ``` 80 | 81 | ## Writing Data to SQLite 82 | 83 | In addition to reading data from SQLite, the extension also allows you to create new SQLite database files, create tables, ingest data into SQLite and make other modifications to SQLite database files using standard SQL queries. 
84 | 85 | This allows you to use DuckDB to, for example, export data that is stored in a SQLite database to Parquet, or read data from a Parquet file into SQLite. 86 | 87 | Below is a brief example of how to create a new SQLite database and load data into it. 88 | 89 | ```sql 90 | ATTACH 'new_sqlite_database.db' AS sqlite_db (TYPE SQLITE); 91 | CREATE TABLE sqlite_db.tbl(id INTEGER, name VARCHAR); 92 | INSERT INTO sqlite_db.tbl VALUES (42, 'DuckDB'); 93 | ``` 94 | 95 | The resulting SQLite database can then be read directly by SQLite. 96 | 97 | ```sql 98 | $ > sqlite3 new_sqlite_database.db 99 | SQLite version 3.39.5 2022-10-14 20:58:05 100 | sqlite> SELECT * FROM tbl; 101 | id name 102 | -- ------ 103 | 42 DuckDB 104 | ``` 105 | 106 | Many operations on SQLite tables are supported. All these operations directly modify the SQLite database, and the result of subsequent operations can then be read using SQLite. 107 | 108 | Below is a list of supported operations. 109 | 110 | ###### CREATE TABLE 111 | ```sql 112 | CREATE TABLE sqlite_db.tbl(id INTEGER, name VARCHAR); 113 | ``` 114 | 115 | ###### INSERT INTO 116 | ```sql 117 | INSERT INTO sqlite_db.tbl VALUES (42, 'DuckDB'); 118 | ``` 119 | 120 | ###### SELECT 121 | ```sql 122 | SELECT * FROM sqlite_db.tbl; 123 | ┌───────┬─────────┐ 124 | │ id │ name │ 125 | │ int64 │ varchar │ 126 | ├───────┼─────────┤ 127 | │ 42 │ DuckDB │ 128 | └───────┴─────────┘ 129 | ``` 130 | 131 | ###### COPY 132 | ```sql 133 | COPY sqlite_db.tbl TO 'data.parquet'; 134 | COPY sqlite_db.tbl FROM 'data.parquet'; 135 | ``` 136 | 137 | ###### UPDATE 138 | ```sql 139 | UPDATE sqlite_db.tbl SET name='Woohoo' WHERE id=42; 140 | ``` 141 | 142 | ###### DELETE 143 | ```sql 144 | DELETE FROM sqlite_db.tbl WHERE id=42; 145 | ``` 146 | 147 | ###### ALTER TABLE 148 | ```sql 149 | ALTER TABLE sqlite_db.tbl ADD COLUMN k INTEGER; 150 | ``` 151 | 152 | ###### DROP TABLE 153 | ```sql 154 | DROP TABLE sqlite_db.tbl; 155 | ``` 156 | 157 | ###### CREATE VIEW 158 |
```sql 159 | CREATE VIEW sqlite_db.v1 AS SELECT 42; 160 | ``` 161 | 162 | ###### Transactions 163 | ```sql 164 | CREATE TABLE sqlite_db.tmp(i INTEGER); 165 | BEGIN; 166 | INSERT INTO sqlite_db.tmp VALUES (42); 167 | SELECT * FROM sqlite_db.tmp; 168 | ┌───────┐ 169 | │ i │ 170 | │ int64 │ 171 | ├───────┤ 172 | │ 42 │ 173 | └───────┘ 174 | ROLLBACK; 175 | SELECT * FROM sqlite_db.tmp; 176 | ┌────────┐ 177 | │ i │ 178 | │ int64 │ 179 | ├────────┤ 180 | │ 0 rows │ 181 | └────────┘ 182 | ``` 183 | 184 | ## Building & Loading the Extension 185 | 186 | To build, type 187 | ``` 188 | make 189 | ``` 190 | 191 | To run, run the bundled `duckdb` shell: 192 | ``` 193 | ./build/release/duckdb -unsigned 194 | ``` 195 | 196 | Then, load the SQLite extension like so: 197 | ```SQL 198 | LOAD 'build/release/extension/sqlite_scanner/sqlite_scanner.duckdb_extension'; 199 | ``` 200 | 201 | -------------------------------------------------------------------------------- /test/sql/scanner/chinook.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/scanner/chinook.test 2 | # description: Test chinook database 3 | # group: [sqlite_scanner] 4 | 5 | require sqlite_scanner 6 | 7 | statement ok 8 | CALL sqlite_attach('data/db/chinook.db'); 9 | 10 | query III 11 | SELECT * FROM albums LIMIT 10; 12 | ---- 13 | 1 For Those About To Rock We Salute You 1 14 | 2 Balls to the Wall 2 15 | 3 Restless and Wild 2 16 | 4 Let There Be Rock 1 17 | 5 Big Ones 3 18 | 6 Jagged Little Pill 4 19 | 7 Facelift 5 20 | 8 Warner 25 Anos 6 21 | 9 Plays Metallica By Four Cellos 7 22 | 10 Audioslave 8 23 | 24 | query II 25 | SELECT * FROM artists LIMIT 10; 26 | ---- 27 | 1 AC/DC 28 | 2 Accept 29 | 3 Aerosmith 30 | 4 Alanis Morissette 31 | 5 Alice In Chains 32 | 6 Antônio Carlos Jobim 33 | 7 Apocalyptica 34 | 8 Audioslave 35 | 9 BackBeat 36 | 10 Billy Cobham 37 | 38 | query IIIIIIIIIIIII 39 | SELECT * FROM customers LIMIT 10; 40 | ---- 41 | 1 Luís Gonçalves Embraer - 
Empresa Brasileira de Aeronáutica S.A. Av. Brigadeiro Faria Lima, 2170 São José dos Campos SP Brazil 12227-000 +55 (12) 3923-5555 +55 (12) 3923-5566 luisg@embraer.com.br 3 42 | 2 Leonie Köhler NULL Theodor-Heuss-Straße 34 Stuttgart NULL Germany 70174 +49 0711 2842222 NULL leonekohler@surfeu.de 5 43 | 3 François Tremblay NULL 1498 rue Bélanger Montréal QC Canada H2G 1A7 +1 (514) 721-4711 NULL ftremblay@gmail.com 3 44 | 4 Bjørn Hansen NULL Ullevålsveien 14 Oslo NULL Norway 0171 +47 22 44 22 22 NULL bjorn.hansen@yahoo.no 4 45 | 5 František Wichterlová JetBrains s.r.o. Klanova 9/506 Prague NULL Czech Republic 14700 +420 2 4172 5555 +420 2 4172 5555 frantisekw@jetbrains.com 4 46 | 6 Helena Holý NULL Rilská 3174/6 Prague NULL Czech Republic 14300 +420 2 4177 0449 NULL hholy@gmail.com 5 47 | 7 Astrid Gruber NULL Rotenturmstraße 4, 1010 Innere Stadt Vienne NULL Austria 1010 +43 01 5134505 NULL astrid.gruber@apple.at 5 48 | 8 Daan Peeters NULL Grétrystraat 63 Brussels NULL Belgium 1000 +32 02 219 03 03 NULL daan_peeters@apple.be 4 49 | 9 Kara Nielsen NULL Sønder Boulevard 51 Copenhagen NULL Denmark 1720 +453 3331 9991 NULL kara.nielsen@jubii.dk 4 50 | 10 Eduardo Martins Woodstock Discos Rua Dr. 
Falcão Filho, 155 São Paulo SP Brazil 01007-010 +55 (11) 3033-5446 +55 (11) 3033-4564 eduardo@woodstock.com.br 4 51 | 52 | query IIIIIIIIIIIIIII 53 | SELECT * FROM employees LIMIT 10; 54 | ---- 55 | 1 Adams Andrew General Manager NULL 1962-02-18 00:00:00 2002-08-14 00:00:00 11120 Jasper Ave NW Edmonton AB Canada T5K 2N1 +1 (780) 428-9482 +1 (780) 428-3457 andrew@chinookcorp.com 56 | 2 Edwards Nancy Sales Manager 1 1958-12-08 00:00:00 2002-05-01 00:00:00 825 8 Ave SW Calgary AB Canada T2P 2T3 +1 (403) 262-3443 +1 (403) 262-3322 nancy@chinookcorp.com 57 | 3 Peacock Jane Sales Support Agent 2 1973-08-29 00:00:00 2002-04-01 00:00:00 1111 6 Ave SW Calgary AB Canada T2P 5M5 +1 (403) 262-3443 +1 (403) 262-6712 jane@chinookcorp.com 58 | 4 Park Margaret Sales Support Agent 2 1947-09-19 00:00:00 2003-05-03 00:00:00 683 10 Street SW Calgary AB Canada T2P 5G3 +1 (403) 263-4423 +1 (403) 263-4289 margaret@chinookcorp.com 59 | 5 Johnson Steve Sales Support Agent 2 1965-03-03 00:00:00 2003-10-17 00:00:00 7727B 41 Ave Calgary AB Canada T3B 1Y7 1 (780) 836-9987 1 (780) 836-9543 steve@chinookcorp.com 60 | 6 Mitchell Michael IT Manager 1 1973-07-01 00:00:00 2003-10-17 00:00:00 5827 Bowness Road NW Calgary AB Canada T3B 0C5 +1 (403) 246-9887 +1 (403) 246-9899 michael@chinookcorp.com 61 | 7 King Robert IT Staff 6 1970-05-29 00:00:00 2004-01-02 00:00:00 590 Columbia Boulevard West Lethbridge AB Canada T1K 5N8 +1 (403) 456-9986 +1 (403) 456-8485 robert@chinookcorp.com 62 | 8 Callahan Laura IT Staff 6 1968-01-09 00:00:00 2004-03-04 00:00:00 923 7 ST NW Lethbridge AB Canada T1H 1Y8 +1 (403) 467-3351 +1 (403) 467-8772 laura@chinookcorp.com 63 | 64 | query II 65 | SELECT * FROM genres LIMIT 10; 66 | ---- 67 | 1 Rock 68 | 2 Jazz 69 | 3 Metal 70 | 4 Alternative & Punk 71 | 5 Rock And Roll 72 | 6 Blues 73 | 7 Latin 74 | 8 Reggae 75 | 9 Pop 76 | 10 Soundtrack 77 | 78 | query IIIIIIIII 79 | SELECT * FROM invoices LIMIT 10; 80 | ---- 81 | 1 2 2009-01-01 00:00:00 Theodor-Heuss-Straße 34 Stuttgart 
NULL Germany 70174 1.98 82 | 2 4 2009-01-02 00:00:00 Ullevålsveien 14 Oslo NULL Norway 0171 3.96 83 | 3 8 2009-01-03 00:00:00 Grétrystraat 63 Brussels NULL Belgium 1000 5.94 84 | 4 14 2009-01-06 00:00:00 8210 111 ST NW Edmonton AB Canada T6G 2C7 8.91 85 | 5 23 2009-01-11 00:00:00 69 Salem Street Boston MA USA 2113 13.86 86 | 6 37 2009-01-19 00:00:00 Berger Straße 10 Frankfurt NULL Germany 60316 0.99 87 | 7 38 2009-02-01 00:00:00 Barbarossastraße 19 Berlin NULL Germany 10779 1.98 88 | 8 40 2009-02-01 00:00:00 8, Rue Hanovre Paris NULL France 75002 1.98 89 | 9 42 2009-02-02 00:00:00 9, Place Louis Barthou Bordeaux NULL France 33000 3.96 90 | 10 46 2009-02-03 00:00:00 3 Chatham Street Dublin Dublin Ireland NULL 5.94 91 | 92 | query IIIII 93 | SELECT * FROM invoice_items LIMIT 10; 94 | ---- 95 | 1 1 2 0.99 1 96 | 2 1 4 0.99 1 97 | 3 2 6 0.99 1 98 | 4 2 8 0.99 1 99 | 5 2 10 0.99 1 100 | 6 2 12 0.99 1 101 | 7 3 16 0.99 1 102 | 8 3 20 0.99 1 103 | 9 3 24 0.99 1 104 | 10 3 28 0.99 1 105 | 106 | query II 107 | SELECT * FROM media_types LIMIT 10; 108 | ---- 109 | 1 MPEG audio file 110 | 2 Protected AAC audio file 111 | 3 Protected MPEG-4 video file 112 | 4 Purchased AAC audio file 113 | 5 AAC audio file 114 | 115 | query II 116 | SELECT * FROM playlists LIMIT 10; 117 | ---- 118 | 1 Music 119 | 2 Movies 120 | 3 TV Shows 121 | 4 Audiobooks 122 | 5 90’s Music 123 | 6 Audiobooks 124 | 7 Movies 125 | 8 Music 126 | 9 Music Videos 127 | 10 TV Shows 128 | 129 | 130 | query II 131 | SELECT * FROM playlist_track LIMIT 10; 132 | ---- 133 | 1 3402 134 | 1 3389 135 | 1 3390 136 | 1 3391 137 | 1 3392 138 | 1 3393 139 | 1 3394 140 | 1 3395 141 | 1 3396 142 | 1 3397 143 | 144 | query IIIIIIIII 145 | SELECT * FROM tracks LIMIT 10; 146 | ---- 147 | 1 For Those About To Rock (We Salute You) 1 1 1 Angus Young, Malcolm Young, Brian Johnson 343719 11170334 0.99 148 | 2 Balls to the Wall 2 2 1 NULL 342562 5510424 0.99 149 | 3 Fast As a Shark 3 2 1 F. Baltes, S. Kaufman, U. Dirkscneider & W. 
Hoffman 230619 3990994 0.99 150 | 4 Restless and Wild 3 2 1 F. Baltes, R.A. Smith-Diesel, S. Kaufman, U. Dirkscneider & W. Hoffman 252051 4331779 0.99 151 | 5 Princess of the Dawn 3 2 1 Deaffy & R.A. Smith-Diesel 375418 6290521 0.99 152 | 6 Put The Finger On You 1 1 1 Angus Young, Malcolm Young, Brian Johnson 205662 6713451 0.99 153 | 7 Let's Get It Up 1 1 1 Angus Young, Malcolm Young, Brian Johnson 233926 7636561 0.99 154 | 8 Inject The Venom 1 1 1 Angus Young, Malcolm Young, Brian Johnson 210834 6852860 0.99 155 | 9 Snowballed 1 1 1 Angus Young, Malcolm Young, Brian Johnson 203102 6599424 0.99 156 | 10 Evil Walks 1 1 1 Angus Young, Malcolm Young, Brian Johnson 263497 8611245 0.99 157 | -------------------------------------------------------------------------------- /src/storage/sqlite_insert.cpp: -------------------------------------------------------------------------------- 1 | #include "storage/sqlite_insert.hpp" 2 | #include "storage/sqlite_catalog.hpp" 3 | #include "storage/sqlite_transaction.hpp" 4 | #include "duckdb/planner/operator/logical_insert.hpp" 5 | #include "duckdb/planner/operator/logical_create_table.hpp" 6 | #include "storage/sqlite_table_entry.hpp" 7 | #include "duckdb/planner/parsed_data/bound_create_table_info.hpp" 8 | #include "duckdb/execution/operator/projection/physical_projection.hpp" 9 | #include "duckdb/planner/expression/bound_cast_expression.hpp" 10 | #include "duckdb/planner/expression/bound_reference_expression.hpp" 11 | #include "sqlite_db.hpp" 12 | #include "sqlite_stmt.hpp" 13 | 14 | namespace duckdb { 15 | 16 | SQLiteInsert::SQLiteInsert(LogicalOperator &op, TableCatalogEntry &table, 17 | physical_index_vector_t column_index_map_p) 18 | : PhysicalOperator(PhysicalOperatorType::EXTENSION, op.types, 1), table(&table), schema(nullptr), 19 | column_index_map(std::move(column_index_map_p)) { 20 | } 21 | 22 | SQLiteInsert::SQLiteInsert(LogicalOperator &op, SchemaCatalogEntry &schema, unique_ptr info) 23 | : 
PhysicalOperator(PhysicalOperatorType::EXTENSION, op.types, 1), table(nullptr), schema(&schema), 24 | info(std::move(info)) { 25 | } 26 | 27 | //===--------------------------------------------------------------------===// 28 | // States 29 | //===--------------------------------------------------------------------===// 30 | class SQLiteInsertGlobalState : public GlobalSinkState { 31 | public: 32 | explicit SQLiteInsertGlobalState(ClientContext &context, SQLiteTableEntry *table) : insert_count(0) { 33 | } 34 | 35 | SQLiteTableEntry *table; 36 | SQLiteStatement statement; 37 | idx_t insert_count; 38 | }; 39 | 40 | string GetInsertSQL(const SQLiteInsert &insert, SQLiteTableEntry *entry) { 41 | string result; 42 | result = "INSERT INTO " + KeywordHelper::WriteOptionallyQuoted(entry->name); 43 | auto &columns = entry->GetColumns(); 44 | idx_t column_count; 45 | if (!insert.column_index_map.empty()) { 46 | column_count = 0; 47 | result += " ("; 48 | vector column_indexes; 49 | column_indexes.resize(columns.LogicalColumnCount(), PhysicalIndex(DConstants::INVALID_INDEX)); 50 | for (idx_t c = 0; c < insert.column_index_map.size(); c++) { 51 | auto column_index = PhysicalIndex(c); 52 | auto mapped_index = insert.column_index_map[column_index]; 53 | if (mapped_index == DConstants::INVALID_INDEX) { 54 | // column not specified 55 | continue; 56 | } 57 | column_indexes[mapped_index] = column_index; 58 | column_count++; 59 | } 60 | for (idx_t c = 0; c < column_count; c++) { 61 | if (c > 0) { 62 | result += ", "; 63 | } 64 | auto &col = columns.GetColumn(column_indexes[c]); 65 | result += KeywordHelper::WriteOptionallyQuoted(col.GetName()); 66 | } 67 | result += ")"; 68 | } else { 69 | column_count = columns.LogicalColumnCount(); 70 | } 71 | result += " VALUES ("; 72 | for (idx_t i = 0; i < column_count; i++) { 73 | if (i > 0) { 74 | result += ", "; 75 | } 76 | result += "?"; 77 | } 78 | result += ");"; 79 | return result; 80 | } 81 | 82 | unique_ptr 
SQLiteInsert::GetGlobalSinkState(ClientContext &context) const { 83 | SQLiteTableEntry *insert_table; 84 | if (!table) { 85 | auto &schema_ref = *schema.get_mutable(); 86 | insert_table = 87 | &schema_ref.CreateTable(schema_ref.GetCatalogTransaction(context), *info)->Cast(); 88 | } else { 89 | insert_table = &table.get_mutable()->Cast(); 90 | } 91 | auto &transaction = SQLiteTransaction::Get(context, insert_table->catalog); 92 | auto result = make_uniq(context, insert_table); 93 | result->statement = transaction.GetDB().Prepare(GetInsertSQL(*this, insert_table)); 94 | return std::move(result); 95 | } 96 | 97 | //===--------------------------------------------------------------------===// 98 | // Sink 99 | //===--------------------------------------------------------------------===// 100 | SinkResultType SQLiteInsert::Sink(ExecutionContext &context, DataChunk &chunk, OperatorSinkInput &input) const { 101 | auto &gstate = sink_state->Cast(); 102 | chunk.Flatten(); 103 | auto &stmt = gstate.statement; 104 | for (idx_t r = 0; r < chunk.size(); r++) { 105 | for (idx_t c = 0; c < chunk.ColumnCount(); c++) { 106 | auto &col = chunk.data[c]; 107 | stmt.BindValue(col, c, r); 108 | } 109 | // execute and clear bindings 110 | stmt.Step(); 111 | stmt.Reset(); 112 | } 113 | gstate.insert_count += chunk.size(); 114 | return SinkResultType::NEED_MORE_INPUT; 115 | } 116 | 117 | //===--------------------------------------------------------------------===// 118 | // GetData 119 | //===--------------------------------------------------------------------===// 120 | SourceResultType SQLiteInsert::GetData(ExecutionContext &context, DataChunk &chunk, OperatorSourceInput &input) const { 121 | auto &insert_gstate = sink_state->Cast(); 122 | chunk.SetCardinality(1); 123 | chunk.SetValue(0, 0, Value::BIGINT(insert_gstate.insert_count)); 124 | 125 | return SourceResultType::FINISHED; 126 | } 127 | 128 | //===--------------------------------------------------------------------===// 129 | // 
Helpers 130 | //===--------------------------------------------------------------------===// 131 | string SQLiteInsert::GetName() const { 132 | return table ? "INSERT" : "CREATE_TABLE_AS"; 133 | } 134 | 135 | string SQLiteInsert::ParamsToString() const { 136 | return table ? table->name : info->Base().table; 137 | } 138 | 139 | //===--------------------------------------------------------------------===// 140 | // Plan 141 | //===--------------------------------------------------------------------===// 142 | unique_ptr AddCastToSQLiteTypes(ClientContext &context, unique_ptr plan) { 143 | // check if we need to cast anything 144 | bool require_cast = false; 145 | auto &child_types = plan->GetTypes(); 146 | for (auto &type : child_types) { 147 | auto sqlite_type = SQLiteUtils::ToSQLiteType(type); 148 | if (sqlite_type != type) { 149 | require_cast = true; 150 | break; 151 | } 152 | } 153 | if (require_cast) { 154 | vector sqlite_types; 155 | vector> select_list; 156 | for (idx_t i = 0; i < child_types.size(); i++) { 157 | auto &type = child_types[i]; 158 | unique_ptr expr; 159 | expr = make_uniq(type, i); 160 | 161 | auto sqlite_type = SQLiteUtils::ToSQLiteType(type); 162 | if (sqlite_type != type) { 163 | // add a cast 164 | expr = BoundCastExpression::AddCastToType(context, std::move(expr), sqlite_type); 165 | } 166 | sqlite_types.push_back(std::move(sqlite_type)); 167 | select_list.push_back(std::move(expr)); 168 | } 169 | // we need to cast: add casts 170 | auto proj = 171 | make_uniq(std::move(sqlite_types), std::move(select_list), plan->estimated_cardinality); 172 | proj->children.push_back(std::move(plan)); 173 | plan = std::move(proj); 174 | } 175 | 176 | return plan; 177 | } 178 | 179 | unique_ptr SQLiteCatalog::PlanInsert(ClientContext &context, LogicalInsert &op, 180 | unique_ptr plan) { 181 | if (op.return_chunk) { 182 | throw BinderException("RETURNING clause not yet supported for insertion into SQLite table"); 183 | } 184 | if (op.action_type != 
OnConflictAction::THROW) { 185 | throw BinderException("ON CONFLICT clause not yet supported for insertion into SQLite table"); 186 | } 187 | 188 | plan = AddCastToSQLiteTypes(context, std::move(plan)); 189 | 190 | auto insert = make_uniq(op, op.table, op.column_index_map); 191 | insert->children.push_back(std::move(plan)); 192 | return std::move(insert); 193 | } 194 | 195 | unique_ptr SQLiteCatalog::PlanCreateTableAs(ClientContext &context, LogicalCreateTable &op, 196 | unique_ptr plan) { 197 | plan = AddCastToSQLiteTypes(context, std::move(plan)); 198 | 199 | auto insert = make_uniq(op, op.schema, std::move(op.info)); 200 | insert->children.push_back(std::move(plan)); 201 | return std::move(insert); 202 | } 203 | 204 | } // namespace duckdb 205 | -------------------------------------------------------------------------------- /src/sqlite_db.cpp: -------------------------------------------------------------------------------- 1 | #include "duckdb/parser/constraints/not_null_constraint.hpp" 2 | #include "duckdb/parser/constraints/unique_constraint.hpp" 3 | #include "duckdb/parser/expression/constant_expression.hpp" 4 | #include "duckdb/storage/table_storage_info.hpp" 5 | #include "duckdb/parser/column_list.hpp" 6 | #include "duckdb/parser/parser.hpp" 7 | #include "sqlite_db.hpp" 8 | #include "sqlite_stmt.hpp" 9 | 10 | namespace duckdb { 11 | 12 | SQLiteDB::SQLiteDB() : db(nullptr) { 13 | } 14 | 15 | SQLiteDB::SQLiteDB(sqlite3 *db) : db(db) { 16 | } 17 | 18 | SQLiteDB::~SQLiteDB() { 19 | Close(); 20 | } 21 | 22 | SQLiteDB::SQLiteDB(SQLiteDB &&other) noexcept { 23 | std::swap(db, other.db); 24 | } 25 | 26 | SQLiteDB &SQLiteDB::operator=(SQLiteDB &&other) noexcept { 27 | std::swap(db, other.db); 28 | return *this; 29 | } 30 | 31 | SQLiteDB SQLiteDB::Open(const string &path, const SQLiteOpenOptions &options, bool is_shared) { 32 | SQLiteDB result; 33 | int flags = SQLITE_OPEN_PRIVATECACHE; 34 | if (options.access_mode == AccessMode::READ_ONLY) { 35 | flags |= 
SQLITE_OPEN_READONLY; 36 | } else { 37 | flags |= SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE; 38 | } 39 | if (!is_shared) { 40 | // FIXME: we should just make sure we are not re-using the same `sqlite3` 41 | // object across threads 42 | flags |= SQLITE_OPEN_NOMUTEX; 43 | } 44 | flags |= SQLITE_OPEN_EXRESCODE; 45 | auto rc = sqlite3_open_v2(path.c_str(), &result.db, flags, nullptr); 46 | if (rc != SQLITE_OK) { 47 | throw std::runtime_error("Unable to open database \"" + path + "\": " + string(sqlite3_errstr(rc))); 48 | } 49 | // default busy time-out of 5 seconds 50 | if (options.busy_timeout > 0) { 51 | if (options.busy_timeout > NumericLimits::Maximum()) { 52 | throw std::runtime_error("busy_timeout out of range - must be within valid range for type int"); 53 | } 54 | rc = sqlite3_busy_timeout(result.db, int(options.busy_timeout)); 55 | if (rc != SQLITE_OK) { 56 | throw std::runtime_error("Failed to set busy timeout"); 57 | } 58 | } 59 | if (!options.journal_mode.empty()) { 60 | result.Execute("PRAGMA journal_mode=" + KeywordHelper::EscapeQuotes(options.journal_mode, '\'')); 61 | } 62 | return result; 63 | } 64 | 65 | bool SQLiteDB::TryPrepare(const string &query, SQLiteStatement &stmt) { 66 | stmt.db = db; 67 | auto rc = sqlite3_prepare_v2(db, query.c_str(), -1, &stmt.stmt, nullptr); 68 | if (rc != SQLITE_OK) { 69 | return false; 70 | } 71 | return true; 72 | } 73 | 74 | SQLiteStatement SQLiteDB::Prepare(const string &query) { 75 | SQLiteStatement stmt; 76 | if (!TryPrepare(query, stmt)) { 77 | string error = "Failed to prepare query \"" + query + "\": " + string(sqlite3_errmsg(db)); 78 | throw std::runtime_error(error); 79 | } 80 | return stmt; 81 | } 82 | 83 | void SQLiteDB::Execute(const string &query) { 84 | auto rc = sqlite3_exec(db, query.c_str(), nullptr, nullptr, nullptr); 85 | if (rc != SQLITE_OK) { 86 | string error = "Failed to execute query \"" + query + "\": " + string(sqlite3_errmsg(db)); 87 | throw std::runtime_error(error); 88 | } 89 | } 90 | 91 
| bool SQLiteDB::IsOpen() { 92 | return db; 93 | } 94 | 95 | void SQLiteDB::Close() { 96 | if (!IsOpen()) { 97 | return; 98 | } 99 | auto rc = sqlite3_close_v2(db); 100 | if (rc == SQLITE_BUSY) { 101 | throw InternalException("Failed to close database - SQLITE_BUSY"); 102 | } 103 | db = nullptr; 104 | } 105 | 106 | vector SQLiteDB::GetEntries(string entry_type) { 107 | vector result; 108 | SQLiteStatement stmt = Prepare("SELECT name FROM sqlite_master WHERE type='" + entry_type + "'"); 109 | while (stmt.Step()) { 110 | auto table_name = stmt.GetValue(0); 111 | result.push_back(std::move(table_name)); 112 | } 113 | return result; 114 | } 115 | 116 | vector SQLiteDB::GetTables() { 117 | return GetEntries("table"); 118 | } 119 | 120 | CatalogType SQLiteDB::GetEntryType(const string &name) { 121 | SQLiteStatement stmt; 122 | stmt = Prepare(StringUtil::Format("SELECT type FROM sqlite_master WHERE lower(name)=lower('%s');", 123 | SQLiteUtils::SanitizeString(name))); 124 | while (stmt.Step()) { 125 | auto type = stmt.GetValue(0); 126 | if (type == "table") { 127 | return CatalogType::TABLE_ENTRY; 128 | } else if (type == "view") { 129 | return CatalogType::VIEW_ENTRY; 130 | } else if (type == "index") { 131 | return CatalogType::INDEX_ENTRY; 132 | } else { 133 | throw InternalException("Unrecognized SQLite type \"%s\"", name); 134 | } 135 | } 136 | return CatalogType::INVALID; 137 | } 138 | 139 | void SQLiteDB::GetIndexInfo(const string &index_name, string &sql, string &table_name) { 140 | SQLiteStatement stmt; 141 | stmt = Prepare(StringUtil::Format("SELECT tbl_name, sql FROM sqlite_master WHERE lower(name)=lower('%s');", 142 | SQLiteUtils::SanitizeString(index_name))); 143 | while (stmt.Step()) { 144 | table_name = stmt.GetValue(0); 145 | sql = stmt.GetValue(1); 146 | return; 147 | } 148 | throw InternalException("GetViewInfo - index \"%s\" not found", index_name); 149 | } 150 | 151 | void SQLiteDB::GetViewInfo(const string &view_name, string &sql) { 152 | 
SQLiteStatement stmt; 153 | stmt = Prepare(StringUtil::Format("SELECT sql FROM sqlite_master WHERE lower(name)=lower('%s');", 154 | SQLiteUtils::SanitizeString(view_name))); 155 | while (stmt.Step()) { 156 | sql = stmt.GetValue(0); 157 | return; 158 | } 159 | throw InternalException("GetViewInfo - view \"%s\" not found", view_name); 160 | } 161 | 162 | void SQLiteDB::GetTableInfo(const string &table_name, ColumnList &columns, vector> &constraints, 163 | bool all_varchar) { 164 | SQLiteStatement stmt; 165 | 166 | idx_t primary_key_index = idx_t(-1); 167 | vector primary_keys; 168 | 169 | bool found = false; 170 | 171 | stmt = Prepare(StringUtil::Format("PRAGMA table_info('%s')", SQLiteUtils::SanitizeString(table_name))); 172 | while (stmt.Step()) { 173 | auto cid = stmt.GetValue(0); 174 | auto sqlite_colname = stmt.GetValue(1); 175 | auto sqlite_type = StringUtil::Lower(stmt.GetValue(2)); 176 | auto not_null = stmt.GetValue(3); 177 | auto default_value = stmt.GetValue(4); 178 | auto pk = stmt.GetValue(5); 179 | StringUtil::Trim(sqlite_type); 180 | auto column_type = all_varchar ? 
LogicalType::VARCHAR : SQLiteUtils::TypeToLogicalType(sqlite_type); 181 | 182 | if (pk) { 183 | primary_key_index = cid; 184 | primary_keys.push_back(sqlite_colname); 185 | } 186 | ColumnDefinition column(std::move(sqlite_colname), std::move(column_type)); 187 | if (!default_value.empty() && default_value != "\"\"") { 188 | auto expressions = Parser::ParseExpressionList(default_value); 189 | if (expressions.empty()) { 190 | throw InternalException("Expression list is empty"); 191 | } 192 | column.SetDefaultValue(std::move(expressions[0])); 193 | } 194 | columns.AddColumn(std::move(column)); 195 | if (not_null) { 196 | constraints.push_back(make_uniq(LogicalIndex(cid))); 197 | } 198 | found = true; 199 | } 200 | if (!found) { 201 | throw InternalException("GetTableInfo - table \"%s\" not found", table_name); 202 | } 203 | if (!primary_keys.empty()) { 204 | if (primary_keys.size() == 1) { 205 | constraints.push_back(make_uniq(LogicalIndex(primary_key_index), true)); 206 | } else { 207 | constraints.push_back(make_uniq(std::move(primary_keys), true)); 208 | } 209 | } 210 | } 211 | 212 | bool SQLiteDB::ColumnExists(const string &table_name, const string &column_name) { 213 | SQLiteStatement stmt; 214 | 215 | stmt = Prepare(StringUtil::Format("PRAGMA table_info(\"%s\")", SQLiteUtils::SanitizeIdentifier(table_name))); 216 | while (stmt.Step()) { 217 | auto sqlite_colname = stmt.GetValue(1); 218 | if (sqlite_colname == column_name) { 219 | return true; 220 | } 221 | } 222 | return false; 223 | } 224 | 225 | bool SQLiteDB::GetMaxRowId(const string &table_name, idx_t &max_row_id) { 226 | SQLiteStatement stmt; 227 | if (!TryPrepare(StringUtil::Format("SELECT MAX(ROWID) FROM \"%s\"", SQLiteUtils::SanitizeIdentifier(table_name)), 228 | stmt)) { 229 | return false; 230 | } 231 | if (!stmt.Step()) { 232 | return false; 233 | } 234 | int64_t val = stmt.GetValue(0); 235 | ; 236 | if (val <= 0) { 237 | return false; 238 | } 239 | max_row_id = idx_t(val); 240 | return true; 241 | } 
242 | 243 | vector SQLiteDB::GetIndexInfo(const string &table_name) { 244 | vector info; 245 | // fetch the primary key 246 | SQLiteStatement stmt; 247 | stmt = Prepare(StringUtil::Format("SELECT cid FROM pragma_table_info('%s') WHERE pk", 248 | SQLiteUtils::SanitizeString(table_name))); 249 | IndexInfo pk_index; 250 | while (stmt.Step()) { 251 | auto cid = stmt.GetValue(0); 252 | pk_index.column_set.insert(cid); 253 | } 254 | if (!pk_index.column_set.empty()) { 255 | // we have a pk - add it 256 | pk_index.is_primary = true; 257 | pk_index.is_unique = true; 258 | pk_index.is_foreign = false; 259 | info.push_back(std::move(pk_index)); 260 | } 261 | 262 | // now query the set of unique constraints for the table 263 | stmt = Prepare(StringUtil::Format("SELECT name FROM pragma_index_list('%s') " 264 | "WHERE \"unique\" AND origin='u'", 265 | SQLiteUtils::SanitizeString(table_name))); 266 | vector unique_indexes; 267 | while (stmt.Step()) { 268 | auto index_name = stmt.GetValue(0); 269 | unique_indexes.push_back(index_name); 270 | } 271 | for (auto &index_name : unique_indexes) { 272 | stmt = Prepare( 273 | StringUtil::Format("SELECT cid FROM pragma_index_info('%s')", SQLiteUtils::SanitizeString(index_name))); 274 | IndexInfo unique_index; 275 | while (stmt.Step()) { 276 | auto cid = stmt.GetValue(0); 277 | unique_index.column_set.insert(cid); 278 | } 279 | if (!unique_index.column_set.empty()) { 280 | // we have a pk - add it 281 | unique_index.is_primary = false; 282 | unique_index.is_unique = true; 283 | unique_index.is_foreign = false; 284 | info.push_back(std::move(unique_index)); 285 | } 286 | } 287 | return info; 288 | } 289 | 290 | idx_t SQLiteDB::RunPragma(string pragma_name) { 291 | SQLiteStatement stmt; 292 | stmt = Prepare("PRAGMA " + pragma_name); 293 | while (stmt.Step()) { 294 | return idx_t(stmt.GetValue(0)); 295 | } 296 | throw InternalException("No result returned from pragma " + pragma_name); 297 | } 298 | 299 | } // namespace duckdb 300 | 
-------------------------------------------------------------------------------- /data/sql/sakila-examples.sql: -------------------------------------------------------------------------------- 1 | LOAD 'build/release/sqlite_scanner.duckdb_extension'; 2 | CALL sqlite_attach('sakila.db'); 3 | 4 | -- 1a. Display the first and last names of all actors from the table actor. 5 | 6 | select first_name, last_name 7 | from actor; 8 | 9 | -- 1b. Display the first and last name of each actor in a single column in upper case letters. Name the column Actor Name. 10 | 11 | select upper(concat(first_name, ' ', last_name)) 'Actor Name' 12 | from actor; 13 | 14 | -- 2a. You need to find the ID number, first name, and last name of an actor, of whom you know only the first name, "Joe." What is one query would you use to obtain this information? 15 | 16 | select actor_id, first_name, last_name 17 | from actor 18 | where lower(first_name) = lower('Joe'); 19 | 20 | -- 2b. Find all actors whose last name contain the letters GEN: 21 | 22 | select * 23 | from actor 24 | where upper(last_name) like '%GEN%'; 25 | 26 | -- 2c. Find all actors whose last names contain the letters LI. This time, order the rows by last name and first name, in that order: 27 | 28 | select * 29 | from actor 30 | where upper(last_name) like '%LI%' 31 | order by last_name, first_name; 32 | 33 | -- 2d. Using IN, display the country_id and country columns of the following countries: Afghanistan, Bangladesh, and China: 34 | 35 | select country_id, country 36 | from country 37 | where country in ('Afghanistan', 'Bangladesh', 'China'); 38 | 39 | -- 3a. You want to keep a description of each actor. You don't think you will be performing queries on a description, so create a column in the table actor named description and use the data type BLOB (Make sure to research the type BLOB, as the difference between it and VARCHAR are significant). 
select * from actor;

ALTER TABLE actor
ADD COLUMN description BLOB;

select * from actor;

-- 3b. Very quickly you realize that entering descriptions for each actor is too much effort. Delete the description column.

select * from actor;

alter table actor
drop column description;

select * from actor;

-- 4a. List the last names of actors, as well as how many actors have that last name.

select last_name, count(*) actor_count
from actor
group by last_name
order by actor_count desc, last_name;

-- 4b. List last names of actors and the number of actors who have that last name, but only for names that are shared by at least two actors

select last_name, count(*) actor_count
from actor
group by last_name
having actor_count >1
order by actor_count desc, last_name;

-- 4c. The actor HARPO WILLIAMS was accidentally entered in the actor table as GROUCHO WILLIAMS. Write a query to fix the record.

select * from actor where first_name = 'GROUCHO' and last_name = 'WILLIAMS';

update actor set first_name = 'HARPO', last_name = 'WILLIAMS' where first_name = 'GROUCHO' and last_name = 'WILLIAMS';

select * from actor where last_name = 'WILLIAMS';

-- 4d. Perhaps we were too hasty in changing GROUCHO to HARPO. It turns out that GROUCHO was the correct name after all! In a single query, if the first name of the actor is currently HARPO, change it to GROUCHO.

update actor set first_name = 'GROUCHO', last_name = 'WILLIAMS' where first_name = 'HARPO' and last_name = 'WILLIAMS';

select * from actor where last_name = 'WILLIAMS';

-- 5a. You cannot locate the schema of the address table. Which query would you use to re-create it?

-- fix: SHOW CREATE TABLE is MySQL syntax and fails against SQLite/DuckDB;
-- read the stored CREATE statement from sqlite_master instead
select sql from sqlite_master where type = 'table' and name = 'address';

-- 6a. Use JOIN to display the first and last names, as well as the address, of each staff member. Use the tables staff and address:

select stf.first_name, stf.last_name, adr.address, adr.district, adr.postal_code, adr.city_id
from staff stf
left join address adr
on stf.address_id = adr.address_id;

-- 6b. Use JOIN to display the total amount rung up by each staff member in August of 2005. Use tables staff and payment.

select stf.first_name, stf.last_name, sum(pay.amount)
from staff stf
left join payment pay
on stf.staff_id = pay.staff_id
WHERE month(pay.payment_date) = 8
and year(pay.payment_date) = 2005
group by stf.first_name, stf.last_name;

-- 6c. List each film and the number of actors who are listed for that film. Use tables film_actor and film. Use inner join.

select flm.title, count(*) number_of_actors
from film flm
inner join film_actor fim_act
on flm.film_id = fim_act.film_id
group by flm.title
order by number_of_actors desc;

-- 6d. How many copies of the film Hunchback Impossible exist in the inventory system?

select flm.title, count(*) number_in_inventory
from film flm
inner join inventory inv
on flm.film_id = inv.film_id
where lower(flm.title) = lower('Hunchback Impossible')
group by flm.title;

-- 6e. Using the tables payment and customer and the JOIN command, list the total paid by each customer. List the customers alphabetically by last name:

select cust.first_name, cust.last_name, sum(pay.amount) "Total Amount Paid"
from payment pay
join customer cust
on pay.customer_id = cust.customer_id
group by cust.first_name, cust.last_name
order by cust.last_name;

-- 7a. The music of Queen and Kris Kristofferson have seen an unlikely resurgence. As an unintended consequence, films starting with the letters K and Q have also soared in popularity. Use subqueries to display the titles of movies starting with the letters K and Q whose language is English.

select title
from film
where (title like 'K%' or title like 'Q%')
and language_id in (
	select language_id
	from language
	where name = 'English'
)
order by title;

-- 7b. Use subqueries to display all actors who appear in the film Alone Trip.

select first_name, last_name
from actor
where actor_id in (
	select actor_id
	from film_actor
	where film_id in (
		select film_id from film where lower(title) = lower('Alone Trip')
	)
);

-- 7c. You want to run an email marketing campaign in Canada, for which you will need the names and email addresses of all Canadian customers. Use joins to retrieve this information.

-- Subquery
select first_name, last_name, email
from customer
where address_id in (
	select address_id
	from address
	where city_id in (
		select city_id
		from city
		where country_id in (
			select country_id
			from country
			where country = 'Canada'
		)
	)
);

-- Join

select cus.first_name, cus.last_name, cus.email
from customer cus
join address adr
on cus.address_id = adr.address_id
join city cit
on adr.city_id = cit.city_id
join country cou
on cit.country_id = cou.country_id
where cou.country = 'Canada';

-- 7d. Sales have been lagging among young families, and you wish to target all family movies for a promotion. Identify all movies categorized as family films.
-- Answer to 7d: family films, located via nested IN-subqueries.
SELECT film_id, title, release_year
FROM film
WHERE film_id IN (
	SELECT film_id
	FROM film_category
	WHERE category_id IN (
		SELECT category_id
		FROM category
		WHERE name = 'Family'
	)
);

-- 7e. Display the most frequently rented movies in descending order.

SELECT A.film_id, A.title, B.*
FROM film AS A
JOIN (
	SELECT inv.film_id, COUNT(ren.rental_id) AS times_rented
	FROM rental AS ren
	JOIN inventory AS inv ON ren.inventory_id = inv.inventory_id
	GROUP BY inv.film_id
) AS B ON A.film_id = B.film_id
ORDER BY B.times_rented DESC;

-- 7f. Write a query to display how much business, in dollars, each store brought in.

SELECT A.store_id, B.sales
FROM store AS A
JOIN (
	SELECT cus.store_id, SUM(pay.amount) AS sales
	FROM customer AS cus
	JOIN payment AS pay ON pay.customer_id = cus.customer_id
	GROUP BY cus.store_id
) AS B ON A.store_id = B.store_id
ORDER BY A.store_id;

-- 7g. Write a query to display for each store its store ID, city, and country.

SELECT sto.store_id, cit.city, cou.country
FROM store AS sto
LEFT JOIN address AS adr ON sto.address_id = adr.address_id
JOIN city AS cit ON adr.city_id = cit.city_id
JOIN country AS cou ON cit.country_id = cou.country_id;

-- Same result, combined with per-store sales.
SELECT A.*, B.sales
FROM (
	SELECT sto.store_id, cit.city, cou.country
	FROM store AS sto
	LEFT JOIN address AS adr ON sto.address_id = adr.address_id
	JOIN city AS cit ON adr.city_id = cit.city_id
	JOIN country AS cou ON cit.country_id = cou.country_id
) AS A
JOIN (
	SELECT cus.store_id, SUM(pay.amount) AS sales
	FROM customer AS cus
	JOIN payment AS pay ON pay.customer_id = cus.customer_id
	GROUP BY cus.store_id
) AS B ON A.store_id = B.store_id
ORDER BY A.store_id;

-- 7h. List the top five genres in gross revenue in descending order. (Hint: you may need to use the following tables: category, film_category, inventory, payment, and rental.)

SELECT cat.name AS category_name, SUM( IFNULL(pay.amount, 0) ) AS revenue
FROM category AS cat
LEFT JOIN film_category AS flm_cat ON cat.category_id = flm_cat.category_id
LEFT JOIN film AS fil ON flm_cat.film_id = fil.film_id
LEFT JOIN inventory AS inv ON fil.film_id = inv.film_id
LEFT JOIN rental AS ren ON inv.inventory_id = ren.inventory_id
LEFT JOIN payment AS pay ON ren.rental_id = pay.rental_id
GROUP BY cat.name
ORDER BY revenue DESC
LIMIT 5;

-- 8a. In your new role as an executive, you would like to have an easy way of viewing the Top five genres by gross revenue. Use the solution from the problem above to create a view. If you haven't solved 7h, you can substitute another query to create a view.
282 | 283 | create view top_five_genres as 284 | select cat.name category_name, sum( IFNULL(pay.amount, 0) ) revenue 285 | from category cat 286 | left join film_category flm_cat 287 | on cat.category_id = flm_cat.category_id 288 | left join film fil 289 | on flm_cat.film_id = fil.film_id 290 | left join inventory inv 291 | on fil.film_id = inv.film_id 292 | left join rental ren 293 | on inv.inventory_id = ren.inventory_id 294 | left join payment pay 295 | on ren.rental_id = pay.rental_id 296 | group by cat.name 297 | order by revenue desc 298 | limit 5; 299 | 300 | -- 8b. How would you display the view that you created in 8a? 301 | 302 | select * from top_five_genres; 303 | 304 | -- 8c. You find that you no longer need the view top_five_genres. Write a query to delete it. 305 | 306 | drop view top_five_genres; -------------------------------------------------------------------------------- /src/sqlite_scanner.cpp: -------------------------------------------------------------------------------- 1 | #include "duckdb.hpp" 2 | 3 | #include "sqlite_db.hpp" 4 | #include "sqlite_stmt.hpp" 5 | #include "sqlite_scanner.hpp" 6 | #include 7 | #include "duckdb/parser/parser.hpp" 8 | #include "duckdb/parser/expression/cast_expression.hpp" 9 | #include "duckdb/common/types/date.hpp" 10 | #include "duckdb/common/types/timestamp.hpp" 11 | #include "duckdb/storage/table/row_group.hpp" 12 | #include "duckdb/main/client_context.hpp" 13 | #include "duckdb/main/config.hpp" 14 | #include "duckdb/storage/storage_extension.hpp" 15 | 16 | #include 17 | 18 | namespace duckdb { 19 | 20 | struct SqliteLocalState : public LocalTableFunctionState { 21 | SQLiteDB *db; 22 | SQLiteDB owned_db; 23 | SQLiteStatement stmt; 24 | bool done = false; 25 | vector column_ids; 26 | 27 | ~SqliteLocalState() { 28 | } 29 | }; 30 | 31 | struct SqliteGlobalState : public GlobalTableFunctionState { 32 | SqliteGlobalState(idx_t max_threads) : max_threads(max_threads) { 33 | } 34 | 35 | mutex lock; 36 | idx_t 
position = 0; 37 | idx_t max_threads; 38 | 39 | idx_t MaxThreads() const override { 40 | return max_threads; 41 | } 42 | }; 43 | 44 | static unique_ptr SqliteBind(ClientContext &context, TableFunctionBindInput &input, 45 | vector &return_types, vector &names) { 46 | 47 | auto result = make_uniq(); 48 | result->file_name = input.inputs[0].GetValue(); 49 | result->table_name = input.inputs[1].GetValue(); 50 | 51 | SQLiteDB db; 52 | SQLiteStatement stmt; 53 | SQLiteOpenOptions options; 54 | options.access_mode = AccessMode::READ_ONLY; 55 | db = SQLiteDB::Open(result->file_name, options); 56 | 57 | ColumnList columns; 58 | vector> constraints; 59 | 60 | result->all_varchar = false; 61 | Value sqlite_all_varchar; 62 | if (context.TryGetCurrentSetting("sqlite_all_varchar", sqlite_all_varchar)) { 63 | result->all_varchar = BooleanValue::Get(sqlite_all_varchar); 64 | } 65 | db.GetTableInfo(result->table_name, columns, constraints, result->all_varchar); 66 | for (auto &column : columns.Logical()) { 67 | names.push_back(column.GetName()); 68 | return_types.push_back(column.GetType()); 69 | } 70 | 71 | if (names.empty()) { 72 | throw std::runtime_error("no columns for table " + result->table_name); 73 | } 74 | 75 | if (!db.GetMaxRowId(result->table_name, result->max_rowid)) { 76 | result->max_rowid = idx_t(-1); 77 | result->rows_per_group = idx_t(-1); 78 | } 79 | 80 | result->names = names; 81 | result->types = return_types; 82 | 83 | return std::move(result); 84 | } 85 | 86 | static void SqliteInitInternal(ClientContext &context, const SqliteBindData &bind_data, SqliteLocalState &local_state, 87 | idx_t rowid_min, idx_t rowid_max) { 88 | D_ASSERT(rowid_min <= rowid_max); 89 | 90 | local_state.done = false; 91 | // we may have leftover statements or connections from a previous call to this 92 | // function 93 | local_state.stmt.Close(); 94 | if (!local_state.db) { 95 | SQLiteOpenOptions options; 96 | options.access_mode = AccessMode::READ_ONLY; 97 | local_state.owned_db = 
SQLiteDB::Open(bind_data.file_name.c_str(), options); 98 | local_state.db = &local_state.owned_db; 99 | } 100 | 101 | auto col_names = StringUtil::Join( 102 | local_state.column_ids.data(), local_state.column_ids.size(), ", ", [&](const idx_t column_id) { 103 | return column_id == (column_t)-1 ? "ROWID" 104 | : '"' + SQLiteUtils::SanitizeIdentifier(bind_data.names[column_id]) + '"'; 105 | }); 106 | 107 | auto sql = 108 | StringUtil::Format("SELECT %s FROM \"%s\"", col_names, SQLiteUtils::SanitizeIdentifier(bind_data.table_name)); 109 | if (bind_data.rows_per_group != idx_t(-1)) { 110 | // we are scanning a subset of the rows - generate a WHERE clause based on 111 | // the rowid 112 | auto where_clause = StringUtil::Format(" WHERE ROWID BETWEEN %d AND %d", rowid_min, rowid_max); 113 | sql += where_clause; 114 | } else { 115 | // we are scanning the entire table - no need for a WHERE clause 116 | D_ASSERT(rowid_min == 0); 117 | } 118 | local_state.stmt = local_state.db->Prepare(sql.c_str()); 119 | } 120 | 121 | static unique_ptr SqliteCardinality(ClientContext &context, const FunctionData *bind_data_p) { 122 | D_ASSERT(bind_data_p); 123 | auto &bind_data = bind_data_p->Cast(); 124 | return make_uniq(bind_data.max_rowid); 125 | } 126 | 127 | static idx_t SqliteMaxThreads(ClientContext &context, const FunctionData *bind_data_p) { 128 | D_ASSERT(bind_data_p); 129 | auto &bind_data = bind_data_p->Cast(); 130 | if (bind_data.global_db) { 131 | return 1; 132 | } 133 | return bind_data.max_rowid / bind_data.rows_per_group; 134 | } 135 | 136 | static bool SqliteParallelStateNext(ClientContext &context, const SqliteBindData &bind_data, SqliteLocalState &lstate, 137 | SqliteGlobalState &gstate) { 138 | lock_guard parallel_lock(gstate.lock); 139 | if (gstate.position < bind_data.max_rowid) { 140 | auto start = gstate.position; 141 | auto end = start + bind_data.rows_per_group - 1; 142 | SqliteInitInternal(context, bind_data, lstate, start, end); 143 | gstate.position = end + 1; 
144 | return true; 145 | } 146 | return false; 147 | } 148 | 149 | static unique_ptr 150 | SqliteInitLocalState(ExecutionContext &context, TableFunctionInitInput &input, GlobalTableFunctionState *global_state) { 151 | auto &bind_data = input.bind_data->Cast(); 152 | auto &gstate = global_state->Cast(); 153 | auto result = make_uniq(); 154 | result->column_ids = input.column_ids; 155 | result->db = bind_data.global_db; 156 | if (!SqliteParallelStateNext(context.client, bind_data, *result, gstate)) { 157 | result->done = true; 158 | } 159 | return std::move(result); 160 | } 161 | 162 | static unique_ptr SqliteInitGlobalState(ClientContext &context, 163 | TableFunctionInitInput &input) { 164 | auto result = make_uniq(SqliteMaxThreads(context, input.bind_data.get())); 165 | result->position = 0; 166 | return std::move(result); 167 | } 168 | 169 | static void SqliteScan(ClientContext &context, TableFunctionInput &data, DataChunk &output) { 170 | auto &state = data.local_state->Cast(); 171 | auto &gstate = data.global_state->Cast(); 172 | auto &bind_data = data.bind_data->Cast(); 173 | 174 | while (output.size() == 0) { 175 | if (state.done) { 176 | if (!SqliteParallelStateNext(context, bind_data, state, gstate)) { 177 | return; 178 | } 179 | } 180 | 181 | idx_t out_idx = 0; 182 | while (true) { 183 | if (out_idx == STANDARD_VECTOR_SIZE) { 184 | output.SetCardinality(out_idx); 185 | return; 186 | } 187 | auto &stmt = state.stmt; 188 | auto has_more = stmt.Step(); 189 | if (!has_more) { 190 | state.done = true; 191 | output.SetCardinality(out_idx); 192 | break; 193 | } 194 | for (idx_t col_idx = 0; col_idx < output.ColumnCount(); col_idx++) { 195 | auto &out_vec = output.data[col_idx]; 196 | auto sqlite_column_type = stmt.GetType(col_idx); 197 | if (sqlite_column_type == SQLITE_NULL) { 198 | auto &mask = FlatVector::Validity(out_vec); 199 | mask.Set(out_idx, false); 200 | continue; 201 | } 202 | 203 | auto val = stmt.GetValue(col_idx); 204 | switch 
(out_vec.GetType().id()) { 205 | case LogicalTypeId::BIGINT: 206 | stmt.CheckTypeMatches(bind_data, val, sqlite_column_type, SQLITE_INTEGER, col_idx); 207 | FlatVector::GetData(out_vec)[out_idx] = sqlite3_value_int64(val); 208 | break; 209 | case LogicalTypeId::DOUBLE: 210 | stmt.CheckTypeIsFloatOrInteger(val, sqlite_column_type, col_idx); 211 | FlatVector::GetData(out_vec)[out_idx] = sqlite3_value_double(val); 212 | break; 213 | case LogicalTypeId::VARCHAR: 214 | stmt.CheckTypeMatches(bind_data, val, sqlite_column_type, SQLITE_TEXT, col_idx); 215 | FlatVector::GetData(out_vec)[out_idx] = StringVector::AddString( 216 | out_vec, (const char *)sqlite3_value_text(val), sqlite3_value_bytes(val)); 217 | break; 218 | case LogicalTypeId::DATE: 219 | stmt.CheckTypeMatches(bind_data, val, sqlite_column_type, SQLITE_TEXT, col_idx); 220 | FlatVector::GetData(out_vec)[out_idx] = 221 | Date::FromCString((const char *)sqlite3_value_text(val), sqlite3_value_bytes(val)); 222 | break; 223 | case LogicalTypeId::TIMESTAMP: 224 | stmt.CheckTypeMatches(bind_data, val, sqlite_column_type, SQLITE_TEXT, col_idx); 225 | FlatVector::GetData(out_vec)[out_idx] = 226 | Timestamp::FromCString((const char *)sqlite3_value_text(val), sqlite3_value_bytes(val)); 227 | break; 228 | case LogicalTypeId::BLOB: 229 | FlatVector::GetData(out_vec)[out_idx] = StringVector::AddStringOrBlob( 230 | out_vec, (const char *)sqlite3_value_blob(val), sqlite3_value_bytes(val)); 231 | break; 232 | default: 233 | throw std::runtime_error(out_vec.GetType().ToString()); 234 | } 235 | } 236 | out_idx++; 237 | } 238 | } 239 | } 240 | 241 | static string SqliteToString(const FunctionData *bind_data_p) { 242 | D_ASSERT(bind_data_p); 243 | auto &bind_data = bind_data_p->Cast(); 244 | return StringUtil::Format("%s:%s", bind_data.file_name, bind_data.table_name); 245 | } 246 | 247 | /* 248 | static unique_ptr 249 | SqliteStatistics(ClientContext &context, const FunctionData *bind_data_p, 250 | column_t column_index) { 251 | 
auto &bind_data = (SqliteBindData &)*bind_data_p; 252 | auto stats = BaseStatistics::CreateEmpty(bind_data.types[column_index]); 253 | stats->validity_stats = 254 | make_uniq(!bind_data.not_nulls[column_index]); 255 | return stats; 256 | } 257 | */ 258 | 259 | SqliteScanFunction::SqliteScanFunction() 260 | : TableFunction("sqlite_scan", {LogicalType::VARCHAR, LogicalType::VARCHAR}, SqliteScan, SqliteBind, 261 | SqliteInitGlobalState, SqliteInitLocalState) { 262 | cardinality = SqliteCardinality; 263 | to_string = SqliteToString; 264 | projection_pushdown = true; 265 | } 266 | 267 | struct AttachFunctionData : public TableFunctionData { 268 | AttachFunctionData() { 269 | } 270 | 271 | bool finished = false; 272 | bool overwrite = false; 273 | string file_name = ""; 274 | }; 275 | 276 | static unique_ptr AttachBind(ClientContext &context, TableFunctionBindInput &input, 277 | vector &return_types, vector &names) { 278 | 279 | auto result = make_uniq(); 280 | result->file_name = input.inputs[0].GetValue(); 281 | 282 | for (auto &kv : input.named_parameters) { 283 | if (kv.first == "overwrite") { 284 | result->overwrite = BooleanValue::Get(kv.second); 285 | } 286 | } 287 | 288 | return_types.emplace_back(LogicalType::BOOLEAN); 289 | names.emplace_back("Success"); 290 | return std::move(result); 291 | } 292 | 293 | static void AttachFunction(ClientContext &context, TableFunctionInput &data_p, DataChunk &output) { 294 | auto &data = data_p.bind_data->CastNoConst(); 295 | if (data.finished) { 296 | return; 297 | } 298 | 299 | SQLiteOpenOptions options; 300 | options.access_mode = AccessMode::READ_ONLY; 301 | SQLiteDB db = SQLiteDB::Open(data.file_name, options); 302 | auto dconn = Connection(context.db->GetDatabase(context)); 303 | { 304 | auto tables = db.GetTables(); 305 | for (auto &table_name : tables) { 306 | dconn.TableFunction("sqlite_scan", {Value(data.file_name), Value(table_name)}) 307 | ->CreateView(table_name, data.overwrite, false); 308 | } 309 | } 310 | { 311 
| SQLiteStatement stmt = db.Prepare("SELECT sql FROM sqlite_master WHERE type='view'"); 312 | while (stmt.Step()) { 313 | auto view_sql = stmt.GetValue(0); 314 | dconn.Query(view_sql); 315 | } 316 | } 317 | data.finished = true; 318 | } 319 | 320 | SqliteAttachFunction::SqliteAttachFunction() 321 | : TableFunction("sqlite_attach", {LogicalType::VARCHAR}, AttachFunction, AttachBind) { 322 | named_parameters["overwrite"] = LogicalType::BOOLEAN; 323 | } 324 | 325 | } // namespace duckdb 326 | -------------------------------------------------------------------------------- /src/storage/sqlite_schema_entry.cpp: -------------------------------------------------------------------------------- 1 | #include "storage/sqlite_schema_entry.hpp" 2 | #include "storage/sqlite_table_entry.hpp" 3 | #include "storage/sqlite_transaction.hpp" 4 | #include "duckdb/catalog/dependency_list.hpp" 5 | #include "duckdb/parser/parsed_data/create_table_info.hpp" 6 | #include "duckdb/parser/parsed_data/create_view_info.hpp" 7 | #include "duckdb/parser/parsed_data/create_index_info.hpp" 8 | #include "duckdb/planner/parsed_data/bound_create_table_info.hpp" 9 | #include "duckdb/parser/parsed_data/drop_info.hpp" 10 | #include "duckdb/parser/constraints/list.hpp" 11 | #include "duckdb/common/unordered_set.hpp" 12 | #include "duckdb/parser/parsed_data/alter_info.hpp" 13 | #include "duckdb/parser/parsed_data/alter_table_info.hpp" 14 | #include "duckdb/parser/parsed_expression_iterator.hpp" 15 | 16 | namespace duckdb { 17 | 18 | SQLiteSchemaEntry::SQLiteSchemaEntry(Catalog &catalog, CreateSchemaInfo &info) : SchemaCatalogEntry(catalog, info) { 19 | } 20 | 21 | SQLiteTransaction &GetSQLiteTransaction(CatalogTransaction transaction) { 22 | if (!transaction.transaction) { 23 | throw InternalException("No transaction!?"); 24 | } 25 | return transaction.transaction->Cast(); 26 | } 27 | 28 | string GetCreateTableSQL(CreateTableInfo &info) { 29 | for (idx_t i = 0; i < info.columns.LogicalColumnCount(); i++) { 
30 | auto &col = info.columns.GetColumnMutable(LogicalIndex(i)); 31 | col.SetType(SQLiteUtils::ToSQLiteType(col.GetType())); 32 | } 33 | 34 | std::stringstream ss; 35 | ss << "CREATE TABLE "; 36 | if (info.on_conflict == OnCreateConflict::IGNORE_ON_CONFLICT) { 37 | ss << "IF NOT EXISTS "; 38 | } 39 | ss << KeywordHelper::WriteOptionallyQuoted(info.table); 40 | ss << TableCatalogEntry::ColumnsToSQL(info.columns, info.constraints); 41 | ss << ";"; 42 | return ss.str(); 43 | } 44 | 45 | void SQLiteSchemaEntry::TryDropEntry(ClientContext &context, CatalogType catalog_type, const string &name) { 46 | DropInfo info; 47 | info.type = catalog_type; 48 | info.name = name; 49 | info.cascade = false; 50 | info.if_not_found = OnEntryNotFound::RETURN_NULL; 51 | DropEntry(context, info); 52 | } 53 | 54 | optional_ptr SQLiteSchemaEntry::CreateTable(CatalogTransaction transaction, BoundCreateTableInfo &info) { 55 | auto &sqlite_transaction = GetSQLiteTransaction(transaction); 56 | auto &base_info = info.Base(); 57 | auto table_name = base_info.table; 58 | if (base_info.on_conflict == OnCreateConflict::REPLACE_ON_CONFLICT) { 59 | // CREATE OR REPLACE - drop any existing entries first (if any) 60 | TryDropEntry(transaction.GetContext(), CatalogType::TABLE_ENTRY, table_name); 61 | } 62 | 63 | sqlite_transaction.GetDB().Execute(GetCreateTableSQL(base_info)); 64 | return GetEntry(transaction, CatalogType::TABLE_ENTRY, table_name); 65 | } 66 | 67 | optional_ptr SQLiteSchemaEntry::CreateFunction(CatalogTransaction transaction, CreateFunctionInfo &info) { 68 | throw BinderException("SQLite databases do not support creating functions"); 69 | } 70 | 71 | void UnqualifyColumnReferences(ParsedExpression &expr) { 72 | if (expr.type == ExpressionType::COLUMN_REF) { 73 | auto &colref = expr.Cast(); 74 | auto name = std::move(colref.column_names.back()); 75 | colref.column_names = {std::move(name)}; 76 | return; 77 | } 78 | ParsedExpressionIterator::EnumerateChildren(expr, 
UnqualifyColumnReferences); 79 | } 80 | 81 | string GetCreateIndexSQL(CreateIndexInfo &info, TableCatalogEntry &tbl) { 82 | string sql; 83 | sql = "CREATE"; 84 | if (info.constraint_type == IndexConstraintType::UNIQUE) { 85 | sql += " UNIQUE"; 86 | } 87 | sql += " INDEX "; 88 | sql += KeywordHelper::WriteOptionallyQuoted(info.index_name); 89 | sql += " ON "; 90 | sql += KeywordHelper::WriteOptionallyQuoted(tbl.name); 91 | sql += "("; 92 | for (idx_t i = 0; i < info.parsed_expressions.size(); i++) { 93 | if (i > 0) { 94 | sql += ", "; 95 | } 96 | UnqualifyColumnReferences(*info.parsed_expressions[i]); 97 | sql += info.parsed_expressions[i]->ToString(); 98 | } 99 | sql += ")"; 100 | return sql; 101 | } 102 | 103 | optional_ptr SQLiteSchemaEntry::CreateIndex(CatalogTransaction transaction, CreateIndexInfo &info, 104 | TableCatalogEntry &table) { 105 | auto &sqlite_transaction = SQLiteTransaction::Get(transaction.GetContext(), table.catalog); 106 | sqlite_transaction.GetDB().Execute(GetCreateIndexSQL(info, table)); 107 | return nullptr; 108 | } 109 | 110 | string GetCreateViewSQL(CreateViewInfo &info) { 111 | string sql; 112 | sql = "CREATE VIEW "; 113 | if (info.on_conflict == OnCreateConflict::IGNORE_ON_CONFLICT) { 114 | sql += "IF NOT EXISTS "; 115 | } 116 | sql += KeywordHelper::WriteOptionallyQuoted(info.view_name); 117 | sql += " "; 118 | if (!info.aliases.empty()) { 119 | sql += "("; 120 | for (idx_t i = 0; i < info.aliases.size(); i++) { 121 | if (i > 0) { 122 | sql += ", "; 123 | } 124 | auto &alias = info.aliases[i]; 125 | sql += KeywordHelper::WriteOptionallyQuoted(alias); 126 | } 127 | sql += ") "; 128 | } 129 | sql += "AS "; 130 | sql += info.query->ToString(); 131 | return sql; 132 | } 133 | 134 | optional_ptr SQLiteSchemaEntry::CreateView(CatalogTransaction transaction, CreateViewInfo &info) { 135 | if (info.sql.empty()) { 136 | throw BinderException("Cannot create view in SQLite that originated from " 137 | "an empty SQL statement"); 138 | } 139 | if 
(info.on_conflict == OnCreateConflict::REPLACE_ON_CONFLICT) { 140 | // CREATE OR REPLACE - drop any existing entries first (if any) 141 | TryDropEntry(transaction.GetContext(), CatalogType::VIEW_ENTRY, info.view_name); 142 | } 143 | auto &sqlite_transaction = GetSQLiteTransaction(transaction); 144 | sqlite_transaction.GetDB().Execute(GetCreateViewSQL(info)); 145 | return GetEntry(transaction, CatalogType::VIEW_ENTRY, info.view_name); 146 | } 147 | 148 | optional_ptr SQLiteSchemaEntry::CreateSequence(CatalogTransaction transaction, CreateSequenceInfo &info) { 149 | throw BinderException("SQLite databases do not support creating sequences"); 150 | } 151 | 152 | optional_ptr SQLiteSchemaEntry::CreateTableFunction(CatalogTransaction transaction, 153 | CreateTableFunctionInfo &info) { 154 | throw BinderException("SQLite databases do not support creating table functions"); 155 | } 156 | 157 | optional_ptr SQLiteSchemaEntry::CreateCopyFunction(CatalogTransaction transaction, 158 | CreateCopyFunctionInfo &info) { 159 | throw BinderException("SQLite databases do not support creating copy functions"); 160 | } 161 | 162 | optional_ptr SQLiteSchemaEntry::CreatePragmaFunction(CatalogTransaction transaction, 163 | CreatePragmaFunctionInfo &info) { 164 | throw BinderException("SQLite databases do not support creating pragma functions"); 165 | } 166 | 167 | optional_ptr SQLiteSchemaEntry::CreateCollation(CatalogTransaction transaction, 168 | CreateCollationInfo &info) { 169 | throw BinderException("SQLite databases do not support creating collations"); 170 | } 171 | 172 | optional_ptr SQLiteSchemaEntry::CreateType(CatalogTransaction transaction, CreateTypeInfo &info) { 173 | throw BinderException("SQLite databases do not support creating types"); 174 | } 175 | 176 | void SQLiteSchemaEntry::AlterTable(SQLiteTransaction &sqlite_transaction, RenameTableInfo &info) { 177 | string sql = "ALTER TABLE "; 178 | sql += KeywordHelper::WriteOptionallyQuoted(info.name); 179 | sql += " RENAME 
TO "; 180 | sql += KeywordHelper::WriteOptionallyQuoted(info.new_table_name); 181 | sqlite_transaction.GetDB().Execute(sql); 182 | } 183 | 184 | void SQLiteSchemaEntry::AlterTable(SQLiteTransaction &sqlite_transaction, RenameColumnInfo &info) { 185 | string sql = "ALTER TABLE "; 186 | sql += KeywordHelper::WriteOptionallyQuoted(info.name); 187 | sql += " RENAME COLUMN "; 188 | sql += KeywordHelper::WriteOptionallyQuoted(info.old_name); 189 | sql += " TO "; 190 | sql += KeywordHelper::WriteOptionallyQuoted(info.new_name); 191 | sqlite_transaction.GetDB().Execute(sql); 192 | } 193 | 194 | void SQLiteSchemaEntry::AlterTable(SQLiteTransaction &sqlite_transaction, AddColumnInfo &info) { 195 | if (info.if_column_not_exists) { 196 | if (sqlite_transaction.GetDB().ColumnExists(info.name, info.new_column.GetName())) { 197 | return; 198 | } 199 | } 200 | string sql = "ALTER TABLE "; 201 | sql += KeywordHelper::WriteOptionallyQuoted(info.name); 202 | sql += " ADD COLUMN "; 203 | sql += KeywordHelper::WriteOptionallyQuoted(info.new_column.Name()); 204 | sql += " "; 205 | sql += info.new_column.Type().ToString(); 206 | sqlite_transaction.GetDB().Execute(sql); 207 | } 208 | 209 | void SQLiteSchemaEntry::AlterTable(SQLiteTransaction &sqlite_transaction, RemoveColumnInfo &info) { 210 | if (info.if_column_exists) { 211 | if (!sqlite_transaction.GetDB().ColumnExists(info.name, info.removed_column)) { 212 | return; 213 | } 214 | } 215 | string sql = "ALTER TABLE "; 216 | sql += KeywordHelper::WriteOptionallyQuoted(info.name); 217 | sql += " DROP COLUMN "; 218 | sql += KeywordHelper::WriteOptionallyQuoted(info.removed_column); 219 | sqlite_transaction.GetDB().Execute(sql); 220 | } 221 | 222 | void SQLiteSchemaEntry::Alter(CatalogTransaction catalog_transaction, AlterInfo &info) { 223 | if (info.type != AlterType::ALTER_TABLE) { 224 | throw BinderException("Only altering tables is supported for now"); 225 | } 226 | auto &alter = info.Cast(); 227 | auto &transaction = 
SQLiteTransaction::Get(catalog_transaction.GetContext(), catalog); 228 | switch (alter.alter_table_type) { 229 | case AlterTableType::RENAME_TABLE: 230 | AlterTable(transaction, alter.Cast()); 231 | break; 232 | case AlterTableType::RENAME_COLUMN: 233 | AlterTable(transaction, alter.Cast()); 234 | break; 235 | case AlterTableType::ADD_COLUMN: 236 | AlterTable(transaction, alter.Cast()); 237 | break; 238 | case AlterTableType::REMOVE_COLUMN: 239 | AlterTable(transaction, alter.Cast()); 240 | break; 241 | default: 242 | throw BinderException("Unsupported ALTER TABLE type - SQLite tables only " 243 | "support RENAME TABLE, RENAME COLUMN, " 244 | "ADD COLUMN and DROP COLUMN"); 245 | } 246 | transaction.ClearTableEntry(info.name); 247 | } 248 | 249 | void SQLiteSchemaEntry::Scan(ClientContext &context, CatalogType type, 250 | const std::function &callback) { 251 | auto &transaction = SQLiteTransaction::Get(context, catalog); 252 | vector entries; 253 | switch (type) { 254 | case CatalogType::TABLE_ENTRY: 255 | entries = transaction.GetDB().GetTables(); 256 | break; 257 | case CatalogType::VIEW_ENTRY: 258 | entries = transaction.GetDB().GetEntries("view"); 259 | break; 260 | case CatalogType::INDEX_ENTRY: 261 | entries = transaction.GetDB().GetEntries("index"); 262 | break; 263 | default: 264 | // no entries of this catalog type 265 | return; 266 | } 267 | for (auto &entry_name : entries) { 268 | callback(*GetEntry(GetCatalogTransaction(context), type, entry_name)); 269 | } 270 | } 271 | void SQLiteSchemaEntry::Scan(CatalogType type, const std::function &callback) { 272 | throw InternalException("Scan"); 273 | } 274 | 275 | void SQLiteSchemaEntry::DropEntry(ClientContext &context, DropInfo &info) { 276 | switch (info.type) { 277 | case CatalogType::TABLE_ENTRY: 278 | case CatalogType::VIEW_ENTRY: 279 | case CatalogType::INDEX_ENTRY: 280 | break; 281 | default: 282 | throw BinderException("SQLite databases do not support dropping entries of type \"%s\"", 283 | 
CatalogTypeToString(type)); 284 | } 285 | auto table = GetEntry(GetCatalogTransaction(context), info.type, info.name); 286 | if (!table) { 287 | throw InternalException("Failed to drop entry \"%s\" - could not find entry", info.name); 288 | } 289 | auto &transaction = SQLiteTransaction::Get(context, catalog); 290 | transaction.DropEntry(info.type, info.name, info.cascade); 291 | } 292 | 293 | optional_ptr SQLiteSchemaEntry::GetEntry(CatalogTransaction transaction, CatalogType type, 294 | const string &name) { 295 | auto &sqlite_transaction = GetSQLiteTransaction(transaction); 296 | switch (type) { 297 | case CatalogType::INDEX_ENTRY: 298 | case CatalogType::TABLE_ENTRY: 299 | case CatalogType::VIEW_ENTRY: 300 | return sqlite_transaction.GetCatalogEntry(name); 301 | default: 302 | return nullptr; 303 | } 304 | } 305 | 306 | } // namespace duckdb -------------------------------------------------------------------------------- /.github/workflows/_extension_distribution.yml: -------------------------------------------------------------------------------- 1 | # Reusable workflow for building DuckDB extensions using a standardized environment 2 | # 3 | # The workflow: 4 | # - builds the extension using the CI workflow from the corresponding DuckDB version 5 | # - uploads the extensions as gh actions artifacts in the following format: 6 | # --extension- 7 | # 8 | # note: extensions are simply uploaded to GitHub actions, deploying the extensions is done a separate step. 
# More info on
# this can be found in https://github.com/duckdb/extension-template

name: Extension distribution
on:
  workflow_call:
    inputs:
      # The name with which the extension will be built
      extension_name:
        required: true
        type: string
      # DuckDB version to build against, should in most cases be identical to
      duckdb_version:
        required: true
        type: string
      # ';' separated list of architectures to exclude, for example: 'linux_amd64;osx_arm64'
      exclude_archs:
        required: false
        type: string
        default: ""
      # Postfix added to artifact names. Can be used to guarantee unique names when this workflow is called multiple times
      artifact_postfix:
        required: false
        type: string
        default: ""
      # Override the default vcpkg commit used by this version of DuckDB
      vcpkg_commit:
        required: false
        type: string
        default: "a1a1cbc975abf909a6c8985a6a2b8fe20bbd9bd6"
      # Override the default script producing the matrices. Allows specifying custom matrices.
      matrix_parse_script:
        required: false
        type: string
        default: "./duckdb/scripts/modify_distribution_matrix.py"
      # Enable building the DuckDB Shell
      build_duckdb_shell:
        required: false
        type: boolean
        default: true

jobs:
  # Derives one build matrix per target OS from DuckDB's distribution matrix,
  # honoring the exclude_archs input.
  generate_matrix:
    name: Generate matrix
    runs-on: ubuntu-latest
    outputs:
      linux_matrix: ${{ steps.set-matrix-linux.outputs.linux_matrix }}
      windows_matrix: ${{ steps.set-matrix-windows.outputs.windows_matrix }}
      osx_matrix: ${{ steps.set-matrix-osx.outputs.osx_matrix }}
      wasm_matrix: ${{ steps.set-matrix-wasm.outputs.wasm_matrix }}
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
          submodules: 'true'

      - name: Checkout DuckDB to version
        run: |
          cd duckdb
          git checkout ${{ inputs.duckdb_version }}

      - id: parse-matrices
        run: |
          python3 ${{ inputs.matrix_parse_script }} --input ./duckdb/.github/config/distribution_matrix.json --select_os linux --output linux_matrix.json --exclude "${{ inputs.exclude_archs }}" --pretty
          python3 ${{ inputs.matrix_parse_script }} --input ./duckdb/.github/config/distribution_matrix.json --select_os osx --output osx_matrix.json --exclude "${{ inputs.exclude_archs }}" --pretty
          python3 ${{ inputs.matrix_parse_script }} --input ./duckdb/.github/config/distribution_matrix.json --select_os windows --output windows_matrix.json --exclude "${{ inputs.exclude_archs }}" --pretty
          python3 ${{ inputs.matrix_parse_script }} --input ./duckdb/.github/config/distribution_matrix.json --select_os wasm --output wasm_matrix.json --exclude "${{ inputs.exclude_archs }}" --pretty

      - id: set-matrix-linux
        run: |
          linux_matrix="`cat linux_matrix.json`"
          echo linux_matrix=$linux_matrix >> $GITHUB_OUTPUT
          echo `cat $GITHUB_OUTPUT`

      - id: set-matrix-osx
        run: |
          osx_matrix="`cat osx_matrix.json`"
          echo osx_matrix=$osx_matrix >> $GITHUB_OUTPUT
          echo `cat $GITHUB_OUTPUT`

      - id: set-matrix-windows
        run: |
          windows_matrix="`cat windows_matrix.json`"
          echo windows_matrix=$windows_matrix >> $GITHUB_OUTPUT
          echo `cat $GITHUB_OUTPUT`

      - id: set-matrix-wasm
        run: |
          wasm_matrix="`cat wasm_matrix.json`"
          echo wasm_matrix=$wasm_matrix >> $GITHUB_OUTPUT
          echo `cat $GITHUB_OUTPUT`

  # Builds, tests and uploads the Linux variants of the extension.
  linux:
    name: Linux
    runs-on: ubuntu-latest
    container: ${{ matrix.container }}
    needs: generate_matrix
    if: ${{ needs.generate_matrix.outputs.linux_matrix != '{}' && needs.generate_matrix.outputs.linux_matrix != '' }}
    strategy:
      matrix: ${{fromJson(needs.generate_matrix.outputs.linux_matrix)}}
    env:
      VCPKG_TARGET_TRIPLET: ${{ matrix.vcpkg_triplet }}
      VCPKG_TOOLCHAIN_PATH: ${{ github.workspace }}/vcpkg/scripts/buildsystems/vcpkg.cmake
      GEN: Ninja
      BUILD_SHELL: ${{ inputs.build_duckdb_shell && '1' || '0' }}
      DUCKDB_PLATFORM: ${{ matrix.duckdb_arch }}

    steps:
      - name: Install required ubuntu packages
        if: ${{ matrix.duckdb_arch == 'linux_amd64' || matrix.duckdb_arch == 'linux_arm64' }}
        run: |
          apt-get update -y -qq
          apt-get install -y -qq software-properties-common
          add-apt-repository ppa:git-core/ppa
          apt-get update -y -qq
          apt-get install -y -qq ninja-build make gcc-multilib g++-multilib libssl-dev wget openjdk-8-jdk zip maven unixodbc-dev libc6-dev-i386 lib32readline6-dev libssl-dev libcurl4-gnutls-dev libexpat1-dev gettext unzip build-essential checkinstall libffi-dev curl libz-dev openssh-client

      - name: Install Git 2.18.5
        if: ${{ matrix.duckdb_arch == 'linux_amd64' || matrix.duckdb_arch == 'linux_arm64' }}
        run: |
          wget https://github.com/git/git/archive/refs/tags/v2.18.5.tar.gz
          tar xvf v2.18.5.tar.gz
          cd git-2.18.5
          make
          make prefix=/usr install
          git --version

      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
          submodules: 'true'

      - name: Checkout DuckDB to version
        run: |
          cd duckdb
          git checkout ${{ inputs.duckdb_version }}

      - name: Setup ManyLinux2014
        if: ${{ matrix.duckdb_arch == 'linux_amd64_gcc4' }}
        run: |
          ./duckdb/scripts/setup_manylinux2014.sh general aws-cli ccache ssh python_alias openssl

      - name: Setup Ccache
        uses: hendrikmuhs/ccache-action@v1.2.11 # Note: pinned due to GLIBC incompatibility in later releases
        continue-on-error: true
        with:
          key: ${{ github.job }}-${{ matrix.duckdb_arch }}

      - name: Setup Ubuntu
        if: ${{ matrix.duckdb_arch == 'linux_amd64' || matrix.duckdb_arch == 'linux_arm64' }}
        uses: ./duckdb/.github/actions/ubuntu_18_setup
        with:
          aarch64_cross_compile: ${{ matrix.duckdb_arch == 'linux_arm64' && 1 }}

      - name: Setup vcpkg
        uses: lukka/run-vcpkg@v11.1
        with:
          vcpkgGitCommitId: ${{ inputs.vcpkg_commit }}

      - name: Build extension
        env:
          GEN: ninja
          CC: ${{ matrix.duckdb_arch == 'linux_arm64' && 'aarch64-linux-gnu-gcc' || '' }}
          CXX: ${{ matrix.duckdb_arch == 'linux_arm64' && 'aarch64-linux-gnu-g++' || '' }}
          DUCKDB_PLATFORM: ${{ matrix.duckdb_arch }}
        run: |
          make release

      # Tests are skipped on arm64 because the build is cross-compiled
      - name: Test extension
        if: ${{ matrix.duckdb_arch != 'linux_arm64'}}
        run: |
          make test

      - uses: actions/upload-artifact@v3
        with:
          if-no-files-found: error
          name: ${{ inputs.extension_name }}-${{ inputs.duckdb_version }}-extension-${{matrix.duckdb_arch}}${{inputs.artifact_postfix}}
          path: |
            build/release/extension/${{ inputs.extension_name }}/${{ inputs.extension_name }}.duckdb_extension

  # Builds, tests and uploads the macOS variants of the extension.
  macos:
    name: MacOS
    runs-on: macos-latest
    needs: generate_matrix
    if: ${{ needs.generate_matrix.outputs.osx_matrix != '{}' && needs.generate_matrix.outputs.osx_matrix != '' }}
    strategy:
      matrix: ${{fromJson(needs.generate_matrix.outputs.osx_matrix)}}
    env:
      VCPKG_TOOLCHAIN_PATH: ${{ github.workspace }}/vcpkg/scripts/buildsystems/vcpkg.cmake
      VCPKG_TARGET_TRIPLET: ${{ matrix.vcpkg_triplet }}
      OSX_BUILD_ARCH: ${{ matrix.osx_build_arch }}
      GEN: Ninja
      BUILD_SHELL: ${{ inputs.build_duckdb_shell && '1' || '0' }}
      DUCKDB_PLATFORM: ${{ matrix.duckdb_arch }}

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
          submodules: 'true'

      - name: Install Ninja
        run: |
          brew install ninja

      - name: Setup Ccache
        uses: hendrikmuhs/ccache-action@main
        continue-on-error: true
        with:
          key: ${{ github.job }}-${{ matrix.duckdb_arch }}

      - uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Checkout DuckDB to version
        run: |
          cd duckdb
          git checkout ${{ inputs.duckdb_version }}

      - name: Setup vcpkg
        uses: lukka/run-vcpkg@v11.1
        with:
          vcpkgGitCommitId: ${{ inputs.vcpkg_commit }}

      - name: Build extension
        shell: bash
        env:
          DUCKDB_PLATFORM: ${{ matrix.duckdb_arch }}
        run: |
          make release

      # Tests only run on the x86_64 build; the arm64 build is cross-compiled
      - name: Test Extension
        if: ${{ matrix.osx_build_arch == 'x86_64'}}
        shell: bash
        run: |
          make test

      - uses: actions/upload-artifact@v3
        with:
          if-no-files-found: error
          name: ${{ inputs.extension_name }}-${{ inputs.duckdb_version }}-extension-${{matrix.duckdb_arch}}${{inputs.artifact_postfix}}
          path: |
            build/release/extension/${{ inputs.extension_name }}/${{ inputs.extension_name }}.duckdb_extension

  # Builds, tests and uploads the Windows variants of the extension.
  windows:
    name: Windows
    runs-on: windows-latest
    needs: generate_matrix
    if: ${{ needs.generate_matrix.outputs.windows_matrix != '{}' && needs.generate_matrix.outputs.windows_matrix != '' }}
    strategy:
      matrix: ${{fromJson(needs.generate_matrix.outputs.windows_matrix)}}
    env:
      GEN: Ninja
      VCPKG_TOOLCHAIN_PATH: ${{ github.workspace }}/vcpkg/scripts/buildsystems/vcpkg.cmake
      VCPKG_TARGET_TRIPLET: ${{ matrix.vcpkg_triplet }}
      BUILD_SHELL: ${{ inputs.build_duckdb_shell && '1' || '0' }}
      DUCKDB_PLATFORM: ${{ matrix.duckdb_arch }}
      CC: ${{ matrix.duckdb_arch == 'windows_amd64_rtools' && 'gcc' || '' }}
      CXX: ${{ matrix.duckdb_arch == 'windows_amd64_rtools' && 'g++' || '' }}

    steps:
      - name: Keep \n line endings
        shell: bash
        run: |
          git config --global core.autocrlf false
          git config --global core.eol lf

      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
          submodules: 'true'

      - uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - uses: r-lib/actions/setup-r@v2
        if: matrix.duckdb_arch == 'windows_amd64_rtools'
        with:
          r-version: 'devel'
          update-rtools: true
          rtools-version: '42' # linker bug in 43

      - name: Checkout DuckDB to version
        run: |
          cd duckdb
          git checkout ${{ inputs.duckdb_version }}

      - name: Setup Ccache
        uses: hendrikmuhs/ccache-action@main
        continue-on-error: true
        with:
          key: ${{ github.job }}-${{ matrix.duckdb_arch }}

      - name: Setup vcpkg
        uses: lukka/run-vcpkg@v11.1
        with:
          vcpkgGitCommitId: ${{ inputs.vcpkg_commit }}

      # Pins the MSVC toolset version in a vcpkg overlay triplet
      - name: Fix for MSVC issue
        shell: bash
        env:
          OVERLAY_TRIPLET_SRC: ${{ github.workspace }}/vcpkg/triplets/community/x64-windows-static-md.cmake
          OVERLAY_TRIPLET_DST: ${{ github.workspace }}/overlay_triplets/x64-windows-static-md.cmake
        run: |
          mkdir overlay_triplets
          cp $OVERLAY_TRIPLET_SRC $OVERLAY_TRIPLET_DST
          echo "set(VCPKG_PLATFORM_TOOLSET_VERSION "14.39")" >> $OVERLAY_TRIPLET_DST

      - name: Build & test extension
        env:
          VCPKG_OVERLAY_TRIPLETS: "${{ github.workspace }}/overlay_triplets"
          DUCKDB_PLATFORM: ${{ matrix.duckdb_arch }}
          DUCKDB_PLATFORM_RTOOLS: ${{ matrix.duckdb_arch == 'windows_amd64_rtools' && 1 || 0 }}
        run: |
          make test_release

      - uses: actions/upload-artifact@v3
        with:
          if-no-files-found: error
          name: ${{ inputs.extension_name }}-${{ inputs.duckdb_version }}-extension-${{matrix.duckdb_arch}}${{inputs.artifact_postfix}}
          path: |
            build/release/extension/${{ inputs.extension_name }}/${{ inputs.extension_name }}.duckdb_extension

  # Builds and uploads the DuckDB-Wasm variants of the extension (no test step).
  wasm:
    name: DuckDB-Wasm
    runs-on: ubuntu-latest
    needs: generate_matrix
    if: ${{ needs.generate_matrix.outputs.wasm_matrix != '{}' && needs.generate_matrix.outputs.wasm_matrix != '' }}
    strategy:
      matrix: ${{fromJson(needs.generate_matrix.outputs.wasm_matrix)}}
    env:
      VCPKG_TARGET_TRIPLET: ${{ matrix.vcpkg_triplet }}
      VCPKG_TOOLCHAIN_PATH: ${{ github.workspace }}/vcpkg/scripts/buildsystems/vcpkg.cmake
      GEN: Ninja
      DUCKDB_PLATFORM: ${{ matrix.duckdb_arch }}

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
          submodules: 'true'

      - name: Checkout DuckDB to version
        run: |
          cd duckdb
          git checkout ${{ inputs.duckdb_version }}

      - uses: mymindstorm/setup-emsdk@v13
        with:
          version: 'latest'

      - name: Setup vcpkg
        uses: lukka/run-vcpkg@v11.1
        with:
          vcpkgGitCommitId: ${{ inputs.vcpkg_commit }}

      - name: Setup Ccache
        uses: hendrikmuhs/ccache-action@main
        continue-on-error: true
        with:
          key: ${{ github.job }}-${{ matrix.duckdb_arch }}

      - name: Build Wasm module
        run: |
          make ${{ matrix.duckdb_arch }}

      - uses: actions/upload-artifact@v3
        with:
          if-no-files-found: error
          name: ${{ inputs.extension_name }}-${{ inputs.duckdb_version }}-extension-${{matrix.duckdb_arch}}${{inputs.artifact_postfix}}
          path: |
            build/${{ matrix.duckdb_arch }}/extension/${{ inputs.extension_name }}/${{ inputs.extension_name }}.duckdb_extension.wasm
--------------------------------------------------------------------------------