├── .github └── workflows │ ├── ExtensionTemplate.yml │ ├── Linux.yml │ ├── MacOS.yml │ ├── NodeJS.yml │ ├── Python.yml │ └── Windows.yml ├── .gitignore ├── .gitmodules ├── CMakeLists.txt ├── LICENSE ├── Makefile ├── README.md ├── data └── .gitkeep ├── docker-compose.yml ├── docker └── docker-compose.yml ├── docs ├── NEXT_README.md ├── ODBC_CONNECTION_STRING_AND_DSN_FORMATS.md └── README.md ├── flake.lock ├── flake.nix ├── scripts ├── extension-upload.sh └── set_extension_name.py ├── src ├── include │ ├── exception.hpp │ ├── odbc.hpp │ ├── odbc_scan.hpp │ └── odbc_scanner_extension.hpp ├── odbc_scan.cpp └── odbc_scanner_extension.cpp ├── templates ├── .clang-format.template ├── .clangd.template ├── .odbc.ini.template └── .odbcinst.ini.template └── test ├── README.md ├── nodejs ├── analyze_record_test.js ├── detect_record_test.js ├── read_record_test.js └── write_record_test.js ├── python ├── analyze_record.py ├── detect_record_test.py ├── read_record_test.py └── write_record_test.py └── sql ├── odbc_scan_big_query.test ├── odbc_scan_db2.test ├── odbc_scan_mariadb.test ├── odbc_scan_mongo_db.test ├── odbc_scan_mssql.test ├── odbc_scan_mysql.test ├── odbc_scan_oracle.test ├── odbc_scan_postgres.test └── odbc_scan_snowflake.test /.github/workflows/ExtensionTemplate.yml: -------------------------------------------------------------------------------- 1 | # 2 | # NOTE: this workflow is for testing the extension template itself, feel free to delete this file in your own repo. 3 | # 4 | 5 | name: Extension Template 6 | on: [push, pull_request,repository_dispatch] 7 | concurrency: 8 | group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref || '' }}-${{ github.base_ref || '' }}-${{ github.ref != 'refs/heads/main' || github.sha }} 9 | cancel-in-progress: true 10 | 11 | jobs: 12 | linux: 13 | name: Linux Extensions 14 | if: ${{ vars.RUN_RENAME_TEST == 'true' }} 15 | runs-on: ubuntu-latest 16 | container: ubuntu:16.04 17 | strategy: 18 | matrix: 19 | # Add commits/tags to build against other DuckDB versions 20 | duckdb_version: [ '' ] 21 | env: 22 | GEN: ninja 23 | defaults: 24 | run: 25 | shell: bash 26 | 27 | steps: 28 | - name: Install required ubuntu packages 29 | run: | 30 | apt-get update -y -qq 31 | apt-get install -y -qq software-properties-common 32 | add-apt-repository ppa:git-core/ppa 33 | apt-get update -y -qq 34 | apt-get install -y -qq ninja-build make gcc-multilib g++-multilib libssl-dev wget openjdk-8-jdk zip maven unixodbc-dev libc6-dev-i386 lib32readline6-dev libssl-dev libcurl4-gnutls-dev libexpat1-dev gettext unzip build-essential checkinstall libffi-dev curl libz-dev openssh-client 35 | 36 | - name: Install Git 2.18.5 37 | run: | 38 | wget https://github.com/git/git/archive/refs/tags/v2.18.5.tar.gz 39 | tar xvf v2.18.5.tar.gz 40 | cd git-2.18.5 41 | make 42 | make prefix=/usr install 43 | git --version 44 | 45 | - uses: actions/checkout@v3 46 | with: 47 | fetch-depth: 0 48 | submodules: 'true' 49 | 50 | - name: Checkout DuckDB to version 51 | if: ${{ matrix.duckdb_version != ''}} 52 | run: | 53 | cd duckdb 54 | git checkout ${{ matrix.duckdb_version }} 55 | 56 | - uses: ./duckdb/.github/actions/ubuntu_16_setup 57 | 58 | - name: Rename extension 59 | run: | 60 | python3 scripts/set_extension_name.py testext 61 | 62 | - name: Build 63 | run: | 64 | make 65 | 66 | - name: Test 67 | run: | 68 | make test 69 | 70 | macos: 71 | name: MacOS 72 | if: ${{ vars.RUN_RENAME_TEST == 'true' }} 73 | runs-on: macos-latest 74 | strategy: 75 | matrix: 76 | # Add commits/tags to build 
against other DuckDB versions 77 | duckdb_version: [ ''] 78 | env: 79 | OSX_BUILD_UNIVERSAL: 1 80 | GEN: ninja 81 | defaults: 82 | run: 83 | shell: bash 84 | 85 | steps: 86 | - uses: actions/checkout@v3 87 | with: 88 | fetch-depth: 0 89 | submodules: 'true' 90 | 91 | - name: Install Ninja 92 | run: brew install ninja 93 | 94 | - uses: actions/setup-python@v2 95 | with: 96 | python-version: '3.7' 97 | 98 | - name: Checkout DuckDB to version 99 | if: ${{ matrix.duckdb_version != ''}} 100 | run: | 101 | cd duckdb 102 | git checkout ${{ matrix.duckdb_version }} 103 | 104 | - name: Rename extension 105 | run: | 106 | python scripts/set_extension_name.py testext 107 | 108 | - name: Build 109 | run: | 110 | make 111 | 112 | - name: Test 113 | run: | 114 | make test 115 | 116 | windows: 117 | name: Windows Extensions (x64) 118 | if: ${{ vars.RUN_RENAME_TEST == 'true' }} 119 | runs-on: windows-latest 120 | strategy: 121 | matrix: 122 | # Add commits/tags to build against other DuckDB versions 123 | duckdb_version: [ '' ] 124 | defaults: 125 | run: 126 | shell: bash 127 | 128 | steps: 129 | - uses: actions/checkout@v3 130 | with: 131 | fetch-depth: 0 132 | submodules: 'true' 133 | 134 | - uses: actions/setup-python@v2 135 | with: 136 | python-version: '3.7' 137 | 138 | - name: Checkout DuckDB to version 139 | # Add commits/tags to build against other DuckDB versions 140 | if: ${{ matrix.duckdb_version != ''}} 141 | run: | 142 | cd duckdb 143 | git checkout ${{ matrix.duckdb_version }} 144 | 145 | - name: Rename extension 146 | run: | 147 | python scripts/set_extension_name.py testext 148 | 149 | - name: Build 150 | run: | 151 | make 152 | 153 | - name: Test extension 154 | run: | 155 | build/release/test/Release/unittest.exe -------------------------------------------------------------------------------- /.github/workflows/Linux.yml: -------------------------------------------------------------------------------- 1 | name: Linux 2 | on: [push, pull_request,repository_dispatch] 3 | concurrency: 4 | group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref || '' }}-${{ github.base_ref || '' }}-${{ github.ref != 'refs/heads/main' || github.sha }} 5 | cancel-in-progress: true 6 | defaults: 7 | run: 8 | shell: bash 9 | 10 | jobs: 11 | linux: 12 | name: Linux Release 13 | runs-on: ubuntu-latest 14 | container: ${{ matrix.container }} 15 | strategy: 16 | matrix: 17 | # Add commits/tags to build against other DuckDB versions 18 | duckdb_version: [ '' ] 19 | arch: ['linux_amd64', 'linux_arm64', 'linux_amd64_gcc4'] 20 | include: 21 | - arch: 'linux_amd64' 22 | container: 'ubuntu:16.04' 23 | - arch: 'linux_arm64' 24 | container: 'ubuntu:18.04' 25 | - arch: 'linux_amd64_gcc4' 26 | container: 'quay.io/pypa/manylinux2014_x86_64' 27 | env: 28 | GEN: ninja 29 | 30 | steps: 31 | - name: Install required ubuntu packages 32 | if: ${{ matrix.arch == 'linux_amd64' || matrix.arch == 'linux_arm64' }} 33 | run: | 34 | apt-get update -y -qq 35 | apt-get install -y -qq software-properties-common 36 | add-apt-repository ppa:git-core/ppa 37 | apt-get update -y -qq 38 | apt-get install -y -qq ninja-build make gcc-multilib g++-multilib libssl-dev wget openjdk-8-jdk zip maven unixodbc-dev libc6-dev-i386 lib32readline6-dev libssl-dev libcurl4-gnutls-dev libexpat1-dev gettext unzip build-essential checkinstall libffi-dev curl libz-dev openssh-client 39 | 40 | - name: Install Git 2.18.5 41 | if: ${{ matrix.arch == 'linux_amd64' || matrix.arch == 'linux_arm64' }} 42 | run: | 43 | wget 
https://github.com/git/git/archive/refs/tags/v2.18.5.tar.gz 44 | tar xvf v2.18.5.tar.gz 45 | cd git-2.18.5 46 | make 47 | make prefix=/usr install 48 | git --version 49 | 50 | - uses: actions/checkout@v3 51 | with: 52 | fetch-depth: 0 53 | submodules: 'true' 54 | 55 | - name: Checkout DuckDB to version 56 | if: ${{ matrix.duckdb_version != ''}} 57 | run: | 58 | cd duckdb 59 | git checkout ${{ matrix.duckdb_version }} 60 | 61 | - if: ${{ matrix.arch == 'linux_amd64_gcc4' }} 62 | uses: ./duckdb/.github/actions/centos_7_setup 63 | with: 64 | openssl: 0 65 | 66 | - if: ${{ matrix.arch == 'linux_amd64' || matrix.arch == 'linux_arm64' }} 67 | uses: ./duckdb/.github/actions/ubuntu_16_setup 68 | with: 69 | aarch64_cross_compile: ${{ matrix.arch == 'linux_arm64' && 1 }} 70 | 71 | # Build extension 72 | - name: Build extension 73 | env: 74 | GEN: ninja 75 | STATIC_LIBCPP: 1 76 | CC: ${{ matrix.arch == 'linux_arm64' && 'aarch64-linux-gnu-gcc' || '' }} 77 | CXX: ${{ matrix.arch == 'linux_arm64' && 'aarch64-linux-gnu-g++' || '' }} 78 | run: | 79 | make release 80 | 81 | - name: Test extension 82 | if: ${{ matrix.arch != 'linux_arm64'}} 83 | run: | 84 | make test 85 | 86 | - uses: actions/upload-artifact@v2 87 | with: 88 | name: ${{matrix.arch}}-extensions 89 | path: | 90 | build/release/extension/odbc_scan/odbc_scan.duckdb_extension 91 | 92 | - name: Deploy 93 | env: 94 | AWS_ACCESS_KEY_ID: ${{ secrets.S3_DEPLOY_ID }} 95 | AWS_SECRET_ACCESS_KEY: ${{ secrets.S3_DEPLOY_KEY }} 96 | AWS_DEFAULT_REGION: ${{ secrets.S3_REGION }} 97 | BUCKET_NAME: ${{ secrets.S3_BUCKET }} 98 | run: | 99 | git config --global --add safe.directory '*' 100 | cd duckdb 101 | git fetch --tags 102 | export DUCKDB_VERSION=`git tag --points-at HEAD` 103 | export DUCKDB_VERSION=${DUCKDB_VERSION:=`git log -1 --format=%h`} 104 | cd .. 
105 | if [[ "$AWS_ACCESS_KEY_ID" == "" ]] ; then 106 | echo 'No key set, skipping' 107 | elif [[ "$GITHUB_REF" =~ ^(refs/tags/v.+)$ ]] ; then 108 | python3 -m pip install pip awscli 109 | ./scripts/extension-upload.sh odbc_scan ${{ github.ref_name }} $DUCKDB_VERSION ${{matrix.arch}} $BUCKET_NAME true 110 | elif [[ "$GITHUB_REF" =~ ^(refs/heads/main)$ ]] ; then 111 | python3 -m pip install pip awscli 112 | ./scripts/extension-upload.sh odbc_scan `git log -1 --format=%h` $DUCKDB_VERSION ${{matrix.arch}} $BUCKET_NAME false 113 | fi -------------------------------------------------------------------------------- /.github/workflows/MacOS.yml: -------------------------------------------------------------------------------- 1 | name: MacOS 2 | on: [push, pull_request,repository_dispatch] 3 | concurrency: 4 | group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref || '' }}-${{ github.base_ref || '' }}-${{ github.ref != 'refs/heads/main' || github.sha }} 5 | cancel-in-progress: true 6 | defaults: 7 | run: 8 | shell: bash 9 | 10 | jobs: 11 | macos: 12 | name: MacOS Release (Universal) 13 | runs-on: macos-latest 14 | strategy: 15 | matrix: 16 | # Add commits/tags to build against other DuckDB versions 17 | duckdb_version: [ '' ] 18 | 19 | env: 20 | OSX_BUILD_UNIVERSAL: 1 21 | GEN: ninja 22 | 23 | steps: 24 | - uses: actions/checkout@v3 25 | with: 26 | fetch-depth: 0 27 | submodules: 'true' 28 | 29 | - name: Install Ninja 30 | run: brew install ninja 31 | 32 | - uses: actions/setup-python@v2 33 | with: 34 | python-version: '3.7' 35 | 36 | - name: Checkout DuckDB to version 37 | if: ${{ matrix.duckdb_version != ''}} 38 | run: | 39 | cd duckdb 40 | git checkout ${{ matrix.duckdb_version }} 41 | 42 | # Build extension 43 | - name: Build extension 44 | shell: bash 45 | run: | 46 | make release 47 | make test 48 | 49 | - name: Deploy 50 | env: 51 | AWS_ACCESS_KEY_ID: ${{ secrets.S3_DEPLOY_ID }} 52 | AWS_SECRET_ACCESS_KEY: ${{ secrets.S3_DEPLOY_KEY }} 53 | AWS_DEFAULT_REGION: ${{ secrets.S3_REGION }} 54 | BUCKET_NAME: ${{ secrets.S3_BUCKET }} 55 | run: | 56 | cd duckdb 57 | git fetch --tags 58 | export DUCKDB_VERSION=`git tag --points-at HEAD` 59 | echo $DUCKDB_VERSION 60 | export DUCKDB_VERSION=${DUCKDB_VERSION:=`git log -1 --format=%h`} 61 | echo $DUCKDB_VERSION 62 | cd .. 
63 | if [[ "$AWS_ACCESS_KEY_ID" == "" ]] ; then 64 | echo 'No key set, skipping' 65 | elif [[ "$GITHUB_REF" =~ ^(refs/tags/v.+)$ ]] ; then 66 | python -m pip install awscli 67 | ./scripts/extension-upload.sh odbc_scan ${{ github.ref_name }} $DUCKDB_VERSION osx_amd64 $BUCKET_NAME true 68 | ./scripts/extension-upload.sh odbc_scan ${{ github.ref_name }} $DUCKDB_VERSION osx_arm64 $BUCKET_NAME true 69 | elif [[ "$GITHUB_REF" =~ ^(refs/heads/main)$ ]] ; then 70 | python -m pip install awscli 71 | ./scripts/extension-upload.sh odbc_scan `git log -1 --format=%h` $DUCKDB_VERSION osx_amd64 $BUCKET_NAME false 72 | ./scripts/extension-upload.sh odbc_scan `git log -1 --format=%h` $DUCKDB_VERSION osx_arm64 $BUCKET_NAME false 73 | fi -------------------------------------------------------------------------------- /.github/workflows/NodeJS.yml: -------------------------------------------------------------------------------- 1 | # 2 | # NOTE: if NodeJS tests are unused, deleting this file or disabling the workflow on GitHub will speed up CI 3 | # 4 | 5 | name: NodeJS 6 | on: [push, pull_request,repository_dispatch] 7 | concurrency: 8 | group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref || '' }}-${{ github.base_ref || '' }}-${{ github.ref != 'refs/heads/main' || github.sha }} 9 | cancel-in-progress: true 10 | defaults: 11 | run: 12 | shell: bash 13 | 14 | jobs: 15 | nodejs: 16 | name: NodeJS 17 | runs-on: ubuntu-latest 18 | env: 19 | GEN: ninja 20 | 21 | steps: 22 | - name: Install Ninja 23 | run: | 24 | sudo apt-get update -y -qq 25 | sudo apt-get install -y -qq ninja-build 26 | 27 | - uses: actions/checkout@v2 28 | with: 29 | fetch-depth: 0 30 | submodules: 'true' 31 | 32 | - uses: actions/setup-python@v2 33 | with: 34 | python-version: '3.9' 35 | 36 | - name: Build DuckDB NodeJS client 37 | run: make debug_js 38 | 39 | - name: Run NodeJS client tests 40 | run: make test_debug_js -------------------------------------------------------------------------------- /.github/workflows/Python.yml: -------------------------------------------------------------------------------- 1 | # 2 | # NOTE: if python tests are unused, deleting this file or disabling the workflow on GitHub will speed up CI 3 | # 4 | 5 | name: Python 6 | on: [push, pull_request,repository_dispatch] 7 | concurrency: 8 | group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref || '' }}-${{ github.base_ref || '' }}-${{ github.ref != 'refs/heads/main' || github.sha }} 9 | cancel-in-progress: true 10 | defaults: 11 | run: 12 | shell: bash 13 | 14 | jobs: 15 | python: 16 | name: Python 17 | runs-on: ubuntu-latest 18 | env: 19 | GEN: ninja 20 | 21 | steps: 22 | - name: Install Ninja 23 | run: | 24 | sudo apt-get update -y -qq 25 | sudo apt-get install -y -qq ninja-build 26 | 27 | - uses: actions/checkout@v2 28 | with: 29 | fetch-depth: 0 30 | submodules: 'true' 31 | 32 | - uses: actions/setup-python@v2 33 | with: 34 | python-version: '3.9' 35 | 36 | - name: Build DuckDB Python client 37 | run: make debug_python 38 | 39 | - name: Install Python test dependencies 40 | run: python -m pip install --upgrade pytest 41 | 42 | - name: Run Python client tests 43 | run: | 44 | make test_debug_python -------------------------------------------------------------------------------- /.github/workflows/Windows.yml: -------------------------------------------------------------------------------- 1 | name: Windows 2 | on: [push, pull_request,repository_dispatch] 3 | concurrency: 4 | group: ${{ github.workflow }}-${{ github.ref }}-${{ 
github.head_ref || '' }}-${{ github.base_ref || '' }}-${{ github.ref != 'refs/heads/main' || github.sha }} 5 | cancel-in-progress: true 6 | defaults: 7 | run: 8 | shell: bash 9 | 10 | jobs: 11 | windows: 12 | name: Release 13 | runs-on: windows-latest 14 | strategy: 15 | matrix: 16 | # Add commits/tags to build against other DuckDB versions 17 | duckdb_version: [ '' ] 18 | 19 | steps: 20 | - uses: actions/checkout@v3 21 | with: 22 | fetch-depth: 0 23 | submodules: 'true' 24 | 25 | - uses: actions/setup-python@v2 26 | with: 27 | python-version: '3.7' 28 | 29 | - name: Checkout DuckDB to version 30 | # Add commits/tags to build against other DuckDB versions 31 | if: ${{ matrix.duckdb_version != ''}} 32 | run: | 33 | cd duckdb 34 | git checkout ${{ matrix.duckdb_version }} 35 | 36 | - name: Build extension 37 | run: | 38 | make release 39 | build/release/test/Release/unittest.exe 40 | 41 | - uses: actions/upload-artifact@v2 42 | with: 43 | name: windows-amd64-extensions 44 | path: | 45 | build/release/extension/odbc_scan/odbc_scan.duckdb_extension 46 | 47 | - name: Deploy 48 | env: 49 | AWS_ACCESS_KEY_ID: ${{ secrets.S3_DEPLOY_ID }} 50 | AWS_SECRET_ACCESS_KEY: ${{ secrets.S3_DEPLOY_KEY }} 51 | AWS_DEFAULT_REGION: ${{ secrets.S3_REGION }} 52 | BUCKET_NAME: ${{ secrets.S3_BUCKET }} 53 | run: | 54 | cd duckdb 55 | git fetch --tags 56 | export DUCKDB_VERSION=`git tag --points-at HEAD` 57 | export DUCKDB_VERSION=${DUCKDB_VERSION:=`git log -1 --format=%h`} 58 | cd .. 59 | if [[ "$AWS_ACCESS_KEY_ID" == "" ]] ; then 60 | echo 'No key set, skipping' 61 | elif [[ "$GITHUB_REF" =~ ^(refs/tags/v.+)$ ]] ; then 62 | python -m pip install awscli 63 | ./scripts/extension-upload.sh odbc_scan ${{ github.ref_name }} $DUCKDB_VERSION windows_amd64 $BUCKET_NAME true 64 | elif [[ "$GITHUB_REF" =~ ^(refs/heads/main)$ ]] ; then 65 | python -m pip install awscli 66 | ./scripts/extension-upload.sh odbc_scan `git log -1 --format=%h` $DUCKDB_VERSION windows_amd64 $BUCKET_NAME false 67 | fi -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | build 2 | .idea 3 | cmake-build-debug 4 | duckdb_unittest_tempdir/ 5 | .DS_Store 6 | testext 7 | test/python/__pycache__/ 8 | .Rhistory 9 | .clangd 10 | .odbc.ini 11 | .odbcinst.ini 12 | .clang-format 13 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "duckdb"] 2 | path = duckdb 3 | url = https://github.com/duckdb/duckdb.git 4 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8.12) 2 | 3 | # Set extension name here 4 | set(TARGET_NAME odbc_scanner) 5 | 6 | set(EXTENSION_NAME ${TARGET_NAME}_extension) 7 | project(${TARGET_NAME}) 8 | 9 | include_directories(src/include) 10 | 11 | set( 12 | EXTENSION_SOURCES 13 | src/odbc_scan.cpp 14 | src/odbc_scanner_extension.cpp 15 | ) 16 | add_library(${EXTENSION_NAME} STATIC ${EXTENSION_SOURCES}) 17 | 18 | set(PARAMETERS "-warnings") 19 | build_loadable_extension(${TARGET_NAME} ${PARAMETERS} ${EXTENSION_SOURCES}) 20 | 21 | # nix store location of ODBC_CONFIG is passed in as CLIENT_FLAGS to root level Makefile 22 | # set(ODBC_CONFIG /nix/store/xs4bg5404nsjarivdzxszq0z0pn2ckzv-unixODBC-2.3.11/bin/odbc_config) 23 | 
find_package(ODBC REQUIRED ODBC_INCLUDE_DIR ODBC_LIBRARY) 24 | if(NOT ODBC_FOUND) 25 | message(FATAL_ERROR "No ODBC found") 26 | endif() 27 | # Link ODBC to static lib 28 | target_include_directories(${EXTENSION_NAME} PUBLIC $<TARGET_PROPERTY:ODBC::ODBC,INTERFACE_INCLUDE_DIRECTORIES>) 29 | target_link_libraries(${EXTENSION_NAME} ${ODBC_LIBRARIES}) 30 | # Link ODBC to loadable binary 31 | target_include_directories(${TARGET_NAME}_loadable_extension PRIVATE $<TARGET_PROPERTY:ODBC::ODBC,INTERFACE_INCLUDE_DIRECTORIES>) 32 | target_link_libraries(${TARGET_NAME}_loadable_extension ${ODBC_LIBRARIES}) 33 | 34 | install( 35 | TARGETS ${EXTENSION_NAME} 36 | EXPORT "${DUCKDB_EXPORT_SET}" 37 | LIBRARY DESTINATION "${INSTALL_LIB_DIR}" 38 | ARCHIVE DESTINATION "${INSTALL_LIB_DIR}") 39 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Alex Kwiatkowski 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: all clean format debug release duckdb_debug duckdb_release pull update 2 | 3 | all: release 4 | 5 | MKFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST))) 6 | PROJ_DIR := $(dir $(MKFILE_PATH)) 7 | 8 | OSX_BUILD_UNIVERSAL_FLAG= 9 | ifeq (${OSX_BUILD_UNIVERSAL}, 1) 10 | OSX_BUILD_UNIVERSAL_FLAG=-DOSX_BUILD_UNIVERSAL=1 11 | endif 12 | ifeq (${STATIC_LIBCPP}, 1) 13 | STATIC_LIBCPP=-DSTATIC_LIBCPP=TRUE 14 | endif 15 | 16 | ifeq ($(GEN),ninja) 17 | GENERATOR=-G "Ninja" 18 | FORCE_COLOR=-DFORCE_COLORED_OUTPUT=1 19 | endif 20 | 21 | BUILD_FLAGS=-DEXTENSION_STATIC_BUILD=1 -DBUILD_TPCH_EXTENSION=1 -DBUILD_PARQUET_EXTENSION=1 ${OSX_BUILD_UNIVERSAL_FLAG} ${STATIC_LIBCPP} 22 | 23 | CLIENT_FLAGS := 24 | 25 | # These flags will make DuckDB build the extension 26 | EXTENSION_FLAGS=-DDUCKDB_OOT_EXTENSION_NAMES="odbc_scanner" -DDUCKDB_OOT_EXTENSION_ODBC_SCANNER_PATH="$(PROJ_DIR)" -DDUCKDB_OOT_EXTENSION_ODBC_SCANNER_SHOULD_LINK="TRUE" -DDUCKDB_OOT_EXTENSION_ODBC_SCANNER_INCLUDE_PATH="$(PROJ_DIR)src/include" 27 | 28 | pull: 29 | git submodule init 30 | git submodule update --recursive --remote 31 | 32 | clean: 33 | rm -rf build 34 | rm -rf testext 35 | cd duckdb && make clean 36 | 37 | # Main build 38 | debug: 39 | mkdir -p build/debug && \ 40 | cmake $(GENERATOR) $(FORCE_COLOR) $(EXTENSION_FLAGS) ${CLIENT_FLAGS} -DEXTENSION_STATIC_BUILD=1 -DCMAKE_BUILD_TYPE=Debug ${BUILD_FLAGS} -S ./duckdb/ -B build/debug && \ 41 | cmake --build build/debug --config Debug 42 | 43 | release: 44 | mkdir -p build/release && \ 45 | cmake $(GENERATOR) $(FORCE_COLOR) $(EXTENSION_FLAGS) ${CLIENT_FLAGS} -DEXTENSION_STATIC_BUILD=1 -DCMAKE_BUILD_TYPE=Release ${BUILD_FLAGS} -S ./duckdb/ -B build/release && \ 46 | cmake --build build/release --config Release 47 | 48 | # Client build 49 | debug_js: CLIENT_FLAGS=-DBUILD_NODE=1 -DBUILD_JSON_EXTENSION=1 50 | debug_js: debug 51 | 52 | debug_r: CLIENT_FLAGS=-DBUILD_R=1 53 | debug_r: debug 54 | 55 | debug_python: CLIENT_FLAGS=-DBUILD_PYTHON=1 -DBUILD_JSON_EXTENSION=1 -DBUILD_FTS_EXTENSION=1 -DBUILD_TPCH_EXTENSION=1 -DBUILD_VISUALIZER_EXTENSION=1 -DBUILD_TPCDS_EXTENSION=1 56 | debug_python: debug 57 | 58 | release_js: CLIENT_FLAGS=-DBUILD_NODE=1 -DBUILD_JSON_EXTENSION=1 59 | release_js: release 60 | 61 | release_r: CLIENT_FLAGS=-DBUILD_R=1 62 | release_r: release 63 | 64 | release_python: CLIENT_FLAGS=-DBUILD_PYTHON=1 -DBUILD_JSON_EXTENSION=1 -DBUILD_FTS_EXTENSION=1 -DBUILD_TPCH_EXTENSION=1 -DBUILD_VISUALIZER_EXTENSION=1 -DBUILD_TPCDS_EXTENSION=1 65 | release_python: release 66 | 67 | # Main tests 68 | test: test_release 69 | 70 | test_release: release 71 | ./build/release/test/unittest --test-dir . "[sql]" 72 | 73 | test_debug: debug 74 | ./build/debug/test/unittest --test-dir . 
"[sql]" 75 | 76 | # Client tests 77 | test_js: test_debug_js 78 | test_debug_js: debug_js 79 | cd duckdb/tools/nodejs && npm run test-path -- "../../../test/nodejs/**/*.js" 80 | 81 | test_release_js: release_js 82 | cd duckdb/tools/nodejs && npm run test-path -- "../../../test/nodejs/**/*.js" 83 | 84 | test_python: test_debug_python 85 | test_debug_python: debug_python 86 | cd test/python && python3 -m pytest 87 | 88 | test_release_python: release_python 89 | cd test/python && python3 -m pytest 90 | 91 | format: 92 | find src/ -iname *.hpp -o -iname *.cpp | xargs clang-format --sort-includes=0 -style=file -i 93 | cmake-format -i CMakeLists.txt 94 | 95 | update: 96 | git submodule update --remote --merge 97 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ODBC Scanner DuckDB Extension 2 | 3 | A DuckDB extension to read data directly from databases supporting the ODBC interface 4 | 5 | ### odbc_scan 6 | 7 | ```shell 8 | ODBCSYSINI=${PWD} ODBCINSTINI=.odbcinst.ini ODBCINI=.odbc.ini ./build/release/duckdb 9 | ``` 10 | ```duckdb 11 | D select * from odbc_scan( 12 | 'Driver={db2 odbctest};Hostname=localhost;Database=odbctest;Uid=db2inst1;Pwd=password;Port=50000', 13 | 'DB2INST1', 14 | 'PEOPLE' 15 | ); 16 | ┌──────────────┬───────┬───────────────┐ 17 | │ NAME │ AGE │ SALARY │ 18 | │ varchar │ int32 │ decimal(20,2) │ 19 | ├──────────────┼───────┼───────────────┤ 20 | │ Lebron James │ 37 │ 100.10 │ 21 | │ Spiderman │ 25 │ 200.20 │ 22 | │ Wonder Woman │ 22 │ 300.30 │ 23 | │ David Bowie │ 69 │ 400.40 │ 24 | └──────────────┴───────┴───────────────┘ 25 | ``` 26 | 27 | ## Supported Databases 28 | 29 | This extension is tested and known to work with the ODBC drivers of the following databases. 30 | 31 | | Database | Tests | Linux x86_64 | Linux arm64 | OS X x86_64 | OS X aarch64 | 32 | | ---------- | :--------------------------------------------------------: | :----------: | :---------: | :---------: | :----------: | 33 | | IBM Db2 | [odbc_scan_db2](./test/sql/odbc_scan_db2.test) | `[x]` | `[ ]` | `[x]` | `[ ]` | 34 | | MSSQL | [odbc_scan_msql](./test/sql/odbc_scan_mssql.test) | `[ ]` | `[ ]` | `[ ]` | `[ ]` | 35 | | Oracle | [odbc_scan_oracle](./test/sql/odbc_scan_oracle.test) | `[ ]` | `[ ]` | `[ ]` | `[ ]` | 36 | | Postgres | [odbc_scan_postgres](./test/sql/odbc_scan_postgres.test) | `[x]` | `[x]` | `[x]` | `[x]` | 37 | | MySQL | [odbc_scan_mysql](./test/sql/odbc_scan_mysql.test) | `[ ]` | `[ ]` | `[ ]` | `[ ]` | 38 | | MariaDB | [odbc_scan_mariadb](./test/sql/odbc_scan_mariadb.test) | `[ ]` | `[ ]` | `[ ]` | `[ ]` | 39 | | Snowflake | [odbc_scan_snowflake](./test/sql/odbc_scan_snowflake.test) | `[ ]` | `[ ]` | `[ ]` | `[ ]` | 40 | | BigQuery | [odbc_scan_big_query](./test/sql/odbc_scan_big_query.test) | `[ ]` | `[ ]` | `[ ]` | `[ ]` | 41 | | MongoDB | [odbc_scan_mongo_db](./test/sql/odbc_scan_mongo_db.test) | `[ ]` | `[ ]` | `[ ]` | `[ ]` | 42 | 43 | If you have tested the extension against other databases let us know by opening an [issue](https://github.com/rupurt/odbc-scanner-duckdb-extension/issues/new) 44 | or creating a pull request with a set of tests. 45 | 46 | ## Connection String and DSN Formats 47 | 48 | For a full list of supported values read the [connection string](./docs/ODBC_CONNECTION_STRING_AND_DSN_FORMATS.md) documentation. 49 | 50 | ## ODBC Drivers 51 | 52 | Drivers can be downloaded from most vendors. 
This package manages ODBC drivers through the [odbc-drivers-nix](https://github.com/rupurt/odbc-drivers-nix) 53 | flake. If you'd like to have nix download and manage drivers and their dependencies, follow the instructions in the [README](https://github.com/rupurt/odbc-drivers-nix#add-the-odbc-drivers-nix-overlay-to-your-own-flake). 54 | 55 | ## Development 56 | 57 | This repository manages development dependencies such as drivers and shared libraries with [nix](https://nixos.org). It assumes you 58 | have it [installed](https://github.com/DeterminateSystems/nix-installer). 59 | 60 | All `development` and `test` tasks should be run within a nix shell 61 | 62 | ```shell 63 | nix develop -c $SHELL 64 | ``` 65 | 66 | The `odbc-scanner-duckdb-extension` is built with a `clang` toolchain. To enable `clangd` LSP support, run the `.clangd` 67 | generator nix application. 68 | 69 | ```shell 70 | nix run .#generate-dot-clangd 71 | ``` 72 | 73 | To build the extension with the official DuckDB `cmake` toolchain and `clangd`, run the build nix application, which will link 74 | to the correct version of `unixodbc`. 75 | 76 | ```shell 77 | nix run .#build 78 | ./build/release/duckdb 79 | ``` 80 | 81 | To use ODBC DSNs with driver paths managed by the `odbc-drivers-nix` flake, run the generate nix apps. 82 | 83 | ```shell 84 | nix run .#generate-odbc-ini 85 | nix run .#generate-odbcinst-ini 86 | ``` 87 | 88 | ## Test 89 | 90 | Run the official DuckDB `cmake` builder with `nix` to ensure `unixodbc` is linked correctly 91 | 92 | ```shell 93 | docker compose up 94 | nix run .#test 95 | ``` 96 | 97 | ## Installing the deployed binaries 98 | 99 | To install your extension binaries from S3, you will need to do two things. Firstly, DuckDB should be launched with the 100 | `allow_unsigned_extensions` option set to true. How to set this will depend on the client you're using. Some examples: 101 | 102 | CLI: 103 | ```shell 104 | duckdb -unsigned 105 | ``` 106 | 107 | Python: 108 | ```python 109 | con = duckdb.connect(':memory:', config={'allow_unsigned_extensions' : 'true'}) 110 | ``` 111 | 112 | NodeJS: 113 | ```js 114 | db = new duckdb.Database(':memory:', {"allow_unsigned_extensions": "true"}); 115 | ``` 116 | 117 | Secondly, you will need to set the repository endpoint in DuckDB to the HTTP url of your bucket + version of the extension 118 | you want to install. To do this, run the following SQL query in DuckDB: 119 | ```sql 120 | SET custom_extension_repository='bucket.s3.eu-west-1.amazonaws.com/<your_extension_name>/latest'; 121 | ``` 122 | Note that the `/latest` path will allow you to install the latest extension version available for your current version of 123 | DuckDB. To specify a specific version, you can pass the version instead. 
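For example, to pin installs to one deployed version of this extension instead of tracking `/latest` (the bucket host and version tag below are illustrative placeholders, not a live endpoint):

```sql
-- hypothetical values: substitute your own bucket and the tag you deployed via extension-upload.sh
SET custom_extension_repository='bucket.s3.eu-west-1.amazonaws.com/odbc_scanner/v0.0.1';
```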
124 | 125 | After running these steps, you can install and load your extension using the regular INSTALL/LOAD commands in DuckDB: 126 | ```sql 127 | INSTALL 'build/release/extension/odbc_scanner/odbc_scanner.duckdb_extension'; 128 | LOAD 'build/release/extension/odbc_scanner/odbc_scanner.duckdb_extension'; 129 | ``` 130 | 131 | ## License 132 | 133 | `odbc-scanner-duckdb-extension` is released under the [MIT license](./LICENSE) 134 | -------------------------------------------------------------------------------- /data/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rupurt/odbc-scanner-duckdb-extension/205d2a913d484a1c4cbf04be0c03c23a786f27a1/data/.gitkeep -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | docker/docker-compose.yml -------------------------------------------------------------------------------- /docker/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.8" 2 | 3 | services: 4 | db2: 5 | image: icr.io/db2_community/db2:11.5.8.0 6 | platform: linux/amd64 7 | privileged: true 8 | ports: 9 | - "50000:50000" 10 | volumes: 11 | - db2_data:/database 12 | environment: 13 | - LICENSE=${DB2_LICENSE:-accept} 14 | - DB2INST1_PASSWORD=${DB2_DBINST1_PASSWORD:-password} 15 | - DBNAME=${DB2_DBNAME:-odbctest} 16 | - DB2INSTANCE=${DB2_DB2INSTANCE:-db2inst1} 17 | - BLU=${DB2_BLU:-false} 18 | - ENABLE_ORACLE_COMPATIBILITY=${DB2_ENABLE_ORACLE_COMPATIBILITY:-false} 19 | - UPDATEAVAIL=${DB2_UPDATEAVAIL:-NO} 20 | - TO_CREATE_SAMPLEDB=${DB2_TO_CREATE_SAMPLEDB:-false} 21 | - SAMPLEDB=${DB2_SAMPLEDB:-false} 22 | - IS_OSXFS=${DB2_IS_OSXFS:-true} 23 | - REPODB=${DB2_REPODB:-false} 24 | - HADR_ENABLED=${DB2_HADR_ENABLED:-false} 25 | - PERSISTENT_HOME=${DB2_PERSISTENT_HOME:-true} 26 | - ETCD_ENDPOINT=${DB2_ETCD_ENDPOINT:-} 27 | - ETCD_USERNAME=${DB2_ETCD_USERNAME:-} 28 | - ETCD_PASSWORD=${DB2_ETCD_PASSWORD:-} 29 | healthcheck: 30 | test: ["CMD", "/opt/ibm/db2/V11.5/bin/db2", "connect to odbctest"] 31 | interval: 10s 32 | timeout: 10s 33 | retries: 1000 34 | start_period: 60s 35 | 36 | mssql: 37 | image: mcr.microsoft.com/mssql/server:2022-latest 38 | platform: linux/amd64 39 | privileged: true 40 | ports: 41 | - "1433:1433" 42 | # volumes: 43 | # - mssql_data:/var/opt/mssql/data 44 | environment: 45 | - ACCEPT_EULA=Y 46 | - MSSQL_SA_PASSWORD=password 47 | 48 | oracle: 49 | image: container-registry.oracle.com/database/free:23.2.0.0 50 | ports: 51 | - "1521:1521" 52 | volumes: 53 | - oracle_data:/opt/oracle/oradata 54 | environment: 55 | - ORACLE_PWD=password 56 | 57 | postgres: 58 | image: postgres:15.3-alpine3.18 59 | ports: 60 | - "5432:5432" 61 | volumes: 62 | - postgres_data:/var/lib/postgresql/data 63 | user: postgres 64 | environment: 65 | - POSTGRES_USER=postgres 66 | - POSTGRES_PASSWORD=password 67 | - PGDATA=/var/lib/postgresql/data/pgdata 68 | healthcheck: 69 | test: ["CMD-SHELL", "pg_isready"] 70 | interval: 10s 71 | timeout: 5s 72 | retries: 5 73 | 74 | mariadb: 75 | image: mariadb:11.0.2 76 | ports: 77 | - "3306:3306" 78 | volumes: 79 | - mariadb_data:/var/lib/mysql 80 | environment: 81 | - MARIADB_USER=mariadb 82 | - MARIADB_PASSWORD=password 83 | - MARIADB_ROOT_PASSWORD=password 84 | 85 | volumes: 86 | db2_data: {} 87 | mssql_data: {} 88 | oracle_data: {} 89 | postgres_data: {} 90 | mariadb_data: {} 91 | 
-------------------------------------------------------------------------------- /docs/NEXT_README.md: -------------------------------------------------------------------------------- 1 | # <extension_name> 2 | 3 | This repository is based on https://github.com/duckdb/extension-template, check it out if you want to build and ship your own DuckDB extension. 4 | 5 | --- 6 | 7 | This extension, <extension_name>, allows you to ... <extension_goal>. 8 | 9 | 10 | ## Building 11 | To build the extension: 12 | ```sh 13 | make 14 | ``` 15 | The main binaries that will be built are: 16 | ```sh 17 | ./build/release/duckdb 18 | ./build/release/test/unittest 19 | ./build/release/extension/<extension_name>/<extension_name>.duckdb_extension 20 | ``` 21 | - `duckdb` is the binary for the duckdb shell with the extension code automatically loaded. 22 | - `unittest` is the test runner of duckdb. Again, the extension is already linked into the binary. 23 | - `<extension_name>.duckdb_extension` is the loadable binary as it would be distributed. 24 | 25 | ## Running the extension 26 | To run the extension code, simply start the shell with `./build/release/duckdb`. 27 | 28 | Now we can use the features from the extension directly in DuckDB. The template contains a single scalar function `quack()` that takes a string argument and returns a string: 29 | ``` 30 | D select quack('Jane') as result; 31 | ┌───────────────┐ 32 | │ result │ 33 | │ varchar │ 34 | ├───────────────┤ 35 | │ Quack Jane 🐥 │ 36 | └───────────────┘ 37 | ``` 38 | 39 | ## Running the tests 40 | Different tests can be created for DuckDB extensions. The primary way of testing DuckDB extensions should be the SQL tests in `./test/sql`. These SQL tests can be run using: 41 | ```sh 42 | make test 43 | ``` 44 | 45 | ### Installing the deployed binaries 46 | To install your extension binaries from S3, you will need to do two things. Firstly, DuckDB should be launched with the 47 | `allow_unsigned_extensions` option set to true. How to set this will depend on the client you're using. Some examples: 48 | 49 | CLI: 50 | ```shell 51 | duckdb -unsigned 52 | ``` 53 | 54 | Python: 55 | ```python 56 | con = duckdb.connect(':memory:', config={'allow_unsigned_extensions' : 'true'}) 57 | ``` 58 | 59 | NodeJS: 60 | ```js 61 | db = new duckdb.Database(':memory:', {"allow_unsigned_extensions": "true"}); 62 | ``` 63 | 64 | Secondly, you will need to set the repository endpoint in DuckDB to the HTTP url of your bucket + version of the extension 65 | you want to install. To do this, run the following SQL query in DuckDB: 66 | ```sql 67 | SET custom_extension_repository='bucket.s3.eu-west-1.amazonaws.com/<extension_name>/latest'; 68 | ``` 69 | Note that the `/latest` path will allow you to install the latest extension version available for your current version of 70 | DuckDB. To specify a specific version, you can pass the version instead. 71 | 72 | After running these steps, you can install and load your extension using the regular INSTALL/LOAD commands in DuckDB: 73 | ```sql 74 | INSTALL <extension_name> 75 | LOAD <extension_name> 76 | ``` 77 | -------------------------------------------------------------------------------- /docs/ODBC_CONNECTION_STRING_AND_DSN_FORMATS.md: -------------------------------------------------------------------------------- 1 | # ODBC Connection String and DSN Formats 2 | 3 | `odbc-scanner-duckdb` supports standard ODBC connection string and DSN formats. A detailed list can be obtained 4 | from [connectionstrings.com](https://www.connectionstrings.com). 
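Either format is passed as the first argument to `odbc_scan`. As a minimal sketch, assuming the `db2 odbctest` DSN defined below and the `PEOPLE` table from the main README:

```sql
-- connect through a DSN entry from .odbc.ini instead of a full driver connection string
select * from odbc_scan('DSN=db2 odbctest;Uid=db2inst1;Pwd=password', 'DB2INST1', 'PEOPLE');
```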
5 | 6 | ## Find the Path to your Nix managed ODBC Driver 7 | 8 | ```shell 9 | nix run .#ls-odbc-driver-paths 10 | db2 /nix/store/6flbacf9h5bk09iw37b7sncgjn9mdkwj-db2-odbc-driver-11.5.8/lib/libdb2.so 11 | postgres /nix/store/j648cwmz16prd2n35h0xdhji9b02pip6-postgres-odbc-driver-15.00.0000/lib/psqlodbca.so 12 | ``` 13 | 14 | ## DSNs 15 | 16 | ### Db2 17 | 18 | ```odbc.ini 19 | [db2 odbctest] 20 | Driver = db2 21 | ``` 22 | 23 | ```odbcinst.ini 24 | [db2] 25 | Driver = ${DB2_DRIVER_PATH} 26 | ``` 27 | 28 | ### Postgres 29 | 30 | ```odbc.ini 31 | [postgres odbc_test] 32 | Driver = postgres 33 | ``` 34 | 35 | ```odbcinst.ini 36 | [db2] 37 | Driver = ${DB2_DRIVER_PATH} 38 | 39 | [postgres] 40 | Driver = ${POSTGRES_DRIVER_PATH} 41 | ``` 42 | 43 | ## Connection Strings 44 | 45 | ### Db2 46 | 47 | ``` 48 | Driver=/nix/store/py6m0q4ij50pwjk6a5f18qhhahrvf2sk-db2-driver-11.5.8/lib/libdb2.so;Hostname=localhost;Database=odbctest;Uid=db2inst1;Pwd=password;Port=50000 49 | ``` 50 | 51 | ### Postgres 52 | 53 | ``` 54 | Driver=/nix/store/j648cwmz16prd2n35h0xdhji9b02pip6-postgres-odbc-driver-15.00.0000/lib/psqlodbca.so;Server=localhost;Database=odbc_test;Uid=postgres;Pwd=password;Port=5432 55 | ``` -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # WIP Disclaimer 2 | This template is currently a work-in-progress. Feel free to play around with it and give us feedback. Note also that this template depends on a development version of DuckDB. 3 | 4 | Get in contact with fellow extension developers on https://discord.duckdb.org and follow https://duckdb.org/news for more information on official launch. 5 | 6 | # DuckDB Extension Template 7 | The main goal of this template is to allow users to easily develop, test and distribute their own DuckDB extension. 8 | 9 | ## Getting started 10 | First step to getting started is to create your own repo from this template by clicking `Use this template`. Then clone your new repository using 11 | ```sh 12 | git clone --recurse-submodules https://github.com/<you>/<your-new-extension-repo>.git 13 | ``` 14 | Note that `--recurse-submodules` will ensure the correct version of duckdb is pulled allowing you to get started right away. 15 | 16 | ## Building 17 | To build the extension: 18 | ```sh 19 | make 20 | ``` 21 | The main binaries that will be built are: 22 | ```sh 23 | ./build/release/duckdb 24 | ./build/release/test/unittest 25 | ./build/release/extension/<extension_name>/<extension_name>.duckdb_extension 26 | ``` 27 | - `duckdb` is the binary for the duckdb shell with the extension code automatically loaded. 28 | - `unittest` is the test runner of duckdb. Again, the extension is already linked into the binary. 29 | - `<extension_name>.duckdb_extension` is the loadable binary as it would be distributed. 30 | 31 | ## Running the extension 32 | To run the extension code, simply start the shell with `./build/release/duckdb`. 33 | 34 | Now we can use the features from the extension directly in DuckDB. The template contains a single scalar function `quack()` that takes a string argument and returns a string: 35 | ``` 36 | D select quack('Jane') as result; 37 | ┌───────────────┐ 38 | │ result │ 39 | │ varchar │ 40 | ├───────────────┤ 41 | │ Quack Jane 🐥 │ 42 | └───────────────┘ 43 | ``` 44 | 45 | ## Running the tests 46 | Different tests can be created for DuckDB extensions. The primary way of testing DuckDB extensions should be the SQL tests in `./test/sql`. 
These SQL tests can be run using: 47 | ```sh 48 | make test 49 | ``` 50 | 51 | ## Getting started with your own extension 52 | After creating a repository from this template, the first step is to name your extension. To rename the extension, run: 53 | ``` 54 | python3 ./scripts/set_extension_name.py <extension_name> <function_name> 55 | ``` 56 | Feel free to delete the script after this step. 57 | 58 | Now you're good to go! After a (re)build, you should now be able to use your duckdb extension: 59 | ``` 60 | ./build/release/duckdb 61 | D select <function_name>('Jane') as result; 62 | ┌─────────────────────────────────────┐ 63 | │ result │ 64 | │ varchar │ 65 | ├─────────────────────────────────────┤ 66 | │ <function_name> Jane 🐥 │ 67 | └─────────────────────────────────────┘ 68 | ``` 69 | 70 | For inspiration/examples on how to extend DuckDB in a more meaningful way, check out the [test extensions](https://github.com/duckdb/duckdb/blob/master/test/extension), 71 | the [in-tree extensions](https://github.com/duckdb/duckdb/tree/master/extension), and the [out-of-tree extensions](https://github.com/duckdblabs). 72 | 73 | ## Distributing your extension 74 | Easy distribution of extensions built with this template is facilitated using a similar process used by DuckDB itself. 75 | Binaries are generated for various versions/platforms allowing duckdb to automatically install the correct binary. 76 | 77 | This step requires that you pass the following 4 parameters to your GitHub repo as action secrets: 78 | 79 | | secret name | description | 80 | | ------------- | ----------------------------------- | 81 | | S3_REGION | s3 region holding your bucket | 82 | | S3_BUCKET | the name of the bucket to deploy to | 83 | | S3_DEPLOY_ID | the S3 key id | 84 | | S3_DEPLOY_KEY | the S3 key secret | 85 | 86 | After setting these variables, all pushes to master will trigger a new (dev) release. Note that your AWS token should 87 | have full permissions to the bucket, and you will need to have ACLs enabled. 88 | 89 | ### Installing the deployed binaries 90 | To install your extension binaries from S3, you will need to do two things. Firstly, DuckDB should be launched with the 91 | `allow_unsigned_extensions` option set to true. How to set this will depend on the client you're using. Some examples: 92 | 93 | CLI: 94 | ```shell 95 | duckdb -unsigned 96 | ``` 97 | 98 | Python: 99 | ```python 100 | con = duckdb.connect(':memory:', config={'allow_unsigned_extensions' : 'true'}) 101 | ``` 102 | 103 | NodeJS: 104 | ```js 105 | db = new duckdb.Database(':memory:', {"allow_unsigned_extensions": "true"}); 106 | ``` 107 | 108 | Secondly, you will need to set the repository endpoint in DuckDB to the HTTP url of your bucket + version of the extension 109 | you want to install. To do this, run the following SQL query in DuckDB: 110 | ```sql 111 | SET custom_extension_repository='bucket.s3.eu-west-1.amazonaws.com/<extension_name>/latest'; 112 | ``` 113 | Note that the `/latest` path will allow you to install the latest extension version available for your current version of 114 | DuckDB. To specify a specific version, you can pass the version instead. 115 | 116 | After running these steps, you can install and load your extension using the regular INSTALL/LOAD commands in DuckDB: 117 | ```sql 118 | INSTALL <extension_name> 119 | LOAD <extension_name> 120 | ``` 121 | 122 | ### Versioning of your extension 123 | Extension binaries will only work for the specific DuckDB version they were built for. 
Since you may want to support multiple 124 | versions of DuckDB for a release of your extension, you can specify which versions to build for in the CI of this template. 125 | By default, the CI will build your extension against the version of the DuckDB submodule, which should generally be the most 126 | recent version of DuckDB. To build for multiple versions of DuckDB, simply add the version to the matrix variable, e.g.: 127 | ``` 128 | strategy: 129 | matrix: 130 | duckdb_version: [ '', 'v0.7.0'] 131 | ``` 132 | 133 | ## Setting up CLion 134 | 135 | ### Opening project 136 | Configuring CLion with the extension template requires a little work. Firstly, make sure that the DuckDB submodule is available. 137 | Then make sure to open `./duckdb/CMakeLists.txt` (so not the top level `CMakeLists.txt` file from this repo) as a project in CLion. 138 | Now to fix your project path go to `tools->CMake->Change Project Root`([docs](https://www.jetbrains.com/help/clion/change-project-root-directory.html)) to set the project root to the root dir of this repo. 139 | 140 | ### Debugging 141 | To set up debugging in CLion, there are two simple steps required. Firstly, in `CLion -> Settings / Preferences -> Build, Execution, Deploy -> CMake` you will need to add the desired builds (e.g. Debug, Release, RelDebug, etc). There are different ways to configure this, but the easiest is to leave all empty, except the `build path`, which needs to be set to `../build/{build type}`. Now on a clean repository you will first need to run `make {build type}` to initialize the CMake build directory. After running make, you will be able to (re)build from CLion by using the build target we just created. 142 | 143 | The second step is to configure the unittest runner as a run/debug configuration. To do this, go to `Run -> Edit Configurations` and click `+ -> CMake Application`. The target and executable should be `unittest`. This will run all the DuckDB tests. To specify only running the extension-specific tests, add `--test-dir ../../.. [sql]` to the `Program Arguments`. Note that it is recommended to use the `unittest` executable for testing/development within CLion. The actual DuckDB CLI currently does not reliably work as a run target in CLion. 
144 | -------------------------------------------------------------------------------- /flake.lock: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": { 3 | "flake-utils": { 4 | "inputs": { 5 | "systems": "systems" 6 | }, 7 | "locked": { 8 | "lastModified": 1692799911, 9 | "narHash": "sha256-3eihraek4qL744EvQXsK1Ha6C3CR7nnT8X2qWap4RNk=", 10 | "owner": "numtide", 11 | "repo": "flake-utils", 12 | "rev": "f9e7cf818399d17d347f847525c5a5a8032e4e44", 13 | "type": "github" 14 | }, 15 | "original": { 16 | "owner": "numtide", 17 | "repo": "flake-utils", 18 | "type": "github" 19 | } 20 | }, 21 | "flake-utils_2": { 22 | "inputs": { 23 | "systems": "systems_2" 24 | }, 25 | "locked": { 26 | "lastModified": 1692799911, 27 | "narHash": "sha256-3eihraek4qL744EvQXsK1Ha6C3CR7nnT8X2qWap4RNk=", 28 | "owner": "numtide", 29 | "repo": "flake-utils", 30 | "rev": "f9e7cf818399d17d347f847525c5a5a8032e4e44", 31 | "type": "github" 32 | }, 33 | "original": { 34 | "owner": "numtide", 35 | "repo": "flake-utils", 36 | "type": "github" 37 | } 38 | }, 39 | "nixpkgs": { 40 | "locked": { 41 | "lastModified": 1693865745, 42 | "narHash": "sha256-9P+9EhXg7KbaMJLS8v03NMj+FhO2ijEGre07QCw7m4c=", 43 | "owner": "nixos", 44 | "repo": "nixpkgs", 45 | "rev": "374433bcfbc81c65d678189bd00ac97efb9b0a79", 46 | "type": "github" 47 | }, 48 | "original": { 49 | "owner": "nixos", 50 | "repo": "nixpkgs", 51 | "type": "github" 52 | } 53 | }, 54 | "nixpkgs_2": { 55 | "locked": { 56 | "lastModified": 1693865745, 57 | "narHash": "sha256-9P+9EhXg7KbaMJLS8v03NMj+FhO2ijEGre07QCw7m4c=", 58 | "owner": "nixos", 59 | "repo": "nixpkgs", 60 | "rev": "374433bcfbc81c65d678189bd00ac97efb9b0a79", 61 | "type": "github" 62 | }, 63 | "original": { 64 | "owner": "nixos", 65 | "repo": "nixpkgs", 66 | "type": "github" 67 | } 68 | }, 69 | "odbc-drivers": { 70 | "inputs": { 71 | "flake-utils": "flake-utils_2", 72 | "nixpkgs": "nixpkgs_2" 73 | }, 74 | "locked": { 75 | "lastModified": 1693866333, 76 | "narHash": "sha256-o5ncy1BJjYUKyJkkq6Hj6YbW+vH85aZXLQXF287ZJT0=", 77 | "owner": "rupurt", 78 | "repo": "odbc-drivers-nix", 79 | "rev": "0632935f1391a59172b94c0a87b553c2751d481f", 80 | "type": "github" 81 | }, 82 | "original": { 83 | "owner": "rupurt", 84 | "repo": "odbc-drivers-nix", 85 | "type": "github" 86 | } 87 | }, 88 | "root": { 89 | "inputs": { 90 | "flake-utils": "flake-utils", 91 | "nixpkgs": "nixpkgs", 92 | "odbc-drivers": "odbc-drivers" 93 | } 94 | }, 95 | "systems": { 96 | "locked": { 97 | "lastModified": 1681028828, 98 | "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", 99 | "owner": "nix-systems", 100 | "repo": "default", 101 | "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", 102 | "type": "github" 103 | }, 104 | "original": { 105 | "owner": "nix-systems", 106 | "repo": "default", 107 | "type": "github" 108 | } 109 | }, 110 | "systems_2": { 111 | "locked": { 112 | "lastModified": 1681028828, 113 | "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", 114 | "owner": "nix-systems", 115 | "repo": "default", 116 | "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", 117 | "type": "github" 118 | }, 119 | "original": { 120 | "owner": "nix-systems", 121 | "repo": "default", 122 | "type": "github" 123 | } 124 | } 125 | }, 126 | "root": "root", 127 | "version": 7 128 | } 129 | -------------------------------------------------------------------------------- /flake.nix: -------------------------------------------------------------------------------- 1 | { 2 | description = "Nix flake for the 
odbc_scanner duckdb extension"; 3 | 4 | inputs = { 5 | nixpkgs.url = "github:nixos/nixpkgs"; 6 | flake-utils.url = "github:numtide/flake-utils"; 7 | odbc-drivers.url = "github:rupurt/odbc-drivers-nix"; 8 | }; 9 | 10 | outputs = { 11 | flake-utils, 12 | nixpkgs, 13 | odbc-drivers, 14 | ... 15 | }: let 16 | systems = ["x86_64-linux" "aarch64-linux" "x86_64-darwin" "aarch64-darwin"]; 17 | outputs = flake-utils.lib.eachSystem systems (system: let 18 | pkgs = import nixpkgs { 19 | inherit system; 20 | overlays = [ 21 | odbc-drivers.overlay 22 | ]; 23 | }; 24 | stdenv = pkgs.llvmPackages_15.stdenv; 25 | in rec { 26 | # packages exported by the flake 27 | packages = { 28 | db2-odbc-driver = pkgs.db2-odbc-driver {}; 29 | postgres-odbc-driver = pkgs.postgres-odbc-driver {}; 30 | }; 31 | 32 | # nix run 33 | apps = { 34 | generate-dot-clangd = { 35 | type = "app"; 36 | program = toString (pkgs.writeScript "generate-dot-clangd" '' 37 | UNIX_ODBC_DIR=${pkgs.unixODBC} \ 38 | envsubst < ./templates/.clangd.template > .clangd 39 | ''); 40 | }; 41 | generate-dot-clang-format = { 42 | type = "app"; 43 | program = toString (pkgs.writeScript "generate-dot-clang-format" '' 44 | cp ./templates/.clang-format.template .clang-format 45 | ''); 46 | }; 47 | generate-odbc-ini = { 48 | type = "app"; 49 | program = toString (pkgs.writeScript "generate-odbc-ini" '' 50 | cp ./templates/.odbc.ini.template .odbc.ini 51 | ''); 52 | }; 53 | generate-odbcinst-ini = { 54 | type = "app"; 55 | program = toString (pkgs.writeScript "generate-odbcinst-ini" '' 56 | DB2_DRIVER_PATH=${packages.db2-odbc-driver}/lib/${ 57 | if stdenv.isDarwin 58 | then "libdb2.dylib" 59 | else "libdb2.so" 60 | } \ 61 | POSTGRES_DRIVER_PATH=${packages.postgres-odbc-driver}/lib/psqlodbca.so \ 62 | envsubst < ./templates/.odbcinst.ini.template > .odbcinst.ini 63 | ''); 64 | }; 65 | ls-odbc-driver-paths = { 66 | type = "app"; 67 | program = toString (pkgs.writeScript "ls-odbc-driver-paths" '' 68 | echo "db2 ${packages.db2-odbc-driver}/lib/${ 69 | if stdenv.isDarwin 70 | then "libdb2.dylib" 71 | else "libdb2.so" 72 | }" 73 | echo "postgres ${packages.postgres-odbc-driver}/lib/psqlodbca.so" 74 | ''); 75 | }; 76 | load-db2-schema = { 77 | type = "app"; 78 | program = toString (pkgs.writeScript "load-db2-schema" '' 79 | echo "TODO: load db2 schema" 80 | ''); 81 | }; 82 | test = { 83 | type = "app"; 84 | program = toString (pkgs.writeScript "test" '' 85 | export PATH="${pkgs.lib.makeBinPath ( 86 | with pkgs; [ 87 | git 88 | gnumake 89 | cmake 90 | ninja 91 | openssl 92 | packages.db2-odbc-driver 93 | packages.postgres-odbc-driver 94 | ] 95 | )}:$PATH" 96 | export CC=${stdenv.cc}/bin/clang 97 | export CXX=${stdenv.cc}/bin/clang++ 98 | 99 | make \ 100 | GEN=ninja \ 101 | ODBCSYSINI=$PWD \ 102 | ODBCINSTINI=.odbcinst.ini \ 103 | ODBCINI=$PWD/.odbc.ini \ 104 | test CLIENT_FLAGS="-DODBC_CONFIG=${pkgs.unixODBC}/bin/odbc_config" 105 | ''); 106 | }; 107 | build = { 108 | type = "app"; 109 | program = toString (pkgs.writeScript "build" '' 110 | export PATH="${pkgs.lib.makeBinPath ( 111 | with pkgs; [ 112 | git 113 | gnumake 114 | cmake 115 | ninja 116 | openssl 117 | packages.db2-odbc-driver 118 | packages.postgres-odbc-driver 119 | ] 120 | )}:$PATH" 121 | export CC=${stdenv.cc}/bin/clang 122 | export CXX=${stdenv.cc}/bin/clang++ 123 | 124 | make \ 125 | GEN=ninja \ 126 | CLIENT_FLAGS="-DODBC_CONFIG=${pkgs.unixODBC}/bin/odbc_config" 127 | ''); 128 | }; 129 | default = apps.build; 130 | }; 131 | 132 | # nix fmt 133 | formatter = pkgs.alejandra; 134 | 135 | # nix develop 
-c $SHELL 136 | devShells.default = pkgs.mkShell { 137 | packages = [ 138 | pkgs.git 139 | pkgs.gnumake 140 | pkgs.cmake 141 | # faster cmake builds 142 | pkgs.ninja 143 | # clangd lsp 144 | pkgs.llvmPackages_15.bintools 145 | pkgs.llvmPackages_15.clang 146 | pkgs.envsubst 147 | pkgs.openssl 148 | pkgs.unixODBC 149 | # psql cli 150 | pkgs.postgresql_15 151 | packages.db2-odbc-driver 152 | packages.postgres-odbc-driver 153 | ]; 154 | }; 155 | }); 156 | in 157 | outputs; 158 | } 159 | -------------------------------------------------------------------------------- /scripts/extension-upload.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Usage: ./extension-upload.sh <name> <extension_version> <duckdb_version> <architecture> <s3_bucket> <copy_to_latest> 4 | # <name>: Name of the extension 5 | # <extension_version>: Version (commit / version tag) of the extension 6 | # <duckdb_version>: Version (commit / version tag) of DuckDB 7 | # <architecture>: Architecture target of the extension binary 8 | # <s3_bucket>: S3 bucket to upload to 9 | # <copy_to_latest>: Set this as the latest version ("true" / "false", default: "false") 10 | 11 | set -e 12 | 13 | ext="build/release/extension/$1/$1.duckdb_extension" 14 | 15 | # compress extension binary 16 | gzip < "${ext}" > "$1.duckdb_extension.gz" 17 | 18 | # upload compressed extension binary to S3 19 | aws s3 cp "$1.duckdb_extension.gz" "s3://$5/$1/$2/$3/$4/$1.duckdb_extension.gz" --acl public-read 20 | 21 | # upload to latest if copy_to_latest is set to true 22 | if [[ $6 = 'true' ]]; then 23 | aws s3 cp "$1.duckdb_extension.gz" "s3://$5/$1/latest/$3/$4/$1.duckdb_extension.gz" --acl public-read 24 | fi 25 | -------------------------------------------------------------------------------- /scripts/set_extension_name.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import sys, os, shutil 4 | from pathlib import Path 5 | 6 | shutil.copyfile(f'docs/NEXT_README.md', f'README.md') 7 | 8 | if (len(sys.argv) != 3): 9 | raise Exception('usage: python3 set_extension_name.py <extension_name> <function_name>') 10 | 11 | name_extension = sys.argv[1] 12 | name_function = sys.argv[2] 13 | 14 | def replace(file_name, to_find, to_replace): 15 | with open(file_name, 'r', encoding="utf8") as file : 16 | filedata = file.read() 17 | filedata = filedata.replace(to_find, to_replace) 18 | with open(file_name, 'w', encoding="utf8") as file: 19 | file.write(filedata) 20 | 21 | files_to_search = [] 22 | files_to_search.extend(Path('./.github').rglob('./**/*.yml')) 23 | files_to_search.extend(Path('./test').rglob('./**/*.py')) 24 | files_to_search.extend(Path('./test').rglob('./**/*.test')) 25 | files_to_search.extend(Path('./test').rglob('./**/*.js')) 26 | files_to_search.extend(Path('./src').rglob('./**/*.hpp')) 27 | files_to_search.extend(Path('./src').rglob('./**/*.cpp')) 28 | files_to_search.extend(Path('./src').rglob('./**/*.txt')) 29 | files_to_search.extend(Path('./src').rglob('./*.md')) 30 | 31 | def replace_everywhere(to_find, to_replace): 32 | for path in files_to_search: 33 | replace(path, to_find, to_replace) 34 | replace(path, to_find.capitalize(), to_replace.capitalize()) 35 | 36 | replace("./CMakeLists.txt", to_find, to_replace) 37 | replace("./Makefile", to_find, to_replace) 38 | replace("./Makefile", to_find.capitalize(), to_replace.capitalize()) 39 | replace("./Makefile", to_find.upper(), to_replace.upper()) 40 | replace("./README.md", to_find, to_replace) 41 | 42 | replace_everywhere("quack", name_function) 43 | replace_everywhere("<extension_name>", name_extension) 44 | 45 | string_to_replace = name_function 46 | string_to_find = "quack" 47 | 48 | # 
rename files 49 | os.rename(f'test/python/{string_to_find}_test.py', f'test/python/{string_to_replace}_test.py') 50 | os.rename(f'test/sql/{string_to_find}.test', f'test/sql/{string_to_replace}.test') 51 | os.rename(f'src/{string_to_find}_extension.cpp', f'src/{string_to_replace}_extension.cpp') 52 | os.rename(f'src/include/{string_to_find}_extension.hpp', f'src/include/{string_to_replace}_extension.hpp') 53 | os.rename(f'test/nodejs/{string_to_find}_test.js', f'test/nodejs/{string_to_replace}_test.js') 54 | -------------------------------------------------------------------------------- /src/include/exception.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "duckdb.hpp" 4 | #include "duckdb/function/table_function.hpp" 5 | 6 | #include "sql.h" 7 | #include "sqlext.h" 8 | 9 | namespace duckdb { 10 | struct OdbcDiagnostics { 11 | std::string msg; 12 | std::string state; 13 | SQLINTEGER native; 14 | }; 15 | 16 | static unique_ptr<OdbcDiagnostics> ExtractDiagnostics(SQLSMALLINT handle_type, SQLHANDLE handle) { 17 | SQLINTEGER i = 1; 18 | SQLINTEGER native; 19 | SQLCHAR state[7]; 20 | SQLCHAR text[256]; 21 | SQLSMALLINT len; 22 | SQLRETURN return_code; 23 | auto diagnostics = make_uniq<OdbcDiagnostics>(); 24 | 25 | while (SQL_SUCCEEDED(SQLGetDiagRec(handle_type, handle, i, state, &native, text, sizeof(text), &len))) { 26 | diagnostics->msg += string((char *)text); 27 | diagnostics->state = string((char *)state); 28 | diagnostics->native = native; 29 | i++; 30 | } 31 | 32 | return std::move(diagnostics); 33 | } 34 | 35 | static std::string SqlReturnCodeToString(SQLRETURN return_code) { 36 | switch (return_code) { 37 | case SQL_SUCCESS: 38 | return "SQL_SUCCESS"; 39 | case SQL_SUCCESS_WITH_INFO: 40 | return "SQL_SUCCESS_WITH_INFO"; 41 | case SQL_NO_DATA: 42 | return "SQL_NO_DATA"; 43 | case SQL_ERROR: 44 | return "SQL_ERROR"; 45 | case SQL_INVALID_HANDLE: 46 | return "SQL_INVALID_HANDLE"; 47 | case SQL_STILL_EXECUTING: 48 | return "SQL_STILL_EXECUTING"; 49 | case SQL_NEED_DATA: 50 | return "SQL_NEED_DATA"; 51 | default: 52 | return "UNKNOWN"; 53 | } 54 | } 55 | 56 | static void ThrowExceptionWithDiagnostics(std::string msg_prefix, SQLSMALLINT handle_type, SQLHANDLE handle, 57 | SQLRETURN return_code) { 58 | auto diagnostics = ExtractDiagnostics(handle_type, handle); 59 | throw Exception(msg_prefix + " return_code=" + std::to_string(return_code) + ":" + 60 | SqlReturnCodeToString(return_code) + " msg='" + diagnostics->msg + 61 | "' state=" + diagnostics->state + " native=" + std::to_string(diagnostics->native)); 62 | } 63 | } // namespace duckdb 64 | -------------------------------------------------------------------------------- /src/include/odbc.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "exception.hpp" 4 | 5 | #include "duckdb.hpp" 6 | #include "duckdb/function/table_function.hpp" 7 | 8 | #include "sql.h" 9 | #include "sqlext.h" 10 | 11 | namespace duckdb { 12 | struct OdbcEnvironment { 13 | OdbcEnvironment() { handle = SQL_NULL_HENV; } 14 | ~OdbcEnvironment() { FreeHandle(); } 15 | 16 | SQLHENV handle; 17 | 18 | void FreeHandle() { 19 | if (handle != SQL_NULL_HENV) { 20 | auto return_code = SQLFreeHandle(SQL_HANDLE_ENV, handle); 21 | if (!SQL_SUCCEEDED(return_code)) { 22 | ThrowExceptionWithDiagnostics("OdbcEnvironment->FreeHandle() SQLFreeHandle", SQL_HANDLE_ENV, handle, 23 | return_code); 24 | } 25 | } 26 | } 27 | 28 | public: 29 | void Init() { 30 | if (handle != SQL_NULL_HENV) { 31 
63 | } // namespace duckdb
64 |
-------------------------------------------------------------------------------- /src/include/odbc.hpp: --------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include "exception.hpp"
4 |
5 | #include "duckdb.hpp"
6 | #include "duckdb/function/table_function.hpp"
7 |
8 | #include "sql.h"
9 | #include "sqlext.h"
10 |
11 | namespace duckdb {
12 | struct OdbcEnvironment {
13 |     OdbcEnvironment() { handle = SQL_NULL_HENV; }
14 |     ~OdbcEnvironment() { FreeHandle(); }
15 |
16 |     SQLHENV handle;
17 |
18 |     void FreeHandle() {
19 |         if (handle != SQL_NULL_HENV) {
20 |             auto return_code = SQLFreeHandle(SQL_HANDLE_ENV, handle);
21 |             if (!SQL_SUCCEEDED(return_code)) {
22 |                 ThrowExceptionWithDiagnostics("OdbcEnvironment->FreeHandle() SQLFreeHandle", SQL_HANDLE_ENV, handle,
23 |                                               return_code);
24 |             }
25 |         }
26 |     }
27 |
28 | public:
29 |     void Init() {
30 |         if (handle != SQL_NULL_HENV) {
31 |             throw Exception("OdbcEnvironment->Init() handle is not null");
32 |         }
33 |
34 |         SQLRETURN return_code;
35 |
36 |         return_code = SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HENV, &handle);
37 |         if (return_code != SQL_SUCCESS && return_code != SQL_SUCCESS_WITH_INFO) {
38 |             ThrowExceptionWithDiagnostics("OdbcEnvironment->Init() SQLAllocHandle", SQL_HANDLE_ENV, handle,
39 |                                           return_code);
40 |         }
41 |
42 |         return_code = SQLSetEnvAttr(handle, SQL_ATTR_ODBC_VERSION, (SQLPOINTER)SQL_OV_ODBC3, 0);
43 |         if (!SQL_SUCCEEDED(return_code)) {
44 |             ThrowExceptionWithDiagnostics("OdbcEnvironment->Init() SQLSetEnvAttr", SQL_HANDLE_ENV, handle,
45 |                                           return_code);
46 |         }
47 |     }
48 |     SQLHENV Handle() const { return handle; }
49 | };
50 |
51 | #define MAX_CONN_STR_OUT 1024
52 |
53 | struct OdbcConnection {
54 |     OdbcConnection() : handle(SQL_NULL_HDBC), dialed(false) {}
55 |     ~OdbcConnection() {
56 |         Disconnect();
57 |         FreeHandle();
58 |     }
59 |
60 |     SQLHDBC handle;
61 |     bool dialed;
62 |
63 |     void FreeHandle() {
64 |         if (handle != SQL_NULL_HDBC) {
65 |             auto return_code = SQLFreeHandle(SQL_HANDLE_DBC, handle);
66 |             if (!SQL_SUCCEEDED(return_code)) {
67 |                 ThrowExceptionWithDiagnostics("OdbcConnection->FreeHandle() SQLFreeHandle", SQL_HANDLE_DBC, handle,
68 |                                               return_code);
69 |             }
70 |         }
71 |     }
72 |
73 | public:
74 |     void Init(shared_ptr<OdbcEnvironment> &env) {
75 |         if (handle != SQL_NULL_HDBC) {
76 |             throw Exception("OdbcConnection->Init(): connection handle is not null");
77 |         }
78 |
79 |         auto return_code = SQLAllocHandle(SQL_HANDLE_DBC, env->Handle(), &handle);
80 |         if (return_code != SQL_SUCCESS && return_code != SQL_SUCCESS_WITH_INFO) {
81 |             ThrowExceptionWithDiagnostics("OdbcConnection->Init() SQLAllocHandle", SQL_HANDLE_DBC, handle,
82 |                                           return_code);
83 |         }
84 |     }
85 |     void Dial(string connection_string) {
86 |         auto conn_str_in_len = (SQLSMALLINT)connection_string.length();
87 |         SQLSMALLINT conn_str_out_len = 0;
88 |         SQLCHAR conn_str_out[MAX_CONN_STR_OUT + 1] = {0};
89 |
90 |         auto return_code =
91 |             SQLDriverConnect(handle, NULL, (SQLCHAR *)connection_string.c_str(), conn_str_in_len, conn_str_out,
92 |                              (SQLSMALLINT)MAX_CONN_STR_OUT, &conn_str_out_len, SQL_DRIVER_NOPROMPT);
93 |         if (return_code != SQL_SUCCESS && return_code != SQL_SUCCESS_WITH_INFO) {
94 |             ThrowExceptionWithDiagnostics("OdbcConnection->Dial() SQLDriverConnect", SQL_HANDLE_DBC, handle,
95 |                                           return_code);
96 |         }
97 |
98 |         dialed = true;
99 |     }
100 |     void Disconnect() {
101 |         if (!dialed) {
102 |             return;
103 |         }
104 |
105 |         auto return_code = SQLDisconnect(handle);
106 |         if (return_code != SQL_SUCCESS && return_code != SQL_SUCCESS_WITH_INFO) {
107 |             ThrowExceptionWithDiagnostics("OdbcConnection->Disconnect() SQLDisconnect", SQL_HANDLE_DBC, handle,
108 |                                           return_code);
109 |         }
110 |
111 |         dialed = false;
112 |     }
113 |     SQLHDBC Handle() { return handle; }
114 | };
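// The two RAII wrappers above follow the ODBC handle hierarchy: allocate an
// environment, then a connection from it, then statements from the
// connection. A minimal usage sketch (the DSN values are hypothetical):
//
//   auto env = make_shared<OdbcEnvironment>();
//   env->Init();
//   auto conn = make_shared<OdbcConnection>();
//   conn->Init(env);
//   conn->Dial("DSN={postgres odbc_test};Uid=postgres;Pwd=password");
//
// Destruction runs in reverse: ~OdbcConnection() disconnects before freeing
// its handle, and the environment must outlive every connection built on it.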
115 |
116 | struct OdbcColumnDescription {
117 |     SQLCHAR name[32];
118 |     SQLSMALLINT name_length;
119 |     SQLSMALLINT sql_data_type;
120 |     SQLSMALLINT c_data_type;
121 |     SQLULEN size;
122 |     SQLULEN length;
123 |     SQLSMALLINT decimal_digits;
124 |     SQLSMALLINT nullable;
125 | };
126 |
127 | struct OdbcStatementOptions {
128 |     OdbcStatementOptions(SQLULEN _row_array_size) : row_array_size(_row_array_size) {}
129 |
130 |     SQLULEN row_array_size;
131 | };
132 |
133 | struct OdbcStatement {
134 |     OdbcStatement(shared_ptr<OdbcConnection> _conn)
135 |         : conn(_conn), handle(SQL_NULL_HSTMT), prepared(false), executing(false) {}
136 |     ~OdbcStatement() {
137 |         prepared = false;
138 |         executing = false;
139 |         FreeHandle();
140 |     }
141 |
142 |     shared_ptr<OdbcConnection> conn;
143 |     SQLHSTMT handle;
144 |     bool prepared;
145 |     bool executing;
146 |
147 |     void FreeHandle() {
148 |         if (handle != SQL_NULL_HSTMT) {
149 |             SQLRETURN return_code = SQLFreeHandle(SQL_HANDLE_STMT, handle);
150 |             if (!SQL_SUCCEEDED(return_code)) {
151 |                 ThrowExceptionWithDiagnostics("OdbcStatement->FreeHandle() SQLFreeHandle", SQL_HANDLE_STMT, handle,
152 |                                               return_code);
153 |             }
154 |         }
155 |     }
156 |
157 | public:
158 |     void Init() {
159 |         if (handle != SQL_NULL_HSTMT) {
160 |             throw Exception("OdbcStatement->Init() handle has already been initialized. To "
161 |                             "execute a different statement instantiate a new statement");
162 |         }
163 |
164 |         auto return_code = SQLAllocHandle(SQL_HANDLE_STMT, conn->Handle(), &handle);
165 |         if (return_code != SQL_SUCCESS && return_code != SQL_SUCCESS_WITH_INFO) {
166 |             ThrowExceptionWithDiagnostics("OdbcStatement->Init() SQLAllocHandle", SQL_HANDLE_STMT, handle,
167 |                                           return_code);
168 |         }
169 |     }
170 |     void Prepare(std::string sql_statement) {
171 |         if (handle == SQL_NULL_HSTMT) {
172 |             throw Exception("OdbcStatement->Prepare() handle has not been allocated. Call "
173 |                             "OdbcStatement#Init() before OdbcStatement#Prepare()");
174 |         }
175 |
176 |         auto sql_len = (SQLSMALLINT)sql_statement.length();
177 |         auto return_code = SQLPrepare(handle, (SQLCHAR *)sql_statement.c_str(), sql_len);
178 |         if (return_code != SQL_SUCCESS && return_code != SQL_SUCCESS_WITH_INFO) {
179 |             ThrowExceptionWithDiagnostics("OdbcStatement->Prepare() SQLPrepare", SQL_HANDLE_STMT, handle,
180 |                                           return_code);
181 |         }
182 |
183 |         prepared = true;
184 |     }
185 |     void SetAttribute(SQLINTEGER attribute, SQLPOINTER value) {
186 |         if (handle == SQL_NULL_HSTMT) {
187 |             throw Exception("OdbcStatement->SetAttribute() handle has not been allocated. Call "
188 |                             "OdbcStatement#Init() before OdbcStatement#SetAttribute()");
189 |         }
190 |
191 |         auto return_code = SQLSetStmtAttr(handle, attribute, value, 0);
192 |         if (return_code != SQL_SUCCESS && return_code != SQL_SUCCESS_WITH_INFO) {
193 |             ThrowExceptionWithDiagnostics("OdbcStatement->SetAttribute() SQLSetStmtAttr", SQL_HANDLE_STMT, handle,
194 |                                           return_code);
195 |         }
196 |     }
197 |     void BindColumn(SQLUSMALLINT column_number, SQLSMALLINT c_data_type, unsigned char *buffer,
198 |                     SQLULEN column_buffer_length, SQLLEN *strlen_or_ind) {
199 |         if (handle == SQL_NULL_HSTMT) {
200 |             throw Exception("OdbcStatement->BindColumn() handle has not been allocated. Call "
201 |                             "OdbcStatement#Init() before OdbcStatement#BindColumn()");
202 |         }
203 |
204 |         auto return_code =
205 |             SQLBindCol(handle, column_number, c_data_type, buffer, column_buffer_length, strlen_or_ind);
206 |         if (!SQL_SUCCEEDED(return_code)) {
207 |             ThrowExceptionWithDiagnostics("OdbcStatement->BindColumn() SQLBindCol", SQL_HANDLE_STMT, handle,
208 |                                           return_code);
209 |         }
210 |     }
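    // BindColumn() sets up column-wise array binding: each bound column owns a
    // contiguous array of row_array_size cells, column_buffer_length bytes
    // each, plus a parallel SQLLEN array of length/indicator values. After one
    // SQLFetchScroll, row r of this column sits at
    // buffer[r * column_buffer_length] and strlen_or_ind[r] holds its byte
    // length or SQL_NULL_DATA. (A sketch of the layout as consumed by
    // odbc_scan.cpp, not additional API surface.)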
211 |     SQLSMALLINT NumResultCols() {
212 |         if (handle == SQL_NULL_HSTMT) {
213 |             throw Exception("OdbcStatement->NumResultCols() handle has not been allocated. Call "
214 |                             "OdbcStatement#Init() before OdbcStatement#NumResultCols()");
215 |         }
216 |         if (!prepared) {
217 |             throw Exception("OdbcStatement->NumResultCols() statement has "
218 |                             "not been prepared. Call OdbcStatement#Prepare() before "
219 |                             "OdbcStatement#NumResultCols()");
220 |         }
221 |
222 |         SQLSMALLINT num_result_cols = 0;
223 |         auto return_code = SQLNumResultCols(handle, &num_result_cols);
224 |         if (!SQL_SUCCEEDED(return_code)) {
225 |             ThrowExceptionWithDiagnostics("OdbcStatement->NumResultCols() SQLNumResultCols", SQL_HANDLE_STMT,
226 |                                           handle, return_code);
227 |         }
228 |
229 |         return num_result_cols;
230 |     }
231 |     vector<OdbcColumnDescription> DescribeColumns() {
232 |         auto num_result_cols = NumResultCols();
233 |         auto column_descriptions = vector<OdbcColumnDescription>(num_result_cols);
234 |
235 |         for (SQLUSMALLINT i = 0; i < num_result_cols; i++) {
236 |             auto col_desc = &column_descriptions.at(i);
237 |
238 |             auto return_code = SQLDescribeCol(handle, i + 1, col_desc->name, sizeof(col_desc->name),
239 |                                               &col_desc->name_length, &col_desc->sql_data_type, &col_desc->size,
240 |                                               &col_desc->decimal_digits, &col_desc->nullable);
241 |             if (!SQL_SUCCEEDED(return_code)) {
242 |                 ThrowExceptionWithDiagnostics("OdbcStatement->DescribeColumns() SQLDescribeCol", SQL_HANDLE_STMT,
243 |                                               handle, return_code);
244 |             }
245 |
246 |             SqlDataTypeToCDataType(col_desc);
247 |         }
248 |
249 |         return column_descriptions;
250 |     }
251 |     void Execute(unique_ptr<OdbcStatementOptions> &opts) {
252 |         if (handle == SQL_NULL_HSTMT) {
253 |             throw Exception("OdbcStatement->Execute() handle is null");
254 |         }
255 |         if (!prepared) {
256 |             throw Exception("OdbcStatement->Execute() statement is not prepared");
257 |         }
258 |         if (executing) {
259 |             throw Exception("OdbcStatement->Execute() previous statement is executing");
260 |         }
261 |
262 |         SetAttribute(SQL_ATTR_ROW_BIND_TYPE, (SQLPOINTER)SQL_BIND_BY_COLUMN);
263 |         SetAttribute(SQL_ATTR_ROW_ARRAY_SIZE, (SQLPOINTER)opts->row_array_size);
264 |
265 |         auto return_code = SQLExecute(handle);
266 |         if (return_code != SQL_SUCCESS && return_code != SQL_SUCCESS_WITH_INFO) {
267 |             ThrowExceptionWithDiagnostics("OdbcStatement->Execute() SQLExecute", SQL_HANDLE_STMT, handle,
268 |                                           return_code);
269 |         }
270 |
271 |         executing = true;
272 |     }
273 |     // TODO:
274 |     //  - support multiple fetch orientations
275 |     SQLLEN Fetch() {
276 |         if (handle == SQL_NULL_HSTMT) {
277 |             throw Exception("OdbcStatement->Fetch() handle is null");
278 |         }
279 |         if (!prepared) {
280 |             throw Exception("OdbcStatement->Fetch() statement is not prepared");
281 |         }
282 |         if (!executing) {
283 |             throw Exception("OdbcStatement->Fetch() statement is not executing");
284 |         }
285 |
286 |         SQLLEN rows_fetched = 0;
287 |         SetAttribute(SQL_ATTR_ROWS_FETCHED_PTR, (SQLPOINTER)&rows_fetched);
288 |
289 |         auto return_code = SQLFetchScroll(handle, SQL_FETCH_NEXT, 0);
290 |         if (return_code != SQL_SUCCESS && return_code != SQL_SUCCESS_WITH_INFO &&
291 |             return_code != SQL_NO_DATA_FOUND) {
292 |             ThrowExceptionWithDiagnostics("OdbcStatement->Fetch() SQLFetchScroll", SQL_HANDLE_STMT, handle,
293 |                                           return_code);
294 |         }
295 |
296 |         return rows_fetched;
297 |     }
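    // End-to-end lifecycle implied by the guards above (a sketch; "conn" is an
    // initialized shared_ptr<OdbcConnection>, the query and rowset size are
    // made up):
    //
    //   OdbcStatement stmt(conn);
    //   stmt.Init();
    //   stmt.Prepare("SELECT * FROM people");
    //   auto columns = stmt.DescribeColumns();
    //   ... BindColumn() once per column ...
    //   auto opts = make_uniq<OdbcStatementOptions>(2048);
    //   stmt.Execute(opts);
    //   while (stmt.Fetch() > 0) {
    //       ... read the bound buffers ...
    //   }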
298 |
299 | protected:
300 |     static void SqlDataTypeToCDataType(OdbcColumnDescription *col_desc) {
301 |         // TODO:
302 |         //  - unixodbc doesn't seem to define all possible sql types
303 |         switch (col_desc->sql_data_type) {
304 |         // case SQL_BIT:
305 |         //     col_desc->c_data_type = SQL_C_BIT;
306 |         //     col_desc->length = sizeof(SQLCHAR);
307 |         //     break;
308 |         case SQL_SMALLINT:
309 |             col_desc->c_data_type = SQL_C_SHORT;
310 |             col_desc->length = sizeof(SQLSMALLINT);
311 |             break;
312 |         case SQL_INTEGER:
313 |             col_desc->c_data_type = SQL_C_LONG;
314 |             col_desc->length = sizeof(SQLINTEGER);
315 |             break;
316 |         case SQL_BIGINT:
317 |             col_desc->c_data_type = SQL_C_SBIGINT;
318 |             col_desc->length = sizeof(SQLBIGINT);
319 |             break;
320 |         // case SQL_DECFLOAT:
321 |         //     col_desc->c_data_type = SQL_C_CHAR;
322 |         //     break;
323 |         case SQL_DECIMAL:
324 |         case SQL_NUMERIC:
325 |             col_desc->c_data_type = SQL_C_CHAR;
326 |             // TODO:
327 |             //  - this calculation is incorrect
328 |             //  - it needs to take into account the scale
329 |             //  - + (precision if decimal digits > 0)
330 |             //  - + (decimal point if decimal digit > 0)
331 |             //  - + (newline???)
332 |             col_desc->length = col_desc->size + sizeof(SQLCHAR);
333 |             break;
334 |         case SQL_DOUBLE:
335 |         case SQL_FLOAT:
336 |             col_desc->c_data_type = SQL_C_DOUBLE;
337 |             col_desc->length = sizeof(double);
338 |             break;
339 |         case SQL_REAL:
340 |             col_desc->c_data_type = SQL_C_FLOAT;
341 |             col_desc->length = sizeof(float);
342 |             break;
343 |         case SQL_CHAR:
344 |         // case SQL_CLOB:
345 |         case SQL_VARCHAR:
346 |         case SQL_LONGVARCHAR:
347 |             col_desc->c_data_type = SQL_C_CHAR;
348 |             col_desc->length = col_desc->size + sizeof(SQLCHAR);
349 |             break;
350 |         case SQL_BINARY:
351 |         // case SQL_BLOB:
352 |         case SQL_VARBINARY:
353 |         case SQL_LONGVARBINARY:
354 |             col_desc->c_data_type = SQL_C_BINARY;
355 |             col_desc->length = col_desc->size + sizeof(SQLCHAR);
356 |             break;
357 |         // case SQL_BLOB_LOCATOR:
358 |         //     col_desc->c_data_type = SQL_C_BLOB_LOCATOR;
359 |         //     break;
360 |         // case SQL_DBCLOB:
361 |         // case SQL_GRAPHIC:
362 |         // case SQL_LONGVARGRAPHIC:
363 |         // case SQL_VARGRAPHIC:
364 |         //     col_desc->c_data_type = SQL_C_DBCHAR;
365 |         //     break;
366 |         // case SQL_DBCLOB_LOCATOR:
367 |         //     col_desc->c_data_type = SQL_C_DBCLOB_LOCATOR;
368 |         //     break;
369 |         // case SQL_CLOB_LOCATOR:
370 |         //     col_desc->c_data_type = SQL_C_CLOB_LOCATOR;
371 |         //     break;
372 |         // case SQL_ROWID:
373 |         //     col_desc->c_data_type = SQL_C_CHAR;
374 |         //     break;
375 |         case SQL_TYPE_DATE:
376 |             col_desc->c_data_type = SQL_C_TYPE_DATE;
377 |             col_desc->length = col_desc->size + sizeof(SQLCHAR);
378 |             break;
379 |         case SQL_TYPE_TIME:
380 |             col_desc->c_data_type = SQL_C_TYPE_TIME;
381 |             col_desc->length = col_desc->size + sizeof(SQLCHAR);
382 |             break;
383 |         case SQL_TYPE_TIMESTAMP:
384 |             col_desc->c_data_type = SQL_C_TYPE_TIMESTAMP;
385 |             col_desc->length = col_desc->size + sizeof(SQLCHAR);
386 |             break;
387 |         // case SQL_XML:
388 |         //     col_desc->c_data_type = SQL_C_BINARY;
389 |         //     break;
390 |         default:
391 |             throw Exception("SqlDataTypeToCDataType() unknown sql_data_type=" +
392 |                             std::to_string(col_desc->sql_data_type));
393 |             break;
394 |         }
395 |     }
396 | };
397 | } // namespace duckdb
--------------------------------------------------------------------------------
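The DECIMAL/NUMERIC branch above flags its buffer-length rule as incorrect: when a decimal is fetched as SQL_C_CHAR text, the buffer must hold every digit plus sign, decimal point, and NUL terminator, which the `size + 1` rule undercounts. A conservative sketch of the arithmetic (the helper name and padding rule are illustrative, not part of this codebase):

```cpp
#include <algorithm>
#include <cstddef>

// Worst-case character length of DECIMAL(precision, scale) rendered as text,
// e.g. DECIMAL(5, 2) -> "-123.45" -> 5 digits + sign + point + NUL = 8 bytes.
// max() guards against drivers that report scale > precision (e.g. "0.0042").
static std::size_t DecimalCharLength(std::size_t precision, std::size_t scale) {
    std::size_t digits = std::max(precision, scale + 1); // leading zero before the point
    return digits + 3;                                   // '-' + '.' + '\0'
}
```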
 /src/include/odbc_scan.hpp: --------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include "odbc.hpp"
4 |
5 | #include "duckdb.hpp"
6 | #include "duckdb/common/exception_format_value.hpp"
7 | #include "duckdb/function/table_function.hpp"
8 |
9 | #include "sql.h"
10 | #include "sqlext.h"
11 |
12 | #include <cstdint>
13 | #include <cstring>
14 | #include <vector>
15 |
16 | namespace duckdb {
17 | struct OdbcScanBindData : public FunctionData {
18 |     string connection_string;
19 |     string schema_name;
20 |     string table_name;
21 |     shared_ptr<OdbcEnvironment> environment;
22 |     shared_ptr<OdbcConnection> connection;
23 |     unique_ptr<OdbcStatement> statement;
24 |     unique_ptr<OdbcStatementOptions> statement_opts;
25 |
26 |     vector<string> names;
27 |     vector<LogicalType> types;
28 |     vector<OdbcColumnDescription> column_descriptions;
29 |
30 | public:
31 |     unique_ptr<FunctionData> Copy() const override { throw NotImplementedException(""); }
32 |     bool Equals(const FunctionData &other) const override { throw NotImplementedException(""); }
33 | };
34 |
35 | struct OdbcColumnBinding {
36 |     OdbcColumnBinding(OdbcColumnDescription col_desc, SQLINTEGER row_array_size) {
37 |         column_buffer_length = col_desc.length;
38 |         sql_data_type = col_desc.sql_data_type;
39 |         c_data_type = col_desc.c_data_type;
40 |
41 |         strlen_or_ind = new SQLLEN[row_array_size];
42 |         memset(strlen_or_ind, 0, row_array_size * sizeof(SQLLEN));
43 |
44 |         buffer = new unsigned char[row_array_size * column_buffer_length];
45 |         memset(buffer, 0, row_array_size * column_buffer_length);
46 |     }
47 |     ~OdbcColumnBinding() {
48 |         // TODO:
49 |         //  - why does freeing these cause a segfault? (likely because the implicitly generated copy constructor shares these raw pointers, so copies made as the vector grows would double free them)
50 |         //  - should I copy the values when setting the DuckDB output?
51 |         // delete[] strlen_or_ind;
52 |         // delete[] buffer;
53 |     }
54 |
55 |     SQLULEN column_buffer_length;
56 |     SQLSMALLINT sql_data_type;
57 |     SQLSMALLINT c_data_type;
58 |     SQLLEN *strlen_or_ind;
59 |     unsigned char *buffer;
60 | };
61 |
62 | struct OdbcScanLocalState : public LocalTableFunctionState {
63 |     OdbcScanLocalState(SQLINTEGER _row_array_size)
64 |         : offset(0), row_status(vector<SQLUSMALLINT>(_row_array_size)) {}
65 |
66 |     idx_t offset;
67 |     vector<SQLUSMALLINT> row_status;
68 |     vector<OdbcColumnBinding> column_bindings;
69 | };
70 |
71 | struct OdbcScanGlobalState : public GlobalTableFunctionState {
72 |     OdbcScanGlobalState() {}
73 | };
74 |
75 | class OdbcScanFunction : public TableFunction {
76 | public:
77 |     OdbcScanFunction();
78 | };
79 | } // namespace duckdb
-------------------------------------------------------------------------------- /src/include/odbc_scanner_extension.hpp: --------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include "duckdb.hpp"
4 |
5 | namespace duckdb {
6 | class Odbc_scannerExtension : public Extension {
7 | public:
8 |     void Load(DuckDB &db) override;
9 |     std::string Name() override;
10 | };
11 | } // namespace duckdb
12 |
-------------------------------------------------------------------------------- /src/odbc_scan.cpp: --------------------------------------------------------------------------------
1 | #include "odbc_scan.hpp"
2 |
3 | #include "duckdb.hpp"
4 |
5 | #include "duckdb/function/table_function.hpp"
6 |
7 | namespace duckdb {
8 | static LogicalType OdbcColumnToDuckDBLogicalType(OdbcColumnDescription col_desc) {
9 |     if (col_desc.sql_data_type == SQL_CHAR) {
10 |         return LogicalType::VARCHAR;
11 |     }
12 |     if (col_desc.sql_data_type == SQL_VARCHAR) {
13 |         return LogicalType::VARCHAR;
14 |     }
15 |     if (col_desc.sql_data_type == SQL_LONGVARCHAR) {
16 |         return LogicalType::VARCHAR;
17 |     }
18 |     // // TODO:
19 |     // //  - how should unicode variable length character strings be handled?
20 |     // //  - VARCHAR_COLLATION(...)?
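    // //  - one plausible route (untested here): bind SQL_C_WCHAR buffers and
    // //    convert UTF-16 to UTF-8 before handing the strings to DuckDB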
21 | // if (col_desc.data_type == SQL_WCHAR) { 22 | // return LogicalType::VARCHAR; 23 | // } 24 | // if (col_desc.data_type == SQL_WVARCHAR) { 25 | // return LogicalType::VARCHAR; 26 | // } 27 | // if (col_desc.data_type == SQL_WLONGVARCHAR) { 28 | // return LogicalType::VARCHAR; 29 | // } 30 | if (col_desc.sql_data_type == SQL_DECIMAL) { 31 | return LogicalType::DECIMAL(col_desc.size, col_desc.decimal_digits); 32 | } 33 | if (col_desc.sql_data_type == SQL_NUMERIC) { 34 | return LogicalType::DECIMAL(col_desc.size, col_desc.decimal_digits); 35 | } 36 | if (col_desc.sql_data_type == SQL_SMALLINT) { 37 | return LogicalType::SMALLINT; 38 | } 39 | if (col_desc.sql_data_type == SQL_INTEGER) { 40 | return LogicalType::INTEGER; 41 | } 42 | if (col_desc.sql_data_type == SQL_REAL) { 43 | return LogicalType::FLOAT; 44 | } 45 | if (col_desc.sql_data_type == SQL_FLOAT) { 46 | return LogicalType::FLOAT; 47 | } 48 | if (col_desc.sql_data_type == SQL_DOUBLE) { 49 | return LogicalType::DOUBLE; 50 | } 51 | if (col_desc.sql_data_type == SQL_BIT) { 52 | return LogicalType::BIT; 53 | } 54 | if (col_desc.sql_data_type == SQL_TINYINT) { 55 | return LogicalType::TINYINT; 56 | } 57 | if (col_desc.sql_data_type == SQL_BIGINT) { 58 | return LogicalType::BIGINT; 59 | } 60 | if (col_desc.sql_data_type == SQL_BINARY) { 61 | // DuckDB doesn't support FIXED_SIZE_BINARY yet 62 | // https://github.com/duckdb/duckdb/blob/master/src/include/duckdb/common/types.hpp#L125 63 | return LogicalType::BLOB; 64 | } 65 | if (col_desc.sql_data_type == SQL_VARBINARY) { 66 | // DuckDB doesn't support variable length BINARY yet 67 | // https://github.com/duckdb/duckdb/blob/master/src/include/duckdb/common/types.hpp#L122 68 | return LogicalType::BLOB; 69 | } 70 | if (col_desc.sql_data_type == SQL_LONGVARBINARY) { 71 | // DuckDB doesn't support variable length BINARY yet 72 | // https://github.com/duckdb/duckdb/blob/master/src/include/duckdb/common/types.hpp#L122 73 | return LogicalType::BLOB; 74 | } 75 | if (col_desc.sql_data_type == SQL_TYPE_DATE) { 76 | return LogicalType::DATE; 77 | } 78 | if (col_desc.sql_data_type == SQL_TYPE_TIME) { 79 | return LogicalType::TIME; 80 | } 81 | if (col_desc.sql_data_type == SQL_TYPE_TIMESTAMP) { 82 | return LogicalType::TIMESTAMP; 83 | } 84 | // TODO: 85 | // - handle the following remaining date/time/interval types 86 | // SQL_TYPE_UTCDATETIME UTCDATETIME Year, month, day, hour, minute, 87 | // second, utchour, and utcminute fields. The utchour and utcminute fields 88 | // have 1/10 microsecond precision. SQL_TYPE_UTCTIME UTCTIME Hour, minute, 89 | // second, utchour, and utcminute fields. The utchour and utcminute fields 90 | // have 1/10 microsecond precision.. 91 | // SQL_INTERVAL_MONTH[7] INTERVAL MONTH(p) Number of months between 92 | // two dates; p is the interval leading precision. SQL_INTERVAL_YEAR[7] 93 | // INTERVAL YEAR(p) Number of years between two dates; p is the interval 94 | // leading precision. SQL_INTERVAL_YEAR_TO_MONTH[7] INTERVAL YEAR(p) TO 95 | // MONTH Number of years and months between two dates; p is the interval 96 | // leading precision. SQL_INTERVAL_DAY[7] INTERVAL DAY(p) Number of days 97 | // between two dates; p is the interval leading precision. 98 | // SQL_INTERVAL_HOUR[7] INTERVAL HOUR(p) Number of hours between two 99 | // date/times; p is the interval leading precision. SQL_INTERVAL_MINUTE[7] 100 | // INTERVAL MINUTE(p) Number of minutes between two date/times; p is the 101 | // interval leading precision. 
SQL_INTERVAL_SECOND[7] INTERVAL SECOND(p,q)
102 |     // Number of seconds between two date/times; p is the interval leading
103 |     // precision and q is the interval seconds precision.
104 |     // SQL_INTERVAL_DAY_TO_HOUR[7] INTERVAL DAY(p) TO HOUR Number of
105 |     // days/hours between two date/times; p is the interval leading precision.
106 |     // SQL_INTERVAL_DAY_TO_MINUTE[7] INTERVAL DAY(p) TO MINUTE Number
107 |     // of days/hours/minutes between two date/times; p is the interval leading
108 |     // precision. SQL_INTERVAL_DAY_TO_SECOND[7] INTERVAL DAY(p) TO SECOND(q)
109 |     // Number of days/hours/minutes/seconds between two date/times; p is the
110 |     // interval leading precision and q is the interval seconds precision.
111 |     // SQL_INTERVAL_HOUR_TO_MINUTE[7] INTERVAL HOUR(p) TO MINUTE Number of
112 |     // hours/minutes between two date/times; p is the interval leading precision.
113 |     // SQL_INTERVAL_HOUR_TO_SECOND[7] INTERVAL HOUR(p) TO SECOND(q) Number of
114 |     // hours/minutes/seconds between two date/times; p is the interval leading
115 |     // precision and q is the interval seconds precision.
116 |     // SQL_INTERVAL_MINUTE_TO_SECOND[7] INTERVAL MINUTE(p) TO SECOND(q) Number
117 |     // of minutes/seconds between two date/times; p is the interval leading
118 |     // precision and q is the interval seconds precision.
119 |     if (col_desc.sql_data_type == SQL_GUID) {
120 |         return LogicalType::UUID;
121 |     }
122 |
123 |     return LogicalType::INVALID;
124 | }
125 |
126 | static void OdbcScan(ClientContext &context, TableFunctionInput &data, DataChunk &output) {
127 |     auto &bind_data = data.bind_data->Cast<OdbcScanBindData>();
128 |     auto &local_state = data.local_state->Cast<OdbcScanLocalState>();
129 |
130 |     auto rows_fetched = bind_data.statement->Fetch();
131 |     if (rows_fetched == 0) {
132 |         // finished returning values
133 |         return;
134 |     }
135 |
136 |     for (auto r = 0; r < rows_fetched; r++) {
137 |         auto row_status = local_state.row_status[r];
138 |         if ((row_status == SQL_ROW_SUCCESS) || (row_status == SQL_ROW_SUCCESS_WITH_INFO)) {
139 |             for (idx_t c = 0; c < local_state.column_bindings.size(); c++) {
140 |                 auto column_binding = &local_state.column_bindings.at(c);
141 |                 auto buffer = &column_binding->buffer[r * column_binding->column_buffer_length];
142 |
143 |                 switch (column_binding->sql_data_type) {
144 |                 case SQL_SMALLINT:
145 |                     output.SetValue(c, local_state.offset, Value(*(std::int16_t *)buffer));
146 |                     break;
147 |                 case SQL_INTEGER:
148 |                     output.SetValue(c, local_state.offset, Value(*(std::int32_t *)buffer));
149 |                     break;
150 |                 case SQL_BIGINT:
151 |                     output.SetValue(c, local_state.offset, Value(*(std::int64_t *)buffer));
152 |                     break;
153 |                 case SQL_DOUBLE:
154 |                 case SQL_FLOAT:
155 |                     output.SetValue(c, local_state.offset, Value(*(double *)buffer));
156 |                     break;
157 |                 case SQL_DECIMAL:
158 |                 case SQL_NUMERIC:
159 |                     output.SetValue(c, local_state.offset, Value((char *)buffer));
160 |                     break;
161 |                 case SQL_CHAR:
162 |                 // case SQL_CLOB:
163 |                 case SQL_VARCHAR:
164 |                 case SQL_LONGVARCHAR: {
165 |                     output.SetValue(c, local_state.offset, Value((char *)buffer));
166 |                     break;
167 |                 }
168 |                 case SQL_BINARY:
169 |                 // case SQL_BLOB:
170 |                 case SQL_VARBINARY:
171 |                 case SQL_LONGVARBINARY:
172 |                     output.SetValue(c, local_state.offset, Value((char *)buffer));
173 |                     break;
174 |                 default:
175 |                     throw Exception("OdbcScanFunction#OdbcScan() unhandled output "
176 |                                     "mapping from ODBC to DuckDB sql_data_type=" +
177 |                                     std::to_string(column_binding->sql_data_type) +
178 |                                     ", c_data_type=" + std::to_string(column_binding->c_data_type));
179 |                 }
180 |             }
181 |         } else if (row_status == SQL_ROW_NOROW) {
182 |             throw Exception("OdbcScanFunction#OdbcScan() row status=" + std::to_string(row_status) +
183 |                             " SQL_ROW_NOROW");
184 |         } else if (row_status == SQL_ROW_ERROR) {
185 |             throw Exception("OdbcScanFunction#OdbcScan() row status=" + std::to_string(row_status) +
186 |                             " SQL_ROW_ERROR");
187 |         } else if (row_status == SQL_ROW_PROCEED) {
188 |             throw Exception("OdbcScanFunction#OdbcScan() row status=" + std::to_string(row_status) +
189 |                             " SQL_ROW_PROCEED");
190 |         } else if (row_status == SQL_ROW_IGNORE) {
191 |             throw Exception("OdbcScanFunction#OdbcScan() row status=" + std::to_string(row_status) +
192 |                             " SQL_ROW_IGNORE");
193 |         } else {
194 |             throw Exception("OdbcScanFunction#OdbcScan() row status=" + std::to_string(row_status) +
195 |                             " SQL_ROW_UNKNOWN");
196 |         }
197 |
198 |         // TODO:
199 |         //  - handle STANDARD_VECTOR_SIZE
200 |         local_state.offset++;
201 |         output.SetCardinality(local_state.offset);
202 |     }
203 | }
204 |
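// NOTE on the STANDARD_VECTOR_SIZE TODO above: a DataChunk holds at most
// STANDARD_VECTOR_SIZE rows, so the per-chunk shape would reset the offset on
// every OdbcScan() call rather than letting it grow across calls. A sketch of
// the intended shape (not the current behavior):
//
//   local_state.offset = 0;
//   for (auto r = 0; r < rows_fetched; r++) {
//       ... copy row r into the chunk ...
//       local_state.offset++;
//   }
//   output.SetCardinality(local_state.offset);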
205 | static unique_ptr<FunctionData> OdbcScanBind(ClientContext &context, TableFunctionBindInput &input,
206 |                                              vector<LogicalType> &return_types, vector<string> &names) {
207 |     auto bind_data = make_uniq<OdbcScanBindData>();
208 |     bind_data->connection_string = input.inputs[0].GetValue<string>();
209 |     bind_data->schema_name = input.inputs[1].GetValue<string>();
210 |     bind_data->table_name = input.inputs[2].GetValue<string>();
211 |
212 |     bind_data->environment = make_shared<OdbcEnvironment>();
213 |     bind_data->environment->Init();
214 |
215 |     bind_data->connection = make_shared<OdbcConnection>();
216 |     bind_data->connection->Init(bind_data->environment);
217 |     bind_data->connection->Dial(bind_data->connection_string);
218 |
219 |     bind_data->statement = make_uniq<OdbcStatement>(bind_data->connection);
220 |     bind_data->statement->Init();
221 |
222 |     string sql_statement = "SELECT * FROM ";
223 |     if (!bind_data->schema_name.empty()) {
224 |         sql_statement += bind_data->schema_name + ".";
225 |     }
226 |     sql_statement += bind_data->table_name;
227 |     bind_data->statement->Prepare(sql_statement);
228 |
229 |     auto columns = bind_data->statement->DescribeColumns();
230 |     for (idx_t i = 0; i < columns.size(); i++) {
231 |         auto duckdb_type = OdbcColumnToDuckDBLogicalType(columns[i]);
232 |         bind_data->column_descriptions.push_back(columns[i]);
233 |         bind_data->names.push_back(string((char *)columns[i].name));
234 |         bind_data->types.push_back(duckdb_type);
235 |     }
236 |
237 |     // bind_data->statement_opts = make_uniq<OdbcStatementOptions>(1);
238 |     // bind_data->statement_opts = make_uniq<OdbcStatementOptions>(2);
239 |     // bind_data->statement_opts =
240 |     //     make_uniq<OdbcStatementOptions>(STANDARD_VECTOR_SIZE * 2);
241 |     bind_data->statement_opts = make_uniq<OdbcStatementOptions>(STANDARD_VECTOR_SIZE);
242 |     bind_data->statement->Execute(bind_data->statement_opts);
243 |
244 |     names = bind_data->names;
245 |     return_types = bind_data->types;
246 |
247 |     return std::move(bind_data);
248 | }
249 |
250 | static unique_ptr<GlobalTableFunctionState> OdbcScanInitGlobalState(ClientContext &context,
251 |                                                                     TableFunctionInitInput &input) {
252 |     return make_uniq<OdbcScanGlobalState>();
253 | }
254 |
255 | static unique_ptr<LocalTableFunctionState> OdbcScanInitLocalState(ExecutionContext &context,
256 |                                                                   TableFunctionInitInput &input,
257 |                                                                   GlobalTableFunctionState *global_state) {
258 |     auto &bind_data = input.bind_data->Cast<OdbcScanBindData>();
259 |     auto row_array_size = bind_data.statement_opts->row_array_size;
260 |     auto local_state = make_uniq<OdbcScanLocalState>(row_array_size);
261 |
262 |     bind_data.statement->SetAttribute(SQL_ATTR_ROW_STATUS_PTR, (SQLPOINTER)&local_state->row_status[0]);
263 |
264 |     for (SQLSMALLINT c = 0; c < bind_data.column_descriptions.size(); c++) {
265 |         auto col_desc = bind_data.column_descriptions.at(c);
266 |
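        // bind once per query: SQLBindCol registers each binding's arrays with
        // the driver, and every subsequent Fetch() writes the next rowset
        // straight into them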
local_state->column_bindings.emplace_back(col_desc, row_array_size); 268 | auto column_binding = &local_state->column_bindings.at(c); 269 | bind_data.statement->BindColumn(c + 1, column_binding->c_data_type, column_binding->buffer, 270 | column_binding->column_buffer_length, column_binding->strlen_or_ind); 271 | } 272 | 273 | return std::move(local_state); 274 | } 275 | 276 | static string OdbcScanToString(const FunctionData *bind_data_p) { 277 | D_ASSERT(bind_data_p); 278 | 279 | auto bind_data = (const OdbcScanBindData *)bind_data_p; 280 | return bind_data->table_name; 281 | } 282 | 283 | OdbcScanFunction::OdbcScanFunction() 284 | : TableFunction("odbc_scan", {LogicalType::VARCHAR, LogicalType::VARCHAR, LogicalType::VARCHAR}, OdbcScan, 285 | OdbcScanBind, OdbcScanInitGlobalState, OdbcScanInitLocalState) { 286 | to_string = OdbcScanToString; 287 | // projection_pushdown = true; 288 | } 289 | } // namespace duckdb 290 | -------------------------------------------------------------------------------- /src/odbc_scanner_extension.cpp: -------------------------------------------------------------------------------- 1 | #define DUCKDB_EXTENSION_MAIN 2 | 3 | #include "odbc_scanner_extension.hpp" 4 | #include "odbc_scan.hpp" 5 | 6 | #include "duckdb.hpp" 7 | 8 | #include "duckdb/common/exception.hpp" 9 | #include "duckdb/common/string_util.hpp" 10 | #include "duckdb/function/scalar_function.hpp" 11 | #include "duckdb/function/table_function.hpp" 12 | #include "duckdb/main/extension_util.hpp" 13 | 14 | #include "duckdb/parser/parsed_data/create_scalar_function_info.hpp" 15 | #include "duckdb/parser/parsed_data/create_table_function_info.hpp" 16 | 17 | namespace duckdb { 18 | static void LoadInternal(DatabaseInstance &instance) { 19 | // table functions 20 | Connection con(instance); 21 | con.BeginTransaction(); 22 | auto &context = *con.context; 23 | auto &catalog = Catalog::GetSystemCatalog(context); 24 | 25 | OdbcScanFunction odbc_scan_fun; 26 | CreateTableFunctionInfo odbc_scan_info(odbc_scan_fun); 27 | catalog.CreateTableFunction(context, odbc_scan_info); 28 | 29 | con.Commit(); 30 | } 31 | 32 | void Odbc_scannerExtension::Load(DuckDB &db) { LoadInternal(*db.instance); } 33 | std::string Odbc_scannerExtension::Name() { return "odbc_scanner"; } 34 | } // namespace duckdb 35 | 36 | extern "C" { 37 | DUCKDB_EXTENSION_API void odbc_scanner_init(duckdb::DatabaseInstance &db) { LoadInternal(db); } 38 | 39 | DUCKDB_EXTENSION_API const char *odbc_scanner_version() { return duckdb::DuckDB::LibraryVersion(); } 40 | } 41 | 42 | #ifndef DUCKDB_EXTENSION_MAIN 43 | #error DUCKDB_EXTENSION_MAIN not defined 44 | #endif 45 | -------------------------------------------------------------------------------- /templates/.clang-format.template: -------------------------------------------------------------------------------- 1 | ColumnLimit: 110 2 | -------------------------------------------------------------------------------- /templates/.clangd.template: -------------------------------------------------------------------------------- 1 | CompileFlags: 2 | Add: 3 | - -I${PWD}/src/include 4 | - -I${UNIX_ODBC_DIR}/include 5 | - -I${PWD}/duckdb/src/include 6 | - -I${PWD}/duckdb/third_party/re2 7 | - -I${PWD}/duckdb/third_party/parquet 8 | - -I${PWD}/duckdb/third_party/thrift 9 | - -I${PWD}/duckdb/third_party/utf8proc 10 | - -I${PWD}/duckdb/third_party/tools/sqlite3_api_wrapper/include 11 | - -I${PWD}/duckdb/third_party/tools/sqlite3_api_wrapper/sqlite3_udf_api/include 12 | - -I${PWD}/duckdb/third_party/imdb/include 13 
| -------------------------------------------------------------------------------- /templates/.odbc.ini.template: --------------------------------------------------------------------------------
1 | [db2 odbctest]
2 | Driver = db2
3 |
4 | [postgres odbc_test]
5 | Driver = postgres
6 |
-------------------------------------------------------------------------------- /templates/.odbcinst.ini.template: --------------------------------------------------------------------------------
1 | [db2]
2 | Driver = ${DB2_DRIVER_PATH}
3 |
4 | [postgres]
5 | Driver = ${POSTGRES_DRIVER_PATH}
6 |
-------------------------------------------------------------------------------- /test/README.md: --------------------------------------------------------------------------------
1 | # Testing the quack extension
2 | This directory contains all the tests for the quack extension. The `sql` directory holds tests that are written as [SQLLogicTests](https://duckdb.org/dev/sqllogictest/intro.html). DuckDB aims to have most of its tests in this format as SQL statements, so for the quack extension, this should probably be the goal too. However, client-specific testing is also available.
3 |
4 | The root makefile contains targets to build and run all of these tests. To run the SQLLogicTests:
5 | ```bash
6 | make test
7 | ```
8 |
9 | To run the python tests:
10 | ```bash
11 | make test_python
12 | ```
13 |
14 | For other client tests, check the makefile in the root of this repository.
-------------------------------------------------------------------------------- /test/nodejs/analyze_record_test.js: --------------------------------------------------------------------------------
1 | var duckdb = require('../../duckdb/tools/nodejs');
2 | var assert = require('assert');
3 |
4 | describe(`analyze_record`, () => {
5 |     let db;
6 |     let conn;
7 |     before((done) => {
8 |         db = new duckdb.Database(':memory:');
9 |         conn = new duckdb.Connection(db);
10 |         done();
11 |     });
12 |
13 |     it('function should return expected constant', function (done) {
14 |         db.all("SELECT analyze_record('Sam') as value;", function (err, res) {
15 |             if (err) throw err;
16 |             assert.deepEqual(res, [{value: "TODO: analyze_record Sam 🐥"}]);
17 |             done();
18 |         });
19 |     });
20 | });
21 |
-------------------------------------------------------------------------------- /test/nodejs/detect_record_test.js: --------------------------------------------------------------------------------
1 | var duckdb = require('../../duckdb/tools/nodejs');
2 | var assert = require('assert');
3 |
4 | describe(`detect_record`, () => {
5 |     let db;
6 |     let conn;
7 |     before((done) => {
8 |         db = new duckdb.Database(':memory:');
9 |         conn = new duckdb.Connection(db);
10 |         done();
11 |     });
12 |
13 |     it('function should return expected constant', function (done) {
14 |         db.all("SELECT detect_record('Sam') as value;", function (err, res) {
15 |             if (err) throw err;
16 |             assert.deepEqual(res, [{value: "TODO: detect_record Sam 🐥"}]);
17 |             done();
18 |         });
19 |     });
20 | });
21 |
-------------------------------------------------------------------------------- /test/nodejs/read_record_test.js: --------------------------------------------------------------------------------
1 | var duckdb = require('../../duckdb/tools/nodejs');
2 | var assert = require('assert');
3 |
4 | describe(`read_record`, () => {
5 |     let db;
6 |     let conn;
7 |     before((done) => {
8 |         db = new duckdb.Database(':memory:');
9 |         conn = new duckdb.Connection(db);
10 |         done();
11 |     });
12 |
13 |     it('function should return expected constant',
function (done) { 14 | db.all("SELECT read_record('Sam') as value;", function (err, res) { 15 | if (err) throw err; 16 | assert.deepEqual(res, [{value: "TODO: read_record Sam 🐥"}]); 17 | done(); 18 | }); 19 | }); 20 | }); 21 | -------------------------------------------------------------------------------- /test/nodejs/write_record_test.js: -------------------------------------------------------------------------------- 1 | var duckdb = require('../../duckdb/tools/nodejs'); 2 | var assert = require('assert'); 3 | 4 | describe(`write_record`, () => { 5 | let db; 6 | let conn; 7 | before((done) => { 8 | db = new duckdb.Database(':memory:'); 9 | conn = new duckdb.Connection(db); 10 | done(); 11 | }); 12 | 13 | it('function should return expected constant', function (done) { 14 | db.all("SELECT write_record('Sam') as value;", function (err, res) { 15 | if (err) throw err; 16 | assert.deepEqual(res, [{value: "TODO: write_record Sam 🐥"}]); 17 | done(); 18 | }); 19 | }); 20 | }); 21 | -------------------------------------------------------------------------------- /test/python/analyze_record.py: -------------------------------------------------------------------------------- 1 | import duckdb 2 | 3 | def test_analyze_record(): 4 | conn = duckdb.connect(''); 5 | conn.execute("SELECT analyze_record('Sam') as value;"); 6 | res = conn.fetchall() 7 | assert(res[0][0] == "TODO: analyze_record Sam 🐥"); 8 | -------------------------------------------------------------------------------- /test/python/detect_record_test.py: -------------------------------------------------------------------------------- 1 | import duckdb 2 | 3 | def test_detect_record(): 4 | conn = duckdb.connect(''); 5 | conn.execute("SELECT detect_record('Sam') as value;"); 6 | res = conn.fetchall() 7 | assert(res[0][0] == "TODO: detect_record Sam 🐥"); 8 | -------------------------------------------------------------------------------- /test/python/read_record_test.py: -------------------------------------------------------------------------------- 1 | import duckdb 2 | 3 | def test_read_record(): 4 | conn = duckdb.connect(''); 5 | conn.execute("SELECT read_record('Sam') as value;"); 6 | res = conn.fetchall() 7 | assert(res[0][0] == "TODO: read_record Sam 🐥"); 8 | -------------------------------------------------------------------------------- /test/python/write_record_test.py: -------------------------------------------------------------------------------- 1 | import duckdb 2 | 3 | def test_write_record(): 4 | conn = duckdb.connect(''); 5 | conn.execute("SELECT write_record('Sam') as value;"); 6 | res = conn.fetchall() 7 | assert(res[0][0] == "TODO: write_record Sam 🐥"); 8 | -------------------------------------------------------------------------------- /test/sql/odbc_scan_big_query.test: -------------------------------------------------------------------------------- 1 | # # name: test/sql/odbc_scan_big_query.test 2 | # # description: test odbc_scanner extension 3 | # # group: [odbc_scan] 4 | # 5 | # # Before we load the extension, this will fail 6 | # statement error 7 | # SELECT * FROM odbc_scan( 8 | # 'Driver=/opt/homebrew/Cellar/psqlodbc/15.00.0000/lib/psqlodbca.so;Server=localhost;Database=odbc_scanner_duckdb_extension_test;Uid=big_query;Pwd=password;Port=5432', 9 | # 'db2inst1', 10 | # 'people' 11 | # ); 12 | # ---- 13 | # Catalog Error: Table Function with name odbc_scan does not exist! 
14 | # 15 | # # Require statement will ensure this test is run with this extension loaded 16 | # require odbc_scanner 17 | # 18 | # # Confirm the extension works 19 | # query I 20 | # SELECT * FROM odbc_scan( 21 | # 'Driver=/opt/homebrew/Cellar/psqlodbc/15.00.0000/lib/psqlodbca.so;Server=localhost;Database=odbc_scanner_duckdb_extension_test;Uid=big_query;Pwd=password;Port=5432', 22 | # 'db2inst1', 23 | # 'people' 24 | # ); 25 | # ---- 26 | # Lebron James 37 100.10 27 | # Spiderman 25 200.20 28 | # Wonder Woman 21 300.30 29 | # David Bowie 68 400.40 30 | -------------------------------------------------------------------------------- /test/sql/odbc_scan_db2.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/odbc_scan_db2.test 2 | # description: test odbc_scanner extension 3 | # group: [odbc_scan] 4 | 5 | # Before we load the extension, this will fail 6 | statement error 7 | SELECT * FROM odbc_scan( 8 | 'DSN={db2 odbctest};Hostname=localhost;Database=odbctest;Uid=db2inst1;Pwd=password;Port=50000', 9 | 'DB2INST1', 10 | 'PEOPLE' 11 | ) 12 | ORDER BY salary ASC; 13 | ---- 14 | Catalog Error: Table Function with name odbc_scan does not exist! 15 | 16 | # Require statement will ensure this test is run with this extension loaded 17 | require odbc_scanner 18 | 19 | # Confirm the extension works 20 | query III 21 | SELECT * FROM odbc_scan( 22 | 'DSN={db2 odbctest};Hostname=localhost;Database=odbctest;Uid=db2inst1;Pwd=password;Port=50000', 23 | 'DB2INST1', 24 | 'PEOPLE' 25 | ) 26 | ORDER BY salary ASC; 27 | ---- 28 | Lebron James 37 100.1 29 | Spiderman 25 200.2 30 | Wonder Woman 21 300.3 31 | David Bowie 69 400.4 32 | -------------------------------------------------------------------------------- /test/sql/odbc_scan_mariadb.test: -------------------------------------------------------------------------------- 1 | # # name: test/sql/odbc_scan_mariadb.test 2 | # # description: test odbc_scanner extension 3 | # # group: [odbc_scan] 4 | # 5 | # # Before we load the extension, this will fail 6 | # statement error 7 | # SELECT * FROM odbc_scan( 8 | # 'Driver=/opt/homebrew/Cellar/psqlodbc/15.00.0000/lib/psqlodbca.so;Server=localhost;Database=odbc_scanner_duckdb_extension_test;Uid=mariadb;Pwd=password;Port=5432', 9 | # 'db2inst1', 10 | # 'people' 11 | # ); 12 | # ---- 13 | # Catalog Error: Table Function with name odbc_scan does not exist! 
14 | # 15 | # # Require statement will ensure this test is run with this extension loaded 16 | # require odbc_scanner 17 | # 18 | # # Confirm the extension works 19 | # query I 20 | # SELECT * FROM odbc_scan( 21 | # 'Driver=/opt/homebrew/Cellar/psqlodbc/15.00.0000/lib/psqlodbca.so;Server=localhost;Database=odbc_scanner_duckdb_extension_test;Uid=mariadb;Pwd=password;Port=5432', 22 | # 'db2inst1', 23 | # 'people' 24 | # ); 25 | # ---- 26 | # Lebron James 27 | # Spiderman 28 | # Wonder Woman 29 | # David Bowie 30 | -------------------------------------------------------------------------------- /test/sql/odbc_scan_mongo_db.test: -------------------------------------------------------------------------------- 1 | # # name: test/sql/odbc_scan_mongo_db.test 2 | # # description: test odbc_scanner extension 3 | # # group: [odbc_scan] 4 | # 5 | # # Before we load the extension, this will fail 6 | # statement error 7 | # SELECT * FROM odbc_scan( 8 | # 'Driver=/opt/homebrew/Cellar/psqlodbc/15.00.0000/lib/psqlodbca.so;Server=localhost;Database=odbc_scanner_duckdb_extension_test;Uid=mongo_db;Pwd=password;Port=5432', 9 | # 'db2inst1', 10 | # 'people' 11 | # ); 12 | # ---- 13 | # Catalog Error: Table Function with name odbc_scan does not exist! 14 | # 15 | # # Require statement will ensure this test is run with this extension loaded 16 | # require odbc_scanner 17 | # 18 | # # Confirm the extension works 19 | # query I 20 | # SELECT * FROM odbc_scan( 21 | # 'Driver=/opt/homebrew/Cellar/psqlodbc/15.00.0000/lib/psqlodbca.so;Server=localhost;Database=odbc_scanner_duckdb_extension_test;Uid=mongo_db;Pwd=password;Port=5432', 22 | # 'db2inst1', 23 | # 'people' 24 | # ); 25 | # ---- 26 | # Lebron James 37 100.10 27 | # Spiderman 25 200.20 28 | # Wonder Woman 21 300.30 29 | # David Bowie 68 400.40 30 | -------------------------------------------------------------------------------- /test/sql/odbc_scan_mssql.test: -------------------------------------------------------------------------------- 1 | # # name: test/sql/odbc_scan_mssql.test 2 | # # description: test odbc_scanner extension 3 | # # group: [odbc_scan] 4 | # 5 | # # Before we load the extension, this will fail 6 | # statement error 7 | # SELECT * FROM odbc_scan( 8 | # 'Driver=/opt/homebrew/Cellar/psqlodbc/15.00.0000/lib/psqlodbca.so;Server=localhost;Database=odbc_scanner_duckdb_extension_test;Uid=mssql;Pwd=password;Port=5432', 9 | # 'db2inst1', 10 | # 'people' 11 | # ); 12 | # ---- 13 | # Catalog Error: Table Function with name odbc_scan does not exist! 
14 | # 15 | # # Require statement will ensure this test is run with this extension loaded 16 | # require odbc_scanner 17 | # 18 | # # Confirm the extension works 19 | # query I 20 | # SELECT * FROM odbc_scan( 21 | # 'Driver=/opt/homebrew/Cellar/psqlodbc/15.00.0000/lib/psqlodbca.so;Server=localhost;Database=odbc_scanner_duckdb_extension_test;Uid=mssql;Pwd=password;Port=5432', 22 | # 'db2inst1', 23 | # 'people' 24 | # ); 25 | # ---- 26 | # Lebron James 27 | # Spiderman 28 | # Wonder Woman 29 | # David Bowie 30 | -------------------------------------------------------------------------------- /test/sql/odbc_scan_mysql.test: -------------------------------------------------------------------------------- 1 | # # name: test/sql/odbc_scan_mysql.test 2 | # # description: test odbc_scanner extension 3 | # # group: [odbc_scan] 4 | # 5 | # # Before we load the extension, this will fail 6 | # statement error 7 | # SELECT * FROM odbc_scan( 8 | # 'Driver=/opt/homebrew/Cellar/psqlodbc/15.00.0000/lib/psqlodbca.so;Server=localhost;Database=odbc_scanner_duckdb_extension_test;Uid=mysql;Pwd=password;Port=5432', 9 | # 'db2inst1', 10 | # 'people' 11 | # ); 12 | # ---- 13 | # Catalog Error: Table Function with name odbc_scan does not exist! 14 | # 15 | # # Require statement will ensure this test is run with this extension loaded 16 | # require odbc_scanner 17 | # 18 | # # Confirm the extension works 19 | # query I 20 | # SELECT * FROM odbc_scan( 21 | # 'Driver=/opt/homebrew/Cellar/psqlodbc/15.00.0000/lib/psqlodbca.so;Server=localhost;Database=odbc_scanner_duckdb_extension_test;Uid=mysql;Pwd=password;Port=5432', 22 | # 'db2inst1', 23 | # 'people' 24 | # ); 25 | # ---- 26 | # Lebron James 37 100.10 27 | # Spiderman 25 200.20 28 | # Wonder Woman 21 300.30 29 | # David Bowie 68 400.40 30 | -------------------------------------------------------------------------------- /test/sql/odbc_scan_oracle.test: -------------------------------------------------------------------------------- 1 | # # name: test/sql/odbc_scan_oracle.test 2 | # # description: test odbc_scanner extension 3 | # # group: [odbc_scan] 4 | # 5 | # # Before we load the extension, this will fail 6 | # # statement error 7 | # # SELECT * FROM odbc_scan( 8 | # # 'Driver=/nix/store/03d2ih9gj622raq02gqzm5971dgvxhwh-oracle-driver-11.5.8/lib/liboracle.dylib;Hostname=localhost;Database=kohls;Uid=oracleinst1;Pwd=password;Port=50000', 9 | # # 'oracleinst1', 10 | # # 'people' 11 | # # ); 12 | # # ---- 13 | # # statement error 14 | # # SELECT * FROM odbc_scan( 15 | # # 'Driver=/opt/homebrew/Cellar/psqlodbc/15.00.0000/lib/psqlodbca.so;Server=localhost;Database=odbc_scanner_duckdb_extension_test;Uid=postgres;Pwd=password;Port=5432', 16 | # # 'oracleinst1', 17 | # # 'people' 18 | # # ); 19 | # # ---- 20 | # # Catalog Error: Table Function with name odbc_scan does not exist! 21 | # statement error 22 | # SELECT * FROM odbc_scan( 23 | # 'Driver=/nix/store/py6m0q4ij50pwjk6a5f18qhhahrvf2sk-oracle-driver-11.5.8/lib/liboracle.so;Hostname=localhost;Database=odbctest;Uid=oracleinst1;Pwd=password;Port=50000', 24 | # 'DB2INST1', 25 | # 'PEOPLE' 26 | # ); 27 | # ---- 28 | # Catalog Error: Table Function with name odbc_scan does not exist! 
29 | # 30 | # # Require statement will ensure this test is run with this extension loaded 31 | # require odbc_scanner 32 | # 33 | # # Confirm the extension works 34 | # # query I 35 | # # SELECT * FROM odbc_scan( 36 | # # 'Driver=/nix/store/03d2ih9gj622raq02gqzm5971dgvxhwh-oracle-driver-11.5.8/lib/liboracle.dylib;Hostname=localhost;Database=kohls;Uid=oracleinst1;Pwd=password;Port=50000', 37 | # # 'oracleinst1', 38 | # # 'people' 39 | # # ); 40 | # # query I 41 | # # SELECT * FROM odbc_scan( 42 | # # 'Driver=/opt/homebrew/Cellar/psqlodbc/15.00.0000/lib/psqlodbca.so;Server=localhost;Database=odbc_scanner_duckdb_extension_test;Uid=postgres;Pwd=password;Port=5432', 43 | # # 'oracleinst1', 44 | # # 'people' 45 | # # ); 46 | # # ---- 47 | # query I 48 | # SELECT * FROM odbc_scan( 49 | # 'Driver=/nix/store/py6m0q4ij50pwjk6a5f18qhhahrvf2sk-oracle-driver-11.5.8/lib/liboracle.so;Hostname=localhost;Database=odbctest;Uid=oracleinst1;Pwd=password;Port=50000', 50 | # 'DB2INST1', 51 | # 'PEOPLE' 52 | # ); 53 | # ---- 54 | # Lebron James 37 100.10 55 | # Spiderman 25 200.20 56 | # Wonder Woman 21 300.30 57 | # David Bowie 68 400.40 58 | -------------------------------------------------------------------------------- /test/sql/odbc_scan_postgres.test: -------------------------------------------------------------------------------- 1 | # name: test/sql/odbc_scan_postgres.test 2 | # description: test odbc_scanner extension 3 | # group: [odbc_scan] 4 | 5 | # Before we load the extension, this will fail 6 | statement error 7 | SELECT * FROM odbc_scan( 8 | 'DSN={postgres odbc_test};Server=localhost;Database=odbc_test;Uid=postgres;Pwd=password;Port=5432', 9 | '', 10 | 'people' 11 | ) 12 | ORDER BY salary ASC; 13 | ---- 14 | Catalog Error: Table Function with name odbc_scan does not exist! 15 | 16 | # Require statement will ensure this test is run with this extension loaded 17 | require odbc_scanner 18 | 19 | # Confirm the extension works 20 | query III 21 | SELECT * FROM odbc_scan( 22 | 'DSN={postgres odbc_test};Server=localhost;Database=odbc_test;Uid=postgres;Pwd=password;Port=5432', 23 | '', 24 | 'people' 25 | ) 26 | ORDER BY salary ASC; 27 | ---- 28 | Lebron James 37 100.1 29 | Spiderman 25 200.2 30 | Wonder Woman 21 300.3 31 | David Bowie 69 400.4 32 | -------------------------------------------------------------------------------- /test/sql/odbc_scan_snowflake.test: -------------------------------------------------------------------------------- 1 | # # name: test/sql/odbc_scan_snowflake.test 2 | # # description: test odbc_scanner extension 3 | # # group: [odbc_scan] 4 | # 5 | # # Before we load the extension, this will fail 6 | # statement error 7 | # SELECT * FROM odbc_scan( 8 | # 'Driver=/opt/homebrew/Cellar/psqlodbc/15.00.0000/lib/psqlodbca.so;Server=localhost;Database=odbc_scanner_duckdb_extension_test;Uid=snowflake;Pwd=password;Port=5432', 9 | # 'db2inst1', 10 | # 'people' 11 | # ); 12 | # ---- 13 | # Catalog Error: Table Function with name odbc_scan does not exist! 
14 | # 15 | # # Require statement will ensure this test is run with this extension loaded 16 | # require odbc_scanner 17 | # 18 | # # Confirm the extension works 19 | # query I 20 | # SELECT * FROM odbc_scan( 21 | # 'Driver=/opt/homebrew/Cellar/psqlodbc/15.00.0000/lib/psqlodbca.so;Server=localhost;Database=odbc_scanner_duckdb_extension_test;Uid=snowflake;Pwd=password;Port=5432', 22 | # 'db2inst1', 23 | # 'people' 24 | # ); 25 | # ---- 26 | # Lebron James 37 100.10 27 | # Spiderman 25 200.20 28 | # Wonder Woman 21 300.30 29 | # David Bowie 68 400.40 30 | --------------------------------------------------------------------------------