├── .editorconfig
├── .github
│   └── workflows
│       ├── codeql.yml
│       └── python-publish.yml
├── .gitignore
├── .vscode
│   ├── launch.json
│   └── settings.json
├── Dockerfile
├── LICENSE
├── README.md
├── requirements-dev.txt
├── requirements-iris.txt
├── requirements.txt
├── scripts
│   └── build-dist.sh
├── setup.cfg
├── setup.py
├── sqlalchemy_iris
│   ├── __init__.py
│   ├── alembic.py
│   ├── base.py
│   ├── embedded.py
│   ├── information_schema.py
│   ├── intersystems
│   │   ├── __init__.py
│   │   └── dbapi.py
│   ├── iris.py
│   ├── irisasync.py
│   ├── provision.py
│   ├── requirements.py
│   └── types.py
├── test-in-docker.sh
├── test.py
├── testiris.py
├── tests
│   ├── conftest.py
│   ├── test_alembic.py
│   └── test_suite.py
└── tox.ini

--------------------------------------------------------------------------------
/.editorconfig:
--------------------------------------------------------------------------------
# EditorConfig is awesome: https://EditorConfig.org

# top-most EditorConfig file
root = true

[*]
indent_style = space
indent_size = 4
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = false
--------------------------------------------------------------------------------
/.github/workflows/codeql.yml:
--------------------------------------------------------------------------------
name: "CodeQL"

on:
  push:
    branches: [ "main" ]
  pull_request:
    branches: [ "main" ]
  schedule:
    - cron: "29 13 * * 1"

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest
    permissions:
      actions: read
      contents: read
      security-events: write

    strategy:
      fail-fast: false
      matrix:
        language: [ python ]

    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Initialize CodeQL
        uses: github/codeql-action/init@v2
        with:
          languages: ${{ matrix.language }}
          queries: +security-and-quality

      - name: Autobuild
        uses: github/codeql-action/autobuild@v2

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v2
        with:
          category: "/language:${{ matrix.language }}"
--------------------------------------------------------------------------------
/.github/workflows/python-publish.yml:
--------------------------------------------------------------------------------
# This workflow will upload a Python Package using Twine when a release is created
# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries

# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.

name: Upload Python Package

on:
  release:
    types: [published]
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  test:
    strategy:
      fail-fast: false
      matrix:
        image:
          - latest-cd
          - latest-preview
        engine:
          - old
          - new
        driver:
          - iris
          - intersystems
    runs-on: ubuntu-latest
    services:
      iris:
        image: containers.intersystems.com/intersystems/iris-community:${{ matrix.image }}
        ports:
          - 1972:1972
        options: >-
          --name iris
          --health-cmd "/usr/irissys/dev/Cloud/ICM/waitReady.sh -m 1"
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.12'
      - name: Install requirements
        run: |
          pip install tox
      - name: Run Tests
        continue-on-error: true
        run: |
          docker exec iris iris session iris -U%SYS '##class(Security.Users).UnExpireUserPasswords("*")'
          tox -e py312-${{ matrix.engine }}-${{ matrix.driver }} -- --dburi iris+${{ matrix.driver }}://_SYSTEM:SYS@localhost:1972/USER --junit-xml=test-results.xml
      - name: Surface failing tests
        if: always()
        uses: pmeier/pytest-results-action@main
        with:
          path: test-results.xml
          summary: true
          display-options: fEX
          fail-on-empty: false
          title: Test results
  deploy:
    needs: test
    if: github.event_name != 'pull_request'
    runs-on: ubuntu-latest
    permissions:
      id-token: write
      contents: write
    steps:
      - uses: actions/checkout@v4
      - run: git fetch --depth=1 origin +refs/tags/*:refs/tags/*
        if: github.event_name == 'push'
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.x'
      - name: Install dependencies
        id: set-version
        run: |
          VERSION=$(grep version setup.cfg | cut -d= -f2 | tr -d '[:blank:]')
          [ $GITHUB_EVENT_NAME == 'push' ] && VERSION+=b && VERSION+=$(($(git tag -l "*$VERSION*" | cut -db -f2 | sort -n | tail -1)+1))
          [ $GITHUB_EVENT_NAME == 'release' ] && VERSION=${{ github.event.release.tag_name }} && VERSION=${VERSION/v/}
          echo VERSION = $VERSION
          sed -ie "s/version = .*/version = $VERSION/" setup.cfg
          python -m pip install --upgrade pip
          pip install build
          echo version=$VERSION >> $GITHUB_OUTPUT
          NAME="sqlalchemy_iris"-${VERSION}-py3-none-any
          echo name=$NAME >> $GITHUB_OUTPUT
      - name: Install requirements
        run: |
          pip install -U pip setuptools wheel \
            -r requirements-dev.txt \
            -r requirements-iris.txt \
            -e .

      - name: Build Python package
        run: ./scripts/build-dist.sh
      - name: Publish package
        uses: pypa/gh-action-pypi-publish@release/v1
      - name: Create Beta Release
        id: create_release
        if: github.event_name == 'push'
        uses: softprops/action-gh-release@v2
        with:
          tag_name: v${{ steps.set-version.outputs.version }}
          prerelease: ${{ github.event_name != 'release' }}
          files: dist/${{ steps.set-version.outputs.name }}.whl
      - uses: actions/checkout@v4
        if: github.event_name == 'release'
        with:
          ref: main
      - name: Bump version
        if: github.event_name == 'release'
        run: |
          git config --global user.name 'ProjectBot'
          git config --global user.email 'bot@users.noreply.github.com'
          VERSION=${{ github.event.release.tag_name }} && VERSION=${VERSION/v/}
          VERSION=`echo $VERSION | awk -F. '/[0-9]+\./{$NF++;print}' OFS=.`
          sed -ie "s/version = .*/version = $VERSION/" setup.cfg
          git add setup.cfg
          git commit -m 'auto bump version with release'
          git push
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
/demo
/*settings.py
# /.vscode
/.idea

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/

*.mac
--------------------------------------------------------------------------------
/.vscode/launch.json:
--------------------------------------------------------------------------------
{
    // Use IntelliSense to learn about possible attributes.
    // Hover to view descriptions of existing attributes.
    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
    "version": "0.2.0",
    "configurations": [
        {
            "name": "Python: Module",
            "type": "python",
            "request": "launch",
            "module": "pytest",
            "justMyCode": false
        }
    ]
}
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
{
    "python.testing.pytestArgs": [],
    "python.testing.unittestEnabled": false,
    "python.testing.pytestEnabled": true
}
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
ARG BASE=intersystemsdc/iris-community
FROM $BASE

COPY --chown=irisowner:irisowner . /home/irisowner/sqlalchemy-iris

WORKDIR /home/irisowner/sqlalchemy-iris

ENV PIP_TARGET=/usr/irissys/mgr/python

RUN pip install -r requirements-dev.txt -r requirements-iris.txt && \
    pip install -e .

ENTRYPOINT /home/irisowner/sqlalchemy-iris/test-in-docker.sh
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2021 Dmitry Maslennikov

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
sqlalchemy-iris
===

An InterSystems IRIS dialect for SQLAlchemy.

Pre-requisites
---

This dialect requires SQLAlchemy and the InterSystems DB-API driver. They are
specified as requirements, so ``pip`` will install them if they are not already
in place. To install, just:

```shell
pip install sqlalchemy-iris
```

Or, to use the official InterSystems driver:

```shell
pip install sqlalchemy-iris[intersystems]
```

Usage
---

In your Python app, you can connect to the database via:

```python
from sqlalchemy import create_engine
engine = create_engine("iris://_SYSTEM:SYS@localhost:1972/USER")
```

To use Embedded Python mode, when running next to IRIS:

```python
from sqlalchemy import create_engine
engine = create_engine("iris+emb:///USER")
```

To use the official InterSystems driver (it does not work in Embedded Python mode):

```python
from sqlalchemy import create_engine
engine = create_engine("iris+intersystems://_SYSTEM:SYS@localhost:1972/USER")
```

IRIS Cloud SQL requires an SSLContext:

```python
import ssl

from sqlalchemy import create_engine
from sqlalchemy.engine import URL

url = URL.create(
    drivername="iris",
    host=host,
    port=443,
    username='SQLAdmin',
    password=password,
    database='USER',
)

sslcontext = ssl.create_default_context(cafile="certificateSQLaaS.pem")

engine = create_engine(url, connect_args={"sslcontext": sslcontext})
```
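
Once the engine is created, the dialect works with the regular SQLAlchemy ORM.
A minimal sketch (the `Person` model and its columns here are illustrative, not
part of this package):

```python
from sqlalchemy import String, create_engine, select
from sqlalchemy.orm import DeclarativeBase, Mapped, Session, mapped_column


class Base(DeclarativeBase):
    pass


class Person(Base):
    __tablename__ = "person"

    id: Mapped[int] = mapped_column(primary_key=True, autoincrement=True)
    name: Mapped[str] = mapped_column(String(50))


engine = create_engine("iris://_SYSTEM:SYS@localhost:1972/USER")
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add(Person(name="John"))
    session.commit()
    print(session.scalars(select(Person.name)).all())
```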

InterSystems IRIS
---

You can run your instance of InterSystems IRIS Community Edition with Docker:

```shell
docker run -d --name iris \
  -p 1972:1972 \
  -p 52773:52773 \
  -e IRIS_USERNAME=_SYSTEM \
  -e IRIS_PASSWORD=SYS \
  intersystemsdc/iris-community:preview
```

_Port 1972 is used for binary communication (this driver, xDBC and so on), and 52773 is for the web (Management Portal, IRIS-based web applications and APIs)._

The System Management Portal is available at: `http://localhost:52773/csp/sys/UtilHome.csp`

Examples
===

IRISVector
---

```python
from sqlalchemy import Column, MetaData, Table, select
from sqlalchemy.sql.sqltypes import Integer, UUID
from sqlalchemy_iris import IRISVector
from sqlalchemy import create_engine
from sqlalchemy.orm import DeclarativeBase
import uuid

DATABASE_URL = "iris://_SYSTEM:SYS@localhost:1972/USER"
engine = create_engine(DATABASE_URL, echo=False)

# Create table metadata
metadata = MetaData()


class Base(DeclarativeBase):
    pass


def main():
    demo_table = Table(
        "demo_table",
        metadata,
        Column("id", Integer, primary_key=True, autoincrement=True),
        Column("uuid", UUID),
        Column("embedding", IRISVector(item_type=float, max_items=3)),
    )

    demo_table.drop(engine, checkfirst=True)
    demo_table.create(engine, checkfirst=True)
    with engine.connect() as conn:
        conn.execute(
            demo_table.insert(),
            [
                {"uuid": uuid.uuid4(), "embedding": [1, 2, 3]},
                {"uuid": uuid.uuid4(), "embedding": [2, 3, 4]},
            ],
        )
        conn.commit()
        result = conn.execute(
            demo_table.select()
        ).fetchall()
        print("result", result)


main()
```
--------------------------------------------------------------------------------
/requirements-dev.txt:
--------------------------------------------------------------------------------
# wheel
# flake8
pytest
# black
# twine
alembic
testcontainers-iris
pytest-github-actions-annotate-failures
--------------------------------------------------------------------------------
/requirements-iris.txt:
--------------------------------------------------------------------------------
https://github.com/intersystems-community/intersystems-irispython/releases/download/3.9.2/intersystems_iris-3.9.2-py3-none-any.whl
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
SQLAlchemy>=1.3
--------------------------------------------------------------------------------
/scripts/build-dist.sh:
--------------------------------------------------------------------------------
#!/bin/bash

packages=("iris" "intersystems_iris" "irisnative")
for package in ${packages[@]};
do
    rm -f ./$package
    package_path=`python -c "import importlib.util; print(importlib.util.find_spec('${package}').submodule_search_locations[0])"`
    ln -s $package_path ./$package
done

set -eo pipefail

PROJECT="$( cd "$(dirname "$0")/.." ; pwd -P )"

PYTHON_BIN=${PYTHON_BIN:-python3}

echo "$PYTHON_BIN"

set -x

rm -rf "$PROJECT"/dist
rm -rf "$PROJECT"/build
mkdir -p "$PROJECT"/dist

cd "$PROJECT"
$PYTHON_BIN setup.py sdist bdist_wheel

for package in ${packages[@]};
do
    rm -f $package
done
rm -rf intersystems-irispython

set +x
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
[metadata]
name = sqlalchemy-iris
version = 0.17.1
description = InterSystems IRIS for SQLAlchemy
long_description = file: README.md
url = https://github.com/caretdev/sqlalchemy-iris
maintainer = CaretDev
maintainer_email = dmitry@caretdev.com
license = MIT
long_description_content_type = text/markdown
classifiers =
    Development Status :: 4 - Beta
    Intended Audience :: Developers
    License :: OSI Approved :: MIT License
    Programming Language :: Python
    Programming Language :: Python :: 3
    Programming Language :: Python :: 3.8
    Programming Language :: Python :: 3.9
    Programming Language :: Python :: 3.10
    Topic :: Database :: Front-Ends
    Operating System :: OS Independent
keywords = SQLAlchemy, InterSystems, IRIS
project_urls =
    Source = https://github.com/caretdev/sqlalchemy-iris
    Tracker = https://github.com/caretdev/sqlalchemy-iris/issues

[options]
python_requires = >=3.8
packages = find:

[options.extras_require]
intersystems =
    intersystems-irispython==5.1.0

[tool:pytest]
addopts= --tb native -v -r fxX -p no:warnings

[db]
default=iris://_SYSTEM:SYS@localhost:1972/USER
iris=iris://_SYSTEM:SYS@localhost:1972/USER
irisintersystems=iris+intersystems://_SYSTEM:SYS@localhost:1972/USER
irisasync=iris+irisasync://_SYSTEM:SYS@localhost:1972/USER
irisemb=iris+emb:///
sqlite=sqlite:///:memory:

[sqla_testing]
requirement_cls=sqlalchemy_iris.requirements:Requirements
profile_file=test/profiles.txt

[flake8]
max-line-length=120
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
from setuptools import setup

setup(
    install_requires=[
        "SQLAlchemy>=1.3"
    ],
    entry_points={
        "sqlalchemy.dialects": [
            "iris = sqlalchemy_iris.iris:IRISDialect_iris",
            "iris.emb = sqlalchemy_iris.embedded:IRISDialect_emb",
            "iris.irisasync = sqlalchemy_iris.irisasync:IRISDialect_irisasync",
            "iris.intersystems = sqlalchemy_iris.intersystems:IRISDialect_intersystems",
        ]
    },
)
--------------------------------------------------------------------------------
/sqlalchemy_iris/__init__.py:
--------------------------------------------------------------------------------
from sqlalchemy.dialects import registry as _registry

from . import base
from . import iris

try:
    import alembic  # noqa
except ImportError:
    pass
else:
    from .alembic import IRISImpl  # noqa

from .base import BIGINT
from .base import BIT
from .base import DATE
from .base import DOUBLE
from .base import INTEGER
from .base import LONGVARBINARY
from .base import LONGVARCHAR
from .base import NUMERIC
from .base import SMALLINT
from .base import TIME
from .base import TIMESTAMP
from .base import TINYINT
from .base import VARBINARY
from .base import VARCHAR
from .base import IRISListBuild
from .base import IRISVector

base.dialect = dialect = iris.dialect

_registry.register("iris.iris", "sqlalchemy_iris.iris", "IRISDialect_iris")
_registry.register("iris.emb", "sqlalchemy_iris.embedded", "IRISDialect_emb")
_registry.register("iris.irisasync", "sqlalchemy_iris.irisasync", "IRISDialect_irisasync")
_registry.register("iris.intersystems", "sqlalchemy_iris.intersystems", "IRISDialect_intersystems")

__all__ = [
    "BIGINT",
    "BIT",
    "DATE",
    "DOUBLE",
    "INTEGER",
    "LONGVARBINARY",
    "LONGVARCHAR",
    "NUMERIC",
    "SMALLINT",
    "TIME",
    "TIMESTAMP",
    "TINYINT",
    "VARBINARY",
    "VARCHAR",
    "IRISListBuild",
    "IRISVector",
    "dialect",
]
--------------------------------------------------------------------------------
/sqlalchemy_iris/alembic.py:
--------------------------------------------------------------------------------
import logging
import re

from typing import Optional
from typing import Any

from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.base import Executable
from sqlalchemy.sql.elements import ClauseElement
from sqlalchemy.sql.schema import CheckConstraint
from sqlalchemy.sql.type_api import TypeEngine
from sqlalchemy.sql import table
from sqlalchemy import types

from alembic.ddl import DefaultImpl
from alembic.ddl.base import ColumnNullable
from alembic.ddl.base import ColumnType
from alembic.ddl.base import ColumnName
from alembic.ddl.base import AddColumn
from alembic.ddl.base import DropColumn
from alembic.ddl.base import Column
from alembic.ddl.base import alter_table
from alembic.ddl.base import alter_column
from alembic.ddl.base import drop_column
from alembic.ddl.base import format_type
from alembic.ddl.base import format_column_name
from .base import IRISDDLCompiler

log = logging.getLogger(__name__)

# IRIS interprets these types as %Streams, and no direct type change is available
_as_stream = [
    types.LargeBinary,
    types.BLOB,
    types.CLOB,
]

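# For context, a minimal sketch (assumed, with illustrative table/column names)
# of an Alembic operation that hits the stream-conversion path in
# IRISImpl.alter_column below; changing a column to one of the types in
# _as_stream goes through the rename/copy/drop sequence:
#
#     import sqlalchemy as sa
#     from alembic import op
#
#     op.alter_column(
#         "my_table", "payload",
#         existing_type=sa.VARCHAR(100),
#         type_=sa.LargeBinary(),
#     )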


class IRISImpl(DefaultImpl):
    __dialect__ = "iris"

    type_synonyms = DefaultImpl.type_synonyms + (
        {"BLOB", "LONGVARBINARY"},
        {"DOUBLE", "FLOAT"},
        {"DATETIME", "TIMESTAMP"},
    )

    def compare_type(self, inspector_column: Column, metadata_column: Column) -> bool:
        # Don't change type of IDENTITY column
        if (
            metadata_column.primary_key
            and metadata_column is metadata_column.table._autoincrement_column
        ):
            return False

        return super().compare_type(inspector_column, metadata_column)

    def compare_server_default(
        self,
        inspector_column: Column,
        metadata_column: Column,
        rendered_metadata_default,
        rendered_inspector_default,
    ):
        # don't do defaults for IDENTITY columns
        if (
            metadata_column.primary_key
            and metadata_column is metadata_column.table._autoincrement_column
        ):
            return False

        if rendered_metadata_default is not None:
            rendered_metadata_default = re.sub(
                r"[\(\) \"\']", "", rendered_metadata_default
            )

        if rendered_inspector_default is not None:
            rendered_inspector_default = re.sub(
                r"[\(\) \"\']", "", rendered_inspector_default
            )

        return rendered_inspector_default != rendered_metadata_default

    def alter_column(
        self,
        table_name: str,
        column_name: str,
        type_: Optional[TypeEngine] = None,
        existing_type: Optional[TypeEngine] = None,
        schema: Optional[str] = None,
        name: Optional[str] = None,
        **kw: Any,
    ) -> None:
        if existing_type.__class__ not in _as_stream and type_.__class__ in _as_stream:
            """
            To change a column type to %Stream:
            * rename the column to a temporary name with the suffix `__superset_tmp`
            * create a new column with the old name
            * copy data from the old column to the new one
            * drop the old column
            * restore missing parameters, such as nullable
            """
            tmp_column = f"{column_name}__superset_tmp"
            self._exec(ColumnName(table_name, column_name, tmp_column, schema=schema))
            new_kw = {}
            self._exec(
                AddColumn(
                    table_name,
                    Column(column_name, type_=type_, **new_kw),
                    schema=schema,
                )
            )
            tab = table(
                table_name,
                Column(column_name, key="new_col"),
                Column(tmp_column, key="old_col"),
                schema=schema,
            )
            self._exec(tab.update().values({tab.c.new_col: tab.c.old_col}))
            self._exec(DropColumn(table_name, Column(tmp_column), schema=schema))
            new_kw = {}
            for k in ["server_default", "nullable", "autoincrement"]:
                if f"existing_{k}" in kw:
                    new_kw[k] = kw[f"existing_{k}"]
            return super().alter_column(
                table_name, column_name, schema=schema, name=name, **new_kw
            )
        return super().alter_column(
            table_name,
            column_name,
            type_=type_,
            existing_type=existing_type,
            schema=schema,
            name=name,
            **kw,
        )

    def add_constraint(self, const: Any) -> None:
        if isinstance(const, CheckConstraint):
            # just ignore it
            return
        super().add_constraint(const)


class _ExecDropForeignKey(Executable, ClauseElement):
    inherit_cache = False

    def __init__(
        self, table_name: str, foreignkey_name: Column, schema: Optional[str]
    ) -> None:
        self.table_name = table_name
        self.foreignkey_name = foreignkey_name
        self.schema = schema


@compiles(_ExecDropForeignKey, "iris")
def _exec_drop_foreign_key(
    element: _ExecDropForeignKey, compiler: IRISDDLCompiler, **kw
) -> str:
    return "%s DROP FOREIGN KEY %s" % (
        alter_table(compiler, element.table_name, element.schema),
        format_column_name(compiler, element.foreignkey_name),
    )


@compiles(ColumnNullable, "iris")
def visit_column_nullable(
    element: ColumnNullable, compiler: IRISDDLCompiler, **kw
) -> str:
    return "%s %s %s" % (
        alter_table(compiler, element.table_name, element.schema),
        alter_column(compiler, element.column_name),
        "NULL" if element.nullable else "NOT NULL",
    )


@compiles(ColumnType, "iris")
def visit_column_type(element: ColumnType, compiler: IRISDDLCompiler, **kw) -> str:
    return "%s %s %s" % (
        alter_table(compiler, element.table_name, element.schema),
        alter_column(compiler, element.column_name),
        "%s" % format_type(compiler, element.type_),
    )


@compiles(ColumnName, "iris")
def visit_rename_column(element: ColumnName, compiler: IRISDDLCompiler, **kw) -> str:
    return "%s %s RENAME %s" % (
        alter_table(compiler, element.table_name, element.schema),
        alter_column(compiler, element.column_name),
        format_column_name(compiler, element.newname),
    )


@compiles(DropColumn, "iris")
def visit_drop_column(element: DropColumn, compiler: IRISDDLCompiler, **kw) -> str:
    return "%s %s CASCADE" % (
        alter_table(compiler, element.table_name, element.schema),
        drop_column(compiler, element.column.name, **kw),
    )
--------------------------------------------------------------------------------
/sqlalchemy_iris/base.py:
--------------------------------------------------------------------------------
import re
from decimal import Decimal
import intersystems_iris.dbapi._DBAPI as dbapi
from . import information_schema as ischema
from sqlalchemy import exc
from sqlalchemy.orm import aliased
from sqlalchemy.engine import default
from sqlalchemy.engine import reflection
from sqlalchemy.sql import compiler
from sqlalchemy.sql import util as sql_util
from sqlalchemy.sql import between
from sqlalchemy.sql import func
from sqlalchemy.sql.functions import ReturnTypeFromArgs
from sqlalchemy.sql.elements import Null
from sqlalchemy.sql.elements import quoted_name
from sqlalchemy.sql import expression
from sqlalchemy.sql import schema
from sqlalchemy import sql, text
from sqlalchemy import util
from sqlalchemy import types as sqltypes

from sqlalchemy import __version__ as sqlalchemy_version

if sqlalchemy_version.startswith("2."):
    from sqlalchemy.engine import ObjectKind
    from sqlalchemy.engine import ObjectScope
    from sqlalchemy.engine.reflection import ReflectionDefaults
else:
    from enum import Flag

    class ObjectKind(Flag):
        TABLE = 1
        VIEW = 2
        ANY = TABLE | VIEW

    class ObjectScope(Flag):
        DEFAULT = 1
        TEMPORARY = 2
        ANY = DEFAULT | TEMPORARY

    class ReflectionDefaults:
        @classmethod
        def columns(cls):
            return []

        @classmethod
        def pk_constraint(cls):
            return {
                "name": None,
                "constrained_columns": [],
            }

        @classmethod
        def foreign_keys(cls):
            return []

        @classmethod
        def indexes(cls):
            return []

        @classmethod
        def unique_constraints(cls):
            return []

        @classmethod
        def check_constraints(cls):
            return []


from sqlalchemy.types import BIGINT
from sqlalchemy.types import VARCHAR
from sqlalchemy.types import CHAR
from sqlalchemy.types import INTEGER
from sqlalchemy.types import DATE
from sqlalchemy.types import TIMESTAMP
from sqlalchemy.types import TIME
from sqlalchemy.types import NUMERIC
from sqlalchemy.types import BINARY
from sqlalchemy.types import VARBINARY
from sqlalchemy.types import TEXT
from sqlalchemy.types import SMALLINT

from .types import BIT
from .types import DOUBLE
from .types import LONGVARCHAR
from .types import LONGVARBINARY
from .types import TINYINT

from .types import IRISBoolean
from .types import IRISTime
from .types import IRISTimeStamp
from .types import IRISDate
from .types import IRISDateTime
from .types import IRISListBuild  # noqa
from .types import IRISVector  # noqa


ischema_names = {
    "BIGINT": BIGINT,
    "BIT": BIT,
    "DATE": DATE,
    "DOUBLE": DOUBLE,
    "INTEGER": INTEGER,
    "LONGVARBINARY": LONGVARBINARY,
    "LONGVARCHAR": LONGVARCHAR,
    "NUMERIC": NUMERIC,
    "SMALLINT": SMALLINT,
    "TIME": IRISTime,
    "TIMESTAMP": IRISTimeStamp,
    "TINYINT": TINYINT,
    "VARBINARY": VARBINARY,
    "VARCHAR": VARCHAR,
}

RESERVED_WORDS = set(
    [
        "%afterhaving",
        "%allindex",
        "%alphaup",
        "%alter",
        "%begtrans",
        "%checkpriv",
        "%classname",
        "%classparameter",
        "%dbugfull",
        "%deldata",
        "%description",
        "%exact",
        "%external",
        "%file",
        "%firsttable",
        "%flatten",
        "%foreach",
        "%full",
        "%id",
        "%idadded",
        "%ignoreindex",
        "%ignoreindices",
        "%inlist",
        "%inorder",
        "%internal",
        "%intext",
        "%intrans",
        "%intransaction",
        "%key",
        "%matches",
        "%mcode",
        "%merge",
        "%minus",
        "%mvr",
        "%nocheck",
        "%nodeldata",
        "%noflatten",
        "%nofplan",
        "%noindex",
        "%nolock",
        "%nomerge",
        "%noparallel",
        "%noreduce",
        "%noruntime",
        "%nosvso",
        "%notopopt",
        "%notrigger",
        "%nounionoropt",
        "%numrows",
        "%odbcin",
        "%odbcout",
        "%parallel",
        "%plus",
        "%profile",
        "%profile_all",
        "%publicrowid",
        "%routine",
        "%rowcount",
        "%runtimein",
        "%runtimeout",
        "%startswith",
        "%starttable",
        "%sqlstring",
        "%sqlupper",
        "%string",
        "%tablename",
        "%truncate",
        "%upper",
        "%value",
        "%vid",
        "absolute",
        "add",
        "all",
        "allocate",
        "alter",
        "and",
        "any",
        "are",
        "as",
        "asc",
        "assertion",
        "at",
        "authorization",
        "avg",
        "begin",
        "between",
        "bit",
        "bit_length",
        "both",
        "by",
        "cascade",
        "case",
        "cast",
        "char",
        "character",
        "character_length",
        "char_length",
        "check",
        "close",
        "coalesce",
        "collate",
        "commit",
        "connect",
        "connection",
        "constraint",
        "constraints",
        "continue",
        "convert",
        "corresponding",
        "count",
        "create",
        "cross",
        "current",
        "current_date",
        "current_time",
        "current_timestamp",
        "current_user",
        "cursor",
        "date",
        "deallocate",
        "dec",
        "decimal",
        "declare",
        "default",
        "deferrable",
        "deferred",
        "delete",
        "desc",
        "describe",
        "descriptor",
        "diagnostics",
        "disconnect",
        "distinct",
        "domain",
        "double",
        "drop",
        "else",
        "end",
        "endexec",
        "escape",
        "except",
        "exception",
        "exec",
        "execute",
        "exists",
        "external",
        "extract",
        "false",
        "fetch",
        "first",
        "float",
        "for",
        "foreign",
        "found",
        "from",
        "full",
        "get",
        "global",
        "go",
        "goto",
        "grant",
        "group",
        "having",
        "hour",
        "identity",
        "immediate",
        "in",
        "indicator",
        "initially",
        "inner",
        "input",
        "insensitive",
        "insert",
        "int",
        "integer",
        "intersect",
        "interval",
        "into",
        "is",
        "isolation",
        "join",
        "language",
        "last",
        "leading",
        "left",
        "level",
        "like",
        "local",
        "lower",
        "match",
        "max",
        "min",
        "minute",
        "module",
        "names",
        "national",
        "natural",
        "nchar",
        "next",
        "no",
        "not",
        "null",
        "nullif",
        "numeric",
        "octet_length",
        "of",
        "on",
        "only",
        "open",
        "option",
        "or",
        "outer",
        "output",
        "overlaps",
        "pad",
        "partial",
        "prepare",
        "preserve",
        "primary",
        "prior",
        "privileges",
        "procedure",
        "public",
        "read",
        "real",
        "references",
        "relative",
        "restrict",
        "revoke",
        "right",
        "role",
        "rollback",
        "rows",
        "schema",
        "scroll",
        "second",
        "section",
        "select",
        "session_user",
        "set",
        "shard",
        "smallint",
        "some",
        "space",
        "sqlerror",
        "sqlstate",
        "statistics",
        "substring",
        "sum",
        "sysdate",
        "system_user",
        "table",
        "temporary",
        "then",
        "time",
        "timezone_hour",
        "timezone_minute",
        "to",
        "top",
        "trailing",
        "transaction",
        "trim",
        "true",
        "union",
        "unique",
        "update",
        "upper",
        "user",
        "using",
        "values",
        "varchar",
        "varying",
        "when",
        "whenever",
        "where",
        "with",
        "work",
        "write",
    ]
)


class IRISCompiler(sql.compiler.SQLCompiler):
    """IRIS specific idiosyncrasies"""

    def visit_exists_unary_operator(
        self, element, operator, within_columns_clause=False, **kw
    ):
        if within_columns_clause:
            return "(SELECT 1 WHERE EXISTS(%s))" % self.process(element.element, **kw)
        else:
            return "EXISTS(%s)" % self.process(element.element, **kw)

    def limit_clause(self, select, **kw):
        return ""

    def fetch_clause(self, select, **kw):
        return ""

    def visit_empty_set_expr(self, type_, **kw):
        return "SELECT 1 WHERE 1!=1"

    def _get_limit_or_fetch(self, select):
        if select._fetch_clause is None:
            return select._limit_clause
        else:
            return select._fetch_clause

    def visit_delete(self, delete_stmt, **kw):
        if not delete_stmt._where_criteria and delete_stmt.table.foreign_keys:
            # https://community.intersystems.com/post/sql-foreign-key-constraint-check-delete
            table = delete_stmt.table
            nocheck = False
            for fk in table.foreign_keys:
                nocheck = not fk.ondelete and fk.parent.table == table
                if not nocheck:
                    break

            if nocheck is True:
                delete_stmt = delete_stmt.prefix_with("%NOCHECK", dialect="iris")
        text = super().visit_delete(delete_stmt, **kw)
        return text

    def for_update_clause(self, select, **kw):
        return ""

    def visit_true(self, expr, **kw):
        return "1"

    def visit_false(self, expr, **kw):
        return "0"

    def visit_is_true_unary_operator(self, element, operator, **kw):
        return "%s = 1" % self.process(element.element, **kw)

    def visit_is_false_unary_operator(self, element, operator, **kw):
        return "%s = 0" % self.process(element.element, **kw)

    def visit_is__binary(self, binary, operator, **kw):
        op = "IS" if isinstance(binary.right, Null) else "="
        return "%s %s %s" % (
            self.process(binary.left),
            op,
            self.process(binary.right),
        )

    def visit_is_not_binary(self, binary, operator, **kw):
        op = "IS NOT" if isinstance(binary.right, Null) else "<>"
        return "%s %s %s" % (
            self.process(binary.left),
            op,
            self.process(binary.right),
        )

    def get_select_precolumns(self, select, **kw):
        text = ""
        if select._distinct or select._distinct_on:
            if select._distinct_on:
                text += (
                    "DISTINCT ON ("
                    + ", ".join(
                        [self.process(col, **kw) for col in select._distinct_on]
                    )
                    + ") "
                )
            else:
                text += "DISTINCT "

        if select._has_row_limiting_clause and self._use_top(select):
            text += "TOP %s " % self.process(self._get_limit_or_fetch(select), **kw)

        return text

    def _use_top(self, select):
        return (select._offset_clause is None) and (
            select._simple_int_clause(select._limit_clause)
            or select._simple_int_clause(select._fetch_clause)
        )

    def visit_irisexact_func(self, fn, **kw):
        return "%EXACT" + self.function_argspec(fn)

    def _use_exact_for_ordered_string(self, select):
        """
        `SELECT string_value FROM some_table ORDER BY string_value`
        will return `string_value` in uppercase, so this method rewrites the
        query to use the %EXACT() function:
        `SELECT %EXACT(string_value) AS string_value FROM some_table ORDER BY string_value`
        """

        def _add_exact(column):
            if isinstance(column.type, sqltypes.String):
                return IRISExact(column).label(
                    column._label if column._label else column.name
                )
            return column

        _order_by_clauses = [
            sql_util.unwrap_label_reference(elem)
            for elem in select._order_by_clause.clauses
            if isinstance(elem, schema.Column)
        ]
        if _order_by_clauses:
            select._raw_columns = [
                (
                    _add_exact(c)
                    if isinstance(c, schema.Column) and c in _order_by_clauses
                    else c
                )
                for c in select._raw_columns
            ]

        return select

    def translate_select_structure(self, select_stmt, **kwargs):
        select = select_stmt
        if getattr(select, "_iris_visit", None) is True:
            return select

        select._iris_visit = True
        select = select._generate()

        select = self._use_exact_for_ordered_string(select)

        if not (select._has_row_limiting_clause and not self._use_top(select)):
            return select

        """Look for ``LIMIT`` and ``OFFSET`` in a select statement and, if
        found, wrap it in a subquery with a ``row_number()`` criterion.

        """
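        # For illustration (assumed shape, not verbatim output), a query like
        #   SELECT id FROM t ORDER BY id LIMIT 5 OFFSET 10
        # is rewritten roughly as
        #   SELECT id FROM (
        #       SELECT id, ROW_NUMBER() OVER (ORDER BY id) AS iris_rn FROM t
        #   ) WHERE iris_rn BETWEEN 11 AND 15 ORDER BY iris_rn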
        _order_by_clauses = [
            sql_util.unwrap_label_reference(elem)
            for elem in select._order_by_clause.clauses
        ]
        if not _order_by_clauses:
            _order_by_clauses = [text("%id")]

        limit_clause = self._get_limit_or_fetch(select)
        offset_clause = select._offset_clause

        label = "iris_rn"
        select = (
            select.add_columns(
                sql.func.ROW_NUMBER().over(order_by=_order_by_clauses).label(label)
            )
            .order_by(None)
            .alias()
        )

        iris_rn = sql.column(label)
        limitselect = sql.select(*[c for c in select.c if c.key != label])
        if offset_clause is not None:
            if limit_clause is not None:
                limitselect = limitselect.where(
                    between(iris_rn, offset_clause + 1, limit_clause + offset_clause)
                ).order_by(iris_rn)
            else:
                limitselect = limitselect.where(iris_rn > offset_clause)
        else:
            limitselect = limitselect.where(iris_rn <= (limit_clause))
        return limitselect

    def order_by_clause(self, select, **kw):
        order_by = self.process(select._order_by_clause, **kw)

        if order_by and (not self.is_subquery() or select._limit):
            return " ORDER BY " + order_by
        else:
            return ""

    def visit_concat_op_binary(self, binary, operator, **kw):
        return "STRING(%s, %s)" % (
            self.process(binary.left, **kw),
            self.process(binary.right, **kw),
        )

    def visit_concat_func(self, func, **kw):
        args = [self.process(clause, **kw) for clause in func.clauses.clauses]
        return ' || '.join(args)

    def visit_mod_binary(self, binary, operator, **kw):
        return (
            self.process(binary.left, **kw) + " # " + self.process(binary.right, **kw)
        )

    def visit_regexp_match_op_binary(self, binary, operator, **kw):
        # InterSystems uses its own pattern format for %MATCHES; it does not support regular expressions
        raise exc.CompileError("InterSystems IRIS does not support REGEXP")

    def visit_not_regexp_match_op_binary(self, binary, operator, **kw):
        # InterSystems uses its own pattern format for %MATCHES; it does not support regular expressions
        raise exc.CompileError("InterSystems IRIS does not support REGEXP")

    def visit_case(self, clause, **kwargs):
        x = "CASE "
        if clause.value is not None:
            x += clause.value._compiler_dispatch(self, **kwargs) + " "
        for cond, result in clause.whens:
            x += (
                "WHEN "
                + cond._compiler_dispatch(self, **kwargs)
                + " THEN "
                # Explicit CAST required on 2023.1
                + (
                    self.visit_cast(sql.cast(result, result.type), **kwargs)
                    if isinstance(result, sql.elements.BindParameter)
                    else result._compiler_dispatch(self, **kwargs)
                )
                + " "
            )
        if clause.else_ is not None:
            x += (
                "ELSE "
                + (
                    self.visit_cast(sql.cast(clause.else_, clause.else_.type), **kwargs)
                    if isinstance(clause.else_, sql.elements.BindParameter)
                    else clause.else_._compiler_dispatch(self, **kwargs)
                )
                + " "
            )
        x += "END"
        return x


class IRISDDLCompiler(sql.compiler.DDLCompiler):
    """IRIS syntactic idiosyncrasies"""

    def visit_create_schema(self, create, **kw):
        return ""

    def visit_drop_schema(self, drop, **kw):
        return ""

    def visit_check_constraint(self, constraint, **kw):
        pass

    def create_table_constraints(self, table, **kw):
        description = ""
        comment = table.comment
        if comment:
            # hack to keep \r, kind of
            comment = comment.replace('\r', '\n\t')
            literal = self.sql_compiler.render_literal_value(comment, sqltypes.String())
            description = "%DESCRIPTION " + literal

        constraints = super().create_table_constraints(table, **kw)
        if constraints and description:
            description = ", \n\t" + description
        return constraints + description

    def visit_add_constraint(self, create, **kw):
        if isinstance(create.element, schema.CheckConstraint):
            raise exc.CompileError("Can't add CHECK constraint")
        return super().visit_add_constraint(create, **kw)

    def visit_computed_column(self, generated, **kwargs):
        text = self.sql_compiler.process(
            generated.sqltext, include_table=True, literal_binds=True
        )
        text = re.sub(r"(?}", text)
        # text = text.replace("'", '"')
        text = "COMPUTECODE {Set {*} = %s}" % (text,)
        if generated.persisted is False:
            text += " CALCULATED"
        else:
            text += ' COMPUTEONCHANGE ("%%UPDATE")'
        return text

    def get_column_specification(self, column, **kwargs):
        colspec = [
            self.preparer.format_column(column),
        ]

        if column.primary_key and column is column.table._autoincrement_column:
            # colspec.append("SERIAL")
            # Use IDENTITY (with ALLOWIDENTITYINSERT = 1 on the table) instead of SERIAL, to solve an issue with LAST_IDENTITY()
            colspec.append("IDENTITY")
        else:
            colspec.append(
                self.dialect.type_compiler.process(
                    column.type,
                    type_expression=column,
                    identifier_preparer=self.preparer,
                )
            )

        default = self.get_column_default_string(column)
        if default is not None:
            colspec.append("DEFAULT " + default)

        if column.computed is not None:
            colspec.append(self.process(column.computed))

        if not column.nullable:
            colspec.append("NOT NULL")

        comment = column.comment
        if comment is not None:
            comment = comment.replace('\r', '\n\t')
            literal = self.sql_compiler.render_literal_value(comment, sqltypes.String())
            colspec.append("%DESCRIPTION " + literal)

        return " ".join(colspec)

    def post_create_table(self, table):
        return " WITH %CLASSPARAMETER ALLOWIDENTITYINSERT = 1"

    def visit_create_index(
        self, create, include_schema=False, include_table_schema=True, **kw
    ):
        text = super().visit_create_index(
            create, include_schema, include_table_schema, **kw
        )

        index = create.element
        preparer = self.preparer

        # handle other included columns
        includeclause = index.dialect_options["iris"]["include"]
        if includeclause:
            inclusions = [
                index.table.c[col] if isinstance(col, str) else col
                for col in includeclause
            ]

            text += " WITH DATA (%s)" % ", ".join(
                [preparer.quote(c.name) for c in inclusions]
            )

        return text

    def visit_drop_index(self, drop, **kw):
        return "DROP INDEX %s ON %s" % (
            self._prepared_index_name(drop.element, include_schema=False),
            self.preparer.format_table(drop.element.table),
        )


class IRISTypeCompiler(compiler.GenericTypeCompiler):
    def visit_BOOLEAN(self, type_, **kw):
        return self.visit_BIT(type_)

    def visit_BIT(self, type_, **kw):
        return "BIT"

    def visit_VARCHAR(self, type_, **kw):
        # If length is not specified, use 50 as default in IRIS
        if type_.length is None:
            type_ = VARCHAR(50)
        return "VARCHAR(%d)" % type_.length

    def visit_TEXT(self, type_, **kw):
        return "VARCHAR(65535)"

    def visit_LONGVARBINARY(self, type_, **kw):
        return "LONGVARBINARY"

    def visit_DOUBLE(self, type_, **kw):
        return "DOUBLE"

    def visit_TINYINT(self, type_, **kw):
        return "TINYINT"

    def visit_UUID(self, type_, **kw):
        return "UNIQUEIDENTIFIER"


class IRISIdentifierPreparer(sql.compiler.IdentifierPreparer):
    """Install IRIS specific reserved words."""

    reserved_words = compiler.RESERVED_WORDS.copy()
    reserved_words.update(RESERVED_WORDS)
    illegal_initial_characters = compiler.ILLEGAL_INITIAL_CHARACTERS.union(["_"])

    def __init__(self, dialect):
        super(IRISIdentifierPreparer, self).__init__(dialect, omit_schema=False)

    # def _escape_identifier(self, value):
    #     value = value.replace(self.escape_quote, self.escape_to_quote)
    #     return value.replace(".", "_")

    def format_column(
        self,
        column,
        use_table=False,
        name=None,
        table_name=None,
        use_schema=False,
        anon_map=None,
    ):
        if name is None:
            name = column.name

        # if '.' in name:
        #     name = name.replace('.', '_')

        return super().format_column(
            column, use_table, name, table_name, use_schema, anon_map
        )


class IRISExecutionContext(default.DefaultExecutionContext):
    def get_lastrowid(self):
        try:
            return self.cursor.lastrowid
        except Exception:
            cursor = self.cursor
            cursor.execute("SELECT LAST_IDENTITY()")
            lastrowid = cursor.fetchone()[0]
            return lastrowid

    def create_cursor(self):
        cursor = self._dbapi_connection.cursor()
        return cursor


colspecs = {
    sqltypes.Boolean: IRISBoolean,
    sqltypes.Date: IRISDate,
    sqltypes.DateTime: IRISDateTime,
    sqltypes.TIMESTAMP: IRISTimeStamp,
    sqltypes.Time: IRISTime,
}
if sqlalchemy_version.startswith("2."):
    from .types import IRISUniqueIdentifier

    colspecs[sqltypes.UUID] = IRISUniqueIdentifier


class IRISExact(ReturnTypeFromArgs):
    """The IRIS SQL %EXACT() function."""

    inherit_cache = True


class IRISDialect(default.DefaultDialect):
    name = "iris"

    embedded = False

    default_schema_name = "SQLUser"

    default_paramstyle = "format"
    supports_statement_cache = True

    supports_native_decimal = True
    supports_sane_rowcount = True
    supports_sane_multi_rowcount = True
    supports_alter = True
    supports_schemas = True
    supports_views = True
    supports_default_values = True

    supports_native_boolean = True
    non_native_boolean_check_constraint = False

    supports_multivalues_insert = True

    supports_sequences = False

    returns_native_bytes = True

    div_is_floordiv = False

    postfetch_lastrowid = True
    supports_simple_order_by_label = False
    supports_empty_insert = False
    supports_is_distinct_from = False

    supports_vectors = None

    supports_cte = True

    colspecs = colspecs

    ischema_names = ischema_names

    statement_compiler = IRISCompiler
    ddl_compiler = IRISDDLCompiler
    preparer = IRISIdentifierPreparer
    type_compiler = IRISTypeCompiler
    execution_ctx_cls = IRISExecutionContext

    update_returning = False
    insert_returning = True
    insert_executemany_returning = True
    insert_executemany_returning_sort_by_parameter_order = True
    update_executemany_returning = False
    delete_executemany_returning = False

    construct_arguments = [
        (schema.Index, {"include": None}),
    ]

    def __init__(self, **kwargs):
        default.DefaultDialect.__init__(self, **kwargs)

    def _get_server_version_info(self, connection):
        server_version = connection.connection._connection_info._server_version
        server_version = (
            server_version[server_version.find("Version") + 8 :]
            .split(" ")[0]
            .split(".")
        )
        return tuple([int("".join(filter(str.isdigit, v))) for v in server_version])

    _isolation_lookup = set(
        [
            "READ UNCOMMITTED",
            "READ COMMITTED",
            "READ VERIFIED",
        ]
    )

    def _get_default_schema_name(self, connection):
        return IRISDialect.default_schema_name

    def on_connect(self):
        super_ = super().on_connect()

        def on_connect(conn):
            if super_ is not None:
                super_(conn)

            try:
                with conn.cursor() as cursor:
                    # Distance or similarity
                    cursor.execute(
                        "select vector_cosine(to_vector('1'), to_vector('1'))"
                    )
                    self.supports_vectors = True
            except:  # noqa
                self.supports_vectors = False
            self._dictionary_access = False
            with conn.cursor() as cursor:
                res = cursor.execute("%CHECKPRIV SELECT ON %Dictionary.PropertyDefinition")
                self._dictionary_access = res == 0

            # if not self.supports_vectors:
            #     util.warn("No native support for VECTOR or not activated by license")
            if not self._dictionary_access:
                util.warn(
                    """
                    No access to %Dictionary; it may be required for some advanced features,
                    such as Calculated fields and include columns in indexes
                    """.replace(
                        "\n", ""
                    )
                )

        return on_connect

    def _get_option(self, connection, option):
        with connection.cursor() as cursor:
            cursor.execute("SELECT %SYSTEM_SQL.Util_GetOption(?)", (option, ))
            row = cursor.fetchone()
            return row[0] if row else None

    def _set_option(self, connection, option, value):
        with connection.cursor() as cursor:
            cursor.execute("SELECT %SYSTEM_SQL.Util_SetOption(?, ?)", [option, value])
            row = cursor.fetchone()
            if row:
                return row[0]
            return None

    def get_isolation_level_values(self, dbapi_connection):
        levels = set(self._isolation_lookup)
        levels.add("AUTOCOMMIT")
        return levels

    def get_isolation_level(self, connection):
        try:
            level = int(self._get_option(connection, "IsolationMode"))
        except dbapi.DatabaseError:
            # caught access violation error
            # by default it's 0
            level = 0
        if level == 0:
            return "READ UNCOMMITTED"
        elif level == 1:
            return "READ COMMITTED"
        elif level == 3:
            return "READ VERIFIED"
        return None

    def set_isolation_level(self, connection, level_str):
        if level_str == "AUTOCOMMIT":
            connection.setAutoCommit(True)
        else:
            connection.setAutoCommit(False)
            if level_str not in ["READ COMMITTED", "READ VERIFIED"]:
                level_str = "READ UNCOMMITTED"
            with connection.cursor() as cursor:
                cursor.execute("SET TRANSACTION ISOLATION LEVEL " + level_str)

    @classmethod
    def dbapi(cls):
        # dbapi.paramstyle = "format"
        return dbapi

    def is_disconnect(self, e, connection, cursor):
        if isinstance(e, self.dbapi.InterfaceError):
            return "Connection is closed" in str(e)
        return False

    def do_ping(self, dbapi_connection):
        cursor = None
        try:
            cursor = dbapi_connection.cursor()
            try:
                cursor.execute(self._dialect_specific_select_one)
            finally:
                cursor.close()
        except self.dbapi.Error as err:
            if self.is_disconnect(err, dbapi_connection, cursor):
                return False
            else:
                raise
        else:
            return True

    def create_connect_args(self, url):
        opts = {}

        opts["application_name"] = "sqlalchemy"
        opts["hostname"] = url.host
        opts["port"] = int(url.port) if url.port else 1972
        opts["namespace"] = url.database if url.database else "USER"
        opts["username"] = url.username if url.username else ""
        opts["password"] = url.password if url.password else ""

        opts["autoCommit"] = False

        opts["embedded"] = self.embedded
        if opts["hostname"] and "@" in opts["hostname"]:
            # the password contained "@"; reassemble it and keep only the real host
            _h = opts["hostname"].split("@")
            opts["password"] += "@" + "@".join(_h[0 : len(_h) - 1])
            opts["hostname"] = _h[len(_h) - 1]

        return ([], opts)

    _debug_queries = False
    # _debug_queries = True

    def _debug(self, query, params, many=False, wrap=True):
        if not self._debug_queries:
            return
        if many:
            print("-" * 120)
            for p in params:
                self._debug(query, p, wrap=False)
            print("-" * 120)
            return
        for p in params:
            if isinstance(p, Decimal):
                v = str(p)
            elif p is None:
                v = "NULL"
            else:
                v = "%r" % (p,)
            query = query.replace("?", v, 1)
        if wrap:
            print("-" * 120)
        print(query + ";")
        if wrap:
            print("-" * 120)

    def do_execute(self, cursor, query, params, context=None):
        if query.endswith(";"):
            query = query[:-1]
        self._debug(query, params)
        cursor.execute(query, params)

    def do_executemany(self, cursor, query, params, context=None):
        if query.endswith(";"):
            query = query[:-1]
        self._debug(query, params, True)
        cursor.executemany(query, params)

    def do_begin(self, connection):
        pass

    def do_rollback(self, connection):
        connection.rollback()

    def do_commit(self, connection):
        connection.commit()

    def do_savepoint(self, connection, name):
        connection.execute(expression.SavepointClause(name))

    def do_release_savepoint(self, connection, name):
        pass

    def get_schema(self, schema=None):
        if schema is None:
            return "SQLUser"
        return schema

    @reflection.cache
    def get_table_options(self, connection, table_name, schema=None, **kw):
        if not self.has_table(connection=connection, table_name=table_name, schema=schema):
            raise exc.NoSuchTableError(
                f"{schema}.{table_name}" if schema else table_name
            ) from None
        return {}

1128 |     @reflection.cache
1129 |     def get_table_comment(self, connection, table_name, schema=None, **kw):
1130 |         if not self.has_table(connection=connection, table_name=table_name, schema=schema):
1131 |             raise exc.NoSuchTableError(
1132 |                 f"{schema}.{table_name}" if schema else table_name
1133 |             ) from None
1134 |
1135 |         tables = ischema.tables
1136 |         schema_name = self.get_schema(schema)
1137 |
1138 |         s = sql.select(tables.c.description).where(
1139 |             sql.and_(
1140 |                 tables.c.table_schema == str(schema_name),
1141 |                 tables.c.table_name == str(table_name),
1142 |             )
1143 |         )
1144 |         comment = connection.execute(s).scalar()
1145 |         if comment:
1146 |             # restore \r, stored as ' \t\t\t\t'
1147 |             comment = comment.replace(' \t\t\t\t', '\r')
1148 |             # restore \n, stored as ' \t\t\t'
1149 |             comment = comment.replace(' \t\t\t', '\n')
1150 |         return {"text": comment}
1151 |
1152 |     @reflection.cache
1153 |     def get_schema_names(self, connection, **kw):
1154 |         s = sql.select(ischema.schemata.c.schema_name).order_by(
1155 |             ischema.schemata.c.schema_name
1156 |         )
1157 |         schema_names = [r[0] for r in connection.execute(s)]
1158 |         return schema_names
1159 |
1160 |     @reflection.cache
1161 |     def get_table_names(self, connection, schema=None, **kw):
1162 |         tables = ischema.tables
1163 |         schema_name = self.get_schema(schema)
1164 |         s = (
1165 |             sql.select(tables.c.table_name)
1166 |             .where(
1167 |                 sql.and_(
1168 |                     tables.c.table_schema == str(schema_name),
1169 |                     tables.c.table_type == "BASE TABLE",
1170 |                 )
1171 |             )
1172 |             .order_by(tables.c.table_name)
1173 |         )
1174 |         table_names = [r[0] for r in connection.execute(s)]
1175 |         return table_names
1176 |
1177 |     @reflection.cache
1178 |     def get_temp_table_names(self, connection, **kw):
1179 |         tables = ischema.tables
1180 |         s = (
1181 |             sql.select(tables.c.table_name)
1182 |             .where(
1183 |                 sql.and_(
1184 |                     tables.c.table_schema == self.default_schema_name,
1185 |                     tables.c.table_type == "GLOBAL TEMPORARY",
1186 |                 )
1187 |             )
1188 |             .order_by(tables.c.table_name)
1189 |         )
1190 |         table_names = [r[0] for r in connection.execute(s)]
1191 |         return table_names
1192 |
1193 |     @reflection.cache
1194 |     def has_table(self, connection, table_name, schema=None, **kw):
1195 |         tables = ischema.tables
1196 |         schema_name = self.get_schema(schema)
1197 |
1198 |         s = sql.select(func.count()).where(
1199 |             sql.and_(
1200 |                 tables.c.table_schema == str(schema_name),
1201 |                 tables.c.table_name == str(table_name),
1202 |             )
1203 |         )
1204 |         return bool(connection.execute(s).scalar())
1205 |
1206 |     def _get_all_objects(self, connection, schema, filter_names, scope, kind, **kw):
1207 |         tables = ischema.tables
1208 |         schema_name = self.get_schema(schema)
1209 |
1210 |         s = (
1211 |             sql.select(
1212 |                 tables.c.table_name,
1213 |             )
1214 |             .select_from(tables)
1215 |             .where(
1216 |                 tables.c.table_schema == str(schema_name),
1217 |             )
1218 |         )
1219 |
1220 |         table_types = []
1221 |         if ObjectScope.TEMPORARY in scope and ObjectKind.TABLE in kind:
1222 |             table_types.append("GLOBAL TEMPORARY")
1223 |         if ObjectScope.DEFAULT in scope and ObjectKind.VIEW in kind:
1224 |             table_types.append("VIEW")
1225 |         if ObjectScope.DEFAULT in scope and ObjectKind.TABLE in kind:
1226 |             table_types.append("BASE TABLE")
1227 |
1228 |         if not table_types:
1229 |             return []
1230 |         s = s.where(tables.c.table_type.in_(table_types))
1231 |
1232 |         if filter_names:
1233 |             s = s.where(tables.c.table_name.in_([str(name) for name in filter_names]))
1234 |
1235 |         result = connection.execute(s).scalars()
1236 |         return result.all()
1237 |
1238 |     @reflection.cache
1239 |     def
get_indexes(self, connection, table_name, schema=None, unique=False, **kw): 1240 | data = self.get_multi_indexes( 1241 | connection, 1242 | schema=schema, 1243 | filter_names=[table_name], 1244 | scope=ObjectScope.ANY, 1245 | kind=ObjectKind.ANY, 1246 | unique=unique, 1247 | **kw, 1248 | ) 1249 | return self._value_or_raise(data, table_name, schema) 1250 | 1251 | def get_multi_indexes( 1252 | self, connection, schema, filter_names, scope, kind, unique=False, **kw 1253 | ): 1254 | schema_name = self.get_schema(schema) 1255 | indexes = ischema.indexes 1256 | tables = ischema.tables 1257 | index_def = ischema.index_definition 1258 | 1259 | all_objects = self._get_all_objects( 1260 | connection, schema, filter_names, scope, kind 1261 | ) 1262 | if not all_objects: 1263 | return util.defaultdict(list) 1264 | 1265 | s = ( 1266 | sql.select( 1267 | indexes.c.table_name, 1268 | indexes.c.index_name, 1269 | indexes.c.column_name, 1270 | indexes.c.primary_key, 1271 | indexes.c.non_unique, 1272 | indexes.c.asc_or_desc, 1273 | ) 1274 | .select_from(indexes) 1275 | .where( 1276 | sql.and_( 1277 | indexes.c.table_schema == str(schema_name), 1278 | indexes.c.table_name.in_(all_objects), 1279 | indexes.c.primary_key == sql.false(), 1280 | ) 1281 | ) 1282 | .order_by( 1283 | indexes.c.table_name, 1284 | indexes.c.index_name, 1285 | indexes.c.ordinal_position, 1286 | ) 1287 | ) 1288 | if unique: 1289 | s = s.where(indexes.c.non_unique != sql.true()) 1290 | 1291 | if self._dictionary_access: 1292 | s = s.add_columns( 1293 | index_def.c.Data, 1294 | ).outerjoin( 1295 | index_def, 1296 | sql.and_( 1297 | index_def.c.SqlName == indexes.c.index_name, 1298 | index_def.c.parent 1299 | == sql.select(tables.c.classname) 1300 | .where( 1301 | indexes.c.table_name == tables.c.table_name, 1302 | indexes.c.table_schema == tables.c.table_schema, 1303 | ) 1304 | .scalar_subquery(), 1305 | ), 1306 | ) 1307 | else: 1308 | s = s.add_columns(None) 1309 | 1310 | rs = connection.execute(s) 1311 | 1312 | flat_indexes = util.defaultdict(dict) 1313 | default = ReflectionDefaults.indexes 1314 | 1315 | indexes = util.defaultdict(dict) 1316 | for table_name in all_objects: 1317 | indexes[(schema, table_name)] = default() 1318 | 1319 | for row in rs: 1320 | ( 1321 | idxtable, 1322 | idxname, 1323 | colname, 1324 | _, 1325 | nuniq, 1326 | _, 1327 | include, 1328 | ) = row 1329 | 1330 | if (schema, idxtable) not in indexes: 1331 | continue 1332 | 1333 | indexrec = flat_indexes[(schema, idxtable, idxname)] 1334 | if "name" not in indexrec: 1335 | indexrec["name"] = self.normalize_name(idxname) 1336 | indexrec["column_names"] = [] 1337 | if not unique: 1338 | indexrec["unique"] = not nuniq 1339 | else: 1340 | indexrec["duplicates_index"] = idxname 1341 | 1342 | indexrec["column_names"].append(self.normalize_name(colname)) 1343 | include = include.split(",") if include else [] 1344 | if not unique or include: 1345 | indexrec["include_columns"] = include 1346 | if include: 1347 | indexrec["dialect_options"] = {"iris_include": include} 1348 | 1349 | for schema, idxtable, idxname in flat_indexes: 1350 | indexes[(schema, idxtable)].append( 1351 | flat_indexes[(schema, idxtable, idxname)] 1352 | ) 1353 | 1354 | return indexes 1355 | 1356 | def get_pk_constraint(self, connection, table_name, schema=None, **kw): 1357 | data = self.get_multi_pk_constraint( 1358 | connection, 1359 | schema, 1360 | filter_names=[table_name], 1361 | scope=ObjectScope.ANY, 1362 | kind=ObjectKind.ANY, 1363 | **kw, 1364 | ) 1365 | return self._value_or_raise(data, 
table_name, schema) 1366 | 1367 | def get_multi_pk_constraint( 1368 | self, 1369 | connection, 1370 | schema, 1371 | filter_names, 1372 | scope, 1373 | kind, 1374 | **kw, 1375 | ): 1376 | schema_name = self.get_schema(schema) 1377 | key_constraints = ischema.key_constraints 1378 | constraints = ischema.constraints 1379 | 1380 | all_objects = self._get_all_objects( 1381 | connection, schema, filter_names, scope, kind 1382 | ) 1383 | if not all_objects: 1384 | return util.defaultdict(list) 1385 | 1386 | s = ( 1387 | sql.select( 1388 | key_constraints.c.table_name, 1389 | key_constraints.c.constraint_name, 1390 | key_constraints.c.column_name, 1391 | ) 1392 | .join( 1393 | constraints, 1394 | sql.and_( 1395 | key_constraints.c.constraint_name == constraints.c.constraint_name, 1396 | key_constraints.c.table_schema == constraints.c.table_schema, 1397 | ), 1398 | ) 1399 | .where( 1400 | sql.and_( 1401 | key_constraints.c.table_schema == str(schema_name), 1402 | key_constraints.c.table_name.in_(all_objects), 1403 | constraints.c.constraint_type == "PRIMARY KEY", 1404 | ) 1405 | ) 1406 | .order_by( 1407 | key_constraints.c.table_name, 1408 | key_constraints.c.constraint_name, 1409 | key_constraints.c.ordinal_position, 1410 | ) 1411 | ) 1412 | 1413 | rs = connection.execute(s) 1414 | 1415 | primary_keys = util.defaultdict(dict) 1416 | default = ReflectionDefaults.pk_constraint 1417 | 1418 | constraint_name = None 1419 | for row in rs: 1420 | ( 1421 | table_name, 1422 | name, 1423 | colname, 1424 | ) = row 1425 | constraint_name = self.normalize_name(name) 1426 | 1427 | table_pk = primary_keys[(schema, table_name)] 1428 | if not table_pk: 1429 | table_pk["name"] = constraint_name 1430 | table_pk["constrained_columns"] = [colname] 1431 | else: 1432 | table_pk["constrained_columns"].append(colname) 1433 | 1434 | return ( 1435 | (key, primary_keys[key] if key in primary_keys else default()) 1436 | for key in ( 1437 | (schema, self.normalize_name(obj_name)) for obj_name in all_objects 1438 | ) 1439 | ) 1440 | 1441 | def _value_or_raise(self, data, table, schema): 1442 | table = self.normalize_name(str(table)) 1443 | try: 1444 | return dict(data)[(schema, table)] 1445 | except KeyError: 1446 | raise exc.NoSuchTableError( 1447 | f"{schema}.{table}" if schema else table 1448 | ) from None 1449 | 1450 | @reflection.cache 1451 | def get_unique_constraints(self, connection, table_name, schema=None, **kw): 1452 | data = self.get_multi_unique_constraints( 1453 | connection, 1454 | schema=schema, 1455 | filter_names=[table_name], 1456 | scope=ObjectScope.ANY, 1457 | kind=ObjectKind.ANY, 1458 | **kw, 1459 | ) 1460 | return self._value_or_raise(data, table_name, schema) 1461 | 1462 | def get_multi_unique_constraints( 1463 | self, 1464 | connection, 1465 | schema, 1466 | filter_names, 1467 | scope, 1468 | kind, 1469 | **kw, 1470 | ): 1471 | return self.get_multi_indexes( 1472 | connection, schema, filter_names, scope, kind, unique=True, **kw 1473 | ) 1474 | 1475 | @reflection.cache 1476 | def get_foreign_keys(self, connection, table_name, schema=None, **kw): 1477 | data = self.get_multi_foreign_keys( 1478 | connection, 1479 | schema, 1480 | filter_names=[table_name], 1481 | scope=ObjectScope.ANY, 1482 | kind=ObjectKind.ANY, 1483 | **kw, 1484 | ) 1485 | return self._value_or_raise(data, table_name, schema) 1486 | 1487 | def get_multi_foreign_keys( 1488 | self, 1489 | connection, 1490 | schema, 1491 | filter_names, 1492 | scope, 1493 | kind, 1494 | **kw, 1495 | ): 1496 | schema_name = self.get_schema(schema) 1497 | 
ref_constraints = ischema.ref_constraints 1498 | key_constraints = ischema.key_constraints 1499 | key_constraints_ref = aliased(ischema.key_constraints) 1500 | 1501 | all_objects = self._get_all_objects( 1502 | connection, schema, filter_names, scope, kind 1503 | ) 1504 | if not all_objects: 1505 | return util.defaultdict(list) 1506 | 1507 | s = ( 1508 | sql.select( 1509 | key_constraints.c.table_name, 1510 | key_constraints.c.constraint_name, 1511 | key_constraints.c.column_name, 1512 | key_constraints_ref.c.table_schema, 1513 | key_constraints_ref.c.table_name, 1514 | key_constraints_ref.c.column_name, 1515 | ref_constraints.c.match_option, 1516 | ref_constraints.c.update_rule, 1517 | ref_constraints.c.delete_rule, 1518 | ) 1519 | .join( 1520 | key_constraints, 1521 | sql.and_( 1522 | key_constraints.c.table_schema 1523 | == ref_constraints.c.constraint_schema, 1524 | key_constraints.c.constraint_name 1525 | == ref_constraints.c.constraint_name, 1526 | ), 1527 | ) 1528 | .join( 1529 | key_constraints_ref, 1530 | sql.and_( 1531 | key_constraints_ref.c.constraint_schema 1532 | == ref_constraints.c.unique_constraint_schema, 1533 | key_constraints_ref.c.constraint_name 1534 | == ref_constraints.c.unique_constraint_name, 1535 | key_constraints_ref.c.ordinal_position 1536 | == key_constraints.c.ordinal_position, 1537 | ), 1538 | ) 1539 | .where( 1540 | sql.and_( 1541 | key_constraints.c.table_schema == str(schema_name), 1542 | key_constraints.c.table_name.in_(all_objects), 1543 | ) 1544 | ) 1545 | .order_by( 1546 | key_constraints.c.constraint_name, 1547 | key_constraints.c.ordinal_position, 1548 | ) 1549 | ) 1550 | 1551 | rs = connection.execution_options(future_result=True).execute(s) 1552 | 1553 | fkeys = util.defaultdict(dict) 1554 | 1555 | for row in rs.mappings(): 1556 | table_name = row[key_constraints.c.table_name] 1557 | rfknm = row[key_constraints.c.constraint_name] 1558 | scol = row[key_constraints.c.column_name] 1559 | rschema = row[key_constraints_ref.c.table_schema] 1560 | rtbl = row[key_constraints_ref.c.table_name] 1561 | rcol = row[key_constraints_ref.c.column_name] 1562 | _ = row[ref_constraints.c.match_option] 1563 | fkuprule = row[ref_constraints.c.update_rule] 1564 | fkdelrule = row[ref_constraints.c.delete_rule] 1565 | 1566 | table_fkey = fkeys[(schema, table_name)] 1567 | 1568 | if rfknm not in table_fkey: 1569 | table_fkey[rfknm] = fkey = { 1570 | "name": rfknm, 1571 | "constrained_columns": [], 1572 | "referred_schema": ( 1573 | rschema if rschema != self.default_schema_name else None 1574 | ), 1575 | "referred_table": rtbl, 1576 | "referred_columns": [], 1577 | "options": {}, 1578 | } 1579 | else: 1580 | fkey = table_fkey[rfknm] 1581 | 1582 | if fkuprule != "NO ACTION": 1583 | fkey["options"]["onupdate"] = fkuprule 1584 | 1585 | if fkdelrule != "NO ACTION": 1586 | fkey["options"]["ondelete"] = fkdelrule 1587 | 1588 | if scol not in fkey["constrained_columns"]: 1589 | fkey["constrained_columns"].append(scol) 1590 | if rcol not in fkey["referred_columns"]: 1591 | fkey["referred_columns"].append(rcol) 1592 | 1593 | default = ReflectionDefaults.foreign_keys 1594 | 1595 | return ( 1596 | (key, list(fkeys[key].values()) if key in fkeys else default()) 1597 | for key in ( 1598 | (schema, self.normalize_name(obj_name)) for obj_name in all_objects 1599 | ) 1600 | ) 1601 | 1602 | def get_columns(self, connection, table_name, schema=None, **kw): 1603 | data = self.get_multi_columns( 1604 | connection, 1605 | schema, 1606 | filter_names=[table_name], 1607 | scope=ObjectScope.ANY, 
1608 | kind=ObjectKind.ANY, 1609 | **kw, 1610 | ) 1611 | return self._value_or_raise(data, table_name, schema) 1612 | 1613 | def get_multi_columns( 1614 | self, 1615 | connection, 1616 | schema, 1617 | filter_names, 1618 | scope, 1619 | kind, 1620 | **kw, 1621 | ): 1622 | schema_name = self.get_schema(schema) 1623 | tables = ischema.tables 1624 | columns = ischema.columns 1625 | property = ischema.property_definition 1626 | 1627 | all_objects = self._get_all_objects( 1628 | connection, schema, filter_names, scope, kind 1629 | ) 1630 | if not all_objects: 1631 | return util.defaultdict(list) 1632 | 1633 | s = ( 1634 | sql.select( 1635 | columns.c.table_name, 1636 | columns.c.column_name, 1637 | columns.c.data_type, 1638 | columns.c.is_nullable, 1639 | columns.c.character_maximum_length, 1640 | columns.c.numeric_precision, 1641 | columns.c.numeric_scale, 1642 | columns.c.column_default, 1643 | columns.c.collation_name, 1644 | columns.c.auto_increment, 1645 | columns.c.description, 1646 | ) 1647 | .select_from(columns) 1648 | .where( 1649 | columns.c.table_schema == str(schema_name), 1650 | ) 1651 | .order_by(columns.c.ordinal_position) 1652 | ) 1653 | if all_objects: 1654 | s = s.where(columns.c.table_name.in_(all_objects)) 1655 | 1656 | if self._dictionary_access: 1657 | s = s.add_columns( 1658 | property.c.SqlComputeCode, 1659 | property.c.Calculated, 1660 | property.c.Transient, 1661 | ).outerjoin( 1662 | property, 1663 | sql.and_( 1664 | sql.or_( 1665 | property.c.Name == columns.c.column_name, 1666 | property.c.SqlFieldName == columns.c.column_name, 1667 | ), 1668 | property.c.parent 1669 | == sql.select(tables.c.classname) 1670 | .where( 1671 | columns.c.table_name == tables.c.table_name, 1672 | columns.c.table_schema == tables.c.table_schema, 1673 | ) 1674 | .scalar_subquery(), 1675 | ), 1676 | ) 1677 | 1678 | c = connection.execution_options(future_result=True).execute(s) 1679 | 1680 | cols = util.defaultdict(list) 1681 | 1682 | for row in c.mappings(): 1683 | table_name = row[columns.c.table_name] 1684 | name = row[columns.c.column_name] 1685 | type_ = row[columns.c.data_type].upper() 1686 | nullable = row[columns.c.is_nullable] 1687 | charlen = row[columns.c.character_maximum_length] 1688 | numericprec = row[columns.c.numeric_precision] 1689 | numericscale = row[columns.c.numeric_scale] 1690 | default = row[columns.c.column_default] 1691 | collation = row[columns.c.collation_name] 1692 | autoincrement = row[columns.c.auto_increment] 1693 | sqlComputeCode = calculated = transient = None 1694 | if self._dictionary_access: 1695 | sqlComputeCode = row[property.c.SqlComputeCode] 1696 | calculated = row[property.c.Calculated] 1697 | transient = row[property.c.Transient] 1698 | comment = row[columns.c.description] 1699 | if comment: 1700 | # make it as \r 1701 | comment = comment.replace(' \t\t\t\t', '\r') 1702 | # restore \n 1703 | comment = comment.replace(' \t\t\t', '\n') 1704 | 1705 | coltype = self.ischema_names.get(type_, None) 1706 | 1707 | kwargs = {} 1708 | if coltype in ( 1709 | VARCHAR, 1710 | BINARY, 1711 | TEXT, 1712 | VARBINARY, 1713 | ): 1714 | if charlen == -1: 1715 | charlen = None 1716 | kwargs["length"] = 0 1717 | else: 1718 | try: 1719 | kwargs["length"] = int(charlen) 1720 | except ValueError: 1721 | kwargs["length"] = 0 1722 | if collation: 1723 | kwargs["collation"] = collation 1724 | if coltype is None: 1725 | util.warn("Did not recognize type '%s' of column '%s'" % (type_, name)) 1726 | coltype = sqltypes.NULLTYPE 1727 | elif coltype is VARCHAR and charlen == 1: 
1728 |                 # VARCHAR(1) as CHAR
1729 |                 coltype = CHAR
1730 |             else:
1731 |                 if issubclass(coltype, sqltypes.Numeric):
1732 |                     kwargs["precision"] = int(numericprec)
1733 |
1734 |                     if not issubclass(coltype, sqltypes.Float):
1735 |                         kwargs["scale"] = int(numericscale)
1736 |
1737 |                 coltype = coltype(**kwargs)
1738 |
1739 |             default = "" if default == "$c(0)" else default
1740 |             if default:
1741 |                 default = str(default)
1742 |                 if default.startswith('"'):
1743 |                     default = "'%s'" % (default[1:-1].replace("'", "''"),)
1744 |
1745 |             cdict = {
1746 |                 "name": name,
1747 |                 "type": coltype,
1748 |                 "nullable": nullable,
1749 |                 "default": default,
1750 |                 "autoincrement": autoincrement,
1751 |                 "comment": comment,
1752 |             }
1753 |             if sqlComputeCode and "set {*} = " in sqlComputeCode.lower():
1754 |                 sqltext = sqlComputeCode
1755 |                 sqltext = sqltext.split(" = ")[1]
1756 |                 sqltext = re.sub(r"{(\b\w+\b)}", r"\g<1>", sqltext)
1757 |                 persisted = not calculated and not transient
1758 |                 cdict["computed"] = {
1759 |                     "sqltext": sqltext,
1760 |                     "persisted": persisted,
1761 |                 }
1762 |             cols[(schema, table_name)].append(cdict)
1763 |
1764 |         return cols
1765 |
1766 |     @reflection.cache
1767 |     def get_view_names(self, connection, schema=None, **kw):
1768 |         schema_name = self.get_schema(schema)
1769 |         views = ischema.views
1770 |         s = (
1771 |             sql.select(views.c.table_name)
1772 |             .where(
1773 |                 views.c.table_schema == str(schema_name),
1774 |             )
1775 |             .order_by(views.c.table_name)
1776 |         )
1777 |         view_names = [r[0] for r in connection.execute(s)]
1778 |         return view_names
1779 |
1780 |     @reflection.cache
1781 |     def get_view_definition(self, connection, view_name, schema=None, **kw):
1782 |         schema_name = self.get_schema(schema)
1783 |         views = ischema.views
1784 |
1785 |         view_def = connection.execute(
1786 |             sql.select(views.c.view_definition).where(
1787 |                 views.c.table_schema == str(schema_name),
1788 |                 views.c.table_name == str(view_name),
1789 |             )
1790 |         ).scalar()
1791 |
1792 |         if view_def:
1793 |             return view_def
1794 |         raise exc.NoSuchTableError(f"{schema}.{view_name}")
1795 |
1796 |     def normalize_name(self, name):
1797 |         if self.identifier_preparer._requires_quotes(name):
1798 |             return quoted_name(name, quote=True)
1799 |         return name
1800 |
--------------------------------------------------------------------------------
/sqlalchemy_iris/embedded.py:
--------------------------------------------------------------------------------
1 | from .base import IRISDialect
2 |
3 |
4 | class IRISDialect_emb(IRISDialect):
5 |     driver = "emb"
6 |
7 |     embedded = True
8 |
9 |     supports_statement_cache = True
10 |
11 |     def _get_option(self, connection, option):
12 |         return connection.iris.cls("%SYSTEM.SQL.Util").GetOption(option)
13 |
14 |     def _set_option(self, connection, option, value):
15 |         return connection.iris.cls("%SYSTEM.SQL.Util").SetOption(option, value)
16 |
17 |     @classmethod
18 |     def import_dbapi(cls):
19 |         import intersystems_iris.dbapi._DBAPI as dbapi
20 |
21 |         return dbapi
22 |
23 |     def _get_server_version_info(self, connection):
24 |         server_version = connection._dbapi_connection.iris.system.Version.GetNumber()
25 |         server_version = server_version.split(".")
26 |         return tuple([int("".join(filter(str.isdigit, v))) for v in server_version])
27 |
28 |
29 | dialect = IRISDialect_emb
30 |
--------------------------------------------------------------------------------
/sqlalchemy_iris/information_schema.py:
--------------------------------------------------------------------------------
1 | from sqlalchemy.types import TypeDecorator
2 | from
sqlalchemy import Column 3 | from sqlalchemy import MetaData 4 | from sqlalchemy import Table 5 | from sqlalchemy.types import Integer 6 | from sqlalchemy.types import String 7 | from sqlalchemy.types import Boolean 8 | 9 | 10 | ischema = MetaData() 11 | 12 | 13 | class YESNO(TypeDecorator): 14 | impl = String 15 | 16 | cache_ok = True 17 | 18 | def __init__(self, length=None, **kwargs): 19 | super().__init__(length, **kwargs) 20 | 21 | def process_literal_param(self, value, dialect): 22 | return 'YES' if value else 'NO' 23 | 24 | process_bind_param = process_literal_param 25 | 26 | def process_result_value(self, value, dialect): 27 | return value == 'YES' 28 | 29 | 30 | schemata = Table( 31 | "SCHEMATA", 32 | ischema, 33 | Column("CATALOG_NAME", String, key="catalog_name"), 34 | Column("SCHEMA_NAME", String, key="schema_name"), 35 | Column("SCHEMA_OWNER", String, key="schema_owner"), 36 | schema="INFORMATION_SCHEMA", 37 | ) 38 | 39 | tables = Table( 40 | "TABLES", 41 | ischema, 42 | Column("TABLE_CATALOG", String, key="table_catalog"), 43 | Column("TABLE_SCHEMA", String, key="table_schema"), 44 | Column("TABLE_NAME", String, key="table_name"), 45 | Column("TABLE_TYPE", String, key="table_type"), 46 | Column("CLASSNAME", String, key="classname"), 47 | Column("DESCRIPTION", String, key="description"), 48 | schema="INFORMATION_SCHEMA", 49 | ) 50 | 51 | columns = Table( 52 | "COLUMNS", 53 | ischema, 54 | Column("TABLE_CATALOG", String, key="table_catalog"), 55 | Column("TABLE_SCHEMA", String, key="table_schema"), 56 | Column("TABLE_NAME", String, key="table_name"), 57 | Column("COLUMN_NAME", String, key="column_name"), 58 | Column("ORDINAL_POSITION", Integer, key="ordinal_position"), 59 | Column("COLUMN_DEFAULT", Integer, key="column_default"), 60 | Column("IS_NULLABLE", YESNO, key="is_nullable"), 61 | Column("IS_IDENTITY", YESNO, key="is_identity"), 62 | Column("IS_GENERATED", YESNO, key="is_generated"), 63 | Column("DATA_TYPE", String, key="data_type"), 64 | Column( 65 | "CHARACTER_MAXIMUM_LENGTH", Integer, key="character_maximum_length" 66 | ), 67 | Column("NUMERIC_PRECISION", Integer, key="numeric_precision"), 68 | Column("NUMERIC_SCALE", Integer, key="numeric_scale"), 69 | Column("COLLATION_NAME", String, key="collation_name"), 70 | Column("AUTO_INCREMENT", YESNO, key="auto_increment"), 71 | Column("UNIQUE_COLUMN", YESNO, key="unique_column"), 72 | Column("PRIMARY_KEY", YESNO, key="primary_key"), 73 | Column("DESCRIPTION", String, key="description"), 74 | schema="INFORMATION_SCHEMA", 75 | ) 76 | property_definition = Table( 77 | "PropertyDefinition", 78 | ischema, 79 | Column("parent", String), 80 | Column("Name", String), 81 | Column("SqlFieldName", String), 82 | Column("SqlComputeCode", String), 83 | Column("SqlComputed", Boolean), 84 | Column("Calculated", Boolean), 85 | Column("Transient", Boolean), 86 | schema="%Dictionary", 87 | ) 88 | 89 | indexes = Table( 90 | "INDEXES", 91 | ischema, 92 | Column("TABLE_CATALOG", String, key="table_catalog"), 93 | Column("TABLE_SCHEMA", String, key="table_schema"), 94 | Column("TABLE_NAME", String, key="table_name"), 95 | Column("NON_UNIQUE", Boolean, key="non_unique"), 96 | Column("INDEX_CATALOG", String, key="index_catalog"), 97 | Column("INDEX_SCHEMA", String, key="index_schema"), 98 | Column("INDEX_NAME", String, key="index_name"), 99 | Column("ORDINAL_POSITION", Integer, key="ordinal_position"), 100 | Column("COLUMN_NAME", String, key="column_name"), 101 | Column("ASC_OR_DESC", String, key="asc_or_desc"), 102 | Column("PRIMARY_KEY", Boolean, 
key="primary_key"), 103 | schema="INFORMATION_SCHEMA", 104 | ) 105 | 106 | index_definition = Table( 107 | "IndexDefinition", 108 | ischema, 109 | Column("parent", String), 110 | Column("SqlName", String), 111 | Column("Data", String), 112 | schema="%Dictionary", 113 | ) 114 | 115 | key_constraints = Table( 116 | "KEY_COLUMN_USAGE", 117 | ischema, 118 | Column("CONSTRAINT_SCHEMA", String, key="constraint_schema"), 119 | Column("CONSTRAINT_NAME", String, key="constraint_name"), 120 | Column("TABLE_SCHEMA", String, key="table_schema"), 121 | Column("TABLE_NAME", String, key="table_name"), 122 | Column("COLUMN_NAME", String, key="column_name"), 123 | Column("ORDINAL_POSITION", Integer, key="ordinal_position"), 124 | Column("CONSTRAINT_TYPE", String, key="constraint_type"), 125 | schema="INFORMATION_SCHEMA", 126 | ) 127 | 128 | constraints = Table( 129 | "TABLE_CONSTRAINTS", 130 | ischema, 131 | Column("TABLE_SCHEMA", String, key="table_schema"), 132 | Column("TABLE_NAME", String, key="table_name"), 133 | Column("CONSTRAINT_NAME", String, key="constraint_name"), 134 | Column("CONSTRAINT_TYPE", String, key="constraint_type"), 135 | schema="INFORMATION_SCHEMA", 136 | ) 137 | 138 | column_constraints = Table( 139 | "CONSTRAINT_COLUMN_USAGE", 140 | ischema, 141 | Column("TABLE_SCHEMA", String, key="table_schema"), 142 | Column("TABLE_NAME", String, key="table_name"), 143 | Column("COLUMN_NAME", String, key="column_name"), 144 | Column("CONSTRAINT_NAME", String, key="constraint_name"), 145 | schema="INFORMATION_SCHEMA", 146 | ) 147 | 148 | ref_constraints = Table( 149 | "REFERENTIAL_CONSTRAINTS", 150 | ischema, 151 | Column("CONSTRAINT_CATALOG", String, key="constraint_catalog"), 152 | Column("CONSTRAINT_SCHEMA", String, key="constraint_schema"), 153 | Column("CONSTRAINT_TABLE_NAME", String, key="constraint_table_name"), 154 | Column("CONSTRAINT_NAME", String, key="constraint_name"), 155 | Column( 156 | "UNIQUE_CONSTRAINT_CATALOG", 157 | String, 158 | key="unique_constraint_catalog", 159 | ), 160 | Column( 161 | "UNIQUE_CONSTRAINT_SCHEMA", 162 | String, 163 | key="unique_constraint_schema", 164 | ), 165 | Column( 166 | "UNIQUE_CONSTRAINT_TABLE", String, key="unique_constraint_table" 167 | ), 168 | Column( 169 | "UNIQUE_CONSTRAINT_NAME", String, key="unique_constraint_name" 170 | ), 171 | Column("MATCH_OPTION", String, key="match_option"), 172 | Column("UPDATE_RULE", String, key="update_rule"), 173 | Column("DELETE_RULE", String, key="delete_rule"), 174 | schema="INFORMATION_SCHEMA", 175 | ) 176 | 177 | views = Table( 178 | "VIEWS", 179 | ischema, 180 | Column("TABLE_CATALOG", String, key="table_catalog"), 181 | Column("TABLE_SCHEMA", String, key="table_schema"), 182 | Column("TABLE_NAME", String, key="table_name"), 183 | Column("VIEW_DEFINITION", String, key="view_definition"), 184 | Column("CHECK_OPTION", String, key="check_option"), 185 | Column("IS_UPDATABLE", String, key="is_updatable"), 186 | schema="INFORMATION_SCHEMA", 187 | ) 188 | -------------------------------------------------------------------------------- /sqlalchemy_iris/intersystems/__init__.py: -------------------------------------------------------------------------------- 1 | import re 2 | from typing import Any 3 | from ..base import IRISDialect 4 | from ..base import IRISExecutionContext 5 | from . 
import dbapi
6 | from .dbapi import connect
7 | from .dbapi import IntegrityError, OperationalError, DatabaseError
8 | from sqlalchemy.engine.cursor import CursorFetchStrategy
9 |
10 |
11 | def remap_exception(func):
12 |     def wrapper(cursor, *args, **kwargs):
13 |         attempt = 0
14 |         while attempt < 3:
15 |             attempt += 1
16 |             try:
17 |                 cursor.sqlcode = 0
18 |                 return func(cursor, *args, **kwargs)
19 |             except RuntimeError as ex:
20 |                 # [SQLCODE: <-119>:...
21 |                 message = ex.args[0]
22 |                 if "" in message:
23 |                     # a random transient error in the driver; try again
24 |                     continue
25 |                 sqlcode = re.findall(r"^\[SQLCODE: <(-\d+)>:", message)
26 |                 if not sqlcode:
27 |                     raise Exception(message)
28 |                 sqlcode = int(sqlcode[0])
29 |                 if abs(sqlcode) in [108, 119, 121, 122]:
30 |                     raise IntegrityError(sqlcode, message)
31 |                 if abs(sqlcode) in [1, 12]:
32 |                     raise OperationalError(sqlcode, message)
33 |                 raise DatabaseError(sqlcode, message)
34 |
35 |     return wrapper
36 |
37 |
38 | class InterSystemsCursorFetchStrategy(CursorFetchStrategy):
39 |
40 |     def fetchone(
41 |         self,
42 |         result,
43 |         dbapi_cursor,
44 |         hard_close: bool = False,
45 |     ) -> Any:
46 |         row = dbapi_cursor.fetchone()
47 |         return tuple(row) if row else None
48 |
49 |
50 | class InterSystemsExecutionContext(IRISExecutionContext):
51 |     cursor_fetch_strategy = InterSystemsCursorFetchStrategy()
52 |
53 |
54 | class IRISDialect_intersystems(IRISDialect):
55 |     driver = "intersystems"
56 |
57 |     # execution_ctx_cls = InterSystemsExecutionContext
58 |
59 |     supports_statement_cache = True
60 |
61 |     supports_cte = False
62 |
63 |     supports_sane_rowcount = False
64 |     supports_sane_multi_rowcount = False
65 |
66 |     insert_returning = False
67 |     insert_executemany_returning = False
68 |
69 |     logfile = None
70 |
71 |     server_version = None
72 |
73 |     def __init__(self, logfile: str = None, **kwargs):
74 |         self.logfile = logfile
75 |         IRISDialect.__init__(self, **kwargs)
76 |
77 |     @classmethod
78 |     def import_dbapi(cls):
79 |         return dbapi
80 |
81 |     def connect(self, *cargs, **kwarg):
82 |         host = kwarg.get("hostname", "localhost")
83 |         port = kwarg.get("port", 1972)
84 |         namespace = kwarg.get("namespace", "USER")
85 |         username = kwarg.get("username", "_SYSTEM")
86 |         password = kwarg.get("password", "SYS")
87 |         timeout = kwarg.get("timeout", 10)
88 |         sharedmemory = kwarg.get("sharedmemory", False)
89 |         logfile = kwarg.get("logfile", self.logfile)
90 |         sslconfig = kwarg.get("sslconfig", False)
91 |         autoCommit = kwarg.get("autoCommit", False)
92 |         isolationLevel = kwarg.get("isolationLevel", 1)
93 |         return connect(
94 |             host,
95 |             port,
96 |             namespace,
97 |             username,
98 |             password,
99 |             timeout,
100 |             sharedmemory,
101 |             logfile,
102 |             sslconfig,
103 |             autoCommit,
104 |             isolationLevel,
105 |         )
106 |
107 |     def create_connect_args(self, url):
108 |         opts = {}
109 |
110 |         opts["application_name"] = "sqlalchemy"
111 |         opts["host"] = url.host
112 |         opts["port"] = int(url.port) if url.port else 1972
113 |         opts["namespace"] = url.database if url.database else "USER"
114 |         opts["username"] = url.username if url.username else ""
115 |         opts["password"] = url.password if url.password else ""
116 |
117 |         opts["autoCommit"] = False
118 |
119 |         if opts["host"] and "@" in opts["host"]:
120 |             _h = opts["host"].split("@")
121 |             opts["password"] += "@" + "@".join(_h[0 : len(_h) - 1])
122 |             opts["host"] = _h[len(_h) - 1]
123 |
124 |         return ([], opts)
125 |
126 |     def on_connect(self):
127 |         super_ = super().on_connect()
128 |
129 |         def on_connect(conn):
130 |             if super_ is not None:
131
super_(conn) 132 | 133 | server_version = dbapi.createIRIS(conn).classMethodValue( 134 | "%SYSTEM.Version", "GetNumber" 135 | ) 136 | server_version = server_version.split(".") 137 | self.server_version = tuple( 138 | [int("".join(filter(str.isdigit, v))) for v in server_version] 139 | ) 140 | 141 | return on_connect 142 | 143 | def _get_server_version_info(self, connection): 144 | return self.server_version 145 | 146 | def set_isolation_level(self, connection, level_str): 147 | if level_str == "AUTOCOMMIT": 148 | connection.autocommit = True 149 | else: 150 | connection.autocommit = False 151 | if level_str not in ["READ COMMITTED", "READ VERIFIED"]: 152 | level_str = "READ UNCOMMITTED" 153 | with connection.cursor() as cursor: 154 | cursor.execute("SET TRANSACTION ISOLATION LEVEL " + level_str) 155 | 156 | """ 157 | @remap_exception 158 | def do_execute(self, cursor, query, params, context=None): 159 | if query.endswith(";"): 160 | query = query[:-1] 161 | self._debug(query, params) 162 | cursor.execute(query, params) 163 | 164 | @remap_exception 165 | def do_executemany(self, cursor, query, params, context=None): 166 | if query.endswith(";"): 167 | query = query[:-1] 168 | self._debug(query, params, many=True) 169 | if params and (len(params[0]) <= 1): 170 | params = [param[0] if len(param) else None for param in params] 171 | cursor.executemany(query, params) 172 | 173 | """ 174 | 175 | dialect = IRISDialect_intersystems 176 | -------------------------------------------------------------------------------- /sqlalchemy_iris/intersystems/dbapi.py: -------------------------------------------------------------------------------- 1 | try: 2 | import iris 3 | 4 | class Cursor(iris.irissdk.dbapiCursor): 5 | pass 6 | 7 | class DataRow(iris.irissdk.dbapiDataRow): 8 | pass 9 | 10 | except ImportError: 11 | pass 12 | 13 | 14 | def connect(*args, **kwargs): 15 | return iris.connect(*args, **kwargs) 16 | 17 | 18 | def createIRIS(*args, **kwargs): 19 | return iris.createIRIS(*args, **kwargs) 20 | 21 | 22 | # globals 23 | apilevel = "2.0" 24 | threadsafety = 0 25 | paramstyle = "qmark" 26 | 27 | Binary = bytes 28 | STRING = str 29 | BINARY = bytes 30 | NUMBER = float 31 | ROWID = str 32 | 33 | 34 | class Error(Exception): 35 | pass 36 | 37 | 38 | class Warning(Exception): 39 | pass 40 | 41 | 42 | class InterfaceError(Error): 43 | pass 44 | 45 | 46 | class DatabaseError(Error): 47 | pass 48 | 49 | 50 | class InternalError(DatabaseError): 51 | pass 52 | 53 | 54 | class OperationalError(DatabaseError): 55 | pass 56 | 57 | 58 | class ProgrammingError(DatabaseError): 59 | pass 60 | 61 | 62 | class IntegrityError(DatabaseError): 63 | pass 64 | 65 | 66 | class DataError(DatabaseError): 67 | pass 68 | 69 | 70 | class NotSupportedError(DatabaseError): 71 | pass 72 | -------------------------------------------------------------------------------- /sqlalchemy_iris/iris.py: -------------------------------------------------------------------------------- 1 | from .base import IRISDialect 2 | 3 | 4 | class IRISDialect_iris(IRISDialect): 5 | driver = "iris" 6 | 7 | supports_statement_cache = True 8 | 9 | @classmethod 10 | def import_dbapi(cls): 11 | import intersystems_iris.dbapi._DBAPI as dbapi 12 | return dbapi 13 | 14 | 15 | dialect = IRISDialect_iris 16 | -------------------------------------------------------------------------------- /sqlalchemy_iris/irisasync.py: -------------------------------------------------------------------------------- 1 | from .base import IRISDialect 2 | 3 | 4 | class 
IRISDialect_irisasync(IRISDialect): 5 | driver = "irisasync" 6 | 7 | is_async = True 8 | supports_statement_cache = True 9 | 10 | @classmethod 11 | def import_dbapi(cls): 12 | import intersystems_iris.dbapi._DBAPI as dbapi 13 | 14 | return dbapi 15 | 16 | 17 | dialect = IRISDialect_irisasync 18 | -------------------------------------------------------------------------------- /sqlalchemy_iris/provision.py: -------------------------------------------------------------------------------- 1 | from sqlalchemy.testing.provision import temp_table_keyword_args 2 | 3 | 4 | @temp_table_keyword_args.for_db("iris") 5 | def _iris_temp_table_keyword_args(cfg, eng): 6 | return {"prefixes": ["GLOBAL TEMPORARY"]} 7 | -------------------------------------------------------------------------------- /sqlalchemy_iris/requirements.py: -------------------------------------------------------------------------------- 1 | from sqlalchemy.testing.requirements import SuiteRequirements 2 | from sqlalchemy.testing.exclusions import against 3 | from sqlalchemy.testing.exclusions import only_on 4 | 5 | try: 6 | from alembic.testing.requirements import SuiteRequirements as AlembicRequirements 7 | except: # noqa 8 | from sqlalchemy.testing.requirements import Requirements as BaseRequirements 9 | 10 | class AlembicRequirements(BaseRequirements): 11 | pass 12 | 13 | 14 | from sqlalchemy.testing import exclusions 15 | 16 | 17 | class Requirements(SuiteRequirements, AlembicRequirements): 18 | 19 | @property 20 | def community_driver(self): 21 | return exclusions.only_if( 22 | lambda config: not config.db.dialect.driver == "intersystems", 23 | "Only on community driver" 24 | ) 25 | 26 | @property 27 | def intersystems_driver(self): 28 | return exclusions.only_if( 29 | lambda config: config.db.dialect.driver == "intersystems", 30 | "InterSystems official driver" 31 | ) 32 | 33 | @property 34 | def array_type(self): 35 | return exclusions.closed() 36 | 37 | @property 38 | def table_ddl_if_exists(self): 39 | return exclusions.open() 40 | 41 | @property 42 | def uuid_data_type(self): 43 | return exclusions.open() 44 | 45 | @property 46 | def check_constraints(self): 47 | """Target database must support check constraints.""" 48 | 49 | return exclusions.closed() 50 | 51 | @property 52 | def views(self): 53 | """Target database must support VIEWs.""" 54 | 55 | return exclusions.open() 56 | 57 | @property 58 | def supports_distinct_on(self): 59 | """If a backend supports the DISTINCT ON in a select""" 60 | return exclusions.open() 61 | 62 | @property 63 | def reflects_pk_names(self): 64 | return exclusions.open() 65 | 66 | @property 67 | def date_historic(self): 68 | """target dialect supports representation of Python 69 | datetime.datetime() objects with historic (pre 1970) values.""" 70 | 71 | return exclusions.open() 72 | 73 | @property 74 | def datetime_historic(self): 75 | """target dialect supports representation of Python 76 | datetime.datetime() objects with historic (pre 1970) values.""" 77 | 78 | return exclusions.open() 79 | 80 | @property 81 | def reflect_table_options(self): 82 | return exclusions.open() 83 | 84 | @property 85 | def comment_reflection(self): 86 | return exclusions.open() 87 | 88 | @property 89 | def insert_returning(self): 90 | return exclusions.skip_if( 91 | lambda config: not config.db.dialect.insert_returning, 92 | "driver doesn't support insert returning", 93 | ) 94 | 95 | @property 96 | def computed_columns(self): 97 | "Supports computed columns" 98 | return exclusions.open() 99 | 100 | @property 101 
| def computed_columns_stored(self): 102 | "Supports computed columns with `persisted=True`" 103 | return exclusions.open() 104 | 105 | @property 106 | def computed_columns_virtual(self): 107 | "Supports computed columns with `persisted=False`" 108 | return exclusions.open() 109 | 110 | @property 111 | def computed_columns_default_persisted(self): 112 | """If the default persistence is virtual or stored when `persisted` 113 | is omitted""" 114 | return exclusions.open() 115 | 116 | @property 117 | def computed_columns_reflect_persisted(self): 118 | """If persistence information is returned by the reflection of 119 | computed columns""" 120 | return exclusions.open() 121 | 122 | @property 123 | def two_phase_transactions(self): 124 | """Target database must support two-phase transactions.""" 125 | 126 | return exclusions.closed() 127 | 128 | @property 129 | def binary_comparisons(self): 130 | """target database/driver can allow BLOB/BINARY fields to be compared 131 | against a bound parameter value. 132 | """ 133 | 134 | return exclusions.closed() 135 | 136 | @property 137 | def binary_literals(self): 138 | """target backend supports simple binary literals, e.g. an 139 | expression like:: 140 | 141 | SELECT CAST('foo' AS BINARY) 142 | 143 | Where ``BINARY`` is the type emitted from :class:`.LargeBinary`, 144 | e.g. it could be ``BLOB`` or similar. 145 | """ 146 | 147 | return exclusions.open() 148 | 149 | @property 150 | def foreign_key_constraint_option_reflection_ondelete(self): 151 | return exclusions.open() 152 | 153 | @property 154 | def fk_constraint_option_reflection_ondelete_restrict(self): 155 | return exclusions.closed() 156 | 157 | @property 158 | def fk_constraint_option_reflection_ondelete_noaction(self): 159 | return exclusions.open() 160 | 161 | @property 162 | def foreign_key_constraint_option_reflection_onupdate(self): 163 | return exclusions.open() 164 | 165 | @property 166 | def fk_constraint_option_reflection_onupdate_restrict(self): 167 | return exclusions.closed() 168 | 169 | @property 170 | def precision_numerics_many_significant_digits(self): 171 | """target backend supports values with many digits on both sides, 172 | such as 319438950232418390.273596, 87673.594069654243 173 | 174 | """ 175 | return exclusions.closed() 176 | 177 | @property 178 | def symbol_names_w_double_quote(self): 179 | """Target driver can create tables with a name like 'some " table'""" 180 | return exclusions.closed() 181 | 182 | @property 183 | def unique_constraint_reflection(self): 184 | return exclusions.open() 185 | 186 | @property 187 | def index_reflects_included_columns(self): 188 | return exclusions.open() 189 | 190 | @property 191 | def intersect(self): 192 | """Target database must support INTERSECT or equivalent.""" 193 | return exclusions.closed() 194 | 195 | @property 196 | def except_(self): 197 | """Target database must support EXCEPT or equivalent (i.e. 
MINUS).""" 198 | return exclusions.closed() 199 | 200 | @property 201 | def boolean_col_expressions(self): 202 | """Target database must support boolean expressions as columns""" 203 | 204 | return exclusions.closed() 205 | 206 | @property 207 | def memory_process_intensive(self): 208 | """Driver is able to handle the memory tests which run in a subprocess 209 | and iterate through hundreds of connections 210 | 211 | """ 212 | return exclusions.closed() 213 | 214 | @property 215 | def ctes(self): 216 | """Target database supports CTEs""" 217 | return exclusions.skip_if( 218 | lambda config: not config.db.dialect.supports_cte, 219 | "driver doesn't support CTEs", 220 | ) 221 | 222 | @property 223 | def ctes_with_update_delete(self): 224 | """target database supports CTES that ride on top of a normal UPDATE 225 | or DELETE statement which refers to the CTE in a correlated subquery. 226 | 227 | """ 228 | 229 | return exclusions.open() 230 | 231 | @property 232 | def ctes_on_dml(self): 233 | """target database supports CTES which consist of INSERT, UPDATE 234 | or DELETE *within* the CTE, e.g. WITH x AS (UPDATE....)""" 235 | 236 | return exclusions.open() 237 | 238 | @property 239 | def autocommit(self): 240 | """target dialect supports 'AUTOCOMMIT' as an isolation_level""" 241 | return exclusions.open() 242 | 243 | def get_isolation_levels(self, config): 244 | levels = set(config.db.dialect._isolation_lookup) 245 | 246 | default = "READ UNCOMMITTED" 247 | levels.add("AUTOCOMMIT") 248 | 249 | return {"default": default, "supported": levels} 250 | 251 | @property 252 | def regexp_match(self): 253 | """backend supports the regexp_match operator.""" 254 | # InterSystems use own format for %MATCHES and %PATTERN, it does not support Regular Expressions 255 | return exclusions.closed() 256 | 257 | @property 258 | def unique_constraints_reflect_as_index(self): 259 | """Target database reflects unique constraints as indexes.""" 260 | 261 | return exclusions.open() 262 | 263 | @property 264 | def temp_table_names(self): 265 | """target dialect supports listing of temporary table names""" 266 | return exclusions.open() 267 | 268 | @property 269 | def unique_index_reflect_as_unique_constraints(self): 270 | """Target database reflects unique indexes as unique constrains.""" 271 | 272 | return exclusions.open() 273 | 274 | # alembic 275 | 276 | @property 277 | def fk_onupdate_restrict(self): 278 | return exclusions.closed() 279 | 280 | @property 281 | def fk_ondelete_restrict(self): 282 | return exclusions.closed() 283 | 284 | def _iris_vector(self, config): 285 | if not against(config, "iris >= 2024.1"): 286 | return False 287 | else: 288 | return config.db.dialect.supports_vectors 289 | 290 | @property 291 | def iris_vector(self): 292 | return only_on(lambda config: self._iris_vector(config)) 293 | 294 | @property 295 | def index_ddl_if_exists(self): 296 | """target platform supports IF NOT EXISTS / IF EXISTS for indexes.""" 297 | 298 | return exclusions.closed() 299 | 300 | @property 301 | def foreign_keys(self): 302 | """Target database must support foreign keys.""" 303 | 304 | return exclusions.open() 305 | 306 | @property 307 | def foreign_keys_reflect_as_index(self): 308 | """Target database creates an index that's reflected for 309 | foreign keys.""" 310 | 311 | return exclusions.closed() 312 | 313 | @property 314 | def table_value_constructor(self): 315 | """Database / dialect supports a query like:: 316 | 317 | SELECT * FROM VALUES ( (c1, c2), (c1, c2), ...) 
318 | AS some_table(col1, col2) 319 | 320 | SQLAlchemy generates this with the :func:`_sql.values` function. 321 | 322 | """ 323 | return exclusions.closed() 324 | 325 | @property 326 | def standard_cursor_sql(self): 327 | """Target database passes SQL-92 style statements to cursor.execute() 328 | when a statement like select() or insert() is run. 329 | 330 | A very small portion of dialect-level tests will ensure that certain 331 | conditions are present in SQL strings, and these tests use very basic 332 | SQL that will work on any SQL-like platform in order to assert results. 333 | 334 | It's normally a given for any pep-249 DBAPI that a statement like 335 | "SELECT id, name FROM table WHERE some_table.id=5" will work. 336 | However, there are dialects that don't actually produce SQL Strings 337 | and instead may work with symbolic objects instead, or dialects that 338 | aren't working with SQL, so for those this requirement can be marked 339 | as excluded. 340 | 341 | """ 342 | 343 | return exclusions.open() 344 | 345 | @property 346 | def on_update_cascade(self): 347 | """target database must support ON UPDATE..CASCADE behavior in 348 | foreign keys.""" 349 | 350 | return exclusions.open() 351 | 352 | @property 353 | def non_updating_cascade(self): 354 | """target database must *not* support ON UPDATE..CASCADE behavior in 355 | foreign keys.""" 356 | return exclusions.closed() 357 | 358 | @property 359 | def deferrable_fks(self): 360 | return exclusions.closed() 361 | 362 | @property 363 | def on_update_or_deferrable_fks(self): 364 | # TODO: exclusions should be composable, 365 | # somehow only_if([x, y]) isn't working here, negation/conjunctions 366 | # getting confused. 367 | return exclusions.only_if( 368 | lambda: self.on_update_cascade.enabled 369 | or self.deferrable_fks.enabled 370 | ) 371 | 372 | @property 373 | def self_referential_foreign_keys(self): 374 | """Target database must support self-referential foreign keys.""" 375 | 376 | return exclusions.open() 377 | 378 | @property 379 | def foreign_key_ddl(self): 380 | """Target database must support the DDL phrases for FOREIGN KEY.""" 381 | 382 | return exclusions.open() 383 | 384 | @property 385 | def named_constraints(self): 386 | """target database must support names for constraints.""" 387 | 388 | return exclusions.open() 389 | 390 | @property 391 | def implicitly_named_constraints(self): 392 | """target database must apply names to unnamed constraints.""" 393 | 394 | return exclusions.open() 395 | 396 | @property 397 | def unusual_column_name_characters(self): 398 | """target database allows column names that have unusual characters 399 | in them, such as dots, spaces, slashes, or percent signs. 400 | 401 | The column names are as always in such a case quoted, however the 402 | DB still needs to support those characters in the name somehow. 403 | 404 | """ 405 | return exclusions.open() 406 | 407 | @property 408 | def subqueries(self): 409 | """Target database must support subqueries.""" 410 | 411 | return exclusions.open() 412 | 413 | @property 414 | def offset(self): 415 | """target database can render OFFSET, or an equivalent, in a 416 | SELECT. 
417 | """ 418 | 419 | return exclusions.open() 420 | 421 | @property 422 | def bound_limit_offset(self): 423 | """target database can render LIMIT and/or OFFSET using a bound 424 | parameter 425 | """ 426 | 427 | return exclusions.open() 428 | 429 | @property 430 | def sql_expression_limit_offset(self): 431 | """target database can render LIMIT and/or OFFSET with a complete 432 | SQL expression, such as one that uses the addition operator. 433 | parameter 434 | """ 435 | 436 | return exclusions.open() 437 | 438 | @property 439 | def parens_in_union_contained_select_w_limit_offset(self): 440 | """Target database must support parenthesized SELECT in UNION 441 | when LIMIT/OFFSET is specifically present. 442 | 443 | E.g. (SELECT ...) UNION (SELECT ..) 444 | """ 445 | return exclusions.open() 446 | 447 | @property 448 | def parens_in_union_contained_select_wo_limit_offset(self): 449 | """Target database must support parenthesized SELECT in UNION 450 | when OFFSET/LIMIT is specifically not present. 451 | 452 | E.g. (SELECT ... LIMIT ..) UNION (SELECT .. OFFSET ..) 453 | """ 454 | return exclusions.open() 455 | 456 | @property 457 | def nullable_booleans(self): 458 | """Target database allows boolean columns to store NULL.""" 459 | 460 | return exclusions.open() 461 | 462 | @property 463 | def nullsordering(self): 464 | """Target backends that support nulls ordering.""" 465 | 466 | return exclusions.closed() 467 | 468 | @property 469 | def standalone_binds(self): 470 | """target database/driver supports bound parameters as column 471 | expressions without being in the context of a typed column. 472 | """ 473 | return exclusions.open() 474 | 475 | @property 476 | def standalone_null_binds_whereclause(self): 477 | """target database/driver supports bound parameters with NULL in the 478 | WHERE clause, in situations where it has to be typed. 479 | 480 | """ 481 | return exclusions.open() 482 | 483 | @property 484 | def window_functions(self): 485 | """Target database must support window functions.""" 486 | return exclusions.closed() 487 | 488 | @property 489 | def autoincrement_insert(self): 490 | """target platform generates new surrogate integer primary key values 491 | when insert() is executed, excluding the pk column.""" 492 | 493 | return exclusions.open() 494 | 495 | @property 496 | def fetch_rows_post_commit(self): 497 | """target platform will allow cursor.fetchone() to proceed after a 498 | COMMIT. 499 | 500 | Typically this refers to an INSERT statement with RETURNING which 501 | is invoked within "autocommit". If the row can be returned 502 | after the autocommit, then this rule can be open. 503 | 504 | """ 505 | 506 | return exclusions.open() 507 | 508 | @property 509 | def group_by_complex_expression(self): 510 | """target platform supports SQL expressions in GROUP BY 511 | 512 | e.g. 
513 | 514 | SELECT x + y AS somelabel FROM table GROUP BY x + y 515 | 516 | """ 517 | 518 | return exclusions.open() 519 | 520 | @property 521 | def sane_rowcount(self): 522 | return exclusions.skip_if( 523 | lambda config: not config.db.dialect.supports_sane_rowcount, 524 | "driver doesn't support 'sane' rowcount", 525 | ) 526 | 527 | @property 528 | def sane_multi_rowcount(self): 529 | return exclusions.fails_if( 530 | lambda config: not config.db.dialect.supports_sane_multi_rowcount, 531 | "driver %(driver)s %(doesnt_support)s 'sane' multi row count", 532 | ) 533 | 534 | @property 535 | def sane_rowcount_w_returning(self): 536 | return exclusions.fails_if( 537 | lambda config: not ( 538 | config.db.dialect.supports_sane_rowcount_returning 539 | ), 540 | "driver doesn't support 'sane' rowcount when returning is on", 541 | ) 542 | 543 | @property 544 | def empty_inserts(self): 545 | """target platform supports INSERT with no values, i.e. 546 | INSERT DEFAULT VALUES or equivalent.""" 547 | 548 | return exclusions.only_if( 549 | lambda config: config.db.dialect.supports_empty_insert 550 | or config.db.dialect.supports_default_values 551 | or config.db.dialect.supports_default_metavalue, 552 | "empty inserts not supported", 553 | ) 554 | 555 | @property 556 | def empty_inserts_executemany(self): 557 | """target platform supports INSERT with no values, i.e. 558 | INSERT DEFAULT VALUES or equivalent, within executemany()""" 559 | 560 | return self.empty_inserts 561 | 562 | @property 563 | def insert_from_select(self): 564 | """target platform supports INSERT from a SELECT.""" 565 | 566 | return exclusions.open() 567 | 568 | @property 569 | def delete_returning(self): 570 | """target platform supports DELETE ... RETURNING.""" 571 | 572 | return exclusions.only_if( 573 | lambda config: config.db.dialect.delete_returning, 574 | "%(database)s %(does_support)s 'DELETE ... RETURNING'", 575 | ) 576 | 577 | @property 578 | def update_returning(self): 579 | """target platform supports UPDATE ... RETURNING.""" 580 | 581 | return exclusions.only_if( 582 | lambda config: config.db.dialect.update_returning, 583 | "%(database)s %(does_support)s 'UPDATE ... RETURNING'", 584 | ) 585 | 586 | @property 587 | def insert_executemany_returning(self): 588 | """target platform supports RETURNING when INSERT is used with 589 | executemany(), e.g. multiple parameter sets, indicating 590 | as many rows come back as do parameter sets were passed. 
591 | 592 | """ 593 | 594 | return exclusions.only_if( 595 | lambda config: config.db.dialect.insert_executemany_returning, 596 | "%(database)s %(does_support)s 'RETURNING of " 597 | "multiple rows with INSERT executemany'", 598 | ) 599 | 600 | @property 601 | def insertmanyvalues(self): 602 | return exclusions.only_if( 603 | lambda config: config.db.dialect.supports_multivalues_insert 604 | and config.db.dialect.insert_returning 605 | and config.db.dialect.use_insertmanyvalues, 606 | "%(database)s %(does_support)s 'insertmanyvalues functionality", 607 | ) 608 | 609 | @property 610 | def tuple_in(self): 611 | """Target platform supports the syntax 612 | "(x, y) IN ((x1, y1), (x2, y2), ...)" 613 | """ 614 | 615 | return exclusions.closed() 616 | 617 | @property 618 | def tuple_in_w_empty(self): 619 | """Target platform tuple IN w/ empty set""" 620 | return self.tuple_in 621 | 622 | @property 623 | def duplicate_names_in_cursor_description(self): 624 | """target platform supports a SELECT statement that has 625 | the same name repeated more than once in the columns list.""" 626 | 627 | return exclusions.open() 628 | 629 | @property 630 | def denormalized_names(self): 631 | """Target database must have 'denormalized', i.e. 632 | UPPERCASE as case insensitive names.""" 633 | 634 | return exclusions.skip_if( 635 | lambda config: not config.db.dialect.requires_name_normalize, 636 | "Backend does not require denormalized names.", 637 | ) 638 | 639 | @property 640 | def multivalues_inserts(self): 641 | """target database must support multiple VALUES clauses in an 642 | INSERT statement.""" 643 | 644 | return exclusions.skip_if( 645 | lambda config: not config.db.dialect.supports_multivalues_insert, 646 | "Backend does not support multirow inserts.", 647 | ) 648 | 649 | @property 650 | def implements_get_lastrowid(self): 651 | """target dialect implements the executioncontext.get_lastrowid() 652 | method without reliance on RETURNING. 653 | 654 | """ 655 | return exclusions.open() 656 | 657 | @property 658 | def arraysize(self): 659 | """dialect includes the required pep-249 attribute 660 | ``cursor.arraysize``""" 661 | 662 | return exclusions.open() 663 | 664 | @property 665 | def emulated_lastrowid(self): 666 | """target dialect retrieves cursor.lastrowid, or fetches 667 | from a database-side function after an insert() construct executes, 668 | within the get_lastrowid() method. 669 | 670 | Only dialects that "pre-execute", or need RETURNING to get last 671 | inserted id, would return closed/fail/skip for this. 672 | 673 | """ 674 | return exclusions.closed() 675 | 676 | @property 677 | def emulated_lastrowid_even_with_sequences(self): 678 | """target dialect retrieves cursor.lastrowid or an equivalent 679 | after an insert() construct executes, even if the table has a 680 | Sequence on it. 681 | 682 | """ 683 | return exclusions.closed() 684 | 685 | @property 686 | def dbapi_lastrowid(self): 687 | """target platform includes a 'lastrowid' accessor on the DBAPI 688 | cursor object. 
689 | 690 | """ 691 | return exclusions.closed() 692 | 693 | @property 694 | def schemas(self): 695 | """Target database must support external schemas, and have one 696 | named 'test_schema'.""" 697 | 698 | return only_on(lambda config: config.db.dialect.supports_schemas) 699 | 700 | @property 701 | def cross_schema_fk_reflection(self): 702 | """target system must support reflection of inter-schema 703 | foreign keys""" 704 | return exclusions.closed() 705 | 706 | @property 707 | def foreign_key_constraint_name_reflection(self): 708 | """Target supports reflection of FOREIGN KEY constraints and 709 | will return the name of the constraint that was used in the 710 | "CONSTRAINT FOREIGN KEY" DDL. 711 | """ 712 | return exclusions.closed() 713 | 714 | @property 715 | def implicit_default_schema(self): 716 | """target system has a strong concept of 'default' schema that can 717 | be referred to implicitly. 718 | """ 719 | return exclusions.closed() 720 | 721 | @property 722 | def default_schema_name_switch(self): 723 | """target dialect implements provisioning module including 724 | set_default_schema_on_connection""" 725 | 726 | return exclusions.closed() 727 | 728 | @property 729 | def server_side_cursors(self): 730 | """Target dialect must support server side cursors.""" 731 | 732 | return exclusions.only_if( 733 | [lambda config: config.db.dialect.supports_server_side_cursors], 734 | "no server side cursors support", 735 | ) 736 | 737 | @property 738 | def sequences(self): 739 | """Target database must support SEQUENCEs.""" 740 | 741 | return exclusions.only_if( 742 | [lambda config: config.db.dialect.supports_sequences], 743 | "no sequence support", 744 | ) 745 | 746 | @property 747 | def no_sequences(self): 748 | """the opposite of "sequences", DB does not support sequences at 749 | all.""" 750 | 751 | return exclusions.NotPredicate(self.sequences) 752 | 753 | @property 754 | def sequences_optional(self): 755 | """Target database supports sequences, but also optionally 756 | as a means of generating new PK values.""" 757 | 758 | return exclusions.only_if( 759 | [ 760 | lambda config: config.db.dialect.supports_sequences 761 | and config.db.dialect.sequences_optional 762 | ], 763 | "no sequence support, or sequences not optional", 764 | ) 765 | 766 | @property 767 | def supports_lastrowid(self): 768 | """target database / driver supports cursor.lastrowid as a means 769 | of retrieving the last inserted primary key value. 770 | 771 | note that if the target DB supports sequences also, this is still 772 | assumed to work. This is a new use case brought on by MariaDB 10.3. 773 | 774 | """ 775 | return exclusions.only_if( 776 | [lambda config: config.db.dialect.postfetch_lastrowid] 777 | ) 778 | 779 | @property 780 | def no_lastrowid_support(self): 781 | """the opposite of supports_lastrowid""" 782 | return exclusions.only_if( 783 | [lambda config: not config.db.dialect.postfetch_lastrowid] 784 | ) 785 | 786 | @property 787 | def table_reflection(self): 788 | """target database has general support for table reflection""" 789 | return exclusions.open() 790 | 791 | @property 792 | def reflect_tables_no_columns(self): 793 | """target database supports creation and reflection of tables with no 794 | columns, or at least tables that seem to have no columns.""" 795 | 796 | return exclusions.closed() 797 | 798 | @property 799 | def comment_reflection_full_unicode(self): 800 | """Indicates if the database support table comment reflection in the 801 | full unicode range, including emoji etc. 
798 |     @property
799 |     def comment_reflection_full_unicode(self):
800 |         """Indicates if the database supports table comment reflection in the
801 |         full unicode range, including emoji etc.
802 |         """
803 |         return exclusions.closed()
804 | 
805 |     @property
806 |     def constraint_comment_reflection(self):
807 |         """indicates if the database supports comments on constraints
808 |         and their reflection"""
809 |         return exclusions.closed()
810 | 
811 |     @property
812 |     def view_column_reflection(self):
813 |         """target database must support retrieval of the columns in a view,
814 |         similarly to how a table is inspected.
815 | 
816 |         This does not include the full CREATE VIEW definition.
817 | 
818 |         """
819 |         return self.views
820 | 
821 |     @property
822 |     def view_reflection(self):
823 |         """target database must support inspection of the full CREATE VIEW
824 |         definition."""
825 |         return self.views
826 | 
827 |     @property
828 |     def schema_reflection(self):
829 |         return self.schemas
830 | 
831 |     @property
832 |     def schema_create_delete(self):
833 |         """target database supports schema create and drop with
834 |         'CREATE SCHEMA' and 'DROP SCHEMA'"""
835 |         return exclusions.closed()
836 | 
837 |     @property
838 |     def primary_key_constraint_reflection(self):
839 |         return exclusions.open()
840 | 
841 |     @property
842 |     def foreign_key_constraint_reflection(self):
843 |         return exclusions.open()
844 | 
845 |     @property
846 |     def temp_table_reflection(self):
847 |         return exclusions.open()
848 | 
849 |     @property
850 |     def temp_table_reflect_indexes(self):
851 |         return self.temp_table_reflection
852 | 
853 |     @property
854 |     def has_temp_table(self):
855 |         """target dialect supports checking a single temp table name"""
856 |         return exclusions.closed()
857 | 
858 |     @property
859 |     def temporary_tables(self):
860 |         """target database supports temporary tables"""
861 |         return exclusions.open()
862 | 
863 |     @property
864 |     def temporary_views(self):
865 |         """target database supports temporary views"""
866 |         return exclusions.closed()
867 | 
868 |     @property
869 |     def index_reflection(self):
870 |         return exclusions.open()
871 | 
872 |     @property
873 |     def indexes_with_ascdesc(self):
874 |         """target database supports CREATE INDEX with per-column ASC/DESC."""
875 |         return exclusions.open()
876 | 
877 |     @property
878 |     def reflect_indexes_with_ascdesc(self):
879 |         """target database supports reflecting INDEX with per-column
880 |         ASC/DESC."""
881 |         return exclusions.open()
882 | 
883 |     @property
884 |     def reflect_indexes_with_ascdesc_as_expression(self):
885 |         """target database supports reflecting INDEX with per-column
886 |         ASC/DESC but reflects them as expressions."""
887 |         return exclusions.closed()
888 | 
889 |     @property
890 |     def indexes_with_expressions(self):
891 |         """target database supports CREATE INDEX against SQL expressions."""
892 |         return exclusions.closed()
893 | 
894 |     @property
895 |     def reflect_indexes_with_expressions(self):
896 |         """target database supports reflection of indexes with
897 |         SQL expressions."""
898 |         return exclusions.closed()
899 | 
900 |     @property
901 |     def inline_check_constraint_reflection(self):
902 |         """target dialect supports reflection of inline check constraints"""
903 |         return exclusions.closed()
904 | 
905 |     @property
906 |     def check_constraint_reflection(self):
907 |         """target dialect supports reflection of check constraints"""
908 |         return exclusions.closed()
909 | 
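    # Illustrative sketch, not part of the original file: index_reflection and
    # the constraint-reflection requirements above correspond to calls like:
    #
    #     from sqlalchemy import inspect
    #
    #     insp = inspect(engine)              # engine as in the sketch above
    #     insp.get_indexes("some_table")      # "some_table" is a placeholder
    #     insp.get_pk_constraint("some_table")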
910 |     @property
911 |     def duplicate_key_raises_integrity_error(self):
912 |         """target dialect raises IntegrityError when reporting an INSERT
913 |         with a primary key violation.  (hint: it should)
914 | 
915 |         """
916 |         return exclusions.open()
917 | 
918 |     @property
919 |     def unbounded_varchar(self):
920 |         """Target database must support VARCHAR with no length"""
921 | 
922 |         return exclusions.open()
923 | 
924 |     @property
925 |     def unicode_data_no_special_types(self):
926 |         """Target database/dialect can receive / deliver / compare data with
927 |         non-ASCII characters in plain VARCHAR, TEXT columns, without the need
928 |         for special "national" datatypes like NVARCHAR or similar.
929 | 
930 |         """
931 |         return exclusions.open()
932 | 
933 |     @property
934 |     def unicode_data(self):
935 |         """Target database/dialect must support Python unicode objects with
936 |         non-ASCII characters represented, delivered as bound parameters
937 |         as well as in result rows.
938 | 
939 |         """
940 |         return exclusions.open()
941 | 
942 |     @property
943 |     def unicode_ddl(self):
944 |         """Target driver must support some degree of non-ASCII symbol
945 |         names.
946 |         """
947 |         return exclusions.open()
948 | 
949 |     @property
950 |     def datetime_interval(self):
951 |         """target dialect supports rendering of a datetime.timedelta as a
952 |         literal string, e.g. via the TypeEngine.literal_processor() method.
953 | 
954 |         """
955 |         return exclusions.closed()
956 | 
957 |     @property
958 |     def datetime_literals(self):
959 |         """target dialect supports rendering of a date, time, or datetime as a
960 |         literal string, e.g. via the TypeEngine.literal_processor() method.
961 | 
962 |         """
963 |         # works reliably only on the Community driver
964 |         return self.community_driver
965 | 
966 |     @property
967 |     def datetime(self):
968 |         """target dialect supports representation of Python
969 |         datetime.datetime() objects."""
970 | 
971 |         return exclusions.open()
972 | 
973 |     @property
974 |     def datetime_timezone(self):
975 |         """target dialect supports representation of Python
976 |         datetime.datetime() with tzinfo with DateTime(timezone=True)."""
977 | 
978 |         return exclusions.closed()
979 | 
980 |     @property
981 |     def time_timezone(self):
982 |         """target dialect supports representation of Python
983 |         datetime.time() with tzinfo with Time(timezone=True)."""
984 | 
985 |         return exclusions.closed()
986 | 
987 |     @property
988 |     def date_implicit_bound(self):
989 |         """target dialect when given a date object will bind it such
990 |         that the database server knows the object is a date, and not
991 |         a plain string.
992 | 
993 |         """
994 |         return exclusions.open()
995 | 
996 |     @property
997 |     def time_implicit_bound(self):
998 |         """target dialect when given a time object will bind it such
999 |         that the database server knows the object is a time, and not
1000 |         a plain string.
1001 | 
1002 |         """
1003 |         return exclusions.open()
1004 | 
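    # Illustrative sketch, not part of the original file: datetime_literals is
    # about literal_binds compilation, roughly:
    #
    #     import datetime
    #     from sqlalchemy import literal, select
    #
    #     stmt = select(literal(datetime.date(2024, 1, 1)))
    #     print(stmt.compile(compile_kwargs={"literal_binds": True}))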
1005 |     @property
1006 |     def datetime_implicit_bound(self):
1007 |         """target dialect when given a datetime object will bind it such
1008 |         that the database server knows the object is a datetime, and not
1009 |         a plain string.
1010 | 
1011 |         """
1012 |         return exclusions.open()
1013 | 
1014 |     @property
1015 |     def datetime_microseconds(self):
1016 |         """target dialect supports representation of Python
1017 |         datetime.datetime() with microsecond objects."""
1018 | 
1019 |         return exclusions.open()
1020 | 
1021 |     @property
1022 |     def timestamp_microseconds(self):
1023 |         """target dialect supports representation of Python
1024 |         datetime.datetime() with microsecond objects but only
1025 |         if TIMESTAMP is used."""
1026 |         return exclusions.closed()
1027 | 
1028 |     @property
1029 |     def timestamp_microseconds_implicit_bound(self):
1030 |         """target dialect when given a datetime object which also includes
1031 |         a microseconds portion when using the TIMESTAMP data type
1032 |         will bind it such that the database server knows
1033 |         the object is a datetime with microseconds, and not a plain string.
1034 | 
1035 |         """
1036 |         return self.timestamp_microseconds
1037 | 
1038 |     @property
1039 |     def date(self):
1040 |         """target dialect supports representation of Python
1041 |         datetime.date() objects."""
1042 | 
1043 |         return exclusions.open()
1044 | 
1045 |     @property
1046 |     def date_coerces_from_datetime(self):
1047 |         """target dialect accepts a datetime object as the target
1048 |         of a date column."""
1049 | 
1050 |         return exclusions.open()
1051 | 
1052 |     @property
1053 |     def time(self):
1054 |         """target dialect supports representation of Python
1055 |         datetime.time() objects."""
1056 | 
1057 |         return exclusions.open()
1058 | 
1059 |     @property
1060 |     def time_microseconds(self):
1061 |         """target dialect supports representation of Python
1062 |         datetime.time() with microsecond objects."""
1063 | 
1064 |         return exclusions.open()
1065 | 
1066 |     @property
1067 |     def isolation_level(self):
1068 |         """target dialect supports general isolation level settings.
1069 | 
1070 |         Note that this requirement, when enabled, also requires that
1071 |         the get_isolation_levels() method be implemented.
1072 | 
1073 |         """
1074 |         return exclusions.open()
1075 | 
1076 |     @property
1077 |     def get_isolation_level_values(self):
1078 |         """target dialect supports the
1079 |         :meth:`_engine.Dialect.get_isolation_level_values`
1080 |         method added in SQLAlchemy 2.0.
1081 | 
1082 |         """
1083 | 
1084 |         def go(config):
1085 |             with config.db.connect() as conn:
1086 |                 try:
1087 |                     conn.dialect.get_isolation_level_values(
1088 |                         conn.connection.dbapi_connection
1089 |                     )
1090 |                 except NotImplementedError:
1091 |                     return False
1092 |                 else:
1093 |                     return True
1094 | 
1095 |         return exclusions.only_if(go)
1096 | 
1097 |     @property
1098 |     def json_type(self):
1099 |         """target platform implements a native JSON type."""
1100 | 
1101 |         return exclusions.closed()
1102 | 
1103 |     @property
1104 |     def json_array_indexes(self):
1105 |         """target platform supports numeric array indexes
1106 |         within a JSON structure"""
1107 | 
1108 |         return self.json_type
1109 | 
1110 |     @property
1111 |     def json_index_supplementary_unicode_element(self):
1112 |         return exclusions.open()
1113 | 
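    # Illustrative sketch, not part of the original file: the isolation_level
    # requirement covers settings such as the following, where the available
    # level names depend on what get_isolation_level_values() reports:
    #
    #     engine = create_engine(url, isolation_level="READ COMMITTED")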
1114 |     @property
1115 |     def legacy_unconditional_json_extract(self):
1116 |         """Backend has a JSON_EXTRACT or similar function that returns a
1117 |         valid JSON string in all cases.
1118 | 
1119 |         Used to test a legacy feature and is not needed.
1120 | 
1121 |         """
1122 |         return exclusions.closed()
1123 | 
1124 |     @property
1125 |     def precision_numerics_general(self):
1126 |         """target backend has general support for moderately high-precision
1127 |         numerics."""
1128 |         return exclusions.open()
1129 | 
1130 |     @property
1131 |     def precision_numerics_enotation_small(self):
1132 |         """target backend supports Decimal() objects using E notation
1133 |         to represent very small values."""
1134 |         return exclusions.closed()
1135 | 
1136 |     @property
1137 |     def precision_numerics_enotation_large(self):
1138 |         """target backend supports Decimal() objects using E notation
1139 |         to represent very large values."""
1140 |         return exclusions.open()
1141 | 
1142 |     @property
1143 |     def cast_precision_numerics_many_significant_digits(self):
1144 |         """same as precision_numerics_many_significant_digits but within the
1145 |         context of a CAST statement
1146 | 
1147 |         """
1148 |         return self.precision_numerics_many_significant_digits
1149 | 
1150 |     @property
1151 |     def implicit_decimal_binds(self):
1152 |         """target backend will return a selected Decimal as a Decimal, not
1153 |         a string.
1154 |         """
1155 | 
1156 |         return exclusions.open()
1157 | 
1158 |     @property
1159 |     def numeric_received_as_decimal_untyped(self):
1160 |         """target backend will return result columns that are explicitly
1161 |         against NUMERIC or similar precision-numeric datatypes (not including
1162 |         FLOAT or INT types) as Python Decimal objects, and not as floats
1163 |         or ints, including when no SQLAlchemy-side typing information is
1164 |         associated with the statement (e.g. such as a raw SQL string).
1165 | 
1166 |         This should be enabled if either the DBAPI itself returns Decimal
1167 |         objects, or if the dialect has set up DBAPI-specific return type
1168 |         handlers such that Decimal objects come back automatically.
1169 | 
1170 |         """
1171 |         return exclusions.open()
1172 | 
1173 |     @property
1174 |     def nested_aggregates(self):
1175 |         """target database can select an aggregate from a subquery that's
1176 |         also using an aggregate
1177 | 
1178 |         """
1179 |         return exclusions.open()
1180 | 
1181 |     @property
1182 |     def recursive_fk_cascade(self):
1183 |         """target database must support ON DELETE CASCADE on a self-referential
1184 |         foreign key
1185 | 
1186 |         """
1187 |         return exclusions.open()
1188 | 
1189 |     @property
1190 |     def precision_numerics_retains_significant_digits(self):
1191 |         """A precision numeric type will return empty significant digits,
1192 |         i.e. a value such as 10.000 will come back in Decimal form with
1193 |         the .000 maintained."""
1194 | 
1195 |         return exclusions.closed()
1196 | 
1197 |     @property
1198 |     def infinity_floats(self):
1199 |         """The Float type can persist and load float('inf'), float('-inf')."""
1200 | 
1201 |         return exclusions.closed()
1202 | 
1203 |     @property
1204 |     def float_or_double_precision_behaves_generically(self):
1205 |         return exclusions.closed()
1206 | 
1207 |     @property
1208 |     def precision_generic_float_type(self):
1209 |         """target backend will return native floating point numbers with at
1210 |         least seven decimal places when using the generic Float type.
1211 | 
1212 |         """
1213 |         return exclusions.open()
1214 | 
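    # Illustrative sketch, not part of the original file: the precision-numeric
    # requirements above amount to round trips like this, with a hypothetical
    # column "amount NUMERIC(12, 4)":
    #
    #     from decimal import Decimal
    #
    #     conn.execute(table.insert(), {"amount": Decimal("10.0001")})
    #     conn.execute(select(table.c.amount)).scalar_one()   # Decimal("10.0001")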
1215 |     @property
1216 |     def literal_float_coercion(self):
1217 |         """target backend will return the exact float value 15.7563
1218 |         with only four decimal places from this statement:
1219 | 
1220 |         SELECT :param
1221 | 
1222 |         where :param is the Python float 15.7563
1223 | 
1224 |         i.e. it does not return 15.75629997253418
1225 | 
1226 |         """
1227 |         return exclusions.open()
1228 | 
1229 |     @property
1230 |     def floats_to_four_decimals(self):
1231 |         """target backend can return a floating-point number with four
1232 |         decimal places (such as 15.7563) accurately
1233 |         (i.e. without FP inaccuracies, such as 15.75629997253418).
1234 | 
1235 |         """
1236 |         return exclusions.open()
1237 | 
1238 |     @property
1239 |     def fetch_null_from_numeric(self):
1240 |         """target backend doesn't crash when you try to select a NUMERIC
1241 |         value that has a value of NULL.
1242 | 
1243 |         Added to support Pyodbc bug #351.
1244 |         """
1245 | 
1246 |         return exclusions.open()
1247 | 
1248 |     @property
1249 |     def float_is_numeric(self):
1250 |         """target backend uses Numeric for Float/Dual"""
1251 | 
1252 |         return exclusions.open()
1253 | 
1254 |     @property
1255 |     def text_type(self):
1256 |         """Target database must support an unbounded Text()
1257 |         type such as TEXT or CLOB"""
1258 | 
1259 |         return exclusions.open()
1260 | 
1261 |     @property
1262 |     def empty_strings_varchar(self):
1263 |         """target database can persist/return an empty string with a
1264 |         varchar.
1265 | 
1266 |         """
1267 |         return exclusions.open()
1268 | 
1269 |     @property
1270 |     def empty_strings_text(self):
1271 |         """target database can persist/return an empty string with an
1272 |         unbounded text."""
1273 | 
1274 |         return exclusions.open()
1275 | 
1276 |     @property
1277 |     def expressions_against_unbounded_text(self):
1278 |         """target database supports use of an unbounded textual field in a
1279 |         WHERE clause."""
1280 | 
1281 |         return exclusions.open()
1282 | 
1283 |     @property
1284 |     def selectone(self):
1285 |         """target driver must support the literal statement 'select 1'"""
1286 |         return exclusions.open()
1287 | 
1288 |     @property
1289 |     def savepoints(self):
1290 |         """Target database must support savepoints."""
1291 | 
1292 |         return exclusions.closed()
1293 | 
1294 |     @property
1295 |     def update_from(self):
1296 |         """Target must support UPDATE..FROM syntax"""
1297 |         return exclusions.closed()
1298 | 
1299 |     @property
1300 |     def delete_from(self):
1301 |         """Target must support DELETE FROM..FROM or DELETE..USING syntax"""
1302 |         return exclusions.closed()
1303 | 
1304 |     @property
1305 |     def update_where_target_in_subquery(self):
1306 |         """Target must support UPDATE (or DELETE) where the same table is
1307 |         present in a subquery in the WHERE clause.
1308 |         """
1309 |         return exclusions.open()
1310 | 
1311 |     @property
1312 |     def mod_operator_as_percent_sign(self):
1313 |         """target database must use a plain percent '%' as the 'modulus'
1314 |         operator."""
1315 |         return exclusions.closed()
1316 | 
1317 |     @property
1318 |     def percent_schema_names(self):
1319 |         """target backend supports weird identifiers with percent signs
1320 |         in them, e.g. 'some % column'.
1321 | 
1322 |         this is a very weird use case but often has problems because of
1323 |         DBAPIs that use python formatting.  It's not a critical use
1324 |         case either.
1325 | 
1326 |         """
1327 |         return exclusions.closed()
1328 | 
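    # Illustrative sketch, not part of the original file: empty_strings_varchar
    # amounts to a round trip such as this, with a hypothetical VARCHAR
    # column "name":
    #
    #     conn.execute(table.insert(), {"name": ""})
    #     assert conn.execute(select(table.c.name)).scalar_one() == ""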
1329 |     @property
1330 |     def order_by_col_from_union(self):
1331 |         """target database supports ordering by a column from a SELECT
1332 |         inside of a UNION
1333 | 
1334 |         E.g.  (SELECT id, ...) UNION (SELECT id, ...) ORDER BY id
1335 | 
1336 |         """
1337 |         return exclusions.open()
1338 | 
1339 |     @property
1340 |     def order_by_collation(self):
1341 |         def check(config):
1342 |             try:
1343 |                 self.get_order_by_collation(config)
1344 |                 return False
1345 |             except NotImplementedError:
1346 |                 return True
1347 | 
1348 |         return exclusions.skip_if(check)
1349 | 
1350 |     def get_order_by_collation(self, config):
1351 |         raise NotImplementedError()
1352 | 
1353 |     @property
1354 |     def unicode_connections(self):
1355 |         """Target driver must support non-ASCII characters being passed at
1356 |         all.
1357 |         """
1358 |         return exclusions.open()
1359 | 
1360 |     @property
1361 |     def graceful_disconnects(self):
1362 |         """Target driver must raise a DBAPI-level exception, such as
1363 |         InterfaceError, when the underlying connection has been closed
1364 |         and the execute() method is called.
1365 |         """
1366 |         return exclusions.open()
1367 | 
1368 |     @property
1369 |     def independent_connections(self):
1370 |         """
1371 |         Target must support simultaneous, independent database connections.
1372 |         """
1373 |         return exclusions.open()
1374 | 
1375 |     @property
1376 |     def independent_readonly_connections(self):
1377 |         """
1378 |         Target must support simultaneous, independent database connections
1379 |         that will be used in a readonly fashion.
1380 | 
1381 |         """
1382 |         return exclusions.open()
1383 | 
1384 |     @property
1385 |     def async_dialect(self):
1386 |         """dialect makes use of await_() to invoke operations on the DBAPI."""
1387 | 
1388 |         return exclusions.closed()
1389 | 
1390 |     @property
1391 |     def supports_is_distinct_from(self):
1392 |         """Supports some form of "x IS [NOT] DISTINCT FROM y" construct.
1393 |         Different dialects will implement their own flavour
1394 | 
1395 |         .. seealso::
1396 | 
1397 |             :meth:`.ColumnOperators.is_distinct_from`
1398 | 
1399 |         """
1400 |         return exclusions.skip_if(
1401 |             lambda config: not config.db.dialect.supports_is_distinct_from,
1402 |             "driver doesn't support an IS DISTINCT FROM construct",
1403 |         )
1404 | 
1405 |     @property
1406 |     def identity_columns(self):
1407 |         """If a backend supports GENERATED { ALWAYS | BY DEFAULT }
1408 |         AS IDENTITY"""
1409 |         return exclusions.closed()
1410 | 
1411 |     @property
1412 |     def identity_columns_standard(self):
1413 |         """If a backend supports GENERATED { ALWAYS | BY DEFAULT }
1414 |         AS IDENTITY with a standard syntax.
1415 |         """
1416 |         return exclusions.closed()
1417 | 
1418 |     @property
1419 |     def regexp_replace(self):
1420 |         """backend supports the regexp_replace operator."""
1421 |         return exclusions.closed()
1422 | 
1423 |     @property
1424 |     def fetch_first(self):
1425 |         """backend supports the fetch first clause."""
1426 |         return exclusions.open()
1427 | 
1428 |     @property
1429 |     def fetch_percent(self):
1430 |         """backend supports the fetch first clause with percent."""
1431 |         return exclusions.closed()
1432 | 
1433 |     @property
1434 |     def fetch_ties(self):
1435 |         """backend supports the fetch first clause with ties."""
1436 |         return exclusions.closed()
1437 | 
1438 |     @property
1439 |     def fetch_no_order_by(self):
1440 |         """backend supports the fetch first without order by"""
1441 |         return exclusions.open()
1442 | 
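    # Illustrative sketch, not part of the original file: fetch_first maps to
    # the Core Select.fetch() method:
    #
    #     select(table).order_by(table.c.id).fetch(5)
    #     # renders along the lines of: SELECT ... FETCH FIRST 5 ROWS ONLY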
1443 |     @property
1444 |     def fetch_offset_with_options(self):
1445 |         """backend supports the offset when using fetch first with percent
1446 |         or ties.
1447 |         """
1448 |         return exclusions.open()
1449 | 
1450 |     @property
1451 |     def fetch_expression(self):
1452 |         """backend supports fetch / offset with expression in them, like
1453 | 
1454 |         SELECT * FROM some_table
1455 |         OFFSET 1 + 1 ROWS FETCH FIRST 1 + 1 ROWS ONLY
1456 |         """
1457 |         return exclusions.open()
1458 | 
1459 |     @property
1460 |     def autoincrement_without_sequence(self):
1461 |         """If autoincrement=True on a column does not require an explicit
1462 |         sequence.
1463 |         """
1464 |         return exclusions.open()
1465 | 
1466 |     @property
1467 |     def generic_classes(self):
1468 |         "If X[Y] can be implemented with ``__class_getitem__``.  py3.7+"
1469 |         return exclusions.open()
1470 | 
1471 |     @property
1472 |     def json_deserializer_binary(self):
1473 |         "indicates if the json_deserializer function is called with bytes"
1474 |         return exclusions.closed()
1475 | 
1476 |     @property
1477 |     def materialized_views(self):
1478 |         """Target database must support MATERIALIZED VIEWs."""
1479 |         return exclusions.closed()
1480 | 
1481 |     @property
1482 |     def materialized_views_reflect_pk(self):
1483 |         """Target database reflects MATERIALIZED VIEW primary keys."""
1484 |         return exclusions.closed()
1485 | 
1486 |     @property
1487 |     def supports_bitwise_or(self):
1488 |         """Target database supports bitwise or"""
1489 |         return exclusions.closed()
1490 | 
1491 |     @property
1492 |     def supports_bitwise_and(self):
1493 |         """Target database supports bitwise and"""
1494 |         return exclusions.closed()
1495 | 
1496 |     @property
1497 |     def supports_bitwise_not(self):
1498 |         """Target database supports bitwise not"""
1499 |         return exclusions.closed()
1500 | 
1501 |     @property
1502 |     def supports_bitwise_xor(self):
1503 |         """Target database supports bitwise xor"""
1504 |         return exclusions.closed()
1505 | 
1506 |     @property
1507 |     def supports_bitwise_shift(self):
1508 |         """Target database supports bitwise left or right shift"""
1509 |         return exclusions.closed()
1510 | 
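These requirement properties are consumed by SQLAlchemy's test suite through sqlalchemy.testing.requires. A minimal sketch of the pattern, with a hypothetical test class and test name:

from sqlalchemy import testing
from sqlalchemy.testing import fixtures


class SequenceSmokeTest(fixtures.TestBase):
    # runs only where the "sequences" requirement above is open
    @testing.requires.sequences
    def test_sequence_round_trip(self, connection):
        ...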
--------------------------------------------------------------------------------
/sqlalchemy_iris/types.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | from decimal import Decimal
3 | from sqlalchemy import func, text
4 | from sqlalchemy.sql import sqltypes
5 | from sqlalchemy.types import UserDefinedType
6 | from uuid import UUID as _python_UUID
7 | from intersystems_iris import IRISList
8 | from sqlalchemy import __version__ as sqlalchemy_version
9 | 
10 | HOROLOG_ORDINAL = datetime.date(1840, 12, 31).toordinal()
11 | 
12 | 
13 | class IRISBoolean(sqltypes.Boolean):
14 |     def _should_create_constraint(self, compiler, **kw):
15 |         return False
16 | 
17 |     def bind_processor(self, dialect):
18 |         def process(value):
19 |             if isinstance(value, bool):  # bool first; bool is a subclass of int
20 |                 return 1 if value is True else 0
21 |             elif isinstance(value, int):
22 |                 return 1 if value > 0 else 0
23 |             return None
24 | 
25 |         return process
26 | 
27 |     def result_processor(self, dialect, coltype):
28 |         def process(value):
29 |             if isinstance(value, int):
30 |                 return value > 0
31 |             return value
32 | 
33 |         return process
34 | 
35 | 
36 | class IRISDate(sqltypes.Date):
37 |     def bind_processor(self, dialect):
38 |         def process(value):
39 |             if value is None:
40 |                 return None
41 |             horolog = value.toordinal() - HOROLOG_ORDINAL
42 |             return str(horolog)
43 | 
44 |         return process
45 | 
46 |     def result_processor(self, dialect, coltype):
47 |         def process(value):
48 |             if value is None:
49 |                 return None
50 |             if isinstance(value, datetime.date):
51 |                 return value
52 |             if isinstance(value, str) and "-" in value[1:]:
53 |                 return datetime.datetime.strptime(value, "%Y-%m-%d").date()
54 |             horolog = int(value) + HOROLOG_ORDINAL
55 |             return datetime.date.fromordinal(horolog)
56 | 
57 |         return process
58 | 
59 |     def literal_processor(self, dialect):
60 |         def process(value):
61 |             if isinstance(value, datetime.date):
62 |                 return "'%s'" % value.strftime("%Y-%m-%d")
63 |             return value
64 | 
65 |         return process
66 | 
67 | 
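# Illustrative sketch, not part of the original file: the $HOROLOG round trip
# used by IRISDate above, where IRIS counts days from 1840-12-31:
#
#     d = datetime.date(1841, 1, 1)
#     horolog = d.toordinal() - HOROLOG_ORDINAL                    # == 1
#     assert datetime.date.fromordinal(horolog + HOROLOG_ORDINAL) == d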
68 | class IRISTimeStamp(sqltypes.DateTime):
69 |     __visit_name__ = "TIMESTAMP"
70 | 
71 |     def bind_processor(self, dialect):
72 |         def process(value: datetime.datetime):
73 |             if value is not None:
74 |                 # value = int(value.timestamp() * 1000000)
75 |                 # value += (2 ** 60) if value > 0 else -(2 ** 61 * 3)
76 |                 return value.strftime("%Y-%m-%d %H:%M:%S.%f")
77 |             return value
78 | 
79 |         return process
80 | 
81 |     def result_processor(self, dialect, coltype):
82 |         def process(value):
83 |             if isinstance(value, str):
84 |                 if "." not in value:
85 |                     value += ".0"
86 |                 return datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S.%f")
87 |             if isinstance(value, int):
88 |                 value -= (2**60) if value > 0 else -(2**61 * 3)
89 |                 value = value / 1000000
90 |                 value = datetime.datetime.utcfromtimestamp(value)
91 |             return value
92 | 
93 |         return process
94 | 
95 |     def literal_processor(self, dialect):
96 |         def process(value):
97 |             if isinstance(value, datetime.datetime):
98 |                 return "'%s'" % value.strftime("%Y-%m-%d %H:%M:%S.%f")
99 |             return value
100 | 
101 |         return process
102 | 
103 | 
104 | class IRISDateTime(sqltypes.DateTime):
105 |     __visit_name__ = "DATETIME"
106 | 
107 |     def bind_processor(self, dialect):
108 |         def process(value):
109 |             if value is not None:
110 |                 return value.strftime("%Y-%m-%d %H:%M:%S.%f")
111 |             return value
112 | 
113 |         return process
114 | 
115 |     def result_processor(self, dialect, coltype):
116 |         def process(value):
117 |             if isinstance(value, datetime.datetime):
118 |                 return value
119 |             if isinstance(value, str):
120 |                 if "." not in value:
121 |                     value += ".0"
122 |                 return datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S.%f")
123 |             return value
124 | 
125 |         return process
126 | 
127 |     def literal_processor(self, dialect):
128 |         def process(value):
129 |             if isinstance(value, datetime.datetime):
130 |                 return "'%s'" % value.strftime("%Y-%m-%d %H:%M:%S.%f")
131 |             return value
132 | 
133 |         return process
134 | 
135 | 
136 | class IRISTime(sqltypes.DateTime):
137 |     __visit_name__ = "TIME"
138 | 
139 |     def bind_processor(self, dialect):
140 |         def process(value):
141 |             if value is not None:
142 |                 return value.strftime("%H:%M:%S.%f")
143 |             return value
144 | 
145 |         return process
146 | 
147 |     def result_processor(self, dialect, coltype):
148 |         def process(value):
149 |             if isinstance(value, datetime.time):
150 |                 return value
151 |             if isinstance(value, str):
152 |                 if "." not in value:
153 |                     value += ".0"
154 |                 return datetime.datetime.strptime(value, "%H:%M:%S.%f").time()
155 |             if isinstance(value, int) or isinstance(value, Decimal):
156 |                 horolog = value
157 |                 hour = int(horolog // 3600)
158 |                 horolog -= int(hour * 3600)
159 |                 minute = int(horolog // 60)
160 |                 second = int(horolog % 60)
161 |                 micro = int(value % 1 * 1000000)
162 |                 return datetime.time(hour, minute, second, micro)
163 |             return value
164 | 
165 |         return process
166 | 
167 |     def literal_processor(self, dialect):
168 |         def process(value):
169 |             if isinstance(value, datetime.time):
170 |                 return "'%s'" % value.strftime("%H:%M:%S.%f")
171 |             return value
172 | 
173 |         return process
174 | 
175 | 
176 | if sqlalchemy_version.startswith("2."):
177 | 
178 |     class IRISUniqueIdentifier(sqltypes.Uuid):
179 |         def literal_processor(self, dialect):
180 |             if not self.as_uuid:
181 | 
182 |                 def process(value):
183 |                     return f"""'{value.replace("'", "''")}'"""
184 | 
185 |                 return process
186 |             else:
187 | 
188 |                 def process(value):
189 |                     return f"""'{str(value).replace("'", "''")}'"""
190 | 
191 |                 return process
192 | 
193 |         def bind_processor(self, dialect):
194 |             character_based_uuid = (
195 |                 not dialect.supports_native_uuid or not self.native_uuid
196 |             )
197 | 
198 |             if character_based_uuid:
199 |                 if self.as_uuid:
200 | 
201 |                     def process(value):
202 |                         if value is not None:
203 |                             value = str(value)
204 |                         return value
205 | 
206 |                     return process
207 |                 else:
208 | 
209 |                     def process(value):
210 |                         return value
211 | 
212 |                     return process
213 |             else:
214 |                 return None
215 | 
216 |         def result_processor(self, dialect, coltype):
217 |             character_based_uuid = (
218 |                 not dialect.supports_native_uuid or not self.native_uuid
219 |             )
220 | 
221 |             if character_based_uuid:
222 |                 if self.as_uuid:
223 | 
224 |                     def process(value):
225 |                         if value and not isinstance(value, _python_UUID):
226 |                             value = _python_UUID(value)
227 |                         return value
228 | 
229 |                     return process
230 |                 else:
231 | 
232 |                     def process(value):
233 |                         if value and isinstance(value, _python_UUID):
234 |                             value = str(value)
235 |                         return value
236 | 
237 |                     return process
238 |             else:
239 |                 if not self.as_uuid:
240 | 
241 |                     def process(value):
242 |                         if value and isinstance(value, _python_UUID):
243 |                             value = str(value)
244 |                         return value
245 | 
246 |                     return process
247 |                 else:
248 |                     return None
249 | 
250 | 
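# Illustrative sketch, not part of the original file: declaring columns with
# the types above (table and column names are placeholders):
#
#     from sqlalchemy import Column, Integer, MetaData, Table
#
#     events = Table(
#         "events",
#         MetaData(),
#         Column("id", Integer, primary_key=True),
#         Column("created_at", IRISTimeStamp()),
#         Column("day", IRISDate()),
#     )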
251 | class IRISListBuild(UserDefinedType):
252 |     cache_ok = True
253 | 
254 |     def __init__(self, max_items: int = None, item_type: type = float):
255 |         super(UserDefinedType, self).__init__()
256 |         self.max_items = max_items
257 |         max_length = None
258 |         if (item_type is float or item_type is int) and max_items:
259 |             max_length = max_items * 10  # rough per-item size estimate
260 |         elif max_items:
261 |             max_length = 65535
262 |         self.max_length = max_length
263 | 
264 |     def get_col_spec(self, **kw):
265 |         if self.max_length is None:
266 |             return "VARBINARY(65535)"
267 |         return "VARBINARY(%d)" % self.max_length
268 | 
269 |     def bind_processor(self, dialect):
270 |         def process(value):
271 |             irislist = IRISList()
272 |             if not value:
273 |                 return value
274 |             if not isinstance(value, list) and not isinstance(value, tuple):
275 |                 raise ValueError("expected list or tuple, got '%s'" % type(value))
276 |             for item in value:
277 |                 irislist.add(item)
278 |             return irislist.getBuffer()
279 | 
280 |         return process
281 | 
282 |     def result_processor(self, dialect, coltype):
283 |         def process(value):
284 |             if value:
285 |                 irislist = IRISList(value)
286 |                 return irislist._list_data
287 |             return value
288 | 
289 |         return process
290 | 
291 |     class comparator_factory(UserDefinedType.Comparator):
292 |         def func(self, funcname: str, other):
293 |             if not isinstance(other, list) and not isinstance(other, tuple):
294 |                 raise ValueError("expected list or tuple, got '%s'" % type(other))
295 |             irislist = IRISList()
296 |             for item in other:
297 |                 irislist.add(item)
298 |             return getattr(func, funcname)(self, irislist.getBuffer())
299 | 
300 | 
301 | class IRISVector(UserDefinedType):
302 |     cache_ok = True
303 | 
304 |     def __init__(self, max_items: int = None, item_type: type = float):
305 |         super(UserDefinedType, self).__init__()
306 |         if item_type not in [float, int, Decimal]:
307 |             raise TypeError(
308 |                 f"IRISVector expected int, float or Decimal; got {item_type.__name__}"
309 |             )
310 |         self.max_items = max_items
311 |         self.item_type = item_type
312 |         item_type_server = (
313 |             "decimal"
314 |             if self.item_type is float
315 |             else "float" if self.item_type is Decimal else "int"
316 |         )
317 |         self.item_type_server = item_type_server
318 | 
319 |     def get_col_spec(self, **kw):
320 |         if self.max_items is None and self.item_type is None:
321 |             return "VECTOR"
322 |         length = str(self.max_items or "")
323 |         return f"VECTOR({self.item_type_server}, {length})"
324 | 
325 |     def bind_processor(self, dialect):
326 |         def process(value):
327 |             if not value:
328 |                 return value
329 |             if not isinstance(value, list) and not isinstance(value, tuple):
330 |                 raise ValueError("expected list or tuple, got '%s'" % type(value))
331 |             return f"[{','.join([str(v) for v in value])}]"
332 | 
333 |         return process
334 | 
335 |     def result_processor(self, dialect, coltype):
336 |         def process(value):
337 |             if not value:
338 |                 return value
339 |             vals = value.split(",")
340 |             vals = [self.item_type(v) for v in vals]
341 |             return vals
342 | 
343 |         return process
344 | 
345 |     class comparator_factory(UserDefinedType.Comparator):
346 |         # def l2_distance(self, other):
347 |         #     return self.func('vector_l2', other)
348 | 
349 |         def max_inner_product(self, other):
350 |             return self.func("vector_dot_product", other)
351 | 
352 |         def cosine_distance(self, other):
353 |             return self.func("vector_cosine", other)
354 | 
355 |         def cosine(self, other):
356 |             return 1 - self.func("vector_cosine", other)
357 | 
358 |         def func(self, funcname: str, other):
359 |             if not isinstance(other, list) and not isinstance(other, tuple):
360 |                 raise ValueError("expected list or tuple, got '%s'" % type(other))
361 |             othervalue = f"[{','.join([str(v) for v in other])}]"
362 |             return getattr(func, funcname)(
363 |                 self, func.to_vector(othervalue, text(self.type.item_type_server))
364 |             )
365 | 
366 | 
367 | class BIT(sqltypes.TypeEngine):
368 |     __visit_name__ = "BIT"
369 | 
370 | 
371 | class TINYINT(sqltypes.Integer):
372 |     __visit_name__ = "TINYINT"
373 | 
374 | 
375 | class DOUBLE(sqltypes.Float):
376 |     __visit_name__ = "DOUBLE"
377 | 
378 | 
379 | class LONGVARCHAR(sqltypes.VARCHAR):
380 |     __visit_name__ = "LONGVARCHAR"
381 | 
382 | 
383 | class LONGVARBINARY(sqltypes.VARBINARY):
384 |     __visit_name__ = "LONGVARBINARY"
385 | 
386 | 
387 | class LISTBUILD(sqltypes.VARBINARY):
388 |     __visit_name__ = "VARCHAR"
389 | 
--------------------------------------------------------------------------------
/test-in-docker.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | iris_start () {
4 |     iris start iris
5 | 
6 |     # Reset Password changing and activate %Service_CallIn for Embedded Python
7 |     cat <
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | 
3 | requires = tox>=4
4 | env_list = py{310,311,312}-{old,new}-{iris,intersystems}
5 | 
6 | 
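# Illustrative note, not in the original file: each env name combines a Python
# version, a SQLAlchemy pin (old = sqlalchemy<2, new = sqlalchemy>=2), and a
# driver (iris, or intersystems via the .[intersystems] extra).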
7 | [testenv:py{38,39,310,311,312}-old-iris]
8 | deps =
9 |     sqlalchemy<2
10 |     -r requirements-dev.txt
11 |     -r requirements-iris.txt
12 |     -e .
13 | commands = {envpython} -m pytest {posargs} --driver iris
14 | 
15 | [testenv:py{38,39,310,311,312}-new-iris]
16 | deps =
17 |     sqlalchemy>=2
18 |     -r requirements-dev.txt
19 |     -r requirements-iris.txt
20 |     -e .
21 | commands = {envpython} -m pytest {posargs} --driver iris
22 | 
23 | [testenv:py{38,39,310,311,312}-old-intersystems]
24 | deps =
25 |     sqlalchemy<2
26 |     -r requirements-dev.txt
27 |     -r requirements-iris.txt
28 |     -e .[intersystems]
29 | commands = {envpython} -m pytest {posargs} --driver intersystems
30 | 
31 | [testenv:py{38,39,310,311,312}-new-intersystems]
32 | deps =
33 |     sqlalchemy>=2
34 |     -r requirements-dev.txt
35 |     -r requirements-iris.txt
36 |     -e .[intersystems]
37 | commands = {envpython} -m pytest {posargs} --driver intersystems
38 | 
--------------------------------------------------------------------------------
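To run one of these environments by hand, pass pytest arguments after tox's `--` separator, for example `tox -e py312-new-iris -- --dburi iris://_SYSTEM:SYS@localhost:1972/USER`; the connection URL is a placeholder for a reachable IRIS instance.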