├── .github └── workflows │ └── ibm_db_sa_publish.yml ├── .gitignore ├── CHANGES.md ├── LICENSE ├── MANIFEST.in ├── README.md ├── contributing └── CONTRIBUTING.md ├── ibm_db_sa ├── __init__.py ├── base.py ├── ibm_db.py ├── ibm_db_as400.py ├── pyodbc.py ├── reflection.py ├── requirements.py └── zxjdbc.py ├── polaris.yml ├── run_tests.py ├── setup.cfg ├── setup.py └── test ├── __init__.py ├── test_out_parameters.py └── test_suite.py /.github/workflows/ibm_db_sa_publish.yml: -------------------------------------------------------------------------------- 1 | name: Build and upload to PyPI 2 | 3 | # Build on every workflow_dispatch, branch push, tag push, and pull request change 4 | on: 5 | workflow_dispatch: 6 | pull_request: 7 | push: 8 | branches: 9 | - master 10 | # Sequence of patterns matched against refs/tags 11 | tags: 12 | - 'v*' # Push events to matching v*, i.e. v1.0, v20.15.10 13 | 14 | jobs: 15 | deploy: 16 | 17 | runs-on: ubuntu-latest 18 | 19 | steps: 20 | - uses: actions/checkout@v4 21 | - name: Set up Python 22 | uses: actions/setup-python@v5 23 | with: 24 | python-version: '3.x' 25 | - name: Install dependencies 26 | run: | 27 | python -m pip install --upgrade pip 28 | pip install build 29 | - name: Build package 30 | run: | 31 | python -m build 32 | - name: Publish distribution to PyPI 33 | if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') 34 | uses: pypa/gh-action-pypi-publish@release/v1 35 | 36 | permissions: 37 | # IMPORTANT: this permission is mandatory for trusted publishing 38 | id-token: write 39 | contents: write # Added permission for GitHub Actions to push tags -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.swp 3 | *.orig 4 | build 5 | tmp 6 | dist 7 | .venv 8 | ibm_db_sa.egg-info/ 9 | .coverage 10 | 
-------------------------------------------------------------------------------- /CHANGES.md: -------------------------------------------------------------------------------- 1 | 2024-07-30, Version 0.4.1 2 | ========================= 3 | 4 | * Fix foreign key reflection when there are tables with the same name in different schemas (#128) (Xnot) 5 | 6 | * Resolved issue of round function on zos server (#130) (bchoudhary6415) 7 | 8 | * Resolved case-sensitive issue of round function (#131) (bchoudhary6415) 9 | 10 | * Update pyodbc.py (#133) (Murchurl) 11 | 12 | * Fix get_table_comment return value (#135) (andrasore-kodinfo) 13 | 14 | * Fix boolean type not recognized warning (#140) (Xnot) 15 | 16 | * Assign OS390Reflector for Db2 for z/OS (#147) (rhgit01) 17 | 18 | 19 | 2023-04-20, Version 0.4.0 20 | ========================= 21 | 22 | * Changes for support of SQLAlchemy 2.0.x (#127) (bchoudhary6415) 23 | 24 | * Some changes to support for SQLAlchemy 2.0 and resolved denormalise name error (#126) (bchoudhary6415) 25 | 26 | * Support for SQLAlchemy 2.0 (#124) (bchoudhary6415) 27 | 28 | 29 | 2023-02-27, Version 0.3.9 30 | ========================= 31 | 32 | * Made some changes for release 039 (#121) (bchoudhary6415) 33 | 34 | * Release 039 (#120) (bchoudhary6415) 35 | 36 | * resolve merge conflicts (Bimal Jha) 37 | 38 | * don't mutate URL object (#116) (Edwin Onuonga) 39 | 40 | * Fix offset condition (#117) (Edwin Onuonga) 41 | 42 | * indentation change for changes file (amukherjee) 43 | 44 | * Added missing columns for the previous commit (Sasa Tomic) 45 | 46 | * Fix for reflection get_primary_keys (Sasa Tomic) 47 | 48 | * DB2 may not return the column names in SYSCOL.INDEXES (Sasa Tomic) 49 | 50 | * pyodbc mods (openmax) 51 | 52 | * implemented iseries db2 dialect inside PASE environment (openmax) 53 | 54 | 2022-05-17, Version 0.3.8 55 | ========================= 56 | - autoload bug fix with SQLAlchemy 1.4.x 57 | - remove warning message while connection 58 | - add 
columns reflection with comments 59 | - other bug fixes reported 60 | 61 | 2021/07/19 62 | - add support for sqlalchemy 1.4 63 | - Missing none check for dbma_name 64 | - Set isolation level 65 | - Other bug fixes 66 | 67 | 2021/03/03 68 | - isolation level bug fix. 69 | 70 | 2020/12/07 71 | - Added ZOS server support for applications to connect 72 | - Added Iseries server support for application to connect 73 | - Add CurrentSchema key word as part of connection string support 74 | - Added fix for multiple issues 75 | 76 | 2019/05/30 77 | - Added fix for missing "CURRENT ISOLATION" register 78 | - Fixed Autocommit not working for pyodbc 79 | - Fixed NameError: name 'asbool' is not defined python 80 | 81 | 2016/08/29 82 | - Fixed multiple defects mentioned below 83 | - Add documentation on alchemy url for connection over ssl 84 | - DB2 on AS400: An unexpected token "ISOLATION" was found on ibm_db_sa/ibm_db.py 85 | - Getting AttributeError for AS400 86 | - name 'unicode' is not defined 87 | - AttributeError when using pyodbc 88 | - add capability to the driver to generate query with literals, compile_kwargs={"literal_binds": True} 89 | 90 | 2016/08/30 91 | -Added Support for Python 3.x 92 | 93 | 2014/10/20 (IBM_DB_SA adaptor 0.3.2) 94 | - Added SSL support 95 | - Added get_incoming_foreign_keys functionality with reflector 96 | - Added get_unique_constraints reflection feature 97 | - Added exist() unary operator support within select clause 98 | - Fixed incompatible issue of sql.true() for SQLAlchemy v0.7.x & 0.8.x 99 | - Fixed add_constraint incompatible issue with SQLAlchemy-0.9.x 100 | - Fixed reflection function get_indexes to not return the unique constraint participating index 101 | 102 | 2014/03/26 (IBM_DB_SA adapter 0.3.1) 103 | - Handle Double Type in DDL Generator 104 | - Translating 'update' and 'read' lock-mode with DB2 compatible SQL 105 | - Added Stored procedure with outparam support in ibm_db_sa dialect 106 | - Convert nullable unique constraint to 
unique index exclude nulls for DB2 10.5 107 | - Fix to detect invalid connection 108 | - Added support for CHAR_LENGTH function support 109 | - Fix drop index implementation incompatibility with SQLAlchemy-0.8.x onwards 110 | - Add/Fix support for zxjdbc for both IBM DB LUW and AS/400 111 | - Add/Fix support for PyODBC for both IBM DB LUW and AS/400 112 | - Fix reflection for get_lastrowid 113 | 114 | 2013/03/01 (IBM_DB_SA adapter 0.3.0) 115 | - Add support for LIMIT/OFFSET 116 | - Add support for savepoints 117 | - Add support for double-precision floating-point number 118 | - Fixed reflection for get_view_names and get_view_definition 119 | 120 | 2013/02/06 121 | - Add support for SQLAlchemy 0.7/0.8 122 | - Refactor code layout 123 | - Now supporting "db2://" scheme as well as 124 | "ibm_db://" for backwards compatibility 125 | - Add/fix support for explicit sequences 126 | 127 | 2011/09/27 (IBM_DB_SA adapter 0.2.1): 128 | - fix reflection problem 129 | - support alternate DB2 LUW connection via PyODBC 130 | - support alternate DB2 i5/OS (iSeries) via PyODBC 131 | - support alternate DB2 i5/OS (iSeries) via ZxJDBC (Jython) 132 | 133 | 2011/08/28 (IBM_DB_SA adapter 0.2.0): 134 | - Support of SQLAlchemy 0.6/0.7 135 | - Add Jython support 136 | 137 | 2008/11/06 (IBM_DB_SA adapter 0.1.6): 138 | - fixed Metadata not loading any table info (defect #158705) 139 | - fixed problems while using different schema names (defect #163785) 140 | - fixed keyerror in length in visit_function (defect #166292) 141 | 142 | 2008/03/28 (IBM_DB_SA adapter 0.1.5): 143 | - fixed BIGINT driver return issue #5 (defect #150638) 144 | - fixed autocommit default issue #6 (defect #156919) 145 | - fixed _get_exception() tuple issue #8 (defect #156925) 146 | - fixed create_engine DSN support issue (defect #156930) 147 | 148 | 2008/02/15 (IBM_DB_SA adapter 0.1.1): 149 | - fixed .egg setup loading issue #1 (defect #154259) 150 | 151 | 2008/02/08 (IBM_DB_SA adapter 0.1.0): 152 | - initial alpha 
release 153 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. 10 | 11 | "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. 12 | 13 | "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. 14 | 15 | "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. 16 | 17 | "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 18 | 19 | "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 20 | 21 | "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
22 | 23 | "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 24 | 25 | "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." 26 | 27 | "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 28 | 29 | 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 30 | 31 | 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 32 | 33 | 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: 34 | 35 | 1. You must give any other recipients of the Work or Derivative Works a copy of this License; and 36 | 37 | 2. You must cause any modified files to carry prominent notices stating that You changed the files; and 38 | 39 | 3. You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and 40 | 41 | 4. 
If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. 42 | 43 | You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 44 | 45 | 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 46 | 47 | 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 48 | 49 | 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 50 | 51 | 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 52 | 53 | 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. 
However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 54 | 55 | END OF TERMS AND CONDITIONS 56 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-include test *.py 2 | 3 | include README* LICENSE distribute_setup.py CHANGES* run_tests.py 4 | 5 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | IBM_DB_SA 2 | ========= 3 | 4 | The IBM_DB_SA adapter provides the Python/SQLAlchemy interface to IBM Data Servers. 5 | 6 | Version 7 | -------- 8 | 0.4.1 (2024/07/30) 9 | 10 | Prerequisites 11 | -------------- 12 | 1. Install Python 2.7 or newer versions except python 3.3 or Jython 2.5.x . 13 | 2. SQLAlchemy version between 0.7.3 - 2.0.x. 14 | 3. IBM_DB driver and IBM_DB_DBI wrapper 1.0.1 or higher. 15 | ``` 16 | Install ibm_db driver with below commands: 17 | Linux and Windows: 18 | pip install ibm_db 19 | Mac: 20 | pip install --no-cache-dir ibm_db 21 | ``` 22 | 23 | Install and Configuration 24 | ========================= 25 | The IBM_DB_SA Python Egg component (.egg) can be installed using the standard setuptools provided by the Python Easy Install through Python Enterprise 26 | Application Kit community portal: 27 | http://peak.telecommunity.com/DevCenter/EasyInstall 28 | 29 | Please follow the steps provided to Install "Easy Install" in the link above and follow up with these additional steps to install IBM_DB_SA: 30 | 31 | 1. 
To install IBM_DB_SA from pypi repository(pypi.python.org): 32 | Windows: 33 | > pip install ibm_db_sa 34 | Linux/Unix: 35 | $ sudo pip install ibm_db_sa 36 | 37 | 2. To install IBM_DB_SA egg component from the downloaded .egg file 38 | Windows: 39 | > easy_install ibm_db_sa-x.x.x-pyx.x.egg 40 | Linux/Unix: 41 | $ sudo easy_install ibm_db_sa-x.x.x-pyx.x.egg 42 | 43 | 3. To install IBM_DB_SA from source 44 | Standard Python setup should be used:: 45 | python setup.py install 46 | 47 | 4. In case you want to control the sqlalchemy version use the following commands 48 | ``` 49 | Install SQLAlchemy supported version: 50 | pip install sqlalchemy==1.3.23 51 | pip install ibm_db_sa 52 | ``` 53 | 54 | 5. IBM_DB_SA is now available as part of conda-forge channel. 55 | Install Instruction from conda-forge channel 56 | ``` 57 | conda install -c conda-forge ibm_db 58 | conda install -c conda-forge ibm_db_sa 59 | ``` 60 | #### PLATFORMS SUPPORTED THROUGH CONDA FORGE #### 61 | 1. WINDOWS 62 | 2. MAC 63 | 3. LINUX 64 | 65 | Connecting 66 | ---------- 67 | A TCP/IP connection can be specified as the following:: 68 | ``` 69 | from sqlalchemy import create_engine 70 | 71 | e = create_engine("db2+ibm_db://user:pass@host[:port]/database") 72 | ``` 73 | 74 | For a local socket connection, exclude the "host" and "port" portions:: 75 | 76 | ``` 77 | from sqlalchemy import create_engine 78 | 79 | e = create_engine("db2+ibm_db://user:pass@/database") 80 | ``` 81 | 82 | Supported Databases 83 | ------------------- 84 | - IBM DB2 Universal Database for Linux/Unix/Windows versions 9.7 onwards 85 | - IBM Db2 on Cloud 86 | - IBM Db2 on ZOS 87 | - IBM Db2 on Iseries 88 | 89 | Note 90 | ------------------------------------------------------------- 91 | By default, all tables and schemas stored in a Db2 database are created 92 | using capital letters only. 
93 | However, if you have a table name in lowercase letters, you can still reference 94 | it by enclosing the name in single quotes inside double quotes. 95 | For example 96 | ``` 97 | if the users table name is stored in lowercase inside the database, 98 | So, you can use single quotes "'users'". 99 | If you will not use single quotes such as "users", it will 100 | be referred to as "USERS". 101 | 102 | metadata = sqlalchemy.MetaData(schema="schema1") 103 | table = sqlalchemy.Table("'users'", metadata, autoload_with=engine) 104 | 105 | ``` 106 | 107 | Known Limitations in ibm_db_sa adapter for DB2 databases 108 | ------------------------------------------------------------- 109 | 1) Non-standard SQL queries are not supported. e.g. "SELECT ? FROM TAB1" 110 | 2) For updates involving primary/foreign key references, the entries should be made in correct order. Integrity check is always on and thus the primary keys referenced by the foreign keys in the referencing tables should always exist in the parent table. 111 | 3) Unique key which contains nullable column not supported 112 | 4) UPDATE CASCADE for foreign keys not supported 113 | 5) DEFERRABLE INITIALLY deferred not supported 114 | 6) Subquery in ON clause of LEFT OUTER JOIN not supported 115 | 7) PyODBC and Jython/zxjdbc support is experimental 116 | 117 | 118 | Credits 119 | ------- 120 | ibm_db_sa for SQLAlchemy was first produced by IBM Inc., targeting version 0.4. 121 | The library was ported for version 0.6 and 0.7 by Jaimy Azle. 122 | Port for version 0.8 and modernization of test suite by Mike Bayer. 123 | 124 | Contributing to IBM_DB_SA python project 125 | ---------------------------------------- 126 | See `CONTRIBUTING 127 | `_. 
128 | 129 | ``` 130 | The developer sign-off should include the reference to the DCO in remarks(example below): 131 | DCO 1.1 Signed-off-by: Random J Developer 132 | ``` 133 | 134 | -------------------------------------------------------------------------------- /contributing/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Developer's Certificate of Origin 1.1 2 | 3 | By making a contribution to this project, I certify that: 4 | 5 | (a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or 6 | 7 | (b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or 8 | 9 | (c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it. 10 | 11 | (d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved. 12 | -------------------------------------------------------------------------------- /ibm_db_sa/__init__.py: -------------------------------------------------------------------------------- 1 | # +--------------------------------------------------------------------------+ 2 | # | Licensed Materials - Property of IBM | 3 | # | | 4 | # | (C) Copyright IBM Corporation 2008, 2016. 
| 5 | # +--------------------------------------------------------------------------+ 6 | # | This module complies with SQLAlchemy 0.8 and is | 7 | # | Licensed under the Apache License, Version 2.0 (the "License"); | 8 | # | you may not use this file except in compliance with the License. | 9 | # | You may obtain a copy of the License at | 10 | # | http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable | 11 | # | law or agreed to in writing, software distributed under the License is | 12 | # | distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY | 13 | # | KIND, either express or implied. See the License for the specific | 14 | # | language governing permissions and limitations under the License. | 15 | # +--------------------------------------------------------------------------+ 16 | # | Authors: Alex Pitigoi, Abhigyan Agrawal, Rahul Priyadarshi | 17 | # | Contributors: Jaimy Azle, Mike Bayer | 18 | # +--------------------------------------------------------------------------+ 19 | 20 | __version__ = '0.4.1' 21 | 22 | from . import ibm_db, pyodbc, base 23 | 24 | 25 | # default dialect 26 | base.dialect = ibm_db.dialect 27 | 28 | from .base import \ 29 | BIGINT, BLOB, CHAR, CLOB, DATE, DATETIME, \ 30 | DECIMAL, DOUBLE, DECIMAL,\ 31 | GRAPHIC, INTEGER, INTEGER, LONGVARCHAR, \ 32 | NUMERIC, SMALLINT, REAL, TIME, TIMESTAMP, \ 33 | VARCHAR, VARGRAPHIC, dialect 34 | 35 | #__all__ = ( 36 | # TODO: (put types here) 37 | # 'dialect' 38 | #) 39 | -------------------------------------------------------------------------------- /ibm_db_sa/base.py: -------------------------------------------------------------------------------- 1 | # +--------------------------------------------------------------------------+ 2 | # | Licensed Materials - Property of IBM | 3 | # | | 4 | # | (C) Copyright IBM Corporation 2008, 2016. 
# +--------------------------------------------------------------------------+
# | This module complies with SQLAlchemy 0.8 and is                          |
# | Licensed under the Apache License, Version 2.0 (the "License");          |
# | you may not use this file except in compliance with the License.         |
# | You may obtain a copy of the License at                                  |
# | http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable |
# | law or agreed to in writing, software distributed under the License is   |
# | distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
# | KIND, either express or implied. See the License for the specific        |
# | language governing permissions and limitations under the License.        |
# +--------------------------------------------------------------------------+
# | Authors: Alex Pitigoi, Abhigyan Agrawal, Rahul Priyadarshi               |
# | Contributors: Jaimy Azle, Mike Bayer                                     |
# +--------------------------------------------------------------------------+
"""Support for IBM DB2 database

"""
import datetime, re
from sqlalchemy import types as sa_types
from sqlalchemy import schema as sa_schema
from sqlalchemy import util
from sqlalchemy.sql import compiler
from sqlalchemy.sql import operators
from sqlalchemy.engine import default
from sqlalchemy import __version__ as SA_Version
from . import reflection as ibm_reflection

from sqlalchemy.types import BLOB, CHAR, CLOB, DATE, DATETIME, INTEGER, \
    SMALLINT, BIGINT, DECIMAL, NUMERIC, REAL, TIME, TIMESTAMP, \
    VARCHAR, FLOAT

# Installed SQLAlchemy version as [major, minor], e.g. "1.4.49" -> [1, 4].
SA_Version = [int(ver_token) for ver_token in SA_Version.split('.')[0:2]]

# as documented from:
# http://publib.boulder.ibm.com/infocenter/db2luw/v9/index.jsp?topic=/com.ibm.db2.udb.doc/admin/r0001095.htm
RESERVED_WORDS = set(
    ['activate', 'disallow', 'locale', 'result', 'add', 'disconnect', 'localtime',
     'result_set_locator', 'after', 'distinct', 'localtimestamp', 'return', 'alias',
     'do', 'locator', 'returns', 'all', 'double', 'locators', 'revoke', 'allocate', 'drop',
     'lock', 'right', 'allow', 'dssize', 'lockmax', 'rollback', 'alter', 'dynamic',
     'locksize', 'routine', 'and', 'each', 'long', 'row', 'any', 'editproc', 'loop',
     'row_number', 'as', 'else', 'maintained', 'rownumber', 'asensitive', 'elseif',
     'materialized', 'rows', 'associate', 'enable', 'maxvalue', 'rowset', 'asutime',
     'encoding', 'microsecond', 'rrn', 'at', 'encryption', 'microseconds', 'run',
     'attributes', 'end', 'minute', 'savepoint', 'audit', 'end-exec', 'minutes', 'schema',
     'authorization', 'ending', 'minvalue', 'scratchpad', 'aux', 'erase', 'mode', 'scroll',
     'auxiliary', 'escape', 'modifies', 'search', 'before', 'every', 'month', 'second',
     'begin', 'except', 'months', 'seconds', 'between', 'exception', 'new', 'secqty',
     'binary', 'excluding', 'new_table', 'security', 'bufferpool', 'exclusive',
     'nextval', 'select', 'by', 'execute', 'no', 'sensitive', 'cache', 'exists', 'nocache',
     'sequence', 'call', 'exit', 'nocycle', 'session', 'called', 'explain', 'nodename',
     'session_user', 'capture', 'external', 'nodenumber', 'set', 'cardinality',
     'extract', 'nomaxvalue', 'signal', 'cascaded', 'fenced', 'nominvalue', 'simple',
     'case', 'fetch', 'none', 'some', 'cast', 'fieldproc', 'noorder', 'source', 'ccsid',
     'file', 'normalized', 'specific', 'char', 'final', 'not', 'sql', 'character', 'for',
     'null', 'sqlid', 'check', 'foreign', 'nulls', 'stacked', 'close', 'free', 'numparts',
     'standard', 'cluster', 'from', 'obid', 'start', 'collection', 'full', 'of', 'starting',
     'collid', 'function', 'old', 'statement', 'column', 'general', 'old_table', 'static',
     'comment', 'generated', 'on', 'stay', 'commit', 'get', 'open', 'stogroup', 'concat',
     'global', 'optimization', 'stores', 'condition', 'go', 'optimize', 'style', 'connect',
     'goto', 'option', 'substring', 'connection', 'grant', 'or', 'summary', 'constraint',
     'graphic', 'order', 'synonym', 'contains', 'group', 'out', 'sysfun', 'continue',
     'handler', 'outer', 'sysibm', 'count', 'hash', 'over', 'sysproc', 'count_big',
     'hashed_value', 'overriding', 'system', 'create', 'having', 'package',
     'system_user', 'cross', 'hint', 'padded', 'table', 'current', 'hold', 'pagesize',
     'tablespace', 'current_date', 'hour', 'parameter', 'then', 'current_lc_ctype',
     'hours', 'part', 'time', 'current_path', 'identity', 'partition', 'timestamp',
     'current_schema', 'if', 'partitioned', 'to', 'current_server', 'immediate',
     'partitioning', 'transaction', 'current_time', 'in', 'partitions', 'trigger',
     'current_timestamp', 'including', 'password', 'trim', 'current_timezone',
     'inclusive', 'path', 'type', 'current_user', 'increment', 'piecesize', 'undo',
     'cursor', 'index', 'plan', 'union', 'cycle', 'indicator', 'position', 'unique', 'data',
     'inherit', 'precision', 'until', 'database', 'inner', 'prepare', 'update',
     'datapartitionname', 'inout', 'prevval', 'usage', 'datapartitionnum',
     'insensitive', 'primary', 'user', 'date', 'insert', 'priqty', 'using', 'day',
     'integrity', 'privileges', 'validproc', 'days', 'intersect', 'procedure', 'value',
     'db2general', 'into', 'program', 'values', 'db2genrl', 'is', 'psid', 'variable',
     'db2sql', 'isobid', 'query', 'variant', 'dbinfo', 'isolation', 'queryno', 'vcat',
     'dbpartitionname', 'iterate', 'range', 'version', 'dbpartitionnum', 'jar', 'rank',
     'view', 'deallocate', 'java', 'read', 'volatile', 'declare', 'join', 'reads', 'volumes',
     'default', 'key', 'recovery', 'when', 'defaults', 'label', 'references', 'whenever',
     'definition', 'language', 'referencing', 'where', 'delete', 'lateral', 'refresh',
     'while', 'dense_rank', 'lc_ctype', 'release', 'with', 'denserank', 'leave', 'rename',
     'without', 'describe', 'left', 'repeat', 'wlm', 'descriptor', 'like', 'reset', 'write',
     'deterministic', 'linktype', 'resignal', 'xmlelement', 'diagnostics', 'local',
     'restart', 'year', 'disable', 'localdate', 'restrict', 'years', '', 'abs', 'grouping',
     'regr_intercept', 'are', 'int', 'regr_r2', 'array', 'integer', 'regr_slope',
     'asymmetric', 'intersection', 'regr_sxx', 'atomic', 'interval', 'regr_sxy', 'avg',
     'large', 'regr_syy', 'bigint', 'leading', 'rollup', 'blob', 'ln', 'scope', 'boolean',
     'lower', 'similar', 'both', 'match', 'smallint', 'ceil', 'max', 'specifictype',
     'ceiling', 'member', 'sqlexception', 'char_length', 'merge', 'sqlstate',
     'character_length', 'method', 'sqlwarning', 'clob', 'min', 'sqrt', 'coalesce', 'mod',
     'stddev_pop', 'collate', 'module', 'stddev_samp', 'collect', 'multiset',
     'submultiset', 'convert', 'national', 'sum', 'corr', 'natural', 'symmetric',
     'corresponding', 'nchar', 'tablesample', 'covar_pop', 'nclob', 'timezone_hour',
     'covar_samp', 'normalize', 'timezone_minute', 'cube', 'nullif', 'trailing',
     'cume_dist', 'numeric', 'translate', 'current_default_transform_group',
     'octet_length', 'translation', 'current_role', 'only', 'treat',
     'current_transform_group_for_type', 'overlaps', 'true', 'dec', 'overlay',
     'uescape', 'decimal', 'percent_rank', 'unknown', 'deref', 'percentile_cont',
     'unnest', 'element', 'percentile_disc', 'upper', 'exec', 'power', 'var_pop', 'exp',
     'real', 'var_samp', 'false', 'recursive', 'varchar', 'filter', 'ref', 'varying',
     'float', 'regr_avgx', 'width_bucket', 'floor', 'regr_avgy', 'window', 'fusion',
     'regr_count', 'within', 'asc'])


class _IBM_Boolean(sa_types.Boolean):
    """Boolean implementation that maps DB2's SMALLINT 0/1 to Python bool."""

    def result_processor(self, dialect, coltype):
        # DB2 returns 0/1; coerce anything non-None through bool().
        def process(value):
            if value is None:
                return None
            else:
                return bool(value)

        return process

    def bind_processor(self, dialect):
        # Send literal '1'/'0' strings, which DB2 accepts for SMALLINT.
        def process(value):
            if value is None:
                return None
            elif bool(value):
                return '1'
            else:
                return '0'

        return process


class _IBM_Date(sa_types.Date):
    """Date implementation that truncates datetimes to their date part."""

    def result_processor(self, dialect, coltype):
        def process(value):
            if value is None:
                return None
            if isinstance(value, datetime.datetime):
                value = datetime.date(value.year, value.month, value.day)
            return value

        return process

    def bind_processor(self, dialect):
        # DB2 DATE binds are sent as ISO strings.
        def process(value):
            if value is None:
                return None
            if isinstance(value, datetime.datetime):
                value = datetime.date(value.year, value.month, value.day)
            return str(value)

        return process


# Marker types: each carries a __visit_name__ that dispatches to the
# matching visit_* method on DB2TypeCompiler.

class BOOLEAN(sa_types.Boolean):
    __visit_name__ = 'BOOLEAN'


class DOUBLE(sa_types.Numeric):
    __visit_name__ = 'DOUBLE'


class LONGVARCHAR(sa_types.VARCHAR):
    # BUG FIX: was "__visit_name_" (single trailing underscore), which left
    # VARCHAR's visit name in effect and rendered "VARCHAR(None)" instead of
    # dispatching to visit_LONGVARCHAR ("LONG VARCHAR").
    __visit_name__ = 'LONGVARCHAR'


class DBCLOB(sa_types.CLOB):
    __visit_name__ = "DBCLOB"


class GRAPHIC(sa_types.CHAR):
    __visit_name__ = "GRAPHIC"


class VARGRAPHIC(sa_types.Unicode):
    __visit_name__ = "VARGRAPHIC"


class LONGVARGRAPHIC(sa_types.UnicodeText):
    __visit_name__ = "LONGVARGRAPHIC"


class XML(sa_types.Text):
    __visit_name__ = "XML"
"XML" 187 | 188 | 189 | colspecs = { 190 | sa_types.Boolean: _IBM_Boolean, 191 | sa_types.Date: _IBM_Date 192 | # really ? 193 | # sa_types.Unicode: DB2VARGRAPHIC 194 | } 195 | 196 | ischema_names = { 197 | 'BOOLEAN': BOOLEAN, 198 | 'BLOB': BLOB, 199 | 'CHAR': CHAR, 200 | 'CHARACTER': CHAR, 201 | 'CLOB': CLOB, 202 | 'DATE': DATE, 203 | 'DATETIME': DATETIME, 204 | 'INTEGER': INTEGER, 205 | 'SMALLINT': SMALLINT, 206 | 'BIGINT': BIGINT, 207 | 'DECIMAL': DECIMAL, 208 | 'NUMERIC': NUMERIC, 209 | 'REAL': REAL, 210 | 'DOUBLE': DOUBLE, 211 | 'FLOAT': FLOAT, 212 | 'TIME': TIME, 213 | 'TIMESTAMP': TIMESTAMP, 214 | 'TIMESTMP': TIMESTAMP, 215 | 'VARCHAR': VARCHAR, 216 | 'LONGVARCHAR': LONGVARCHAR, 217 | 'XML': XML, 218 | 'GRAPHIC': GRAPHIC, 219 | 'VARGRAPHIC': VARGRAPHIC, 220 | 'LONGVARGRAPHIC': LONGVARGRAPHIC, 221 | 'DBCLOB': DBCLOB 222 | } 223 | 224 | 225 | class DB2TypeCompiler(compiler.GenericTypeCompiler): 226 | 227 | def visit_TIMESTAMP(self, type_, **kw): 228 | return "TIMESTAMP" 229 | 230 | def visit_DATE(self, type_, **kw): 231 | return "DATE" 232 | 233 | def visit_TIME(self, type_, **kw): 234 | return "TIME" 235 | 236 | def visit_DATETIME(self, type_, **kw): 237 | return self.visit_TIMESTAMP(type_, **kw) 238 | 239 | def visit_SMALLINT(self, type_, **kw): 240 | return "SMALLINT" 241 | 242 | def visit_INT(self, type_, **kw): 243 | return "INT" 244 | 245 | def visit_BIGINT(self, type_, **kw): 246 | return "BIGINT" 247 | 248 | def visit_FLOAT(self, type_, **kw): 249 | return "FLOAT" if type_.precision is None else \ 250 | "FLOAT(%(precision)s)" % {'precision': type_.precision} 251 | 252 | def visit_DOUBLE(self, type_, **kw): 253 | return "DOUBLE" 254 | 255 | def visit_XML(self, type_, **kw): 256 | return "XML" 257 | 258 | def visit_CLOB(self, type_, **kw): 259 | return "CLOB" 260 | 261 | def visit_BLOB(self, type_, **kw): 262 | return "BLOB(1M)" if type_.length in (None, 0) else \ 263 | "BLOB(%(length)s)" % {'length': type_.length} 264 | 265 | def visit_DBCLOB(self, 
type_, **kw): 266 | return "DBCLOB(1M)" if type_.length in (None, 0) else \ 267 | "DBCLOB(%(length)s)" % {'length': type_.length} 268 | 269 | def visit_VARCHAR(self, type_, **kw): 270 | return "VARCHAR(%(length)s)" % {'length': type_.length} 271 | 272 | def visit_LONGVARCHAR(self, type_, **kw): 273 | return "LONG VARCHAR" 274 | 275 | def visit_VARGRAPHIC(self, type_, **kw): 276 | return "VARGRAPHIC(%(length)s)" % {'length': type_.length} 277 | 278 | def visit_LONGVARGRAPHIC(self, type_, **kw): 279 | return "LONG VARGRAPHIC" 280 | 281 | def visit_CHAR(self, type_, **kw): 282 | return "CHAR" if type_.length in (None, 0) else \ 283 | "CHAR(%(length)s)" % {'length': type_.length} 284 | 285 | def visit_GRAPHIC(self, type_, **kw): 286 | return "GRAPHIC" if type_.length in (None, 0) else \ 287 | "GRAPHIC(%(length)s)" % {'length': type_.length} 288 | 289 | def visit_DECIMAL(self, type_, **kw): 290 | if not type_.precision: 291 | return "DECIMAL(31, 0)" 292 | elif not type_.scale: 293 | return "DECIMAL(%(precision)s, 0)" % {'precision': type_.precision} 294 | else: 295 | return "DECIMAL(%(precision)s, %(scale)s)" % { 296 | 'precision': type_.precision, 'scale': type_.scale} 297 | 298 | def visit_numeric(self, type_, **kw): 299 | return self.visit_DECIMAL(type_, **kw) 300 | 301 | def visit_datetime(self, type_, **kw): 302 | return self.visit_TIMESTAMP(type_, **kw) 303 | 304 | def visit_date(self, type_, **kw): 305 | return self.visit_DATE(type_, **kw) 306 | 307 | def visit_time(self, type_, **kw): 308 | return self.visit_TIME(type_, **kw) 309 | 310 | def visit_integer(self, type_, **kw): 311 | return self.visit_INT(type_, **kw) 312 | 313 | def visit_boolean(self, type_, **kw): 314 | return self.visit_SMALLINT(type_, **kw) 315 | 316 | def visit_float(self, type_, **kw): 317 | return self.visit_FLOAT(type_, **kw) 318 | 319 | def visit_unicode(self, type_, **kw): 320 | check_server = getattr(DB2Dialect, 'serverType') 321 | return (self.visit_VARGRAPHIC(type_, **kw) + " CCSID 
1200") \ 322 | if check_server == "DB2" else self.visit_VARGRAPHIC(type_, **kw) 323 | 324 | def visit_unicode_text(self, type_, **kw): 325 | return self.visit_LONGVARGRAPHIC(type_, **kw) 326 | 327 | def visit_string(self, type_, **kw): 328 | return self.visit_VARCHAR(type_, **kw) 329 | 330 | def visit_TEXT(self, type_, **kw): 331 | return self.visit_CLOB(type_, **kw) 332 | 333 | def visit_large_binary(self, type_, **kw): 334 | return self.visit_BLOB(type_, **kw) 335 | 336 | 337 | class DB2Compiler(compiler.SQLCompiler): 338 | if SA_Version < [0, 9]: 339 | def visit_false(self, expr, **kw): 340 | return '0' 341 | 342 | def visit_true(self, expr, **kw): 343 | return '1' 344 | 345 | def get_cte_preamble(self, recursive): 346 | return "WITH" 347 | 348 | def visit_now_func(self, fn, **kw): 349 | return "CURRENT_TIMESTAMP" 350 | 351 | def for_update_clause(self, select, **kw): 352 | if select.for_update is True: 353 | return ' WITH RS USE AND KEEP UPDATE LOCKS' 354 | elif select.for_update == 'read': 355 | return ' WITH RS USE AND KEEP SHARE LOCKS' 356 | else: 357 | return '' 358 | 359 | def visit_mod_binary(self, binary, operator, **kw): 360 | return "mod(%s, %s)" % (self.process(binary.left), 361 | self.process(binary.right)) 362 | 363 | def limit_clause(self, select, **kwargs): 364 | if (select._limit is not None) and (select._offset is None): 365 | return " FETCH FIRST %s ROWS ONLY" % select._limit 366 | else: 367 | return "" 368 | 369 | def visit_select(self, select, **kwargs): 370 | limit, offset = select._limit, select._offset 371 | sql_ori = compiler.SQLCompiler.visit_select(self, select, **kwargs) 372 | if offset is not None: 373 | __rownum = 'Z.__ROWNUM' 374 | sql_split = re.split(r"[\s+]FROM ", sql_ori, 1) 375 | sql_sec = "" 376 | sql_sec = " \nFROM %s " % (sql_split[1]) 377 | 378 | dummyVal = "Z.__db2_" 379 | sql_pri = "" 380 | 381 | sql_sel = "SELECT " 382 | if select._distinct: 383 | sql_sel = "SELECT DISTINCT " 384 | 385 | sql_select_token = 
sql_split[0].split(",") 386 | i = 0 387 | while (i < len(sql_select_token)): 388 | if sql_select_token[i].count("TIMESTAMP(DATE(SUBSTR(CHAR(") == 1: 389 | sql_sel = "%s \"%s%d\"," % (sql_sel, dummyVal, i + 1) 390 | sql_pri = '%s %s,%s,%s,%s AS "%s%d",' % ( 391 | sql_pri, 392 | sql_select_token[i], 393 | sql_select_token[i + 1], 394 | sql_select_token[i + 2], 395 | sql_select_token[i + 3], 396 | dummyVal, i + 1) 397 | i = i + 4 398 | continue 399 | 400 | if sql_select_token[i].count(" AS ") == 1: 401 | temp_col_alias = sql_select_token[i].split(" AS ") 402 | sql_pri = '%s %s,' % (sql_pri, sql_select_token[i]) 403 | sql_sel = "%s %s," % (sql_sel, temp_col_alias[1]) 404 | i = i + 1 405 | continue 406 | 407 | sql_pri = '%s %s AS "%s%d",' % (sql_pri, sql_select_token[i], dummyVal, i + 1) 408 | sql_sel = "%s \"%s%d\"," % (sql_sel, dummyVal, i + 1) 409 | i = i + 1 410 | 411 | sql_pri = sql_pri[:len(sql_pri) - 1] 412 | sql_pri = "%s%s" % (sql_pri, sql_sec) 413 | sql_sel = sql_sel[:len(sql_sel) - 1] 414 | sql = '%s, ( ROW_NUMBER() OVER() ) AS "%s" FROM ( %s ) AS M' % (sql_sel, __rownum, sql_pri) 415 | sql = '%s FROM ( %s ) Z WHERE' % (sql_sel, sql) 416 | 417 | if offset != 0: 418 | sql = '%s "%s" > %d' % (sql, __rownum, offset) 419 | if offset != 0 and limit is not None: 420 | sql = '%s AND ' % (sql) 421 | if limit is not None: 422 | sql = '%s "%s" <= %d' % (sql, __rownum, offset + limit) 423 | return "( %s )" % (sql,) 424 | else: 425 | return sql_ori 426 | 427 | def visit_sequence(self, sequence, **kw): 428 | if sequence.schema: 429 | return "NEXT VALUE FOR %s.%s" % (sequence.schema, sequence.name) 430 | return "NEXT VALUE FOR %s" % sequence.name 431 | 432 | def default_from(self): 433 | # DB2 uses SYSIBM.SYSDUMMY1 table for row count 434 | return " FROM SYSIBM.SYSDUMMY1" 435 | 436 | def visit_function(self, func, result_map=None, **kwargs): 437 | if func.name.upper() == "AVG": 438 | return "AVG(DOUBLE(%s))" % (self.function_argspec(func, **kwargs)) 439 | elif 
func.name.upper() == "CHAR_LENGTH": 440 | return "CHAR_LENGTH(%s, %s)" % (self.function_argspec(func, **kwargs), 'OCTETS') 441 | else: 442 | return compiler.SQLCompiler.visit_function(self, func, **kwargs) 443 | 444 | # TODO: this is wrong but need to know what DB2 is expecting here 445 | # if func.name.upper() == "LENGTH": 446 | # return "LENGTH('%s')" % func.compile().params[func.name + '_1'] 447 | # else: 448 | # return compiler.SQLCompiler.visit_function(self, func, **kwargs) 449 | 450 | def visit_cast(self, cast, **kw): 451 | type_ = cast.typeclause.type 452 | 453 | # TODO: verify that CAST shouldn't be called with 454 | # other types, I was able to CAST against VARCHAR 455 | # for example 456 | if isinstance(type_, ( 457 | sa_types.DateTime, sa_types.Date, sa_types.Time, sa_types.DOUBLE, sa_types.Double, sa_types.Integer, sa_types.INTEGER, 458 | sa_types.Boolean, sa_types.BOOLEAN, sa_types.BIGINT, sa_types.BigInteger, sa_types.BINARY, sa_types.NUMERIC, sa_types.SmallInteger, 459 | sa_types.DECIMAL, sa_types.String, sa_types.Float, sa_types.FLOAT, sa_types.Numeric)): 460 | return super(DB2Compiler, self).visit_cast(cast, **kw) 461 | else: 462 | return self.process(cast.clause) 463 | 464 | def get_select_precolumns(self, select, **kwargs): 465 | if isinstance(select._distinct, str): 466 | return select._distinct.upper() + " " 467 | elif select._distinct: 468 | return "DISTINCT " 469 | else: 470 | return "" 471 | 472 | def visit_join(self, join, asfrom=False, **kwargs): 473 | # NOTE: this is the same method as that used in mysql/base.py 474 | # to render INNER JOIN 475 | return ''.join( 476 | (self.process(join.left, asfrom=True, **kwargs), 477 | (join.isouter and " LEFT OUTER JOIN " or " INNER JOIN "), 478 | self.process(join.right, asfrom=True, **kwargs), 479 | " ON ", 480 | self.process(join.onclause, **kwargs))) 481 | 482 | def visit_savepoint(self, savepoint_stmt): 483 | return "SAVEPOINT %(sid)s ON ROLLBACK RETAIN CURSORS" % {'sid': 
self.preparer.format_savepoint(savepoint_stmt)} 484 | 485 | def visit_rollback_to_savepoint(self, savepoint_stmt): 486 | return 'ROLLBACK TO SAVEPOINT %(sid)s' % {'sid': self.preparer.format_savepoint(savepoint_stmt)} 487 | 488 | def visit_release_savepoint(self, savepoint_stmt): 489 | return 'RELEASE TO SAVEPOINT %(sid)s' % {'sid': self.preparer.format_savepoint(savepoint_stmt)} 490 | 491 | def visit_unary(self, unary, **kw): 492 | if (unary.operator == operators.exists) and kw.get('within_columns_clause', False): 493 | usql = super(DB2Compiler, self).visit_unary(unary, **kw) 494 | usql = "CASE WHEN " + usql + " THEN 1 ELSE 0 END" 495 | return usql 496 | else: 497 | return super(DB2Compiler, self).visit_unary(unary, **kw) 498 | 499 | 500 | class DB2DDLCompiler(compiler.DDLCompiler): 501 | 502 | @staticmethod 503 | def get_server_version_info(dialect): 504 | """Returns the DB2 server major and minor version as a list of ints.""" 505 | return [int(ver_token) for ver_token in dialect.dbms_ver.split('.')[0:2]] \ 506 | if hasattr(dialect, 'dbms_ver') else [] 507 | 508 | @classmethod 509 | def _is_nullable_unique_constraint_supported(cls, dialect): 510 | """Checks to see if the DB2 version is at least 10.5. 511 | This is needed for checking if unique constraints with null columns are supported. 
512 | """ 513 | dbms_name = getattr(dialect, 'dbms_name', None) 514 | if hasattr(dialect, 'dbms_name'): 515 | if not (dbms_name is None) and (dbms_name.find('DB2/') != -1): 516 | return cls.get_server_version_info(dialect) >= [10, 5] 517 | else: 518 | return False 519 | 520 | def get_column_specification(self, column, **kw): 521 | col_spec = [self.preparer.format_column(column), 522 | self.dialect.type_compiler.process(column.type, type_expression=column)] 523 | 524 | # column-options: "NOT NULL" 525 | if not column.nullable or column.primary_key: 526 | col_spec.append('NOT NULL') 527 | 528 | # default-clause: 529 | default = self.get_column_default_string(column) 530 | if default is not None: 531 | col_spec.extend(['WITH DEFAULT', default]) 532 | 533 | if column is column.table._autoincrement_column: 534 | col_spec.extend(['GENERATED BY DEFAULT', 535 | 'AS IDENTITY', 536 | '(START WITH 1)']) 537 | column_spec = ' '.join(col_spec) 538 | return column_spec 539 | 540 | def define_constraint_cascades(self, constraint): 541 | text = "" 542 | if constraint.ondelete is not None: 543 | text += " ON DELETE %s" % constraint.ondelete 544 | 545 | if constraint.onupdate is not None: 546 | util.warn( 547 | "DB2 does not support UPDATE CASCADE for foreign keys.") 548 | 549 | return text 550 | 551 | def visit_drop_constraint(self, drop, **kw): 552 | constraint = drop.element 553 | if isinstance(constraint, sa_schema.ForeignKeyConstraint): 554 | qual = "FOREIGN KEY " 555 | const = self.preparer.format_constraint(constraint) 556 | elif isinstance(constraint, sa_schema.PrimaryKeyConstraint): 557 | qual = "PRIMARY KEY " 558 | const = "" 559 | elif isinstance(constraint, sa_schema.UniqueConstraint): 560 | qual = "UNIQUE " 561 | if self._is_nullable_unique_constraint_supported(self.dialect): 562 | for column in constraint: 563 | if column.nullable: 564 | constraint.uConstraint_as_index = True 565 | if getattr(constraint, 'uConstraint_as_index', None): 566 | qual = "INDEX " 567 | const 
= self.preparer.format_constraint(constraint) 568 | else: 569 | qual = "" 570 | const = self.preparer.format_constraint(constraint) 571 | 572 | return ("DROP %s%s" % (qual, const)) if \ 573 | hasattr(constraint, 'uConstraint_as_index') and constraint.uConstraint_as_index else \ 574 | ("ALTER TABLE %s DROP %s%s" % (self.preparer.format_table(constraint.table), qual, const)) 575 | 576 | def create_table_constraints(self, table, **kw): 577 | if self._is_nullable_unique_constraint_supported(self.dialect): 578 | for constraint in table._sorted_constraints: 579 | if isinstance(constraint, sa_schema.UniqueConstraint): 580 | for column in constraint: 581 | if column.nullable: 582 | constraint.use_alter = True 583 | constraint.uConstraint_as_index = True 584 | break 585 | if getattr(constraint, 'uConstraint_as_index', None): 586 | if not constraint.name: 587 | index_name = "%s_%s_%s" % ('ukey', self.preparer.format_table(constraint.table), 588 | '_'.join(column.name for column in constraint)) 589 | else: 590 | index_name = constraint.name 591 | index = sa_schema.Index(index_name, *(column for column in constraint)) 592 | index.unique = True 593 | index.uConstraint_as_index = True 594 | result = super(DB2DDLCompiler, self).create_table_constraints(table, **kw) 595 | return result 596 | 597 | def visit_create_index(self, create, include_schema=True, include_table_schema=True, **kw): 598 | if SA_Version < [0, 8]: 599 | sql = super(DB2DDLCompiler, self).visit_create_index(create, **kw) 600 | else: 601 | sql = super(DB2DDLCompiler, self).visit_create_index(create, include_schema, include_table_schema, **kw) 602 | if getattr(create.element, 'uConstraint_as_index', None): 603 | sql += ' EXCLUDE NULL KEYS' 604 | return sql 605 | 606 | def visit_add_constraint(self, create, **kw): 607 | if self._is_nullable_unique_constraint_supported(self.dialect): 608 | if isinstance(create.element, sa_schema.UniqueConstraint): 609 | for column in create.element: 610 | if column.nullable: 611 | 
create.element.uConstraint_as_index = True 612 | break 613 | if getattr(create.element, 'uConstraint_as_index', None): 614 | if not create.element.name: 615 | index_name = "%s_%s_%s" % ('uk_index', self.preparer.format_table(create.element.table), 616 | '_'.join(column.name for column in create.element)) 617 | else: 618 | index_name = create.element.name 619 | index = sa_schema.Index(index_name, *(column for column in create.element)) 620 | index.unique = True 621 | index.uConstraint_as_index = True 622 | sql = self.visit_create_index(sa_schema.CreateIndex(index)) 623 | return sql 624 | sql = super(DB2DDLCompiler, self).visit_add_constraint(create) 625 | return sql 626 | 627 | 628 | class DB2IdentifierPreparer(compiler.IdentifierPreparer): 629 | reserved_words = RESERVED_WORDS 630 | illegal_initial_characters = set(range(0, 10)).union(["_", "$"]) 631 | 632 | 633 | class _SelectLastRowIDMixin(object): 634 | _select_lastrowid = False 635 | _lastrowid = None 636 | 637 | def get_lastrowid(self): 638 | return self._lastrowid 639 | 640 | def pre_exec(self): 641 | if self.isinsert: 642 | tbl = self.compiled.statement.table 643 | seq_column = tbl._autoincrement_column 644 | insert_has_sequence = seq_column is not None 645 | 646 | self._select_lastrowid = insert_has_sequence and \ 647 | not self.compiled.returning and \ 648 | not self.compiled.inline 649 | 650 | def post_exec(self): 651 | conn = self.root_connection 652 | if self._select_lastrowid: 653 | conn._cursor_execute(self.cursor, 654 | "SELECT IDENTITY_VAL_LOCAL() FROM SYSIBM.SYSDUMMY1", 655 | (), self) 656 | row = self.cursor.fetchall()[0] 657 | if row[0] is not None: 658 | self._lastrowid = int(row[0]) 659 | 660 | 661 | class DB2ExecutionContext(_SelectLastRowIDMixin, default.DefaultExecutionContext): 662 | def fire_sequence(self, seq, type_): 663 | return self._execute_scalar("SELECT NEXTVAL FOR " + 664 | self.dialect.identifier_preparer.format_sequence(seq) + 665 | " FROM SYSIBM.SYSDUMMY1", type_) 666 | 667 | 
668 | class DB2Dialect(default.DefaultDialect): 669 | name = 'ibm_db_sa' 670 | max_identifier_length = 128 671 | encoding = 'utf-8' 672 | default_paramstyle = 'qmark' 673 | colspecs = colspecs 674 | ischema_names = ischema_names 675 | supports_char_length = False 676 | supports_unicode_statements = False 677 | supports_unicode_binds = False 678 | if SA_Version < [1, 4]: 679 | returns_unicode_strings = False 680 | elif SA_Version < [2, 0]: 681 | returns_unicode_strings = sa_types.String.RETURNS_CONDITIONAL 682 | else: 683 | returns_unicode_strings = True 684 | postfetch_lastrowid = True 685 | supports_sane_rowcount = True 686 | supports_sane_multi_rowcount = True 687 | supports_native_decimal = False 688 | supports_native_boolean = False 689 | supports_statement_cache = False 690 | preexecute_sequences = False 691 | supports_alter = True 692 | supports_sequences = True 693 | sequences_optional = True 694 | 695 | requires_name_normalize = True 696 | 697 | supports_default_values = False 698 | supports_empty_insert = False 699 | 700 | two_phase_transactions = False 701 | savepoints = True 702 | 703 | statement_compiler = DB2Compiler 704 | ddl_compiler = DB2DDLCompiler 705 | type_compiler = DB2TypeCompiler 706 | preparer = DB2IdentifierPreparer 707 | execution_ctx_cls = DB2ExecutionContext 708 | 709 | _reflector_cls = ibm_reflection.DB2Reflector 710 | serverType = '' 711 | 712 | def __init__(self, **kw): 713 | super(DB2Dialect, self).__init__(**kw) 714 | self._reflector = self._reflector_cls(self) 715 | self.dbms_ver = None 716 | self.dbms_name = None 717 | 718 | # reflection: these all defer to an BaseDB2Reflector 719 | # object which selects between DB2 and AS/400 schemas 720 | def initialize(self, connection): 721 | self.dbms_ver = getattr(connection.connection, 'dbms_ver', None) 722 | self.dbms_name = getattr(connection.connection, 'dbms_name', None) 723 | DB2Dialect.serverType = self.dbms_name 724 | super(DB2Dialect, self).initialize(connection) 725 | # check 
server type logic here 726 | _reflector_cls = ibm_reflection.DB2Reflector 727 | if self.dbms_name == 'AS': 728 | _reflector_cls = ibm_reflection.AS400Reflector 729 | elif self.dbms_name == "DB2": 730 | _reflector_cls = ibm_reflection.OS390Reflector 731 | elif(self.dbms_name is None): 732 | _reflector_cls = ibm_reflection.DB2Reflector 733 | elif "DB2/" in self.dbms_name: 734 | _reflector_cls = ibm_reflection.DB2Reflector 735 | elif "IDS/" in self.dbms_name: 736 | _reflector_cls = ibm_reflection.DB2Reflector 737 | elif self.dbms_name.startswith("DSN"): 738 | _reflector_cls = ibm_reflection.OS390Reflector 739 | self._reflector = _reflector_cls(self) 740 | 741 | def get_columns(self, connection, table_name, schema=None, **kw): 742 | return self._reflector.get_columns(connection, table_name, schema=schema, **kw) 743 | 744 | def get_pk_constraint(self, connection, table_name, schema=None, **kw): 745 | return self._reflector.get_pk_constraint(connection, table_name, schema=schema, **kw) 746 | 747 | def get_foreign_keys(self, connection, table_name, schema=None, **kw): 748 | return self._reflector.get_foreign_keys(connection, table_name, schema=schema, **kw) 749 | 750 | def get_table_names(self, connection, schema=None, **kw): 751 | return self._reflector.get_table_names(connection, schema=schema, **kw) 752 | 753 | def get_view_names(self, connection, schema=None, **kw): 754 | return self._reflector.get_view_names(connection, schema=schema, **kw) 755 | 756 | def get_sequence_names(self, connection, schema=None, **kw): 757 | return self._reflector.get_sequence_names(connection, schema=schema, **kw) 758 | 759 | def get_view_definition(self, connection, view_name, schema=None, **kw): 760 | return self._reflector.get_view_definition(connection, view_name, schema=schema, **kw) 761 | 762 | def get_indexes(self, connection, table_name, schema=None, **kw): 763 | return self._reflector.get_indexes(connection, table_name, schema=schema, **kw) 764 | 765 | def 
get_unique_constraints(self, connection, table_name, schema=None, **kw): 766 | return self._reflector.get_unique_constraints(connection, table_name, schema=schema, **kw) 767 | 768 | def get_table_comment(self, connection, table_name, schema=None, **kw): 769 | return self._reflector.get_table_comment(connection, table_name, schema=schema, **kw) 770 | 771 | def normalize_name(self, name): 772 | return self._reflector.normalize_name(name) 773 | 774 | def denormalize_name(self, name): 775 | return self._reflector.denormalize_name(name) 776 | 777 | def has_table(self, connection, table_name, schema=None, **kw): 778 | return self._reflector.has_table(connection, table_name, schema=schema, **kw) 779 | 780 | def has_sequence(self, connection, sequence_name, schema=None, **kw): 781 | return self._reflector.has_sequence(connection, sequence_name, schema=schema, **kw) 782 | 783 | def get_schema_names(self, connection, **kw): 784 | return self._reflector.get_schema_names(connection, **kw) 785 | 786 | def get_primary_keys(self, connection, table_name, schema=None, **kw): 787 | return self._reflector.get_primary_keys( 788 | connection, table_name, schema=schema, **kw) 789 | 790 | def get_incoming_foreign_keys(self, connection, table_name, schema=None, **kw): 791 | return self._reflector.get_incoming_foreign_keys( 792 | connection, table_name, schema=schema, **kw) 793 | 794 | 795 | # legacy naming 796 | IBM_DBCompiler = DB2Compiler 797 | IBM_DBDDLCompiler = DB2DDLCompiler 798 | IBM_DBIdentifierPreparer = DB2IdentifierPreparer 799 | IBM_DBExecutionContext = DB2ExecutionContext 800 | IBM_DBDialect = DB2Dialect 801 | 802 | dialect = DB2Dialect 803 | -------------------------------------------------------------------------------- /ibm_db_sa/ibm_db.py: -------------------------------------------------------------------------------- 1 | # +--------------------------------------------------------------------------+ 2 | # | Licensed Materials - Property of IBM | 3 | # | | 4 | # | (C) 
Copyright IBM Corporation 2008, 2016. | 5 | # +--------------------------------------------------------------------------+ 6 | # | This module complies with SQLAlchemy 0.8 and is | 7 | # | Licensed under the Apache License, Version 2.0 (the "License"); | 8 | # | you may not use this file except in compliance with the License. | 9 | # | You may obtain a copy of the License at | 10 | # | http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable | 11 | # | law or agreed to in writing, software distributed under the License is | 12 | # | distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY | 13 | # | KIND, either express or implied. See the License for the specific | 14 | # | language governing permissions and limitations under the License. | 15 | # +--------------------------------------------------------------------------+ 16 | # | Authors: Alex Pitigoi, Abhigyan Agrawal, Rahul Priyadarshi,Abhinav Radke | 17 | # | Contributors: Jaimy Azle, Mike Bayer,Hemlata Bhatt | 18 | # +--------------------------------------------------------------------------+ 19 | 20 | from sqlalchemy import __version__ as SA_Version 21 | SA_Version = [int(ver_token) for ver_token in SA_Version.split('.')[0:2]] 22 | 23 | from .base import DB2ExecutionContext, DB2Dialect 24 | 25 | if SA_Version < [2,0]: 26 | from sqlalchemy import processors, types as sa_types, util 27 | else: 28 | from sqlalchemy import types as sa_types, util 29 | from sqlalchemy.engine import processors 30 | 31 | from sqlalchemy.exc import ArgumentError 32 | 33 | SQL_TXN_READ_UNCOMMITTED = 1 34 | SQL_TXN_READ_COMMITTED = 2 35 | SQL_TXN_REPEATABLE_READ = 4 36 | SQL_TXN_SERIALIZABLE = 8 37 | SQL_ATTR_TXN_ISOLATION = 108 38 | 39 | if SA_Version < [0, 8]: 40 | from sqlalchemy.engine import base 41 | else: 42 | from sqlalchemy.engine import result as _result 43 | 44 | 45 | class _IBM_Numeric_ibm_db(sa_types.Numeric): 46 | def result_processor(self, dialect, coltype): 47 | def to_float(value): 48 
| if value is None: 49 | return None 50 | else: 51 | return float(value) 52 | if self.asdecimal: 53 | return None 54 | else: 55 | return to_float 56 | 57 | 58 | class DB2ExecutionContext_ibm_db(DB2ExecutionContext): 59 | _callproc_result = None 60 | _out_parameters = None 61 | 62 | def get_lastrowid(self): 63 | return self.cursor.last_identity_val 64 | 65 | def pre_exec(self): 66 | # check for the compiled_parameters attribute in self 67 | if (hasattr(self, "compiled_parameters")): 68 | # if a single execute, check for outparams 69 | if len(self.compiled_parameters) == 1: 70 | for bindparam in self.compiled.binds.values(): 71 | if bindparam.isoutparam: 72 | self._out_parameters = True 73 | break 74 | else: 75 | pass 76 | 77 | def get_result_proxy(self): 78 | if self._callproc_result and self._out_parameters: 79 | if SA_Version < [0, 8]: 80 | result = base.ResultProxy(self) 81 | else: 82 | result = _result.ResultProxy(self) 83 | result.out_parameters = {} 84 | 85 | for bindparam in self.compiled.binds.values(): 86 | if bindparam.isoutparam: 87 | name = self.compiled.bind_names[bindparam] 88 | result.out_parameters[name] = self._callproc_result[self.compiled.positiontup.index(name)] 89 | 90 | return result 91 | else: 92 | if SA_Version < [0, 8]: 93 | result = base.ResultProxy(self) 94 | else: 95 | result = _result.ResultProxy(self) 96 | return result 97 | 98 | 99 | class DB2Dialect_ibm_db(DB2Dialect): 100 | driver = 'ibm_db_sa' 101 | supports_unicode_statements = True 102 | supports_statement_cache = False 103 | supports_sane_rowcount = True 104 | supports_sane_multi_rowcount = False 105 | supports_native_decimal = False 106 | supports_char_length = True 107 | supports_default_values = False 108 | supports_multivalues_insert = True 109 | execution_ctx_cls = DB2ExecutionContext_ibm_db 110 | 111 | colspecs = util.update_copy( 112 | DB2Dialect.colspecs, 113 | { 114 | sa_types.Numeric: _IBM_Numeric_ibm_db 115 | } 116 | ) 117 | 118 | if SA_Version < [2, 0]: 119 | 
@classmethod 120 | def dbapi(cls): 121 | """ Returns: the underlying DBAPI driver module 122 | """ 123 | import ibm_db_dbi as module 124 | return module 125 | else: 126 | @classmethod 127 | def import_dbapi(cls): 128 | """ Returns: the underlying DBAPI driver module 129 | """ 130 | import ibm_db_dbi as module 131 | return module 132 | 133 | def do_execute(self, cursor, statement, parameters, context=None): 134 | if context and context._out_parameters: 135 | statement = statement.split('(', 1)[0].split()[1] 136 | context._callproc_result = cursor.callproc(statement, parameters) 137 | else: 138 | check_server = getattr(DB2Dialect, 'serverType') 139 | if ("round(" in statement.casefold()) and check_server == "DB2": 140 | value_index = 0 141 | while '?' in statement and value_index < len(parameters): 142 | statement = statement.replace('?', str(parameters[value_index]), 1) 143 | value_index += 1 144 | cursor.execute(statement) 145 | else: 146 | cursor.execute(statement, parameters) 147 | 148 | def _get_server_version_info(self, connection): 149 | return connection.connection.server_info() 150 | 151 | _isolation_lookup = set(['READ STABILITY', 'RS', 'UNCOMMITTED READ', 'UR', 152 | 'CURSOR STABILITY', 'CS', 'REPEATABLE READ', 'RR']) 153 | 154 | _isolation_levels_cli = {'RR': SQL_TXN_SERIALIZABLE, 'REPEATABLE READ': SQL_TXN_SERIALIZABLE, 155 | 'UR': SQL_TXN_READ_UNCOMMITTED, 'UNCOMMITTED READ': SQL_TXN_READ_UNCOMMITTED, 156 | 'RS': SQL_TXN_REPEATABLE_READ, 'READ STABILITY': SQL_TXN_REPEATABLE_READ, 157 | 'CS': SQL_TXN_READ_COMMITTED, 'CURSOR STABILITY': SQL_TXN_READ_COMMITTED} 158 | 159 | _isolation_levels_returned = {value: key for key, value in _isolation_levels_cli.items()} 160 | 161 | def _get_cli_isolation_levels(self, level): 162 | return self._isolation_levels_cli[level] 163 | 164 | def set_isolation_level(self, connection, level): 165 | if level is None: 166 | level = 'CS' 167 | else: 168 | if len(level.strip()) < 1: 169 | level = 'CS' 170 | level = 
level.upper().replace("-", " ").replace("_", " ") 171 | if level not in self._isolation_lookup: 172 | raise ArgumentError( 173 | "Invalid value '%s' for isolation_level. " 174 | "Valid isolation levels for %s are %s" % 175 | (level, self.name, ", ".join(self._isolation_lookup)) 176 | ) 177 | attrib = {SQL_ATTR_TXN_ISOLATION: self._get_cli_isolation_levels(level)} 178 | res = connection.set_option(attrib) 179 | 180 | def get_isolation_level(self, connection): 181 | 182 | attrib = SQL_ATTR_TXN_ISOLATION 183 | res = connection.get_option(attrib) 184 | 185 | val = self._isolation_levels_returned[res] 186 | return val 187 | 188 | def reset_isolation_level(self, connection): 189 | self.set_isolation_level(connection, 'CS') 190 | 191 | def create_connect_args(self, url): 192 | # DSN support through CLI configuration (../cfg/db2cli.ini), 193 | # while 2 connection attributes are mandatory: database alias 194 | # and UID (in support to current schema), all the other 195 | # connection attributes (protocol, hostname, servicename) are 196 | # provided through db2cli.ini database catalog entry. 
Example 197 | # 1: ibm_db_sa:///?UID=db2inst1 or Example 2: 198 | # ibm_db_sa:///?DSN=;UID=db2inst1 199 | if not url.host: 200 | dsn = url.database 201 | uid = url.username 202 | pwd = url.password 203 | return (dsn, uid, pwd, '', ''), {} 204 | else: 205 | # Full URL string support for connection to remote data servers 206 | dsn_param = ['DATABASE=%s' % url.database, 207 | 'HOSTNAME=%s' % url.host, 208 | 'PROTOCOL=TCPIP'] 209 | if url.port: 210 | dsn_param.append('PORT=%s' % url.port) 211 | if url.username: 212 | dsn_param.append('UID=%s' % url.username) 213 | if url.password: 214 | if ';' in url.password: 215 | url.password = (url.password).partition(";")[0] 216 | dsn_param.append('PWD=%s' % url.password) 217 | 218 | # check for connection arguments 219 | connection_keys = ['Security', 'SSLClientKeystoredb', 'SSLClientKeystash', 'SSLServerCertificate', 220 | 'CurrentSchema'] 221 | query_keys = url.query.keys() 222 | for key in connection_keys: 223 | for query_key in query_keys: 224 | if query_key.lower() == key.lower(): 225 | dsn_param.append( 226 | '%(connection_key)s=%(value)s' % {'connection_key': key, 'value': url.query[query_key]}) 227 | url = url.difference_update_query([query_key]) 228 | break 229 | 230 | dsn = ';'.join(dsn_param) 231 | dsn += ';' 232 | return (dsn, url.username, '', '', ''), {} 233 | 234 | # Retrieves current schema for the specified connection object 235 | def _get_default_schema_name(self, connection): 236 | return self.normalize_name(connection.connection.get_current_schema()) 237 | 238 | # Checks if the DB_API driver error indicates an invalid connection 239 | def is_disconnect(self, ex, connection, cursor): 240 | if isinstance(ex, (self.dbapi.ProgrammingError, 241 | self.dbapi.OperationalError)): 242 | connection_errors = ('Connection is not active', 'connection is no longer active', 243 | 'Connection Resource cannot be found', 'SQL30081N', 244 | 'CLI0108E', 'CLI0106E', 'SQL1224N') 245 | for err_msg in connection_errors: 246 | if 
class AS400Dialect(DB2Dialect_ibm_db):
    """ibm_db dialect for Db2 on IBM i (AS/400).

    Identical to the LUW ibm_db dialect except that catalog reflection is
    performed through the AS/400 (QSYS2 / SYSIBM) system views.
    """
    _reflector_cls = AS400Reflector
See the License for the specific | 14 | # | language governing permissions and limitations under the License. | 15 | # +--------------------------------------------------------------------------+ 16 | # | Authors: Jaimy Azle, Rahul Priyadarshi | 17 | # | Contributors: Mike Bayer | 18 | # +--------------------------------------------------------------------------+ 19 | from sqlalchemy import util 20 | import urllib 21 | from sqlalchemy.connectors.pyodbc import PyODBCConnector 22 | from .base import _SelectLastRowIDMixin, DB2ExecutionContext, DB2Dialect 23 | from . import reflection as ibm_reflection 24 | 25 | class DB2ExecutionContext_pyodbc(DB2ExecutionContext): 26 | pass 27 | 28 | class DB2Dialect_pyodbc(PyODBCConnector, DB2Dialect): 29 | 30 | supports_unicode_statements = True 31 | supports_char_length = True 32 | supports_native_decimal = False 33 | supports_statement_cache = False 34 | 35 | execution_ctx_cls = DB2ExecutionContext_pyodbc 36 | 37 | pyodbc_driver_name = "IBM DB2 ODBC DRIVER" 38 | 39 | def create_connect_args(self, url): 40 | opts = url.translate_connect_args(username='user') 41 | opts.update(url.query) 42 | 43 | keys = opts 44 | query = url.query 45 | 46 | connect_args = {} 47 | for param in ('ansi', 'unicode_results', 'autocommit'): 48 | if param in keys: 49 | connect_args[param] = util.asbool(keys.pop(param)) 50 | 51 | if 'odbc_connect' in keys: 52 | connectors = [urllib.parse.unquote_plus(keys.pop('odbc_connect'))] 53 | else: 54 | dsn_connection = 'dsn' in keys or \ 55 | ('host' in keys and 'database' not in keys) 56 | if dsn_connection: 57 | connectors = ['dsn=%s' % (keys.pop('host', '') or \ 58 | keys.pop('dsn', ''))] 59 | else: 60 | port = '' 61 | if 'port' in keys and not 'port' in query: 62 | port = '%d' % int(keys.pop('port')) 63 | 64 | database = keys.pop('database', '') 65 | 66 | connectors = ["DRIVER={%s}" % 67 | keys.pop('driver', self.pyodbc_driver_name), 68 | 'hostname=%s;port=%s' % (keys.pop('host', ''), port), 69 | 'database=%s' % 
class AS400Dialect_pyodbc(PyODBCConnector, DB2Dialect):
    """pyodbc-based dialect for Db2 on IBM i (AS/400)."""

    supports_unicode_statements = True
    supports_sane_rowcount = False
    supports_sane_multi_rowcount = False
    # Was previously assigned twice (True, then False); the later False
    # always won, so keep only the effective value.
    supports_native_decimal = False
    supports_char_length = True

    # pyodbc_driver_name = "iSeries Access ODBC Driver"
    pyodbc_driver_name = "IBM i Access ODBC Driver"
    _reflector_cls = ibm_reflection.AS400Reflector

    def create_connect_args(self, url):
        """Translate a SQLAlchemy URL into pyodbc connection arguments.

        Returns ``[[connection_string], connect_args]`` where *connect_args*
        carries the pyodbc keyword options (ansi / unicode_results /
        autocommit).
        """
        opts = url.translate_connect_args(username='user')
        opts.update(url.query)

        keys = opts

        connect_args = {}
        for param in ('ansi', 'unicode_results', 'autocommit'):
            if param in keys:
                connect_args[param] = util.asbool(keys.pop(param))

        if 'odbc_connect' in keys:
            # Raw, URL-encoded ODBC connection string supplied verbatim.
            connectors = [urllib.parse.unquote_plus(keys.pop('odbc_connect'))]
        else:
            # An explicit dsn, or a host without a database, means a DSN entry.
            dsn_connection = 'dsn' in keys or \
                ('host' in keys and 'database' not in keys)
            if dsn_connection:
                connectors = ['dsn=%s' % (keys.pop('host', '') or
                                          keys.pop('dsn', ''))]
            else:
                connectors = ["DRIVER={%s}" % keys.pop('driver', self.pyodbc_driver_name),
                              'System=%s' % keys.pop('host', ''),
                              'DBQ=QGPL']
                connectors.append("PKG=QGPL/DEFAULT(IBM),2,0,1,0,512")
                db_name = keys.pop('database', '')
                if db_name:
                    connectors.append("DATABASE=%s" % db_name)

            user = keys.pop("user", None)
            if user:
                connectors.append("UID=%s" % user)
                connectors.append("PWD=%s" % keys.pop('password', ''))
            else:
                connectors.append("trusted_connection=yes")

            # if set to 'Yes', the ODBC layer will try to automagically convert
            # textual data from your database encoding to your client encoding
            # This should obviously be set to 'No' if you query a cp1253 encoded
            # database from a latin1 client...
            if 'odbc_autotranslate' in keys:
                connectors.append("AutoTranslate=%s" % keys.pop("odbc_autotranslate"))

            connectors.extend(['%s=%s' % (k, v) for k, v in keys.items()])
        return [[";".join(connectors)], connect_args]
class BaseReflector(object):
    """Shared identifier-normalization and schema helpers for the Db2
    catalog reflectors."""

    def __init__(self, dialect):
        self.dialect = dialect
        self.ischema_names = dialect.ischema_names
        self.identifier_preparer = dialect.identifier_preparer

    def normalize_name(self, name):
        """Lower-case an all-uppercase, unquoted identifier; otherwise
        return it unchanged. ``None`` passes through."""
        if name is not None:
            return name.lower() if name.upper() == name and \
                not self.identifier_preparer._requires_quotes(name.lower()) \
                else name
        return name

    def denormalize_name(self, name):
        """Upper-case an all-lowercase, unquoted identifier for catalog
        lookups, coercing the result to text."""
        if name is None:
            return None
        elif name.lower() == name and \
                not self.identifier_preparer._requires_quotes(name.lower()):
            name = name.upper()
        if not self.dialect.supports_unicode_binds:
            if not isinstance(name, str):
                # bytes on Python 3: decode to text for the bind.
                name = codecs.decode(name)
        else:
            if version_info[0] < 3:
                name = unicode(name)  # noqa: F821 -- Python 2 only
            else:
                name = str(name)
        return name

    def _get_default_schema_name(self, connection):
        """Return: current setting of the schema attribute, normalized."""
        default_schema_name = connection.execute(
            u'SELECT CURRENT_SCHEMA FROM SYSIBM.SYSDUMMY1').scalar()
        if isinstance(default_schema_name, str):
            default_schema_name = default_schema_name.strip()
        elif version_info[0] < 3:
            if isinstance(default_schema_name, unicode):  # noqa: F821
                default_schema_name = default_schema_name.strip().__str__()
        return self.normalize_name(default_schema_name)

    @property
    def default_schema_name(self):
        # Delegates to the dialect, which caches it at connect time.
        return self.dialect.default_schema_name
120 | schema="SYSCAT") 121 | 122 | sys_tabconst = Table("TABCONST", ischema, 123 | Column("TABSCHEMA", CoerceUnicode, key="tabschema"), 124 | Column("TABNAME", CoerceUnicode, key="tabname"), 125 | Column("CONSTNAME", CoerceUnicode, key="constname"), 126 | Column("TYPE", CoerceUnicode, key="type"), 127 | schema="SYSCAT") 128 | 129 | sys_keycoluse = Table("KEYCOLUSE", ischema, 130 | Column("TABSCHEMA", CoerceUnicode, key="tabschema"), 131 | Column("TABNAME", CoerceUnicode, key="tabname"), 132 | Column("CONSTNAME", CoerceUnicode, key="constname"), 133 | Column("COLNAME", CoerceUnicode, key="colname"), 134 | schema="SYSCAT") 135 | 136 | sys_foreignkeys = Table("SQLFOREIGNKEYS", ischema, 137 | Column("FK_NAME", CoerceUnicode, key="fkname"), 138 | Column("FKTABLE_SCHEM", CoerceUnicode, key="fktabschema"), 139 | Column("FKTABLE_NAME", CoerceUnicode, key="fktabname"), 140 | Column("FKCOLUMN_NAME", CoerceUnicode, key="fkcolname"), 141 | Column("PK_NAME", CoerceUnicode, key="pkname"), 142 | Column("PKTABLE_SCHEM", CoerceUnicode, key="pktabschema"), 143 | Column("PKTABLE_NAME", CoerceUnicode, key="pktabname"), 144 | Column("PKCOLUMN_NAME", CoerceUnicode, key="pkcolname"), 145 | Column("KEY_SEQ", sa_types.Integer, key="colno"), 146 | schema="SYSIBM") 147 | 148 | sys_columns = Table("COLUMNS", ischema, 149 | Column("TABSCHEMA", CoerceUnicode, key="tabschema"), 150 | Column("TABNAME", CoerceUnicode, key="tabname"), 151 | Column("COLNAME", CoerceUnicode, key="colname"), 152 | Column("COLNO", sa_types.Integer, key="colno"), 153 | Column("TYPENAME", CoerceUnicode, key="typename"), 154 | Column("LENGTH", sa_types.Integer, key="length"), 155 | Column("SCALE", sa_types.Integer, key="scale"), 156 | Column("DEFAULT", CoerceUnicode, key="defaultval"), 157 | Column("NULLS", CoerceUnicode, key="nullable"), 158 | Column("KEYSEQ", CoerceUnicode, key="keyseq"), 159 | Column("PARTKEYSEQ", CoerceUnicode, key="partkeyseq"), 160 | Column("IDENTITY", CoerceUnicode, key="identity"), 161 | 
Column("GENERATED", CoerceUnicode, key="generated"), 162 | Column("REMARKS", CoerceUnicode, key="remarks"), 163 | schema="SYSCAT") 164 | 165 | sys_views = Table("VIEWS", ischema, 166 | Column("VIEWSCHEMA", CoerceUnicode, key="viewschema"), 167 | Column("VIEWNAME", CoerceUnicode, key="viewname"), 168 | Column("TEXT", CoerceUnicode, key="text"), 169 | schema="SYSCAT") 170 | 171 | sys_sequences = Table("SEQUENCES", ischema, 172 | Column("SEQSCHEMA", CoerceUnicode, key="seqschema"), 173 | Column("SEQNAME", CoerceUnicode, key="seqname"), 174 | schema="SYSCAT") 175 | 176 | def has_table(self, connection, table_name, schema=None, **kw): 177 | current_schema = self.denormalize_name( 178 | schema or self.default_schema_name) 179 | if table_name.startswith("'") and table_name.endswith("'"): 180 | table_name = table_name.replace("'", "") 181 | table_name = self.normalize_name(table_name) 182 | else: 183 | table_name = self.denormalize_name(table_name) 184 | if current_schema: 185 | whereclause = sql.and_(self.sys_tables.c.tabschema == current_schema, 186 | self.sys_tables.c.tabname == table_name) 187 | else: 188 | whereclause = self.sys_tables.c.tabname == table_name 189 | s = sql.select(self.sys_tables.c.tabname).where(whereclause) 190 | c = connection.execute(s) 191 | return c.first() is not None 192 | 193 | def has_sequence(self, connection, sequence_name, schema=None): 194 | current_schema = self.denormalize_name(schema or self.default_schema_name) 195 | sequence_name = self.denormalize_name(sequence_name) 196 | if current_schema: 197 | whereclause = sql.and_(self.sys_sequences.c.seqschema == current_schema, 198 | self.sys_sequences.c.seqname == sequence_name) 199 | else: 200 | whereclause = self.sys_sequences.c.seqname == sequence_name 201 | s = sql.select(self.sys_sequences.c.seqname).where(whereclause) 202 | c = connection.execute(s) 203 | return c.first() is not None 204 | 205 | @reflection.cache 206 | def get_sequence_names(self, connection, schema=None, **kw): 207 | 
current_schema = self.denormalize_name(schema or self.default_schema_name) 208 | sys_sequence = self.sys_sequences 209 | query = sql.select(sys_sequence.c.seqname).\ 210 | where(sys_sequence.c.seqschema == current_schema).\ 211 | order_by(sys_sequence.c.seqschema, sys_sequence.c.seqname) 212 | return [self.normalize_name(r[0]) for r in connection.execute(query)] 213 | 214 | @reflection.cache 215 | def get_schema_names(self, connection, **kw): 216 | sysschema = self.sys_schemas 217 | query = sql.select(sysschema.c.schemaname).\ 218 | where(not_(sysschema.c.schemaname.like('SYS%'))).\ 219 | order_by(sysschema.c.schemaname) 220 | return [self.normalize_name(r[0]) for r in connection.execute(query)] 221 | 222 | @reflection.cache 223 | def get_table_names(self, connection, schema=None, **kw): 224 | current_schema = self.denormalize_name(schema or self.default_schema_name) 225 | systbl = self.sys_tables 226 | query = sql.select(systbl.c.tabname).\ 227 | where(systbl.c.type == 'T').\ 228 | where(systbl.c.tabschema == current_schema).\ 229 | order_by(systbl.c.tabname) 230 | return [self.normalize_name(r[0]) for r in connection.execute(query)] 231 | 232 | @reflection.cache 233 | def get_table_comment(self, connection, table_name, schema=None, **kw): 234 | current_schema = self.denormalize_name(schema or self.default_schema_name) 235 | table_name = self.denormalize_name(table_name) 236 | systbl = self.sys_tables 237 | query = sql.select(systbl.c.remarks).\ 238 | where(systbl.c.type == 'T').\ 239 | where(systbl.c.tabschema == current_schema).\ 240 | where(systbl.c.tabname == table_name) 241 | return {'text': connection.execute(query).scalar()} 242 | 243 | @reflection.cache 244 | def get_view_names(self, connection, schema=None, **kw): 245 | current_schema = self.denormalize_name(schema or self.default_schema_name) 246 | 247 | query = sql.select(self.sys_views.c.viewname).\ 248 | where(self.sys_views.c.viewschema == current_schema).\ 249 | order_by(self.sys_views.c.viewname) 
250 | 251 | return [self.normalize_name(r[0]) for r in connection.execute(query)] 252 | 253 | @reflection.cache 254 | def get_view_definition(self, connection, viewname, schema=None, **kw): 255 | current_schema = self.denormalize_name(schema or self.default_schema_name) 256 | viewname = self.denormalize_name(viewname) 257 | 258 | query = sql.select(self.sys_views.c.text).\ 259 | where(self.sys_views.c.viewschema == current_schema).\ 260 | where(self.sys_views.c.viewname == viewname) 261 | 262 | return connection.execute(query).scalar() 263 | 264 | @reflection.cache 265 | def get_columns(self, connection, table_name, schema=None, **kw): 266 | current_schema = self.denormalize_name(schema or self.default_schema_name) 267 | table_name = self.denormalize_name(table_name) 268 | syscols = self.sys_columns 269 | 270 | query = sql.select(syscols.c.colname, syscols.c.typename, 271 | syscols.c.defaultval, syscols.c.nullable, 272 | syscols.c.length, syscols.c.scale, 273 | syscols.c.identity, syscols.c.generated, 274 | syscols.c.remarks).\ 275 | where(and_( 276 | syscols.c.tabschema == current_schema, 277 | syscols.c.tabname == table_name)).\ 278 | order_by(syscols.c.colno) 279 | sa_columns = [] 280 | for r in connection.execute(query): 281 | coltype = r[1].upper() 282 | if coltype in ['DECIMAL', 'NUMERIC']: 283 | coltype = self.ischema_names.get(coltype)(int(r[4]), int(r[5])) 284 | elif coltype in ['CHARACTER', 'CHAR', 'VARCHAR', 285 | 'GRAPHIC', 'VARGRAPHIC']: 286 | coltype = self.ischema_names.get(coltype)(int(r[4])) 287 | else: 288 | try: 289 | coltype = self.ischema_names[coltype] 290 | except KeyError: 291 | util.warn("Did not recognize type '%s' of column '%s'" % 292 | (coltype, r[0])) 293 | coltype = coltype = sa_types.NULLTYPE 294 | 295 | sa_columns.append({ 296 | 'name': self.normalize_name(r[0]), 297 | 'type': coltype, 298 | 'nullable': r[3] == 'Y', 299 | 'default': r[2] or None, 300 | 'autoincrement': (r[6] == 'Y') and (r[7] != ' '), 301 | 'comment': r[8] or None, 
302 | }) 303 | return sa_columns 304 | 305 | @reflection.cache 306 | def get_pk_constraint(self, connection, table_name, schema=None, **kw): 307 | current_schema = self.denormalize_name(schema or self.default_schema_name) 308 | table_name = self.denormalize_name(table_name) 309 | sysindexes = self.sys_indexes 310 | col_finder = re.compile(r"(\w+)") 311 | query = sql.select(sysindexes.c.colnames, sysindexes.c.indname).\ 312 | where(and_(sysindexes.c.tabschema == current_schema, 313 | sysindexes.c.tabname == table_name, 314 | sysindexes.c.uniquerule == 'P')).\ 315 | order_by(sysindexes.c.tabschema, sysindexes.c.tabname) 316 | pk_columns = [] 317 | pk_name = None 318 | for r in connection.execute(query): 319 | cols = col_finder.findall(r[0]) 320 | pk_columns.extend(cols) 321 | if not pk_name: 322 | pk_name = self.normalize_name(r[1]) 323 | 324 | return {"constrained_columns": [self.normalize_name(col) for col in pk_columns], 325 | "name": pk_name} 326 | 327 | @reflection.cache 328 | def get_primary_keys(self, connection, table_name, schema=None, **kw): 329 | current_schema = self.denormalize_name(schema or self.default_schema_name) 330 | table_name = self.denormalize_name(table_name) 331 | syscols = self.sys_columns 332 | col_finder = re.compile(r"(\w+)") 333 | query = sql.select(syscols.c.colname).\ 334 | where(and_( 335 | syscols.c.tabschema == current_schema, 336 | syscols.c.tabname == table_name, 337 | syscols.c.keyseq > 0 338 | )).\ 339 | order_by(syscols.c.tabschema, syscols.c.tabname) 340 | pk_columns = [] 341 | for r in connection.execute(query): 342 | cols = col_finder.findall(r[0]) 343 | pk_columns.extend(cols) 344 | return [self.normalize_name(col) for col in pk_columns] 345 | 346 | @reflection.cache 347 | def get_foreign_keys(self, connection, table_name, schema=None, **kw): 348 | default_schema = self.default_schema_name 349 | current_schema = self.denormalize_name(schema or default_schema) 350 | default_schema = self.normalize_name(default_schema) 351 | 
table_name = self.denormalize_name(table_name) 352 | sysfkeys = self.sys_foreignkeys 353 | systbl = self.sys_tables 354 | query = sql.select(sysfkeys.c.fkname, sysfkeys.c.fktabschema, 355 | sysfkeys.c.fktabname, sysfkeys.c.fkcolname, 356 | sysfkeys.c.pkname, sysfkeys.c.pktabschema, 357 | sysfkeys.c.pktabname, sysfkeys.c.pkcolname).\ 358 | select_from( 359 | join(systbl, 360 | sysfkeys, 361 | sql.and_( 362 | systbl.c.tabname == sysfkeys.c.pktabname, 363 | systbl.c.tabschema == sysfkeys.c.pktabschema 364 | ) 365 | ) 366 | ).where(systbl.c.type == 'T').\ 367 | where(systbl.c.tabschema == current_schema).\ 368 | where(sysfkeys.c.fktabname == table_name).\ 369 | order_by(systbl.c.tabname) 370 | 371 | fschema = {} 372 | for r in connection.execute(query): 373 | if not (r[0]) in fschema: 374 | referred_schema = self.normalize_name(r[5]) 375 | 376 | # if no schema specified and referred schema here is the 377 | # default, then set to None 378 | if schema is None and \ 379 | referred_schema == default_schema: 380 | referred_schema = None 381 | 382 | fschema[r[0]] = { 383 | 'name': self.normalize_name(r[0]), 384 | 'constrained_columns': [self.normalize_name(r[3])], 385 | 'referred_schema': referred_schema, 386 | 'referred_table': self.normalize_name(r[6]), 387 | 'referred_columns': [self.normalize_name(r[7])]} 388 | else: 389 | fschema[r[0]]['constrained_columns'].append(self.normalize_name(r[3])) 390 | fschema[r[0]]['referred_columns'].append(self.normalize_name(r[7])) 391 | return [value for key, value in fschema.items()] 392 | 393 | @reflection.cache 394 | def get_incoming_foreign_keys(self, connection, table_name, schema=None, **kw): 395 | default_schema = self.default_schema_name 396 | current_schema = self.denormalize_name(schema or default_schema) 397 | default_schema = self.normalize_name(default_schema) 398 | table_name = self.denormalize_name(table_name) 399 | sysfkeys = self.sys_foreignkeys 400 | query = sql.select(sysfkeys.c.fkname, sysfkeys.c.fktabschema, 401 | 
sysfkeys.c.fktabname, sysfkeys.c.fkcolname, 402 | sysfkeys.c.pkname, sysfkeys.c.pktabschema, 403 | sysfkeys.c.pktabname, sysfkeys.c.pkcolname).\ 404 | where(and_( 405 | sysfkeys.c.pktabschema == current_schema, 406 | sysfkeys.c.pktabname == table_name 407 | )).\ 408 | order_by(sysfkeys.c.colno) 409 | 410 | fschema = {} 411 | for r in connection.execute(query): 412 | if not fschema.has_key(r[0]): 413 | constrained_schema = self.normalize_name(r[1]) 414 | 415 | # if no schema specified and referred schema here is the 416 | # default, then set to None 417 | if schema is None and \ 418 | constrained_schema == default_schema: 419 | constrained_schema = None 420 | 421 | fschema[r[0]] = { 422 | 'name': self.normalize_name(r[0]), 423 | 'constrained_schema': constrained_schema, 424 | 'constrained_table': self.normalize_name(r[2]), 425 | 'constrained_columns': [self.normalize_name(r[3])], 426 | 'referred_schema': schema, 427 | 'referred_table': self.normalize_name(r[6]), 428 | 'referred_columns': [self.normalize_name(r[7])]} 429 | else: 430 | fschema[r[0]]['constrained_columns'].append(self.normalize_name(r[3])) 431 | fschema[r[0]]['referred_columns'].append(self.normalize_name(r[7])) 432 | return [value for key, value in fschema.items()] 433 | 434 | @reflection.cache 435 | def get_indexes(self, connection, table_name, schema=None, **kw): 436 | current_schema = self.denormalize_name(schema or self.default_schema_name) 437 | table_name = self.denormalize_name(table_name) 438 | sysidx = self.sys_indexes 439 | query = sql.select(sysidx.c.indname, sysidx.c.colnames, 440 | sysidx.c.uniquerule, sysidx.c.system_required).\ 441 | where(and_(sysidx.c.tabschema == current_schema,sysidx.c.tabname == table_name)).\ 442 | order_by(sysidx.c.tabname) 443 | indexes = [] 444 | col_finder = re.compile(r"(\w+)") 445 | for r in connection.execute(query): 446 | if r[2] != 'P': 447 | if r[2] == 'U' and r[3] != 0: 448 | continue 449 | if 'sqlnotapplicable' in r[1].lower(): 450 | continue 451 | 
indexes.append({ 452 | 'name': self.normalize_name(r[0]), 453 | 'column_names': [self.normalize_name(col) 454 | for col in col_finder.findall(r[1])], 455 | 'unique': r[2] == 'U' 456 | }) 457 | return indexes 458 | 459 | @reflection.cache 460 | def get_unique_constraints(self, connection, table_name, schema=None, **kw): 461 | current_schema = self.denormalize_name(schema or self.default_schema_name) 462 | table_name = self.denormalize_name(table_name) 463 | syskeycol = self.sys_keycoluse 464 | sysconst = self.sys_tabconst 465 | query = ( 466 | sql.select(syskeycol.c.constname, syskeycol.c.colname) 467 | .select_from( 468 | join( 469 | syskeycol, 470 | sysconst, 471 | and_( 472 | syskeycol.c.constname == sysconst.c.constname, 473 | syskeycol.c.tabschema == sysconst.c.tabschema, 474 | syskeycol.c.tabname == sysconst.c.tabname, 475 | ), 476 | ) 477 | ) 478 | .where( 479 | and_( 480 | sysconst.c.tabname == table_name, 481 | sysconst.c.tabschema == current_schema, 482 | sysconst.c.type == "U", 483 | ) 484 | ) 485 | .order_by(syskeycol.c.constname) 486 | ) 487 | uniqueConsts = [] 488 | currConst = None 489 | for r in connection.execute(query): 490 | if currConst == r[0]: 491 | uniqueConsts[-1]["column_names"].append(self.normalize_name(r[1])) 492 | else: 493 | currConst = r[0] 494 | uniqueConsts.append( 495 | { 496 | "name": self.normalize_name(currConst), 497 | "column_names": [self.normalize_name(r[1])], 498 | } 499 | ) 500 | return uniqueConsts 501 | 502 | 503 | class AS400Reflector(BaseReflector): 504 | 505 | ischema = MetaData() 506 | 507 | sys_schemas = Table("SQLSCHEMAS", ischema, 508 | Column("TABLE_SCHEM", CoerceUnicode, key="schemaname"), 509 | schema="SYSIBM") 510 | 511 | sys_tables = Table("SYSTABLES", ischema, 512 | Column("TABLE_SCHEMA", CoerceUnicode, key="tabschema"), 513 | Column("TABLE_NAME", CoerceUnicode, key="tabname"), 514 | Column("TABLE_TYPE", CoerceUnicode, key="tabtype"), 515 | schema="QSYS2") 516 | 517 | sys_table_constraints = Table("SYSCST", 
ischema, 518 | Column("CONSTRAINT_SCHEMA", CoerceUnicode, key="conschema"), 519 | Column("CONSTRAINT_NAME", CoerceUnicode, key="conname"), 520 | Column("CONSTRAINT_TYPE", CoerceUnicode, key="contype"), 521 | Column("TABLE_SCHEMA", CoerceUnicode, key="tabschema"), 522 | Column("TABLE_NAME", CoerceUnicode, key="tabname"), 523 | Column("TABLE_TYPE", CoerceUnicode, key="tabtype"), 524 | schema="QSYS2") 525 | 526 | sys_key_constraints = Table("SYSKEYCST", ischema, 527 | Column("CONSTRAINT_SCHEMA", CoerceUnicode, key="conschema"), 528 | Column("CONSTRAINT_NAME", CoerceUnicode, key="conname"), 529 | Column("TABLE_SCHEMA", CoerceUnicode, key="tabschema"), 530 | Column("TABLE_NAME", CoerceUnicode, key="tabname"), 531 | Column("COLUMN_NAME", CoerceUnicode, key="colname"), 532 | Column("ORDINAL_POSITION", sa_types.Integer, key="colno"), 533 | schema="QSYS2") 534 | 535 | sys_columns = Table("SYSCOLUMNS", ischema, 536 | Column("TABLE_SCHEMA", CoerceUnicode, key="tabschema"), 537 | Column("TABLE_NAME", CoerceUnicode, key="tabname"), 538 | Column("COLUMN_NAME", CoerceUnicode, key="colname"), 539 | Column("ORDINAL_POSITION", sa_types.Integer, key="colno"), 540 | Column("DATA_TYPE", CoerceUnicode, key="typename"), 541 | Column("LENGTH", sa_types.Integer, key="length"), 542 | Column("NUMERIC_SCALE", sa_types.Integer, key="scale"), 543 | Column("IS_NULLABLE", sa_types.Integer, key="nullable"), 544 | Column("COLUMN_DEFAULT", CoerceUnicode, key="defaultval"), 545 | Column("HAS_DEFAULT", CoerceUnicode, key="hasdef"), 546 | Column("IS_IDENTITY", CoerceUnicode, key="isid"), 547 | Column("IDENTITY_GENERATION", CoerceUnicode, key="idgenerate"), 548 | Column("LONG_COMMENT", CoerceUnicode, key="remark"), 549 | schema="QSYS2") 550 | 551 | sys_indexes = Table("SYSINDEXES", ischema, 552 | Column("TABLE_SCHEMA", CoerceUnicode, key="tabschema"), 553 | Column("TABLE_NAME", CoerceUnicode, key="tabname"), 554 | Column("INDEX_SCHEMA", CoerceUnicode, key="indschema"), 555 | Column("INDEX_NAME", 
CoerceUnicode, key="indname"), 556 | Column("IS_UNIQUE", CoerceUnicode, key="uniquerule"), 557 | schema="QSYS2") 558 | 559 | sys_keys = Table("SYSKEYS", ischema, 560 | Column("INDEX_SCHEMA", CoerceUnicode, key="indschema"), 561 | Column("INDEX_NAME", CoerceUnicode, key="indname"), 562 | Column("COLUMN_NAME", CoerceUnicode, key="colname"), 563 | Column("ORDINAL_POSITION", sa_types.Integer, key="colno"), 564 | Column("ORDERING", CoerceUnicode, key="ordering"), 565 | schema="QSYS2") 566 | 567 | sys_foreignkeys = Table("SQLFOREIGNKEYS", ischema, 568 | Column("FK_NAME", CoerceUnicode, key="fkname"), 569 | Column("FKTABLE_SCHEM", CoerceUnicode, key="fktabschema"), 570 | Column("FKTABLE_NAME", CoerceUnicode, key="fktabname"), 571 | Column("FKCOLUMN_NAME", CoerceUnicode, key="fkcolname"), 572 | Column("PK_NAME", CoerceUnicode, key="pkname"), 573 | Column("PKTABLE_SCHEM", CoerceUnicode, key="pktabschema"), 574 | Column("PKTABLE_NAME", CoerceUnicode, key="pktabname"), 575 | Column("PKCOLUMN_NAME", CoerceUnicode, key="pkcolname"), 576 | Column("KEY_SEQ", sa_types.Integer, key="colno"), 577 | schema="SYSIBM") 578 | 579 | sys_views = Table("SYSVIEWS", ischema, 580 | Column("TABLE_SCHEMA", CoerceUnicode, key="viewschema"), 581 | Column("TABLE_NAME", CoerceUnicode, key="viewname"), 582 | Column("VIEW_DEFINITION", CoerceUnicode, key="text"), 583 | schema="QSYS2") 584 | 585 | sys_sequences = Table("SYSSEQUENCES", ischema, 586 | Column("SEQUENCE_SCHEMA", CoerceUnicode, key="seqschema"), 587 | Column("SEQUENCE_NAME", CoerceUnicode, key="seqname"), 588 | schema="QSYS2") 589 | 590 | def has_table(self, connection, table_name, schema=None, **kw): 591 | current_schema = self.denormalize_name( 592 | schema or self.default_schema_name) 593 | table_name = self.denormalize_name(table_name) 594 | if current_schema: 595 | whereclause = sql.and_( 596 | self.sys_tables.c.tabschema == current_schema, 597 | self.sys_tables.c.tabname == table_name) 598 | else: 599 | whereclause = 
self.sys_tables.c.tabname == table_name 600 | s = sql.select(self.sys_tables).where(whereclause) 601 | c = connection.execute(s) 602 | return c.first() is not None 603 | 604 | def has_sequence(self, connection, sequence_name, schema=None): 605 | current_schema = self.denormalize_name( 606 | schema or self.default_schema_name) 607 | sequence_name = self.denormalize_name(sequence_name) 608 | if current_schema: 609 | whereclause = sql.and_( 610 | self.sys_sequences.c.seqschema == current_schema, 611 | self.sys_sequences.c.seqname == sequence_name) 612 | else: 613 | whereclause = self.sys_sequences.c.seqname == sequence_name 614 | s = sql.select(self.sys_sequences.c.seqname).where(whereclause) 615 | c = connection.execute(s) 616 | return c.first() is not None 617 | 618 | def get_table_comment(self, connection, table_name, schema=None, **kw): 619 | raise NotImplementedError() 620 | 621 | @reflection.cache 622 | def get_sequence_names(self, connection, schema=None, **kw): 623 | current_schema = self.denormalize_name(schema or self.default_schema_name) 624 | sys_sequence = self.sys_sequences 625 | query = sql.select(sys_sequence.c.seqname).\ 626 | where(sys_sequence.c.seqschema == current_schema).\ 627 | order_by(sys_sequence.c.seqschema, sys_sequence.c.seqname) 628 | return [self.normalize_name(r[0]) for r in connection.execute(query)] 629 | 630 | @reflection.cache 631 | def get_schema_names(self, connection, **kw): 632 | sysschema = self.sys_schemas 633 | if version_info[0] < 3: 634 | query = sql.select(sysschema.c.schemaname). \ 635 | where(~sysschema.c.schemaname.like(unicode('Q%'))). \ 636 | where(~sysschema.c.schemaname.like(unicode('SYS%'))). \ 637 | order_by(sysschema.c.schemaname) 638 | else: 639 | query = sql.select(sysschema.c.schemaname). \ 640 | where(~sysschema.c.schemaname.like(str('Q%'))). \ 641 | where(~sysschema.c.schemaname.like(str('SYS%'))). 
\ 642 | order_by(sysschema.c.schemaname) 643 | return [self.normalize_name(r[0]) for r in connection.execute(query)] 644 | 645 | # Retrieves a list of table names for a given schema 646 | @reflection.cache 647 | def get_table_names(self, connection, schema=None, **kw): 648 | current_schema = self.denormalize_name(schema or self.default_schema_name) 649 | systbl = self.sys_tables 650 | if version_info[0] < 3: 651 | query = not sql.select(systbl.c.tabname). \ 652 | where(systbl.c.tabtype == unicode('T')). \ 653 | where(systbl.c.tabschema == current_schema). \ 654 | order_by(systbl.c.tabname) 655 | else: 656 | query = not sql.select(systbl.c.tabname). \ 657 | where(systbl.c.tabtype == str('T')). \ 658 | where(systbl.c.tabschema == current_schema). \ 659 | order_by(systbl.c.tabname) 660 | return [self.normalize_name(r[0]) for r in connection.execute(query)] 661 | 662 | @reflection.cache 663 | def get_view_names(self, connection, schema=None, **kw): 664 | current_schema = self.denormalize_name( 665 | schema or self.default_schema_name) 666 | 667 | query = sql.select(self.sys_views.c.viewname).\ 668 | where(self.sys_views.c.viewschema == current_schema).\ 669 | order_by(self.sys_views.c.viewname) 670 | return [self.normalize_name(r[0]) for r in connection.execute(query)] 671 | 672 | @reflection.cache 673 | def get_view_definition(self, connection, viewname, schema=None, **kw): 674 | current_schema = self.denormalize_name( 675 | schema or self.default_schema_name) 676 | viewname = self.denormalize_name(viewname) 677 | 678 | query = sql.select(self.sys_views.c.text).\ 679 | where(self.sys_views.c.viewschema == current_schema).\ 680 | where(self.sys_views.c.viewname == viewname) 681 | return connection.execute(query).scalar() 682 | 683 | @reflection.cache 684 | def get_columns(self, connection, table_name, schema=None, **kw): 685 | current_schema = self.denormalize_name(schema or self.default_schema_name) 686 | table_name = self.denormalize_name(table_name) 687 | syscols = 
self.sys_columns 688 | 689 | query = sql.select(syscols.c.colname,syscols.c.typename, 690 | syscols.c.defaultval, syscols.c.nullable, 691 | syscols.c.length, syscols.c.scale, 692 | syscols.c.isid, syscols.c.idgenerate, 693 | syscols.c.remark).\ 694 | where(and_( 695 | syscols.c.tabschema == current_schema, 696 | syscols.c.tabname == table_name)).\ 697 | order_by(syscols.c.colno) 698 | sa_columns = [] 699 | for r in connection.execute(query): 700 | coltype = r[1].upper() 701 | if coltype in ['DECIMAL', 'NUMERIC']: 702 | coltype = self.ischema_names.get(coltype)(int(r[4]), int(r[5])) 703 | elif coltype in ['CHARACTER', 'CHAR', 'VARCHAR', 704 | 'GRAPHIC', 'VARGRAPHIC']: 705 | coltype = self.ischema_names.get(coltype)(int(r[4])) 706 | else: 707 | try: 708 | coltype = self.ischema_names[coltype] 709 | except KeyError: 710 | util.warn("Did not recognize type '%s' of column '%s'" % 711 | (coltype, r[0])) 712 | coltype = coltype = sa_types.NULLTYPE 713 | 714 | if version_info[0] < 3: 715 | sa_columns.append({ 716 | 'name': self.normalize_name(r[0]), 717 | 'type': coltype, 718 | 'nullable': r[3] == unicode('Y'), 719 | 'default': r[2], 720 | 'autoincrement': (r[6] == unicode('YES')) and (r[7] != None), 721 | 'comment': r[8] or None, 722 | }) 723 | else: 724 | sa_columns.append({ 725 | 'name': self.normalize_name(r[0]), 726 | 'type': coltype, 727 | 'nullable': r[3] == str('Y'), 728 | 'default': r[2], 729 | 'autoincrement': (r[6] == str('YES')) and (r[7] != None), 730 | 'comment': r[8] or None, 731 | }) 732 | return sa_columns 733 | 734 | @reflection.cache 735 | def get_pk_constraint(self, connection, table_name, schema=None, **kw): 736 | current_schema = self.denormalize_name( 737 | schema or self.default_schema_name) 738 | table_name = self.denormalize_name(table_name) 739 | sysconst = self.sys_table_constraints 740 | syskeyconst = self.sys_key_constraints 741 | 742 | query = sql.select(syskeyconst.c.colname, sysconst.c.tabname, sysconst.c.conname).\ 743 | where(and_( 744 | 
syskeyconst.c.conschema == sysconst.c.conschema, 745 | syskeyconst.c.conname == sysconst.c.conname, 746 | sysconst.c.tabschema == current_schema, 747 | sysconst.c.tabname == table_name, 748 | sysconst.c.contype == 'PRIMARY KEY')).\ 749 | order_by(syskeyconst.c.colno) 750 | 751 | pk_columns = [] 752 | pk_name = None 753 | for key in connection.execute(query): 754 | pk_columns.append(self.normalize_name(key[0])) 755 | if not pk_name: 756 | pk_name = self.normalize_name(key[2]) 757 | return {"constrained_columns": pk_columns, "name": pk_name} 758 | 759 | @reflection.cache 760 | def get_primary_keys(self, connection, table_name, schema=None, **kw): 761 | current_schema = self.denormalize_name( 762 | schema or self.default_schema_name) 763 | table_name = self.denormalize_name(table_name) 764 | sysconst = self.sys_table_constraints 765 | syskeyconst = self.sys_key_constraints 766 | 767 | if version_info[0] < 3: 768 | query = sql.select(syskeyconst.c.colname, sysconst.c.tabname). \ 769 | where(and_( 770 | syskeyconst.c.conschema == sysconst.c.conschema, 771 | syskeyconst.c.conname == sysconst.c.conname, 772 | sysconst.c.tabschema == current_schema, 773 | sysconst.c.tabname == table_name, 774 | sysconst.c.contype == unicode('PRIMARY KEY'))). \ 775 | order_by(syskeyconst.c.colno) 776 | else: 777 | query = sql.select(syskeyconst.c.colname, sysconst.c.tabname). \ 778 | where(and_( 779 | syskeyconst.c.conschema == sysconst.c.conschema, 780 | syskeyconst.c.conname == sysconst.c.conname, 781 | sysconst.c.tabschema == current_schema, 782 | sysconst.c.tabname == table_name, 783 | sysconst.c.contype == str('PRIMARY KEY'))). 
\ 784 | order_by(syskeyconst.c.colno) 785 | 786 | return [self.normalize_name(key[0]) 787 | for key in connection.execute(query)] 788 | 789 | @reflection.cache 790 | def get_foreign_keys(self, connection, table_name, schema=None, **kw): 791 | default_schema = self.default_schema_name 792 | current_schema = self.denormalize_name(schema or default_schema) 793 | default_schema = self.normalize_name(default_schema) 794 | table_name = self.denormalize_name(table_name) 795 | sysfkeys = self.sys_foreignkeys 796 | query = sql.select(sysfkeys.c.fkname, sysfkeys.c.fktabschema, 797 | sysfkeys.c.fktabname, sysfkeys.c.fkcolname, 798 | sysfkeys.c.pkname, sysfkeys.c.pktabschema, 799 | sysfkeys.c.pktabname, sysfkeys.c.pkcolname).\ 800 | where(and_( 801 | sysfkeys.c.fktabschema == current_schema, 802 | sysfkeys.c.fktabname == table_name)).\ 803 | order_by(sysfkeys.c.colno) 804 | fschema = {} 805 | for r in connection.execute(query): 806 | if r[0] not in fschema: 807 | referred_schema = self.normalize_name(r[5]) 808 | 809 | # if no schema specified and referred schema here is the 810 | # default, then set to None 811 | if schema is None and \ 812 | referred_schema == default_schema: 813 | referred_schema = None 814 | 815 | fschema[r[0]] = {'name': self.normalize_name(r[0]), 816 | 'constrained_columns': [self.normalize_name(r[3])], 817 | 'referred_schema': referred_schema, 818 | 'referred_table': self.normalize_name(r[6]), 819 | 'referred_columns': [self.normalize_name(r[7])]} 820 | else: 821 | fschema[r[0]]['constrained_columns'].append(self.normalize_name(r[3])) 822 | fschema[r[0]]['referred_columns'].append(self.normalize_name(r[7])) 823 | return [value for key, value in fschema.items()] 824 | 825 | # Retrieves a list of index names for a given schema 826 | @reflection.cache 827 | def get_indexes(self, connection, table_name, schema=None, **kw): 828 | current_schema = self.denormalize_name( 829 | schema or self.default_schema_name) 830 | table_name = 
self.denormalize_name(table_name) 831 | 832 | sysidx = self.sys_indexes 833 | syskey = self.sys_keys 834 | 835 | query = sql.select(sysidx.c.indname,sysidx.c.uniquerule, 836 | syskey.c.colname).\ 837 | where(and_( 838 | syskey.c.indschema == sysidx.c.indschema, 839 | syskey.c.indname == sysidx.c.indname, 840 | sysidx.c.tabschema == current_schema, 841 | sysidx.c.tabname == table_name)).\ 842 | order_by(syskey.c.indname, syskey.c.colno) 843 | indexes = {} 844 | for r in connection.execute(query): 845 | key = r[0].upper() 846 | if key in indexes: 847 | indexes[key]['column_names'].append(self.normalize_name(r[2])) 848 | else: 849 | if version_info[0] < 3: 850 | indexes[key] = { 851 | 'name': self.normalize_name(r[0]), 852 | 'column_names': [self.normalize_name(r[2])], 853 | 'unique': r[1] == unicode('Y') 854 | } 855 | else: 856 | indexes[key] = { 857 | 'name': self.normalize_name(r[0]), 858 | 'column_names': [self.normalize_name(r[2])], 859 | 'unique': r[1] == str('Y') 860 | } 861 | return [value for key, value in indexes.items()] 862 | 863 | @reflection.cache 864 | def get_unique_constraints(self, connection, table_name, schema=None, **kw): 865 | uniqueConsts = [] 866 | return uniqueConsts 867 | 868 | 869 | class OS390Reflector(BaseReflector): 870 | ischema = MetaData() 871 | 872 | sys_schemas = Table("SYSSCHEMAAUTH", ischema, 873 | Column("SCHEMANAME", CoerceUnicode, key="schemaname"), 874 | Column("GRANTEE", CoerceUnicode, key="owner"), 875 | Column("GRANTEETYPE", CoerceUnicode, key="ownertype"), 876 | Column("GRANTOR", CoerceUnicode, key="definer"), 877 | Column("GRANTORTYPE", CoerceUnicode, key="definertype"), 878 | schema="SYSIBM") 879 | 880 | sys_tables = Table("SYSTABLES", ischema, 881 | Column("CREATOR", CoerceUnicode, key="tabschema"), 882 | Column("NAME", CoerceUnicode, key="tabname"), 883 | Column("OWNER", CoerceUnicode, key="owner"), 884 | Column("OWNERTYPE", CoerceUnicode, key="ownertype"), 885 | Column("TYPE", CoerceUnicode, key="type"), 886 | 
Column("STATUS", CoerceUnicode, key="status"), 887 | schema="SYSIBM") 888 | 889 | sys_indexes = Table("SYSINDEXES", ischema, 890 | Column("CREATOR", CoerceUnicode, key="tabschema"), 891 | Column("TBNAME", CoerceUnicode, key="tabname"), 892 | Column("NAME", CoerceUnicode, key="indname"), 893 | Column("UNIQUERULE", CoerceUnicode, key="uniquerule"), 894 | Column("IBMREQD", sa_types.SMALLINT, key="system_required"), 895 | schema="SYSIBM") 896 | 897 | sys_tabconst = Table("SYSTABCONST", ischema, 898 | Column("TBCREATOR", CoerceUnicode, key="tabschema"), 899 | Column("TBNAME", CoerceUnicode, key="tabname"), 900 | Column("CONSTNAME", CoerceUnicode, key="constname"), 901 | Column("TYPE", CoerceUnicode, key="type"), 902 | schema="SYSIBM") 903 | 904 | sys_keycoluse = Table("SYSKEYCOLUSE", ischema, 905 | Column("TBCREATOR", CoerceUnicode, key="tabschema"), 906 | Column("TBNAME", CoerceUnicode, key="tabname"), 907 | Column("CONSTNAME", CoerceUnicode, key="constname"), 908 | Column("COLNAME", CoerceUnicode, key="colname"), 909 | schema="SYSIBM") 910 | 911 | sys_rels = Table("SYSRELS", ischema, 912 | Column("CREATOR", CoerceUnicode, key="fktabschema"), 913 | Column("TBNAME", CoerceUnicode, key="fktabname"), 914 | Column("RELNAME", CoerceUnicode, key="fkname"), 915 | Column("REFTBNAME", CoerceUnicode, key="pktabname"), 916 | Column("REFTBCREATOR", CoerceUnicode, key="pktabschema"), 917 | schema="SYSIBM") 918 | 919 | sys_foreignkeys = Table("SYSFOREIGNKEYS", ischema, 920 | Column("CREATOR", CoerceUnicode, key="fktabschema"), 921 | Column("TBNAME", CoerceUnicode, key="fktabname"), 922 | Column("RELNAME", CoerceUnicode, key="fkname"), 923 | Column("COLNAME", CoerceUnicode, key="fkcolname"), 924 | Column("COLSEQ", sa_types.Integer, key="colno"), 925 | schema="SYSIBM") 926 | 927 | sys_columns = Table("SYSCOLUMNS", ischema, 928 | Column("TBCREATOR", CoerceUnicode, key="tabschema"), 929 | Column("TBNAME", CoerceUnicode, key="tabname"), 930 | Column("NAME", CoerceUnicode, key="colname"), 
931 | Column("COLNO", sa_types.Integer, key="colno"), 932 | Column("TYPENAME", CoerceUnicode, key="typename"), 933 | Column("LENGTH", sa_types.Integer, key="length"), 934 | Column("SCALE", sa_types.Integer, key="scale"), 935 | Column("DEFAULT", CoerceUnicode, key="defaultval"), 936 | Column("NULLS", CoerceUnicode, key="nullable"), 937 | Column("GENERATED_ATTR", CoerceUnicode, key="generated"), 938 | Column("KEYSEQ", sa_types.Integer, key="keyseq"), 939 | Column("REMARKS", sa_types.Integer, key="remark"), 940 | schema="SYSIBM") 941 | 942 | sys_views = Table("SYSVIEWS", ischema, 943 | Column("CREATOR", CoerceUnicode, key="viewschema"), 944 | Column("NAME", CoerceUnicode, key="viewname"), 945 | Column("STATEMENT", CoerceUnicode, key="text"), 946 | schema="SYSIBM") 947 | 948 | sys_sequences = Table("SYSSEQUENCES", ischema, 949 | Column("SCHEMA", CoerceUnicode, key="seqschema"), 950 | Column("NAME", CoerceUnicode, key="seqname"), 951 | schema="SYSIBM") 952 | 953 | def has_table(self, connection, table_name, schema=None, **kw): 954 | current_schema = self.denormalize_name( 955 | schema or self.default_schema_name) 956 | table_name = self.denormalize_name(table_name) 957 | if current_schema: 958 | whereclause = sql.and_(self.sys_tables.c.tabschema == current_schema, 959 | self.sys_tables.c.tabname == table_name) 960 | else: 961 | whereclause = self.sys_tables.c.tabname == table_name 962 | s = sql.select(self.sys_tables.c.tabname).where(whereclause) 963 | c = connection.execute(s) 964 | return c.first() is not None 965 | 966 | def has_sequence(self, connection, sequence_name, schema=None): 967 | current_schema = self.denormalize_name(schema or self.default_schema_name) 968 | sequence_name = self.denormalize_name(sequence_name) 969 | if current_schema: 970 | whereclause = sql.and_(self.sys_sequences.c.seqschema == current_schema, 971 | self.sys_sequences.c.seqname == sequence_name) 972 | else: 973 | whereclause = self.sys_sequences.c.seqname == sequence_name 974 | s = 
sql.select(self.sys_sequences.c.seqname).where(whereclause) 975 | c = connection.execute(s) 976 | return c.first() is not None 977 | 978 | @reflection.cache 979 | def get_sequence_names(self, connection, schema=None, **kw): 980 | current_schema = self.denormalize_name(schema or self.default_schema_name) 981 | sys_sequence = self.sys_sequences 982 | query = sql.select(sys_sequence.c.seqname).\ 983 | where(sys_sequence.c.seqschema == current_schema).\ 984 | order_by(sys_sequence.c.seqschema, sys_sequence.c.seqname) 985 | return [self.normalize_name(r[0]) for r in connection.execute(query)] 986 | 987 | @reflection.cache 988 | def get_schema_names(self, connection, **kw): 989 | sysschema = self.sys_tables 990 | query = sql.select(sysschema.c.tabschema).\ 991 | where(not_(sysschema.c.tabschema.like('SYS%'))).\ 992 | distinct(sysschema.c.tabschema) 993 | return [self.normalize_name(r[0]) for r in connection.execute(query)] 994 | 995 | def get_table_comment(self, connection, table_name, schema=None, **kw): 996 | raise NotImplementedError() 997 | 998 | @reflection.cache 999 | def get_table_names(self, connection, schema=None, **kw): 1000 | current_schema = self.denormalize_name(schema or self.default_schema_name) 1001 | systbl = self.sys_tables 1002 | query = sql.select(systbl.c.tabname).\ 1003 | where(systbl.c.type == 'T').\ 1004 | where(systbl.c.tabschema == current_schema).\ 1005 | order_by(systbl.c.tabname) 1006 | return [self.normalize_name(r[0]) for r in connection.execute(query)] 1007 | 1008 | @reflection.cache 1009 | def get_view_names(self, connection, schema=None, **kw): 1010 | current_schema = self.denormalize_name(schema or self.default_schema_name) 1011 | 1012 | query = sql.select(self.sys_views.c.viewname).\ 1013 | where(self.sys_views.c.viewschema == current_schema).\ 1014 | order_by(self.sys_views.c.viewname) 1015 | 1016 | return [self.normalize_name(r[0]) for r in connection.execute(query)] 1017 | 1018 | @reflection.cache 1019 | def 
get_view_definition(self, connection, viewname, schema=None, **kw): 1020 | current_schema = self.denormalize_name(schema or self.default_schema_name) 1021 | viewname = self.denormalize_name(viewname) 1022 | 1023 | query = sql.select(self.sys_views.c.text).\ 1024 | where(self.sys_views.c.viewschema == current_schema).\ 1025 | where(self.sys_views.c.viewname == viewname) 1026 | 1027 | return connection.execute(query).scalar() 1028 | 1029 | @reflection.cache 1030 | def get_columns(self, connection, table_name, schema=None, **kw): 1031 | current_schema = self.denormalize_name(schema or self.default_schema_name) 1032 | table_name = self.denormalize_name(table_name) 1033 | syscols = self.sys_columns 1034 | 1035 | query = sql.select(syscols.c.colname, syscols.c.typename, 1036 | syscols.c.defaultval, syscols.c.nullable, 1037 | syscols.c.length, syscols.c.scale, 1038 | syscols.c.generated, syscols.c.remark).\ 1039 | where(and_( 1040 | syscols.c.tabschema == current_schema, 1041 | syscols.c.tabname == table_name)).\ 1042 | order_by(syscols.c.colno) 1043 | sa_columns = [] 1044 | for r in connection.execute(query): 1045 | coltype = r[1].upper() 1046 | if coltype in ['DECIMAL', 'NUMERIC']: 1047 | coltype = self.ischema_names.get(coltype)(int(r[4]), int(r[5])) 1048 | elif coltype in ['CHARACTER', 'CHAR', 'VARCHAR', 1049 | 'GRAPHIC', 'VARGRAPHIC']: 1050 | coltype = self.ischema_names.get(coltype)(int(r[4])) 1051 | else: 1052 | try: 1053 | coltype = self.ischema_names[coltype] 1054 | except KeyError: 1055 | util.warn("Did not recognize type '%s' of column '%s'" % 1056 | (coltype, r[0])) 1057 | coltype = coltype = sa_types.NULLTYPE 1058 | 1059 | sa_columns.append({ 1060 | 'name': self.normalize_name(r[0]), 1061 | 'type': coltype, 1062 | 'nullable': r[3] == 'Y', 1063 | 'default': r[2] or None, 1064 | 'autoincrement': (r[2] == 'J') and (r[2] != ' ') , 1065 | 'comment': r[7] or None, 1066 | }) 1067 | return sa_columns 1068 | 1069 | @reflection.cache 1070 | def get_pk_constraint(self, 
connection, table_name, schema=None, **kw): 1071 | current_schema = self.denormalize_name(schema or self.default_schema_name) 1072 | table_name = self.denormalize_name(table_name) 1073 | sysindexes = self.sys_columns 1074 | col_finder = re.compile(r"(\w+)") 1075 | query = sql.select(sysindexes.c.colname).\ 1076 | where(and_( 1077 | sysindexes.c.tabschema == current_schema, 1078 | sysindexes.c.tabname == table_name, 1079 | sysindexes.c.keyseq > 0)).\ 1080 | order_by(sysindexes.c.tabschema, sysindexes.c.tabname) 1081 | pk_columns = [] 1082 | for r in connection.execute(query): 1083 | cols = col_finder.findall(r[0]) 1084 | pk_columns.extend(cols) 1085 | return {"constrained_columns": [self.normalize_name(col) for col in pk_columns], "name": None} 1086 | 1087 | @reflection.cache 1088 | def get_primary_keys(self, connection, table_name, schema=None, **kw): 1089 | current_schema = self.denormalize_name(schema or self.default_schema_name) 1090 | table_name = self.denormalize_name(table_name) 1091 | sysindexes = self.sys_columns 1092 | col_finder = re.compile(r"(\w+)") 1093 | query = sql.select(sysindexes.c.colname).\ 1094 | where(and_( 1095 | sysindexes.c.tabschema == current_schema, 1096 | sysindexes.c.tabname == table_name, 1097 | sysindexes.c.keyseq > 0)).\ 1098 | order_by(sysindexes.c.tabschema, sysindexes.c.tabname) 1099 | pk_columns = [] 1100 | for r in connection.execute(query): 1101 | cols = col_finder.findall(r[0]) 1102 | pk_columns.extend(cols) 1103 | return [self.normalize_name(col) for col in pk_columns] 1104 | 1105 | @reflection.cache 1106 | def get_foreign_keys(self, connection, table_name, schema=None, **kw): 1107 | default_schema = self.default_schema_name 1108 | current_schema = self.denormalize_name(schema or default_schema) 1109 | default_schema = self.normalize_name(default_schema) 1110 | table_name = self.denormalize_name(table_name) 1111 | sysfkeys = self.sys_foreignkeys 1112 | sysrels = self.sys_rels 1113 | syscolspk = self.sys_columns 1114 | 
sysindex = self.sys_indexes 1115 | query = sql.select(sysrels.c.fkname, sysrels.c.fktabschema, 1116 | sysrels.c.fktabname, sysfkeys.c.fkcolname, 1117 | sysindex.c.indname, sysrels.c.pktabschema, 1118 | sysrels.c.pktabname, syscolspk.c.colname).\ 1119 | where(and_( 1120 | sysrels.c.fktabschema == current_schema, 1121 | sysrels.c.fktabname == table_name, 1122 | sysrels.c.fktabname == sysfkeys.c.fktabname, 1123 | sysrels.c.pktabname == syscolspk.c.tabname, 1124 | syscolspk.c.tabname == sysindex.c.tabname,syscolspk.c.keyseq > 0)).\ 1125 | order_by(sysfkeys.c.colno) 1126 | 1127 | fschema = {} 1128 | for r in connection.execute(query): 1129 | if not (r[0]) in fschema: 1130 | referred_schema = self.normalize_name(r[5]) 1131 | 1132 | # if no schema specified and referred schema here is the 1133 | # default, then set to None 1134 | if schema is None and \ 1135 | referred_schema == default_schema: 1136 | referred_schema = None 1137 | 1138 | fschema[r[0]] = { 1139 | 'name': self.normalize_name(r[0]), 1140 | 'constrained_columns': [self.normalize_name(r[3])], 1141 | 'referred_schema': referred_schema, 1142 | 'referred_table': self.normalize_name(r[6]), 1143 | 'referred_columns': [self.normalize_name(r[7])]} 1144 | else: 1145 | fschema[r[0]]['constrained_columns'].append(self.normalize_name(r[3])) 1146 | fschema[r[0]]['referred_columns'].append(self.normalize_name(r[7])) 1147 | return [value for key, value in fschema.items()] 1148 | 1149 | @reflection.cache 1150 | def get_incoming_foreign_keys(self, connection, table_name, schema=None, **kw): 1151 | default_schema = self.default_schema_name 1152 | current_schema = self.denormalize_name(schema or default_schema) 1153 | default_schema = self.normalize_name(default_schema) 1154 | table_name = self.denormalize_name(table_name) 1155 | sysfkeys = self.sys_foreignkeys 1156 | sysrels = self.sys_rels 1157 | syscolspk = self.sys_columns 1158 | sysindex = self.sys_indexes 1159 | query = sql.select(sysrels.c.fkname, sysrels.c.fktabschema, 
1160 | sysrels.c.fktabname, sysfkeys.c.fkcolname, 1161 | sysindex.c.indname, sysrels.c.pktabschema, 1162 | sysrels.c.pktabname, syscolspk.c.colname).\ 1163 | where(and_( 1164 | syscolspk.c.tabschema == current_schema, 1165 | syscolspk.c.tabname == table_name, 1166 | sysrels.c.fktabname == sysfkeys.c.fktabname, 1167 | sysrels.c.pktabname == syscolspk.c.tabname, 1168 | syscolspk.c.tabname == sysindex.c.tabname, 1169 | syscolspk.c.keyseq > 0)).\ 1170 | order_by(sysfkeys.c.colno) 1171 | 1172 | fschema = {} 1173 | for r in connection.execute(query): 1174 | if not fschema.has_key(r[0]): 1175 | constrained_schema = self.normalize_name(r[1]) 1176 | 1177 | # if no schema specified and referred schema here is the 1178 | # default, then set to None 1179 | if schema is None and \ 1180 | constrained_schema == default_schema: 1181 | constrained_schema = None 1182 | 1183 | fschema[r[0]] = { 1184 | 'name': self.normalize_name(r[0]), 1185 | 'constrained_schema': constrained_schema, 1186 | 'constrained_table': self.normalize_name(r[2]), 1187 | 'constrained_columns': [self.normalize_name(r[3])], 1188 | 'referred_schema': schema, 1189 | 'referred_table': self.normalize_name(r[6]), 1190 | 'referred_columns': [self.normalize_name(r[7])]} 1191 | else: 1192 | fschema[r[0]]['constrained_columns'].append(self.normalize_name(r[3])) 1193 | fschema[r[0]]['referred_columns'].append(self.normalize_name(r[7])) 1194 | return [value for key, value in fschema.items()] 1195 | 1196 | @reflection.cache 1197 | def get_indexes(self, connection, table_name, schema=None, **kw): 1198 | current_schema = self.denormalize_name(schema or self.default_schema_name) 1199 | table_name = self.denormalize_name(table_name) 1200 | sysidx = self.sys_indexes 1201 | syscolpk = self.sys_columns 1202 | query = sql.select(sysidx.c.indname, syscolpk.c.colname, sysidx.c.uniquerule, sysidx.c.system_required).\ 1203 | where(and_( 1204 | sysidx.c.tabschema == current_schema, 1205 | sysidx.c.tabname == table_name, 1206 | 
syscolpk.c.colname == sysidx.c.tabname, 1207 | syscolpk.c.keyseq > 0)).\ 1208 | order_by(sysidx.c.tabname) 1209 | indexes = [] 1210 | col_finder = re.compile(r"(\w+)") 1211 | for r in connection.execute(query): 1212 | if r[2] != 'P': 1213 | if r[2] == 'U' and r[3] != 0: 1214 | continue 1215 | indexes.append({ 1216 | 'name': self.normalize_name(r[0]), 1217 | 'column_names': [self.normalize_name(col) 1218 | for col in col_finder.findall(r[1])], 1219 | 'unique': r[2] == 'U' 1220 | }) 1221 | return indexes 1222 | 1223 | @reflection.cache 1224 | def get_unique_constraints(self, connection, table_name, schema=None, **kw): 1225 | current_schema = self.denormalize_name(schema or self.default_schema_name) 1226 | table_name = self.denormalize_name(table_name) 1227 | syskeycol = self.sys_keycoluse 1228 | sysconst = self.sys_tabconst 1229 | query = ( 1230 | sql.select(syskeycol.c.constname, syskeycol.c.colname) 1231 | .select_from( 1232 | join( 1233 | syskeycol, 1234 | sysconst, 1235 | and_( 1236 | syskeycol.c.constname == sysconst.c.constname, 1237 | syskeycol.c.tabschema == sysconst.c.tabschema, 1238 | syskeycol.c.tabname == sysconst.c.tabname, 1239 | ), 1240 | ) 1241 | ) 1242 | .where( 1243 | and_( 1244 | sysconst.c.tabname == table_name, 1245 | sysconst.c.tabschema == current_schema, 1246 | sysconst.c.type == "U", 1247 | ) 1248 | ) 1249 | .order_by(syskeycol.c.constname) 1250 | ) 1251 | uniqueConsts = [] 1252 | currConst = None 1253 | for r in connection.execute(query): 1254 | if currConst == r[0]: 1255 | uniqueConsts[-1]["column_names"].append(self.normalize_name(r[1])) 1256 | else: 1257 | currConst = r[0] 1258 | uniqueConsts.append( 1259 | { 1260 | "name": self.normalize_name(currConst), 1261 | "column_names": [self.normalize_name(r[1])], 1262 | } 1263 | ) 1264 | return uniqueConsts 1265 | -------------------------------------------------------------------------------- /ibm_db_sa/requirements.py: 
--------------------------------------------------------------------------------
 1 | """requirements.py
 2 | 
 3 | 
 4 | This file is used by the SQLAlchemy 0.8 testing suite to mark various
 5 | optional behaviors as non-supported.
 6 | 
 7 | """
 8 | from sqlalchemy.testing.requirements import SuiteRequirements
 9 | 
10 | from sqlalchemy.testing import exclusions
11 | 
12 | class Requirements(SuiteRequirements):
13 | 
14 |     @property
15 |     def on_update_cascade(self):
16 |         """target database must support ON UPDATE..CASCADE behavior in
17 |         foreign keys."""
18 | 
19 |         return exclusions.closed()
20 | 
21 |     @property
22 |     def datetime_microseconds(self):
23 |         """target dialect supports representation of Python
24 |         datetime.datetime() with microsecond objects."""
25 | 
26 |         return exclusions.closed()
27 | 
28 |     @property
29 |     def time_microseconds(self):
30 |         """target dialect supports representation of Python
31 |         datetime.time() with microsecond objects."""
32 | 
33 |         return exclusions.closed()
34 | 
35 |     @property
36 |     def unbounded_varchar(self):
37 |         """Target database must support VARCHAR with no length"""
38 | 
39 |         return exclusions.closed()
40 | 
41 |     #@property
42 |     #def offset(self):
43 |     #    return exclusions.closed()
44 | 
45 |     @property
46 |     def window_functions(self):
47 |         """Target database must support window functions."""
48 |         return exclusions.open()
49 | 
50 |     @property
51 |     def precision_numerics_enotation_small(self):
52 |         """target backend supports Decimal() objects using E notation
53 |         to represent very small values."""
54 |         return exclusions.open()
55 | 
56 |     @property
57 |     def precision_numerics_enotation_large(self):
58 |         """target backend supports Decimal() objects using E notation
59 |         to represent very large values."""
60 |         return exclusions.closed()
61 | 
62 |     @property
63 |     def precision_numerics_many_significant_digits(self):
64 |         """target backend supports values with many digits on both sides,
65 |         such as 319438950232418390.273596, 87673.594069654243
66 | 
67 
| """ 68 | return exclusions.fails_if(lambda: True, 69 | "Throws error SQL0604N, regarding Decimal(38, 12)" 70 | ) 71 | 72 | @property 73 | def precision_numerics_retains_significant_digits(self): 74 | """A precision numeric type will return empty significant digits, 75 | i.e. a value such as 10.000 will come back in Decimal form with 76 | the .000 maintained.""" 77 | 78 | return exclusions.open() 79 | -------------------------------------------------------------------------------- /ibm_db_sa/zxjdbc.py: -------------------------------------------------------------------------------- 1 | # +--------------------------------------------------------------------------+ 2 | # | Licensed Materials - Property of IBM | 3 | # | | 4 | # | (C) Copyright IBM Corporation 2008, 2016. | 5 | # +--------------------------------------------------------------------------+ 6 | # | This module complies with SQLAlchemy 0.8 and is | 7 | # | Licensed under the Apache License, Version 2.0 (the "License"); | 8 | # | you may not use this file except in compliance with the License. | 9 | # | You may obtain a copy of the License at | 10 | # | http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable | 11 | # | law or agreed to in writing, software distributed under the License is | 12 | # | distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY | 13 | # | KIND, either express or implied. See the License for the specific | 14 | # | language governing permissions and limitations under the License. | 15 | # +--------------------------------------------------------------------------+ 16 | # | Author: Jaimy Azle | 17 | # | Contributor: Mike Bayer | 18 | # +--------------------------------------------------------------------------+ 19 | 20 | # raise NotImplementedError( 21 | # "The zxjdbc dialect is not implemented at this time.") 22 | 23 | 24 | # NOTE: it appears that to use zxjdbc, the "RETURNING" syntax 25 | # must be installed in DB2, which appears to be optional. 
It would 26 | # be best if the RETURNING support were built into the base dialect 27 | # and not be local to zxjdbc here. 28 | 29 | from decimal import Decimal as _python_Decimal 30 | from sqlalchemy import sql, util 31 | from sqlalchemy import types as sa_types 32 | from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector 33 | from .base import _SelectLastRowIDMixin, DB2Dialect, DB2ExecutionContext, DB2Compiler 34 | from . import reflection as ibm_reflection 35 | 36 | class DB2ExecutionContext_zxjdbc(DB2ExecutionContext): 37 | 38 | def create_cursor(self): 39 | cursor = self._dbapi_connection.cursor() 40 | cursor.datahandler = self.dialect.DataHandler(cursor.datahandler) 41 | return cursor 42 | 43 | class DB2Dialect_zxjdbc(ZxJDBCConnector, DB2Dialect): 44 | 45 | supports_unicode_statements = supports_unicode_binds = \ 46 | returns_unicode_strings = supports_unicode = False 47 | supports_sane_multi_rowcount = False 48 | 49 | supports_unicode_statements = False 50 | supports_sane_rowcount = True 51 | supports_char_length = True 52 | 53 | jdbc_db_name = 'db2' 54 | jdbc_driver_name = 'com.ibm.db2.jcc.DB2Driver' 55 | 56 | statement_compiler = DB2Compiler 57 | execution_ctx_cls = DB2ExecutionContext_zxjdbc 58 | 59 | @classmethod 60 | def dbapi(cls): 61 | 62 | global SQLException, zxJDBC 63 | from java.sql import SQLException, Types as java_Types 64 | from com.ziclix.python.sql import zxJDBC 65 | from com.ziclix.python.sql import FilterDataHandler 66 | 67 | # TODO: this should be somewhere else 68 | class IBM_DB2DataHandler(FilterDataHandler): 69 | 70 | def setJDBCObject(self, statement, index, object, dbtype=None): 71 | if dbtype is None: 72 | if (isinstance(object, int)): 73 | statement.setObject(index, str(object), java_Types.INTEGER) 74 | elif (isinstance(object, long)): 75 | statement.setObject(index, str(object), java_Types.BIGINT) 76 | elif (isinstance(object, _python_Decimal)): 77 | statement.setObject(index, str(object), java_Types.DECIMAL) 78 | else: 79 | 
statement.setObject(index, object) 80 | else: 81 | FilterDataHandler.setJDBCObject(self, statement, index, object, dbtype) 82 | 83 | cls.DataHandler = IBM_DB2DataHandler 84 | return zxJDBC 85 | 86 | 87 | class AS400Dialect_zxjdbc(DB2Dialect_zxjdbc): 88 | jdbc_db_name = 'as400' 89 | jdbc_driver_name = 'com.ibm.as400.access.AS400JDBCDriver' 90 | 91 | _reflector_cls = ibm_reflection.AS400Reflector 92 | 93 | 94 | 95 | 96 | -------------------------------------------------------------------------------- /polaris.yml: -------------------------------------------------------------------------------- 1 | version: "1" 2 | project: 3 | name: ${scm.git.repo} 4 | branch: ${scm.git.branch} 5 | revision: 6 | name: ${scm.git.commit} 7 | date: ${scm.git.commit.date} 8 | capture: 9 | build: 10 | buildCommands: 11 | - shell: [python, -m, build] 12 | fileSystem: 13 | ears: 14 | extensions: [ear] 15 | files: 16 | - directory: ${project.projectDir} 17 | java: 18 | files: 19 | - directory: ${project.projectDir} 20 | javascript: 21 | files: 22 | - directory: ${project.projectDir} 23 | - excludeRegex: node_modules|bower_components|vendor 24 | php: 25 | files: 26 | - directory: ${project.projectDir} 27 | python: 28 | files: 29 | - directory: ${project.projectDir} 30 | ruby: 31 | files: 32 | - directory: ${project.projectDir} 33 | typescript: 34 | files: 35 | - directory: ${project.projectDir} 36 | wars: 37 | extensions: [war] 38 | files: 39 | - directory: ${project.projectDir} 40 | dlls: 41 | extensions: [c, C, cpp, CPP, h, hpp] 42 | files: 43 | - directory: ${project.projectDir} 44 | analyze: 45 | mode: central 46 | install: 47 | coverity: 48 | version: default 49 | serverUrl: https://rocketsoftware.cop.blackduck.com -------------------------------------------------------------------------------- /run_tests.py: -------------------------------------------------------------------------------- 1 | from sqlalchemy.dialects import registry 2 | 3 | registry.register("db2", "ibm_db_sa.ibm_db", 
"DB2Dialect_ibm_db") 4 | registry.register("db2.ibm_db", "ibm_db_sa.ibm_db", "DB2Dialect_ibm_db") 5 | registry.register("db2.pyodbc", "ibm_db_sa.pyodbc", "DB2Dialect_pyodbc") 6 | registry.register("db2.zxjdbc", "ibm_db_sa.zxjdbc", "DB2Dialect_zxjdbc") 7 | registry.register("db2.pyodbc400", "ibm_db_sa.pyodbc", "AS400Dialect_pyodbc") 8 | registry.register("db2.zxjdbc400", "ibm_db_sa.zxjdbc", "AS400Dialect_zxjdbc") 9 | 10 | from sqlalchemy.testing import runner 11 | 12 | runner.main() 13 | 14 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [egg_info] 2 | tag_build = 3 | 4 | [nosetests] 5 | with-sqla_testing = true 6 | where = test 7 | cover-package = ibm_db 8 | with-coverage = 1 9 | cover-erase = 1 10 | verbosity = 1 11 | 12 | [sqla_testing] 13 | requirement_cls=ibm_db_sa.requirements:Requirements 14 | profile_file=.profiles.txt 15 | 16 | [db] 17 | default=db2+ibm_db://db2inst2:db2inst2@/test 18 | sqlite=sqlite:///:memory: 19 | 20 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from setuptools import setup 4 | import os 5 | import re 6 | 7 | 8 | v = open(os.path.join(os.path.dirname(__file__), 'ibm_db_sa', '__init__.py')) 9 | VERSION = re.compile(r".*__version__ = '(.*?)'", re.S).match(v.read()).group(1) 10 | v.close() 11 | 12 | readme = os.path.join(os.path.dirname(__file__), 'README.md') 13 | if 'USE_PYODBC' in os.environ and os.environ['USE_PYODBC'] == '1': 14 | require = ['sqlalchemy>=0.7.3'] 15 | else: 16 | require = ['sqlalchemy>=0.7.3','ibm_db>=2.0.0'] 17 | 18 | 19 | setup( 20 | name='ibm_db_sa', 21 | version=VERSION, 22 | license='Apache License 2.0', 23 | description='SQLAlchemy support for IBM Data Servers', 24 | author='IBM Application Development Team', 25 | 
author_email='balram.choudhary@ibm.com', 26 | url='http://pypi.python.org/pypi/ibm_db_sa/', 27 | download_url='https://github.com/ibmdb/python-ibmdbsa', 28 | keywords='sqlalchemy database interface IBM Data Servers Db2', 29 | long_description_content_type='text/markdown', 30 | classifiers=[ 31 | 'Development Status :: 5 - Production/Stable', 32 | 'Intended Audience :: Developers', 33 | 'License :: OSI Approved :: Apache Software License', 34 | 'Operating System :: OS Independent', 35 | 'Topic :: Database :: Front-Ends' 36 | ], 37 | long_description=open(readme).read(), 38 | platforms='All', 39 | install_requires= require, 40 | packages=['ibm_db_sa'], 41 | entry_points={ 42 | 'sqlalchemy.dialects': [ 43 | 'db2as400=ibm_db_sa.ibm_db_as400:AS400Dialect', 44 | 'db2=ibm_db_sa.ibm_db:DB2Dialect_ibm_db', 45 | 'db2.ibm_db=ibm_db_sa.ibm_db:DB2Dialect_ibm_db', 46 | 'db2.zxjdbc=ibm_db_sa.zxjdbc:DB2Dialect_zxjdbc', 47 | 'db2.pyodbc=ibm_db_sa.pyodbc:DB2Dialect_pyodbc', 48 | 'db2.zxjdbc400=ibm_db_sa.zxjdbc:AS400Dialect_zxjdbc', 49 | 'db2.pyodbc400=ibm_db_sa.pyodbc:AS400Dialect_pyodbc', 50 | 51 | # older "ibm_db_sa://" style for backwards 52 | # compatibility 53 | 'ibm_db_sa=ibm_db_sa.ibm_db:DB2Dialect_ibm_db', 54 | 'ibm_db_sa.zxjdbc=ibm_db_sa.zxjdbc:DB2Dialect_zxjdbc', 55 | 'ibm_db_sa.pyodbc=ibm_db_sa.pyodbc:DB2Dialect_pyodbc', 56 | 'ibm_db_sa.zxjdbc400=ibm_db_sa.zxjdbc:AS400Dialect_zxjdbc', 57 | 'ibm_db_sa.pyodbc400=ibm_db_sa.pyodbc:AS400Dialect_pyodbc', 58 | ] 59 | }, 60 | zip_safe=False, 61 | tests_require=['nose >= 0.11'], 62 | ) 63 | -------------------------------------------------------------------------------- /test/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ibmdb/python-ibmdbsa/b6cba39ae0a0848f1af58162c52fa642953ce194/test/__init__.py -------------------------------------------------------------------------------- /test/test_out_parameters.py: 
-------------------------------------------------------------------------------- 1 | from sqlalchemy.testing import fixtures, config 2 | from sqlalchemy import text, bindparam, outparam 3 | from sqlalchemy import Float, Integer, String 4 | from sqlalchemy.testing.assertions import eq_ 5 | 6 | 7 | class OutParamTest(fixtures.TestBase): 8 | 9 | @classmethod 10 | def setup_class(cls): 11 | config.db.execute(""" 12 | create or replace procedure foo(IN x_in integer, OUT x_out integer, OUT y_out integer, OUT z_out varchar(20)) 13 | BEGIN 14 | SET x_out = 10; 15 | SET y_out = x_in * 15; 16 | SET z_out = NULL; 17 | END 18 | """) 19 | 20 | def test_out_params(self): 21 | result = \ 22 | config.db.execute(text('call foo(:x_in, :x_out, :y_out, ' 23 | ':z_out)', 24 | bindparams=[bindparam('x_in'), 25 | outparam('x_out'), 26 | outparam('y_out'), 27 | outparam('z_out')]), x_in=5, x_out=0, y_out=0, z_out='') 28 | eq_(result.out_parameters, {'x_out': 10, 'y_out': 75, 'z_out': None}) 29 | assert isinstance(result.out_parameters['x_out'], long) 30 | 31 | @classmethod 32 | def teardown_class(cls): 33 | config.db.execute("DROP PROCEDURE foo") -------------------------------------------------------------------------------- /test/test_suite.py: -------------------------------------------------------------------------------- 1 | from sqlalchemy.testing.suite import * 2 | 3 | --------------------------------------------------------------------------------