├── .bumpversion.cfg ├── .gitattributes ├── .github └── workflows │ ├── publish_on_pypi.yml │ └── test_with_tox.yml ├── .gitignore ├── LICENSE ├── MANIFEST.in ├── README.md ├── dbutils ├── __init__.py ├── persistent_db.py ├── persistent_pg.py ├── pooled_db.py ├── pooled_pg.py ├── simple_pooled_db.py ├── simple_pooled_pg.py ├── steady_db.py └── steady_pg.py ├── docs ├── changelog.html ├── changelog.rst ├── dependencies_db.png ├── dependencies_pg.png ├── doc.css ├── docutils.css ├── main.de.html ├── main.de.rst ├── main.html ├── main.rst ├── make.py ├── persistent.png └── pooled.png ├── pyproject.toml ├── tests ├── __init__.py ├── mock_db.py ├── mock_pg.py ├── test_persistent_db.py ├── test_persistent_pg.py ├── test_pooled_db.py ├── test_pooled_pg.py ├── test_simple_pooled_db.py ├── test_simple_pooled_pg.py ├── test_steady_db.py ├── test_steady_pg.py └── test_threading_local.py └── tox.ini /.bumpversion.cfg: -------------------------------------------------------------------------------- 1 | [bumpversion] 2 | current_version = 3.1.1 3 | 4 | [bumpversion:file:pyproject.toml] 5 | search = version = "{current_version}" 6 | replace = version = "{new_version}" 7 | 8 | [bumpversion:file:dbutils/__init__.py] 9 | search = __version__ = '{current_version}' 10 | replace = __version__ = '{new_version}' 11 | 12 | [bumpversion:file:README.md] 13 | search = The current version {current_version} 14 | replace = The current version {new_version} 15 | 16 | [bumpversion:file:docs/main.rst] 17 | search = :Version: {current_version} 18 | replace = :Version: {new_version} 19 | 20 | [bumpversion:file:docs/main.de.rst] 21 | search = :Version: {current_version} 22 | replace = :Version: {new_version} 23 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | * text=auto eol=lf 2 | 3 | *.bat text eol=crlf 4 | *.config text eol=lf 5 | *.css text eol=lf 6 | *.html text 
eol=lf 7 | *.js text eol=lf 8 | *.prefs text 9 | *.py text eol=lf 10 | *.rst text eol=lf 11 | *.sh text eol=lf 12 | *.txt text eol=lf 13 | *.po text eol=lf 14 | *.pot text eol=lf 15 | *.styl text eol=lf 16 | *.xml text 17 | 18 | *.gif binary 19 | *.ico binary 20 | *.jpg binary 21 | *.lnk binary 22 | *.mo binary 23 | *.png binary 24 | *.exe binary 25 | *.so binary 26 | *.ppt binary 27 | *.pdf binary 28 | *.gz binary 29 | *.zip binary 30 | -------------------------------------------------------------------------------- /.github/workflows/publish_on_pypi.yml: -------------------------------------------------------------------------------- 1 | name: Publish DBUtils on PyPI 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'Release-*' 7 | 8 | jobs: 9 | publish: 10 | runs-on: ubuntu-latest 11 | 12 | steps: 13 | - uses: actions/checkout@v4 14 | 15 | - name: Set up Python 16 | uses: actions/setup-python@v5 17 | with: 18 | python-version: "3.12" 19 | 20 | - name: Install build tool 21 | run: python -m pip install build --user 22 | 23 | - name: Build source tarball and wheel 24 | run: python -m build 25 | 26 | - name: Publish distribution to PyPI 27 | uses: pypa/gh-action-pypi-publish@release/v1 28 | with: 29 | user: __token__ 30 | password: ${{ secrets.PYPI_TOKEN }} 31 | -------------------------------------------------------------------------------- /.github/workflows/test_with_tox.yml: -------------------------------------------------------------------------------- 1 | name: Test DBUtils using tox 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | test: 7 | runs-on: ubuntu-latest 8 | strategy: 9 | matrix: 10 | python: ['3.9', '3.10', '3.11', '3.12', '3.13'] 11 | 12 | steps: 13 | - uses: actions/checkout@v4 14 | 15 | - name: Setup Python ${{ matrix.python }} 16 | uses: actions/setup-python@v4 17 | with: 18 | python-version: ${{ matrix.python }} 19 | 20 | - run: pip install tox 21 | 22 | - run: tox -e py 23 | 24 | - if: matrix.python == 3.12 25 | run: TOXENV=ruff,manifest,docs,spell 
tox 26 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | *.bak 3 | *.default 4 | *.egg-info 5 | *.log 6 | *.patch 7 | *.pid 8 | *.pstats 9 | *.pyc 10 | *.pyo 11 | *.swp 12 | 13 | build 14 | dist 15 | local 16 | 17 | .idea 18 | .tox 19 | .pytest_cache 20 | 21 | test.bat 22 | 23 | MANIFEST 24 | 25 | Thumbs.db 26 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2025 Christoph Zwerschke 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include MANIFEST.in 2 | 3 | include LICENSE 4 | include README.md 5 | 6 | include .bumpversion.cfg 7 | include pyproject.toml 8 | include tox.ini 9 | 10 | recursive-include tests *.py 11 | 12 | recursive-include docs *.rst make.py *.html *.css *.png 13 | prune docs/_build 14 | 15 | global-exclude *.py[co] __pycache__ 16 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | DBUtils 2 | ======= 3 | 4 | DBUtils is a suite of tools providing solid, persistent and pooled connections 5 | to a database that can be used in all kinds of multi-threaded environments. 6 | 7 | The suite supports DB-API 2 compliant database interfaces 8 | and the classic PyGreSQL interface. 9 | 10 | The current version 3.1.1 of DBUtils supports Python versions 3.7 to 3.13. 11 | 12 | **Please have a look at the [changelog](https://webwareforpython.github.io/DBUtils/changelog.html), because there were some breaking changes in version 2.0.** 13 | 14 | The DBUtils home page can be found at https://webwareforpython.github.io/DBUtils/ 15 | -------------------------------------------------------------------------------- /dbutils/__init__.py: -------------------------------------------------------------------------------- 1 | """The DBUtils main package.""" 2 | 3 | __all__ = ["__version__"] 4 | 5 | __version__ = "3.1.1" 6 | -------------------------------------------------------------------------------- /dbutils/persistent_db.py: -------------------------------------------------------------------------------- 1 | """PersistentDB - persistent DB-API 2 connections. 
2 | 3 | Implements steady, thread-affine persistent connections to a database 4 | based on an arbitrary DB-API 2 compliant database interface module. 5 | 6 | This should result in a speedup for persistent applications such as the 7 | application server of "Webware for Python," without loss of robustness. 8 | 9 | Robustness is provided by using "hardened" SteadyDB connections. 10 | Even if the underlying database is restarted and all connections 11 | are lost, they will be automatically and transparently reopened. 12 | However, since you don't want this to happen in the middle of a database 13 | transaction, you must explicitly start transactions with the begin() 14 | method so that SteadyDB knows that the underlying connection shall not 15 | be replaced and errors passed on until the transaction is completed. 16 | 17 | Measures are taken to make the database connections thread-affine. 18 | This means the same thread always uses the same cached connection, 19 | and no other thread will use it. So even if the underlying DB-API module 20 | is not thread-safe at the connection level this will be no problem here. 21 | 22 | For best performance, the application server should keep threads persistent. 23 | For this, you have to set MinServerThreads = MaxServerThreads in Webware. 24 | 25 | For the Python DB-API 2 specification, see: 26 | https://www.python.org/dev/peps/pep-0249/ 27 | For information on Webware for Python, see: 28 | https://webwareforpython.github.io/w4py/ 29 | 30 | 31 | Usage: 32 | 33 | First you need to set up a generator for your kind of database connections 34 | by creating an instance of PersistentDB, passing the following parameters: 35 | 36 | creator: either an arbitrary function returning new DB-API 2 37 | connection objects or a DB-API 2 compliant database module 38 | maxusage: the maximum number of reuses of a single connection 39 | (the default of 0 or None means unlimited reuse) 40 | Whenever the limit is reached, the connection will be reset. 
41 | setsession: an optional list of SQL commands that may serve to 42 | prepare the session, e.g. ["set datestyle to german", ...]. 43 | failures: an optional exception class or a tuple of exception classes 44 | for which the connection failover mechanism shall be applied, 45 | if the default (OperationalError, InterfaceError, InternalError) 46 | is not adequate for the used database module 47 | ping: an optional flag controlling when connections are checked 48 | with the ping() method if such a method is available 49 | (0 = None = never, 1 = default = whenever it is requested, 50 | 2 = when a cursor is created, 4 = when a query is executed, 51 | 7 = always, and all other bit combinations of these values) 52 | closeable: if this is set to true, then closing connections will 53 | be allowed, but by default this will be silently ignored 54 | threadlocal: an optional class for representing thread-local data 55 | that will be used instead of our Python implementation 56 | (threading.local is faster, but cannot be used in all cases) 57 | 58 | The creator function or the connect function of the DB-API 2 compliant 59 | database module specified as the creator will receive any additional 60 | parameters such as the host, database, user, password etc. You may 61 | choose some or all of these parameters in your own creator function, 62 | allowing for sophisticated failover and load-balancing mechanisms. 
63 | 64 | For instance, if you are using pgdb as your DB-API 2 database module and want 65 | every connection to your local database 'mydb' to be reused 1000 times: 66 | 67 | import pgdb # import used DB-API 2 module 68 | from dbutils.persistent_db import PersistentDB 69 | persist = PersistentDB(pgdb, 1000, database='mydb') 70 | 71 | Once you have set up the generator with these parameters, you can 72 | request database connections of that kind: 73 | 74 | db = persist.connection() 75 | 76 | You can use these connections just as if they were ordinary 77 | DB-API 2 connections. Actually what you get is the hardened 78 | SteadyDB version of the underlying DB-API 2 connection. 79 | 80 | Closing a persistent connection with db.close() will be silently 81 | ignored since it would be reopened at the next usage anyway and 82 | contrary to the intent of having persistent connections. Instead, 83 | the connection will be automatically closed when the thread dies. 84 | You can change this behavior by setting the closeable parameter. 85 | 86 | Note that you need to explicitly start transactions by calling the 87 | begin() method. This ensures that the transparent reopening will be 88 | suspended until the end of the transaction, and that the connection 89 | will be rolled back before being reused by the same thread. 90 | 91 | By setting the threadlocal parameter to threading.local, getting 92 | connections may become a bit faster, but this may not work in all 93 | environments (for instance, mod_wsgi is known to cause problems 94 | since it clears the threading.local data between requests). 95 | 96 | 97 | Ideas for improvement: 98 | 99 | * Add a thread for monitoring, restarting (or closing) bad or expired 100 | connections (similar to DBConnectionPool/ResourcePool by Warren Smith). 101 | * Optionally log usage, bad connections and exceeding of limits. 
102 | 103 | 104 | Copyright, credits and license: 105 | 106 | * Contributed as supplement for Webware for Python and PyGreSQL 107 | by Christoph Zwerschke in September 2005 108 | * Based on an idea presented on the Webware developer mailing list 109 | by Geoffrey Talvola in July 2005 110 | 111 | Licensed under the MIT license. 112 | """ 113 | 114 | from . import __version__ 115 | from .steady_db import connect 116 | 117 | try: 118 | # Prefer the pure Python version of threading.local. 119 | # The C implementation turned out to be problematic with mod_wsgi, 120 | # since it does not keep the thread-local data between requests. 121 | from _threading_local import local 122 | except ImportError: 123 | # Fall back to the default version of threading.local. 124 | from threading import local 125 | 126 | __all__ = ['PersistentDB', 'PersistentDBError', 'NotSupportedError'] 127 | 128 | 129 | class PersistentDBError(Exception): 130 | """General PersistentDB error.""" 131 | 132 | 133 | class NotSupportedError(PersistentDBError): 134 | """DB-API module not supported by PersistentDB.""" 135 | 136 | 137 | class PersistentDB: 138 | """Generator for persistent DB-API 2 connections. 139 | 140 | After you have created the connection pool, you can use 141 | connection() to get thread-affine, steady DB-API 2 connections. 142 | """ 143 | 144 | version = __version__ 145 | 146 | def __init__( 147 | self, creator, 148 | maxusage=None, setsession=None, failures=None, ping=1, 149 | closeable=False, threadlocal=None, *args, **kwargs): 150 | """Set up the persistent DB-API 2 connection generator. 151 | 152 | creator: either an arbitrary function returning new DB-API 2 153 | connection objects or a DB-API 2 compliant database module 154 | maxusage: maximum number of reuses of a single connection 155 | (number of database operations, 0 or None means unlimited) 156 | Whenever the limit is reached, the connection will be reset. 
157 | setsession: optional list of SQL commands that may serve to prepare 158 | the session, e.g. ["set datestyle to ...", "set time zone ..."] 159 | failures: an optional exception class or a tuple of exception classes 160 | for which the connection failover mechanism shall be applied, 161 | if the default (OperationalError, InterfaceError, InternalError) 162 | is not adequate for the used database module 163 | ping: determines when the connection should be checked with ping() 164 | (0 = None = never, 1 = default = whenever it is requested, 165 | 2 = when a cursor is created, 4 = when a query is executed, 166 | 7 = always, and all other bit combinations of these values) 167 | closeable: if this is set to true, then closing connections will 168 | be allowed, but by default this will be silently ignored 169 | threadlocal: an optional class for representing thread-local data 170 | that will be used instead of our Python implementation 171 | (threading.local is faster, but cannot be used in all cases) 172 | args, kwargs: the parameters that shall be passed to the creator 173 | function or the connection constructor of the DB-API 2 module 174 | """ 175 | try: 176 | threadsafety = creator.threadsafety 177 | except AttributeError: 178 | try: 179 | threadsafety = creator.dbapi.threadsafety 180 | except AttributeError: 181 | try: 182 | if not callable(creator.connect): 183 | raise AttributeError 184 | except AttributeError: 185 | threadsafety = 1 186 | else: 187 | threadsafety = 0 188 | if not threadsafety: 189 | raise NotSupportedError("Database module is not thread-safe.") 190 | self._creator = creator 191 | self._maxusage = maxusage 192 | self._setsession = setsession 193 | self._failures = failures 194 | self._ping = ping 195 | self._closeable = closeable 196 | self._args, self._kwargs = args, kwargs 197 | self.thread = (threadlocal or local)() 198 | 199 | def steady_connection(self): 200 | """Get a steady, non-persistent DB-API 2 connection.""" 201 | return connect( 
202 | self._creator, self._maxusage, self._setsession, 203 | self._failures, self._ping, self._closeable, 204 | *self._args, **self._kwargs) 205 | 206 | def connection(self, shareable=False): # noqa: ARG002 207 | """Get a steady, persistent DB-API 2 connection. 208 | 209 | The shareable parameter exists only for compatibility with the 210 | PooledDB connection method. In reality, persistent connections 211 | are of course never shared with other threads. 212 | """ 213 | try: 214 | con = self.thread.connection 215 | except AttributeError as error: 216 | con = self.steady_connection() 217 | if not con.threadsafety(): 218 | raise NotSupportedError( 219 | "Database module is not thread-safe.") from error 220 | self.thread.connection = con 221 | con._ping_check() 222 | return con 223 | 224 | def dedicated_connection(self): 225 | """Alias for connection(shareable=False).""" 226 | return self.connection() 227 | -------------------------------------------------------------------------------- /dbutils/persistent_pg.py: -------------------------------------------------------------------------------- 1 | """PersistentPg - persistent classic PyGreSQL connections. 2 | 3 | Implements steady, thread-affine persistent connections to a PostgreSQL 4 | database using the classic (not DB-API 2 compliant) PyGreSQL API. 5 | 6 | This should result in a speedup for persistent applications such as the 7 | application server of "Webware for Python," without loss of robustness. 8 | 9 | Robustness is provided by using "hardened" SteadyPg connections. 10 | Even if the underlying database is restarted and all connections 11 | are lost, they will be automatically and transparently reopened. 12 | However, since you don't want this to happen in the middle of a database 13 | transaction, you must explicitly start transactions with the begin() 14 | method so that SteadyPg knows that the underlying connection shall not 15 | be replaced and errors passed on until the transaction is completed. 
16 | 17 | Measures are taken to make the database connections thread-affine. 18 | This means the same thread always uses the same cached connection, 19 | and no other thread will use it. So the fact that the classic PyGreSQL 20 | pg module is not thread-safe at the connection level is no problem here. 21 | 22 | For best performance, the application server should keep threads persistent. 23 | For this, you have to set MinServerThreads = MaxServerThreads in Webware. 24 | 25 | For more information on PostgreSQL, see: 26 | https://www.postgresql.org/ 27 | For more information on PyGreSQL, see: 28 | http://www.pygresql.org 29 | For more information on Webware for Python, see: 30 | https://webwareforpython.github.io/w4py/ 31 | 32 | 33 | Usage: 34 | 35 | First you need to set up a generator for your kind of database connections 36 | by creating an instance of PersistentPg, passing the following parameters: 37 | 38 | maxusage: the maximum number of reuses of a single connection 39 | (the default of 0 or None means unlimited reuse) 40 | When this maximum usage number of the connection is reached, 41 | the connection is automatically reset (closed and reopened). 42 | setsession: An optional list of SQL commands that may serve to 43 | prepare the session, e.g. ["set datestyle to german", ...] 44 | closeable: if this is set to true, then closing connections will 45 | be allowed, but by default this will be silently ignored 46 | threadlocal: an optional class for representing thread-local data 47 | that will be used instead of our Python implementation 48 | (threading.local is faster, but cannot be used in all cases) 49 | 50 | Additionally, you have to pass the parameters for the actual 51 | PostgreSQL connection which are passed via PyGreSQL, 52 | such as the names of the host, database, user, password etc. 
53 | 54 | For instance, if you want every connection to your local database 'mydb' 55 | to be reused 1000 times: 56 | 57 | from dbutils.persistent_pg import PersistentPg 58 | persist = PersistentPg(1000, dbname='mydb') 59 | 60 | Once you have set up the generator with these parameters, you can 61 | request database connections of that kind: 62 | 63 | db = persist.connection() 64 | 65 | You can use these connections just as if they were ordinary 66 | classic PyGreSQL API connections. Actually what you get is the 67 | hardened SteadyPg version of a classic PyGreSQL connection. 68 | 69 | Closing a persistent connection with db.close() will be silently 70 | ignored since it would be reopened at the next usage anyway and 71 | contrary to the intent of having persistent connections. Instead, 72 | the connection will be automatically closed when the thread dies. 73 | You can change this behavior by setting the closeable parameter. 74 | 75 | Note that you need to explicitly start transactions by calling the 76 | begin() method. This ensures that the transparent reopening will be 77 | suspended until the end of the transaction, and that the connection 78 | will be rolled back before being reused in the same thread. To end 79 | transactions, use one of the end(), commit() or rollback() methods. 80 | 81 | By setting the threadlocal parameter to threading.local, getting 82 | connections may become a bit faster, but this may not work in all 83 | environments (for instance, mod_wsgi is known to cause problems 84 | since it clears the threading.local data between requests). 85 | 86 | 87 | Ideas for improvement: 88 | 89 | * Add a thread for monitoring, restarting (or closing) bad or expired 90 | connections (similar to DBConnectionPool/ResourcePool by Warren Smith). 91 | * Optionally log usage, bad connections and exceeding of limits. 
92 | 93 | 94 | Copyright, credits and license: 95 | 96 | * Contributed as supplement for Webware for Python and PyGreSQL 97 | by Christoph Zwerschke in September 2005 98 | * Based on an idea presented on the Webware developer mailing list 99 | by Geoffrey Talvola in July 2005 100 | 101 | Licensed under the MIT license. 102 | """ 103 | 104 | from . import __version__ 105 | from .steady_pg import SteadyPgConnection 106 | 107 | try: 108 | # Prefer the pure Python version of threading.local. 109 | # The C implementation turned out to be problematic with mod_wsgi, 110 | # since it does not keep the thread-local data between requests. 111 | from _threading_local import local 112 | except ImportError: 113 | # Fall back to the default version of threading.local. 114 | from threading import local 115 | 116 | __all__ = ['PersistentPg'] 117 | 118 | 119 | class PersistentPg: 120 | """Generator for persistent classic PyGreSQL connections. 121 | 122 | After you have created the connection pool, you can use 123 | connection() to get thread-affine, steady PostgreSQL connections. 124 | """ 125 | 126 | version = __version__ 127 | 128 | def __init__( 129 | self, maxusage=None, setsession=None, 130 | closeable=False, threadlocal=None, *args, **kwargs): 131 | """Set up the persistent PostgreSQL connection generator. 132 | 133 | maxusage: maximum number of reuses of a single connection 134 | (0 or None means unlimited reuse) 135 | When this maximum usage number of the connection is reached, 136 | the connection is automatically reset (closed and reopened). 137 | setsession: optional list of SQL commands that may serve to prepare 138 | the session, e.g. 
["set datestyle to ...", "set time zone ..."] 139 | closeable: if this is set to true, then closing connections will 140 | be allowed, but by default this will be silently ignored 141 | threadlocal: an optional class for representing thread-local data 142 | that will be used instead of our Python implementation 143 | (threading.local is faster, but cannot be used in all cases) 144 | args, kwargs: the parameters that shall be used to establish 145 | the PostgreSQL connections using class PyGreSQL pg.DB() 146 | """ 147 | self._maxusage = maxusage 148 | self._setsession = setsession 149 | self._closeable = closeable 150 | self._args, self._kwargs = args, kwargs 151 | self.thread = (threadlocal or local)() 152 | 153 | def steady_connection(self): 154 | """Get a steady, non-persistent PyGreSQL connection.""" 155 | return SteadyPgConnection( 156 | self._maxusage, self._setsession, self._closeable, 157 | *self._args, **self._kwargs) 158 | 159 | def connection(self): 160 | """Get a steady, persistent PyGreSQL connection.""" 161 | try: 162 | con = self.thread.connection 163 | except AttributeError: 164 | con = self.steady_connection() 165 | self.thread.connection = con 166 | return con 167 | -------------------------------------------------------------------------------- /dbutils/pooled_db.py: -------------------------------------------------------------------------------- 1 | """PooledDB - pooling for DB-API 2 connections. 2 | 3 | Implements a pool of steady, thread-safe cached connections 4 | to a database which are transparently reused, 5 | using an arbitrary DB-API 2 compliant database interface module. 6 | 7 | This should result in a speedup for persistent applications such as the 8 | application server of "Webware for Python," without loss of robustness. 9 | 10 | Robustness is provided by using "hardened" SteadyDB connections. 11 | Even if the underlying database is restarted and all connections 12 | are lost, they will be automatically and transparently reopened. 
13 | However, since you don't want this to happen in the middle of a database 14 | transaction, you must explicitly start transactions with the begin() 15 | method so that SteadyDB knows that the underlying connection shall not 16 | be replaced and errors passed on until the transaction is completed. 17 | 18 | Measures are taken to make the pool of connections thread-safe. 19 | If the underlying DB-API module is thread-safe at the connection level, 20 | the requested connections may be shared with other threads by default, 21 | but you can also request dedicated connections in case you need them. 22 | 23 | For the Python DB-API 2 specification, see: 24 | https://www.python.org/dev/peps/pep-0249/ 25 | For information on Webware for Python, see: 26 | https://webwareforpython.github.io/w4py/ 27 | 28 | 29 | Usage: 30 | 31 | First you need to set up the database connection pool by creating 32 | an instance of PooledDB, passing the following parameters: 33 | 34 | creator: either an arbitrary function returning new DB-API 2 35 | connection objects or a DB-API 2 compliant database module 36 | mincached: the initial number of idle connections in the pool 37 | (the default of 0 means no connections are made at startup) 38 | maxcached: the maximum number of idle connections in the pool 39 | (the default value of 0 or None means unlimited pool size) 40 | maxshared: maximum number of shared connections allowed 41 | (the default value of 0 or None means all connections are dedicated) 42 | When this maximum number is reached, connections are 43 | shared if they have been requested as shareable. 
44 | maxconnections: maximum number of connections generally allowed 45 | (the default value of 0 or None means any number of connections) 46 | blocking: determines behavior when exceeding the maximum 47 | (if this is set to true, block and wait until the number of 48 | connections decreases, but by default an error will be reported) 49 | maxusage: maximum number of reuses of a single connection 50 | (the default of 0 or None means unlimited reuse) 51 | When this maximum usage number of the connection is reached, 52 | the connection is automatically reset (closed and reopened). 53 | setsession: an optional list of SQL commands that may serve to 54 | prepare the session, e.g. ["set datestyle to german", ...] 55 | reset: how connections should be reset when returned to the pool 56 | (False or None to rollback transactions started with begin(), 57 | the default value True always issues a rollback for safety's sake) 58 | failures: an optional exception class or a tuple of exception classes 59 | for which the connection failover mechanism shall be applied, 60 | if the default (OperationalError, InterfaceError, InternalError) 61 | is not adequate for the used database module 62 | ping: an optional flag controlling when connections are checked 63 | with the ping() method if such a method is available 64 | (0 = None = never, 1 = default = whenever fetched from the pool, 65 | 2 = when a cursor is created, 4 = when a query is executed, 66 | 7 = always, and all other bit combinations of these values) 67 | 68 | The creator function or the connect function of the DB-API 2 compliant 69 | database module specified as the creator will receive any additional 70 | parameters such as the host, database, user, password etc. You may 71 | choose some or all of these parameters in your own creator function, 72 | allowing for sophisticated failover and load-balancing mechanisms. 
73 | 74 | For instance, if you are using pgdb as your DB-API 2 database module and 75 | want a pool of at least five connections to your local database 'mydb': 76 | 77 | import pgdb # import used DB-API 2 module 78 | from dbutils.pooled_db import PooledDB 79 | pool = PooledDB(pgdb, 5, database='mydb') 80 | 81 | Once you have set up the connection pool you can request 82 | database connections from that pool: 83 | 84 | db = pool.connection() 85 | 86 | You can use these connections just as if they were ordinary 87 | DB-API 2 connections. Actually what you get is the hardened 88 | SteadyDB version of the underlying DB-API 2 connection. 89 | 90 | Please note that the connection may be shared with other threads 91 | by default if you set a non-zero maxshared parameter and the DB-API 2 92 | module allows this. If you want to have a dedicated connection, use: 93 | 94 | db = pool.connection(shareable=False) 95 | 96 | You can also use this to get a dedicated connection: 97 | 98 | db = pool.dedicated_connection() 99 | 100 | If you don't need it anymore, you should immediately return it to the 101 | pool with db.close(). You can get another connection in the same way. 102 | 103 | Warning: In a threaded environment, never do the following: 104 | 105 | pool.connection().cursor().execute(...) 106 | 107 | This would release the connection too early for reuse which may be 108 | fatal if the connections are not thread-safe. Make sure that the 109 | connection object stays alive as long as you are using it, like that: 110 | 111 | db = pool.connection() 112 | cur = db.cursor() 113 | cur.execute(...) 114 | res = cur.fetchone() 115 | cur.close() # or del cur 116 | db.close() # or del db 117 | 118 | You can also use context managers for simpler code: 119 | 120 | with pool.connection() as db: 121 | with db.cursor() as cur: 122 | cur.execute(...) 123 | res = cur.fetchone() 124 | 125 | Note that you need to explicitly start transactions by calling the 126 | begin() method. 
This ensures that the connection will not be shared 127 | with other threads, that the transparent reopening will be suspended 128 | until the end of the transaction, and that the connection will be rolled 129 | back before being given back to the connection pool. 130 | 131 | 132 | Ideas for improvement: 133 | 134 | * Add a thread for monitoring, restarting (or closing) bad or expired 135 | connections (similar to DBConnectionPool/ResourcePool by Warren Smith). 136 | * Optionally log usage, bad connections and exceeding of limits. 137 | 138 | 139 | Copyright, credits and license: 140 | 141 | * Contributed as supplement for Webware for Python and PyGreSQL 142 | by Christoph Zwerschke in September 2005 143 | * Based on the code of DBPool, contributed to Webware for Python 144 | by Dan Green in December 2000 145 | 146 | Licensed under the MIT license. 147 | """ 148 | 149 | from contextlib import suppress 150 | from functools import total_ordering 151 | from threading import Condition 152 | 153 | from . import __version__ 154 | from .steady_db import connect 155 | 156 | __all__ = [ 157 | 'PooledDB', 'PooledDedicatedDBConnection', 158 | 'SharedDBConnection', 'PooledSharedDBConnection', 159 | 'PooledDBError', 'InvalidConnectionError', 160 | 'NotSupportedError', 'TooManyConnectionsError', 161 | ] 162 | 163 | 164 | class PooledDBError(Exception): 165 | """General PooledDB error.""" 166 | 167 | 168 | class InvalidConnectionError(PooledDBError): 169 | """Database connection is invalid.""" 170 | 171 | 172 | class NotSupportedError(PooledDBError): 173 | """DB-API module not supported by PooledDB.""" 174 | 175 | 176 | class TooManyConnectionsError(PooledDBError): 177 | """Too many database connections were opened.""" 178 | 179 | 180 | # deprecated alias names for error classes 181 | InvalidConnection = InvalidConnectionError 182 | TooManyConnections = TooManyConnectionsError 183 | 184 | 185 | class PooledDB: 186 | """Pool for DB-API 2 connections. 
187 | 188 | After you have created the connection pool, you can use 189 | connection() to get pooled, steady DB-API 2 connections. 190 | """ 191 | 192 | version = __version__ 193 | 194 | def __init__( 195 | self, creator, mincached=0, maxcached=0, 196 | maxshared=0, maxconnections=0, blocking=False, 197 | maxusage=None, setsession=None, reset=True, 198 | failures=None, ping=1, 199 | *args, **kwargs): 200 | """Set up the DB-API 2 connection pool. 201 | 202 | creator: either an arbitrary function returning new DB-API 2 203 | connection objects or a DB-API 2 compliant database module 204 | mincached: initial number of idle connections in the pool 205 | (0 means no connections are made at startup) 206 | maxcached: maximum number of idle connections in the pool 207 | (0 or None means unlimited pool size) 208 | maxshared: maximum number of shared connections 209 | (0 or None means all connections are dedicated) 210 | When this maximum number is reached, connections are 211 | shared if they have been requested as shareable. 212 | maxconnections: maximum number of connections generally allowed 213 | (0 or None means an arbitrary number of connections) 214 | blocking: determines behavior when exceeding the maximum 215 | (if this is set to true, block and wait until the number of 216 | connections decreases, otherwise an error will be reported) 217 | maxusage: maximum number of reuses of a single connection 218 | (0 or None means unlimited reuse) 219 | When this maximum usage number of the connection is reached, 220 | the connection is automatically reset (closed and reopened). 221 | setsession: optional list of SQL commands that may serve to prepare 222 | the session, e.g. 
["set datestyle to ...", "set time zone ..."] 223 | reset: how connections should be reset when returned to the pool 224 | (False or None to rollback transactions started with begin(), 225 | True to always issue a rollback for safety's sake) 226 | failures: an optional exception class or a tuple of exception classes 227 | for which the connection failover mechanism shall be applied, 228 | if the default (OperationalError, InterfaceError, InternalError) 229 | is not adequate for the used database module 230 | ping: determines when the connection should be checked with ping() 231 | (0 = None = never, 1 = default = whenever fetched from the pool, 232 | 2 = when a cursor is created, 4 = when a query is executed, 233 | 7 = always, and all other bit combinations of these values) 234 | args, kwargs: the parameters that shall be passed to the creator 235 | function or the connection constructor of the DB-API 2 module 236 | """ 237 | try: 238 | threadsafety = creator.threadsafety 239 | except AttributeError: 240 | try: 241 | threadsafety = creator.dbapi.threadsafety 242 | except AttributeError: 243 | try: 244 | if not callable(creator.connect): 245 | raise AttributeError 246 | except AttributeError: 247 | threadsafety = 1 248 | else: 249 | threadsafety = 0 250 | if not threadsafety: 251 | raise NotSupportedError("Database module is not thread-safe.") 252 | self._creator = creator 253 | self._args, self._kwargs = args, kwargs 254 | self._blocking = blocking 255 | self._maxusage = maxusage 256 | self._setsession = setsession 257 | self._reset = reset 258 | self._failures = failures 259 | self._ping = ping 260 | if mincached is None: 261 | mincached = 0 262 | if maxcached is None: 263 | maxcached = 0 264 | if maxconnections is None: 265 | maxconnections = 0 266 | if maxcached: 267 | maxcached = max(maxcached, mincached) 268 | self._maxcached = maxcached 269 | else: 270 | self._maxcached = 0 271 | if threadsafety > 1 and maxshared: 272 | self._maxshared = maxshared 273 | 
self._shared_cache = [] # the cache for shared connections 274 | else: 275 | self._maxshared = 0 276 | if maxconnections: 277 | maxconnections = max(maxconnections, maxcached) 278 | maxconnections = max(maxconnections, maxshared) 279 | self._maxconnections = maxconnections 280 | else: 281 | self._maxconnections = 0 282 | self._idle_cache = [] # the actual pool of idle connections 283 | self._lock = Condition() 284 | self._connections = 0 285 | # Establish an initial number of idle database connections: 286 | idle = [self.dedicated_connection() for i in range(mincached)] 287 | while idle: 288 | idle.pop().close() 289 | 290 | def steady_connection(self): 291 | """Get a steady, unpooled DB-API 2 connection.""" 292 | return connect( 293 | self._creator, self._maxusage, self._setsession, 294 | self._failures, self._ping, True, *self._args, **self._kwargs) 295 | 296 | def connection(self, shareable=True): 297 | """Get a steady, cached DB-API 2 connection from the pool. 298 | 299 | If shareable is set and the underlying DB-API 2 allows it, 300 | then the connection may be shared with other threads. 
301 | """ 302 | if shareable and self._maxshared: 303 | with self._lock: 304 | while (not self._shared_cache and self._maxconnections 305 | and self._connections >= self._maxconnections): 306 | self._wait_lock() 307 | if len(self._shared_cache) < self._maxshared: 308 | # shared cache is not full, get a dedicated connection 309 | try: # first try to get it from the idle cache 310 | con = self._idle_cache.pop(0) 311 | except IndexError: # else get a fresh connection 312 | con = self.steady_connection() 313 | else: 314 | con._ping_check() # check this connection 315 | con = SharedDBConnection(con) 316 | self._connections += 1 317 | else: # shared cache full or no more connections allowed 318 | self._shared_cache.sort() # least shared connection first 319 | con = self._shared_cache.pop(0) # get it 320 | while con.con._transaction: 321 | # do not share connections which are in a transaction 322 | self._shared_cache.insert(0, con) 323 | self._wait_lock() 324 | self._shared_cache.sort() 325 | con = self._shared_cache.pop(0) 326 | con.con._ping_check() # check the underlying connection 327 | con.share() # increase share of this connection 328 | # put the connection (back) into the shared cache 329 | self._shared_cache.append(con) 330 | self._lock.notify() 331 | con = PooledSharedDBConnection(self, con) 332 | else: # try to get a dedicated connection 333 | with self._lock: 334 | while (self._maxconnections 335 | and self._connections >= self._maxconnections): 336 | self._wait_lock() 337 | # connection limit not reached, get a dedicated connection 338 | try: # first try to get it from the idle cache 339 | con = self._idle_cache.pop(0) 340 | except IndexError: # else get a fresh connection 341 | con = self.steady_connection() 342 | else: 343 | con._ping_check() # check connection 344 | con = PooledDedicatedDBConnection(self, con) 345 | self._connections += 1 346 | return con 347 | 348 | def dedicated_connection(self): 349 | """Alias for connection(shareable=False).""" 350 | 
return self.connection(False) 351 | 352 | def unshare(self, con): 353 | """Decrease the share of a connection in the shared cache.""" 354 | with self._lock: 355 | con.unshare() 356 | shared = con.shared 357 | if not shared: # connection is idle 358 | # try to remove it from shared cache 359 | with suppress(ValueError): # if pool has already been closed 360 | self._shared_cache.remove(con) 361 | if not shared: # connection has become idle, 362 | self.cache(con.con) # so add it to the idle cache 363 | 364 | def cache(self, con): 365 | """Put a dedicated connection back into the idle cache.""" 366 | with self._lock: 367 | if not self._maxcached or len(self._idle_cache) < self._maxcached: 368 | con._reset(force=self._reset) # rollback possible transaction 369 | # the idle cache is not full, so put it there 370 | self._idle_cache.append(con) # append it to the idle cache 371 | else: # if the idle cache is already full, 372 | con.close() # then close the connection 373 | self._connections -= 1 374 | self._lock.notify() 375 | 376 | def close(self): 377 | """Close all connections in the pool.""" 378 | with self._lock: 379 | while self._idle_cache: # close all idle connections 380 | con = self._idle_cache.pop(0) 381 | with suppress(Exception): 382 | con.close() 383 | if self._maxshared: # close all shared connections 384 | while self._shared_cache: 385 | con = self._shared_cache.pop(0).con 386 | with suppress(Exception): 387 | con.close() 388 | self._connections -= 1 389 | self._lock.notify_all() 390 | 391 | def __del__(self): 392 | """Delete the pool.""" 393 | # builtins (including Exceptions) might not exist anymore 394 | try: # noqa: SIM105 395 | self.close() 396 | except: # noqa: E722, S110 397 | pass 398 | 399 | def _wait_lock(self): 400 | """Wait until notified or report an error.""" 401 | if not self._blocking: 402 | raise TooManyConnectionsError 403 | self._lock.wait() 404 | 405 | 406 | # Auxiliary classes for pooled connections 407 | 408 | class 
PooledDedicatedDBConnection: 409 | """Auxiliary proxy class for pooled dedicated connections.""" 410 | 411 | def __init__(self, pool, con): 412 | """Create a pooled dedicated connection. 413 | 414 | pool: the corresponding PooledDB instance 415 | con: the underlying SteadyDB connection 416 | """ 417 | # basic initialization to make finalizer work 418 | self._con = None 419 | # proper initialization of the connection 420 | if not con.threadsafety(): 421 | raise NotSupportedError("Database module is not thread-safe.") 422 | self._pool = pool 423 | self._con = con 424 | 425 | def close(self): 426 | """Close the pooled dedicated connection.""" 427 | # Instead of actually closing the connection, 428 | # return it to the pool for future reuse. 429 | if self._con: 430 | self._pool.cache(self._con) 431 | self._con = None 432 | 433 | def __getattr__(self, name): 434 | """Proxy all members of the class.""" 435 | if self._con: 436 | return getattr(self._con, name) 437 | raise InvalidConnectionError 438 | 439 | def __del__(self): 440 | """Delete the pooled connection.""" 441 | # builtins (including Exceptions) might not exist anymore 442 | try: # noqa: SIM105 443 | self.close() 444 | except: # noqa: E722, S110 445 | pass 446 | 447 | def __enter__(self): 448 | """Enter a runtime context for the connection.""" 449 | return self 450 | 451 | def __exit__(self, *exc): 452 | """Exit a runtime context for the connection.""" 453 | self.close() 454 | 455 | 456 | @total_ordering 457 | class SharedDBConnection: 458 | """Auxiliary class for shared connections.""" 459 | 460 | def __init__(self, con): 461 | """Create a shared connection. 
462 | 463 | con: the underlying SteadyDB connection 464 | """ 465 | self.con = con 466 | self.shared = 1 467 | 468 | def __lt__(self, other): 469 | """Check whether this connection should come before the other one.""" 470 | if self.con._transaction == other.con._transaction: 471 | return self.shared < other.shared 472 | return not self.con._transaction 473 | 474 | def __eq__(self, other): 475 | """Check whether this connection is the same as the other one.""" 476 | return (self.con._transaction == other.con._transaction 477 | and self.shared == other.shared) 478 | 479 | def share(self): 480 | """Increase the share of this connection.""" 481 | self.shared += 1 482 | 483 | def unshare(self): 484 | """Decrease the share of this connection.""" 485 | self.shared -= 1 486 | 487 | 488 | class PooledSharedDBConnection: 489 | """Auxiliary proxy class for pooled shared connections.""" 490 | 491 | def __init__(self, pool, shared_con): 492 | """Create a pooled shared connection. 493 | 494 | pool: the corresponding PooledDB instance 495 | con: the underlying SharedDBConnection 496 | """ 497 | # basic initialization to make finalizer work 498 | self._con = None 499 | # proper initialization of the connection 500 | con = shared_con.con 501 | if not con.threadsafety() > 1: 502 | raise NotSupportedError("Database connection is not thread-safe.") 503 | self._pool = pool 504 | self._shared_con = shared_con 505 | self._con = con 506 | 507 | def close(self): 508 | """Close the pooled shared connection.""" 509 | # Instead of actually closing the connection, 510 | # unshare it and/or return it to the pool. 
511 | if self._con: 512 | self._pool.unshare(self._shared_con) 513 | self._shared_con = self._con = None 514 | 515 | def __getattr__(self, name): 516 | """Proxy all members of the class.""" 517 | if self._con: 518 | return getattr(self._con, name) 519 | raise InvalidConnectionError 520 | 521 | def __del__(self): 522 | """Delete the pooled connection.""" 523 | # builtins (including Exceptions) might not exist anymore 524 | try: # noqa: SIM105 525 | self.close() 526 | except: # noqa: E722, S110 527 | pass 528 | 529 | def __enter__(self): 530 | """Enter a runtime context for the connection.""" 531 | return self 532 | 533 | def __exit__(self, *exc): 534 | """Exit a runtime context for the connection.""" 535 | self.close() 536 | -------------------------------------------------------------------------------- /dbutils/pooled_pg.py: -------------------------------------------------------------------------------- 1 | """PooledPg - pooling for classic PyGreSQL connections. 2 | 3 | Implements a pool of steady, thread-safe cached connections 4 | to a PostgreSQL database which are transparently reused, 5 | using the classic (not DB-API 2 compliant) PyGreSQL API. 6 | 7 | This should result in a speedup for persistent applications such as the 8 | application server of "Webware for Python," without loss of robustness. 9 | 10 | Robustness is provided by using "hardened" SteadyPg connections. 11 | Even if the underlying database is restarted and all connections 12 | are lost, they will be automatically and transparently reopened. 13 | However, since you don't want this to happen in the middle of a database 14 | transaction, you must explicitly start transactions with the begin() 15 | method so that SteadyPg knows that the underlying connection shall not 16 | be replaced and errors passed on until the transaction is completed. 
17 | 18 | Measures are taken to make the pool of connections thread-safe 19 | regardless of the fact that the classic PyGreSQL pg module itself 20 | is not thread-safe at the connection level. 21 | 22 | For more information on PostgreSQL, see: 23 | https://www.postgresql.org/ 24 | For more information on PyGreSQL, see: 25 | http://www.pygresql.org 26 | For more information on Webware for Python, see: 27 | https://webwareforpython.github.io/w4py/ 28 | 29 | 30 | Usage: 31 | 32 | First you need to set up the database connection pool by creating 33 | an instance of PooledPg, passing the following parameters: 34 | 35 | mincached: the initial number of connections in the pool 36 | (the default of 0 means no connections are made at startup) 37 | maxcached: the maximum number of connections in the pool 38 | (the default value of 0 or None means unlimited pool size) 39 | maxconnections: maximum number of connections generally allowed 40 | (the default value of 0 or None means any number of connections) 41 | blocking: determines behavior when exceeding the maximum 42 | (if this is set to true, block and wait until the number of 43 | connections decreases, but by default an error will be reported) 44 | maxusage: maximum number of reuses of a single connection 45 | (the default of 0 or None means unlimited reuse) 46 | When this maximum usage number of the connection is reached, 47 | the connection is automatically reset (closed and reopened). 48 | setsession: an optional list of SQL commands that may serve to 49 | prepare the session, e.g. ["set datestyle to german", ...] 50 | 51 | Additionally, you have to pass the parameters for the actual 52 | PostgreSQL connection which are passed via PyGreSQL, 53 | such as the names of the host, database, user, password etc. 
54 | 55 | For instance, if you want a pool of at least five connections 56 | to your local database 'mydb': 57 | 58 | from dbutils.pooled_pg import PooledPg 59 | pool = PooledPg(5, dbname='mydb') 60 | 61 | Once you have set up the connection pool you can request 62 | database connections from that pool: 63 | 64 | db = pool.connection() 65 | 66 | You can use these connections just as if they were ordinary 67 | classic PyGreSQL API connections. Actually what you get is a 68 | proxy class for the hardened SteadyPg version of the connection. 69 | 70 | The connection will not be shared with other threads. If you don't need 71 | it anymore, you should immediately return it to the pool with db.close(). 72 | You can get another connection in the same way or with db.reopen(). 73 | 74 | Warning: In a threaded environment, never do the following: 75 | 76 | res = pool.connection().query(...).getresult() 77 | 78 | This would release the connection too early for reuse which may be 79 | fatal because the connections are not thread-safe. Make sure that the 80 | connection object stays alive as long as you are using it, like that: 81 | 82 | db = pool.connection() 83 | res = db.query(...).getresult() 84 | db.close() # or del db 85 | 86 | You can also use a context manager for simpler code: 87 | 88 | with pool.connection() as db: 89 | res = db.query(...).getresult() 90 | 91 | Note that you need to explicitly start transactions by calling the 92 | begin() method. This ensures that the transparent reopening will be 93 | suspended until the end of the transaction, and that the connection will 94 | be rolled back before being given back to the connection pool. To end 95 | transactions, use one of the end(), commit() or rollback() methods. 96 | 97 | 98 | Ideas for improvement: 99 | 100 | * Add a thread for monitoring, restarting (or closing) bad or expired 101 | connections (similar to DBConnectionPool/ResourcePool by Warren Smith).
102 | * Optionally log usage, bad connections and exceeding of limits. 103 | 104 | 105 | Copyright, credits and license: 106 | 107 | * Contributed as supplement for Webware for Python and PyGreSQL 108 | by Christoph Zwerschke in September 2005 109 | * Based on the code of DBPool, contributed to Webware for Python 110 | by Dan Green in December 2000 111 | 112 | Licensed under the MIT license. 113 | """ 114 | 115 | from contextlib import suppress 116 | from queue import Empty, Full, Queue 117 | 118 | from . import __version__ 119 | from .steady_pg import SteadyPgConnection 120 | 121 | __all__ = [ 122 | 'PooledPg', 'PooledPgConnection', 123 | 'PooledPgError', 'InvalidConnectionError', 'TooManyConnectionsError', 124 | 'RESET_ALWAYS_ROLLBACK', 'RESET_COMPLETELY', 125 | ] 126 | 127 | # constants for "reset" parameter 128 | RESET_ALWAYS_ROLLBACK = 1 129 | RESET_COMPLETELY = 2 130 | 131 | 132 | class PooledPgError(Exception): 133 | """General PooledPg error.""" 134 | 135 | 136 | class InvalidConnectionError(PooledPgError): 137 | """Database connection is invalid.""" 138 | 139 | 140 | class TooManyConnectionsError(PooledPgError): 141 | """Too many database connections were opened.""" 142 | 143 | 144 | # deprecated alias names for error classes 145 | InvalidConnection = InvalidConnectionError 146 | TooManyConnections = TooManyConnectionsError 147 | 148 | 149 | class PooledPg: 150 | """Pool for classic PyGreSQL connections. 151 | 152 | After you have created the connection pool, you can use 153 | connection() to get pooled, steady PostgreSQL connections. 154 | """ 155 | 156 | version = __version__ 157 | 158 | def __init__( 159 | self, mincached=0, maxcached=0, 160 | maxconnections=0, blocking=False, 161 | maxusage=None, setsession=None, reset=None, 162 | *args, **kwargs): 163 | """Set up the PostgreSQL connection pool. 
164 | 165 | mincached: initial number of connections in the pool 166 | (0 means no connections are made at startup) 167 | maxcached: maximum number of connections in the pool 168 | (0 or None means unlimited pool size) 169 | maxconnections: maximum number of connections generally allowed 170 | (0 or None means an arbitrary number of connections) 171 | blocking: determines behavior when exceeding the maximum 172 | (if this is set to true, block and wait until the number of 173 | connections decreases, otherwise an error will be reported) 174 | maxusage: maximum number of reuses of a single connection 175 | (0 or None means unlimited reuse) 176 | When this maximum usage number of the connection is reached, 177 | the connection is automatically reset (closed and reopened). 178 | setsession: optional list of SQL commands that may serve to prepare 179 | the session, e.g. ["set datestyle to ...", "set time zone ..."] 180 | reset: how connections should be reset when returned to the pool 181 | (0 or None to rollback transactions started with begin(), 182 | 1 to always issue a rollback, 2 for a complete reset) 183 | args, kwargs: the parameters that shall be used to establish 184 | the PostgreSQL connections using the classic PyGreSQL pg.DB() 185 | """ 186 | self._args, self._kwargs = args, kwargs 187 | self._maxusage = maxusage 188 | self._setsession = setsession 189 | self._reset = reset or 0 190 | if mincached is None: 191 | mincached = 0 192 | if maxcached is None: 193 | maxcached = 0 194 | if maxconnections is None: 195 | maxconnections = 0 196 | if maxcached and maxcached < mincached: 197 | maxcached = mincached 198 | if maxconnections: 199 | maxconnections = max(maxconnections, maxcached) 200 | # Create semaphore for number of allowed connections generally: 201 | from threading import Semaphore 202 | self._connections = Semaphore(maxconnections) 203 | self._blocking = blocking 204 | else: 205 | self._connections = None 206 | self._cache = Queue(maxcached) # the actual
connection pool 207 | # Establish an initial number of database connections: 208 | idle = [self.connection() for i in range(mincached)] 209 | while idle: 210 | idle.pop().close() 211 | 212 | def steady_connection(self): 213 | """Get a steady, unpooled PostgreSQL connection.""" 214 | return SteadyPgConnection(self._maxusage, self._setsession, True, 215 | *self._args, **self._kwargs) 216 | 217 | def connection(self): 218 | """Get a steady, cached PostgreSQL connection from the pool.""" 219 | if self._connections and not self._connections.acquire(self._blocking): 220 | raise TooManyConnectionsError 221 | try: 222 | con = self._cache.get_nowait() 223 | except Empty: 224 | con = self.steady_connection() 225 | return PooledPgConnection(self, con) 226 | 227 | def cache(self, con): 228 | """Put a connection back into the pool cache.""" 229 | try: 230 | if self._reset == RESET_COMPLETELY: 231 | con.reset() # reset the connection completely 232 | elif self._reset == RESET_ALWAYS_ROLLBACK or con._transaction: 233 | with suppress(Exception): 234 | con.rollback() # rollback a possible transaction 235 | self._cache.put_nowait(con) # and then put it back into the cache 236 | except Full: 237 | con.close() 238 | if self._connections: 239 | self._connections.release() 240 | 241 | def close(self): 242 | """Close all connections in the pool.""" 243 | while 1: 244 | try: 245 | con = self._cache.get_nowait() 246 | with suppress(Exception): 247 | con.close() 248 | if self._connections: 249 | self._connections.release() 250 | except Empty: 251 | break 252 | 253 | def __del__(self): 254 | """Delete the pool.""" 255 | # builtins (including Exceptions) might not exist anymore 256 | try: # noqa: SIM105 257 | self.close() 258 | except: # noqa: E722, S110 259 | pass 260 | 261 | 262 | # Auxiliary class for pooled connections 263 | 264 | class PooledPgConnection: 265 | """Proxy class for pooled PostgreSQL connections.""" 266 | 267 | def __init__(self, pool, con): 268 | """Create a pooled DB-API 
2 connection. 269 | 270 | pool: the corresponding PooledPg instance 271 | con: the underlying SteadyPg connection 272 | """ 273 | self._pool = pool 274 | self._con = con 275 | 276 | def close(self): 277 | """Close the pooled connection.""" 278 | # Instead of actually closing the connection, 279 | # return it to the pool so that it can be reused. 280 | if self._con: 281 | self._pool.cache(self._con) 282 | self._con = None 283 | 284 | def reopen(self): 285 | """Reopen the pooled connection.""" 286 | # If the connection is already back in the pool, 287 | # get another connection from the pool, 288 | # otherwise reopen the underlying connection. 289 | if self._con: 290 | self._con.reopen() 291 | else: 292 | self._con = self._pool.connection() 293 | 294 | def __getattr__(self, name): 295 | """Proxy all members of the class.""" 296 | if self._con: 297 | return getattr(self._con, name) 298 | raise InvalidConnectionError 299 | 300 | def __del__(self): 301 | """Delete the pooled connection.""" 302 | # builtins (including Exceptions) might not exist anymore 303 | try: # noqa: SIM105 304 | self.close() 305 | except: # noqa: E722, S110 306 | pass 307 | 308 | def __enter__(self): 309 | """Enter a runtime context for the connection.""" 310 | return self 311 | 312 | def __exit__(self, *exc): 313 | """Exit a runtime context for the connection.""" 314 | self.close() 315 | -------------------------------------------------------------------------------- /dbutils/simple_pooled_db.py: -------------------------------------------------------------------------------- 1 | """SimplePooledDB - a very simple DB-API 2 database connection pool. 2 | 3 | Implements a pool of threadsafe cached DB-API 2 connections 4 | to a database which are transparently reused. 5 | 6 | This should result in a speedup for persistent applications 7 | such as the "Webware for Python" AppServer. 
8 | 9 | For more information on the DB-API 2, see: 10 | https://www.python.org/dev/peps/pep-0249/ 11 | For more information on Webware for Python, see: 12 | https://webwareforpython.github.io/w4py/ 13 | 14 | Measures are taken to make the pool of connections threadsafe 15 | regardless of whether the DB-API 2 module used is threadsafe 16 | on the connection level (threadsafety > 1) or not. It must only 17 | be threadsafe on the module level (threadsafety = 1). If the 18 | DB-API 2 module is threadsafe, the connections will be shared 19 | between threads (keep this in mind if you use transactions). 20 | 21 | Usage: 22 | 23 | The idea behind SimplePooledDB is that it's completely transparent. 24 | After you have established your connection pool, stating the 25 | DB-API 2 module to be used, the number of connections 26 | to be cached in the pool and the connection parameters, e.g. 27 | 28 | import pgdb # import used DB-API 2 module 29 | from dbutils.simple_pooled_db import PooledDB 30 | dbpool = PooledDB(pgdb, 5, host=..., database=..., user=..., ...) 31 | 32 | you can demand database connections from that pool, 33 | 34 | db = dbpool.connection() 35 | 36 | and use them just as if they were ordinary DB-API 2 connections. 37 | It's really just a proxy class. 38 | 39 | db.close() will return the connection to the pool, it will not 40 | actually close it. This is so your existing code works nicely. 41 | 42 | Ideas for improvement: 43 | 44 | * Do not create the maximum number of connections on startup 45 | already, but only a certain number and the rest on demand. 46 | * Detect and transparently reset "bad" connections. 47 | * Connections should have some sort of maximum usage limit 48 | after which they should be automatically closed and reopened. 49 | * Prefer or enforce thread-affinity for the connections, 50 | allowing for both shareable and non-shareable connections. 
51 | 52 | Please note that these and other ideas have been already 53 | implemented in PooledDB, a more sophisticated version 54 | of SimplePooledDB. You might also consider using PersistentDB 55 | instead for thread-affine persistent database connections. 56 | SimplePooledDB may still serve as a very simple reference 57 | and example implementation for developers. 58 | 59 | 60 | Copyright, credits and license: 61 | 62 | * Contributed as MiscUtils/DBPool for Webware for Python 63 | by Dan Green, December 2000 64 | * Thread safety bug found by Tom Schwaller 65 | * Fixes by Geoff Talvola (thread safety in _threadsafe_getConnection()) 66 | * Clean up by Chuck Esterbrook 67 | * Fix unthreadsafe functions which were leaking, Jay Love 68 | * Eli Green's webware-discuss comments were lifted for additional docs 69 | * Clean-up and detailed commenting, rename and move to DBUtils 70 | by Christoph Zwerschke in September 2005 71 | 72 | Licensed under the MIT license. 73 | """ 74 | 75 | from . import __version__ 76 | 77 | __all__ = [ 78 | 'PooledDB', 'PooledDBConnection', 'PooledDBError', 'NotSupportedError', 79 | ] 80 | 81 | 82 | class PooledDBError(Exception): 83 | """General PooledDB error.""" 84 | 85 | 86 | class NotSupportedError(PooledDBError): 87 | """DB-API module not supported by PooledDB.""" 88 | 89 | 90 | class PooledDBConnection: 91 | """A proxy class for pooled database connections. 92 | 93 | You don't normally deal with this class directly, 94 | but use PooledDB to get new connections. 95 | """ 96 | 97 | def __init__(self, pool, con): 98 | """Initialize pooled connection.""" 99 | self._con = con 100 | self._pool = pool 101 | 102 | def close(self): 103 | """Close the pooled connection.""" 104 | # Instead of actually closing the connection, 105 | # return it to the pool so that it can be reused.
106 | if self._con is not None: 107 | self._pool.returnConnection(self._con) 108 | self._con = None 109 | 110 | def __getattr__(self, name): 111 | """Get the attribute with the given name.""" 112 | # All other attributes are the same. 113 | return getattr(self._con, name) 114 | 115 | def __del__(self): 116 | """Delete the pooled connection.""" 117 | self.close() 118 | 119 | 120 | class PooledDB: 121 | """A very simple database connection pool. 122 | 123 | After you have created the connection pool, 124 | you can get connections using getConnection(). 125 | """ 126 | 127 | version = __version__ 128 | 129 | def __init__(self, dbapi, maxconnections, *args, **kwargs): 130 | """Set up the database connection pool. 131 | 132 | dbapi: the DB-API 2 compliant module you want to use 133 | maxconnections: the number of connections cached in the pool 134 | args, kwargs: the parameters that shall be used to establish 135 | the database connections using connect() 136 | """ 137 | try: 138 | threadsafety = dbapi.threadsafety 139 | except Exception: 140 | threadsafety = None 141 | if threadsafety == 0: 142 | raise NotSupportedError( 143 | "Database module does not support any level of threading.") 144 | if threadsafety == 1: 145 | # If there is no connection level safety, build 146 | # the pool using the synchronized queue class 147 | # that implements all the required locking semantics. 148 | from queue import Queue 149 | self._queue = Queue(maxconnections) # create the queue 150 | self.connection = self._unthreadsafe_get_connection 151 | self.addConnection = self._unthreadsafe_add_connection 152 | self.returnConnection = self._unthreadsafe_return_connection 153 | elif threadsafety in (2, 3): 154 | # If there is connection level safety, implement the 155 | # pool with an ordinary list used as a circular buffer. 156 | # We only need a minimum of locking in this case. 
157 | from threading import Lock 158 | self._lock = Lock() # create a lock object to be used later 159 | self._nextConnection = 0 # index of the next connection to be used 160 | self._connections = [] # the list of connections 161 | self.connection = self._threadsafe_get_connection 162 | self.addConnection = self._threadsafe_add_connection 163 | self.returnConnection = self._threadsafe_return_connection 164 | else: 165 | raise NotSupportedError( 166 | "Database module threading support cannot be determined.") 167 | # Establish all database connections (it would be better to 168 | # only establish a part of them now, and the rest on demand). 169 | for _i in range(maxconnections): 170 | self.addConnection(dbapi.connect(*args, **kwargs)) 171 | 172 | # The following functions are used with DB-API 2 modules 173 | # that do not have connection level threadsafety, like PyGreSQL. 174 | # However, the module must be threadsafe at the module level. 175 | # Note: threadsafe/unthreadsafe refers to the DB-API 2 module, 176 | # not to this class which should be threadsafe in any case. 177 | 178 | def _unthreadsafe_get_connection(self): 179 | """Get a connection from the pool.""" 180 | return PooledDBConnection(self, self._queue.get()) 181 | 182 | def _unthreadsafe_add_connection(self, con): 183 | """Add a connection to the pool.""" 184 | self._queue.put(con) 185 | 186 | def _unthreadsafe_return_connection(self, con): 187 | """Return a connection to the pool. 188 | 189 | In this case, the connections need to be put 190 | back into the queue after they have been used. 191 | This is done automatically when the connection is closed 192 | and should never be called explicitly outside of this module. 193 | """ 194 | self._unthreadsafe_add_connection(con) 195 | 196 | # The following functions are used with DB-API 2 modules 197 | # that are threadsafe at the connection level, like psycopg. 198 | # Note: In this case, connections are shared between threads. 
199 | # This may lead to problems if you use transactions. 200 | 201 | def _threadsafe_get_connection(self): 202 | """Get a connection from the pool.""" 203 | with self._lock: 204 | next_con = self._nextConnection 205 | con = PooledDBConnection(self, self._connections[next_con]) 206 | next_con += 1 207 | if next_con >= len(self._connections): 208 | next_con = 0 209 | self._nextConnection = next_con 210 | return con 211 | 212 | def _threadsafe_add_connection(self, con): 213 | """Add a connection to the pool.""" 214 | self._connections.append(con) 215 | 216 | def _threadsafe_return_connection(self, con): 217 | """Return a connection to the pool. 218 | 219 | In this case, the connections always stay in the pool, 220 | so there is no need to do anything here. 221 | """ 222 | # we don't need to do anything here 223 | -------------------------------------------------------------------------------- /dbutils/simple_pooled_pg.py: -------------------------------------------------------------------------------- 1 | """SimplePooledPg - a very simple classic PyGreSQL connection pool. 2 | 3 | Implements a pool of threadsafe cached connections 4 | to a PostgreSQL database which are transparently reused, 5 | using the classic (not DB-API 2 compliant) PyGreSQL pg API. 6 | 7 | This should result in a speedup for persistent applications 8 | such as the "Webware for Python" AppServer. 9 | 10 | For more information on PostgreSQL, see: 11 | https://www.postgresql.org/ 12 | For more information on PyGreSQL, see: 13 | http://www.pygresql.org 14 | For more information on Webware for Python, see: 15 | https://webwareforpython.github.io/w4py/ 16 | 17 | Measures are taken to make the pool of connections threadsafe 18 | regardless of the fact that the PyGreSQL pg module itself is 19 | not threadsafe at the connection level. Connections will never be 20 | shared between threads, so you can safely use transactions. 
21 | 22 | Usage: 23 | 24 | The idea behind SimplePooledPg is that it's completely transparent. 25 | After you have established your connection pool, stating the 26 | number of connections to be cached in the pool and the 27 | connection parameters, e.g. 28 | 29 | from dbutils.simple_pooled_pg import PooledPg 30 | dbpool = PooledPg(5, host=..., database=..., user=..., ...) 31 | 32 | you can demand database connections from that pool, 33 | 34 | db = dbpool.connection() 35 | 36 | and use them just as if they were ordinary PyGreSQL pg API 37 | connections. It's really just a proxy class. 38 | 39 | db.close() will return the connection to the pool, it will not 40 | actually close it. This is so your existing code works nicely. 41 | 42 | Ideas for improvement: 43 | 44 | * Do not create the maximum number of connections on startup 45 | already, but only a certain number and the rest on demand. 46 | * Detect and transparently reset "bad" connections. The PyGreSQL 47 | pg API provides a status attribute and a reset() method for that. 48 | * Connections should have some sort of "maximum usage limit" 49 | after which they should be automatically closed and reopened. 50 | * Prefer or enforce thread affinity for the connections. 51 | 52 | Please note that these and other ideas have been already 53 | implemented in PooledPg, a more sophisticated version 54 | of SimplePooledPg. You might also consider using PersistentPg 55 | instead for thread-affine persistent PyGreSQL connections. 56 | SimplePooledPg may still serve as a very simple reference 57 | and example implementation for developers. 58 | 59 | 60 | Copyright, credits and license: 61 | 62 | * Contributed as supplement for Webware for Python and PyGreSQL 63 | by Christoph Zwerschke in September 2005 64 | * Based on the code of DBPool, contributed to Webware for Python 65 | by Dan Green in December 2000 66 | 67 | Licensed under the MIT license. 
68 | """ 69 | 70 | from pg import DB as PgConnection # noqa: N811 71 | 72 | from . import __version__ 73 | 74 | __all__ = ['PooledPg', 'PooledPgConnection'] 75 | 76 | 77 | class PooledPgConnection: 78 | """A proxy class for pooled PostgreSQL connections. 79 | 80 | You don't normally deal with this class directly, 81 | but use PooledPg to get new connections. 82 | """ 83 | 84 | def __init__(self, pool, con): 85 | """Initialize pooled connection.""" 86 | self._con = con 87 | self._pool = pool 88 | 89 | def close(self): 90 | """Close the pooled connection.""" 91 | # Instead of actually closing the connection, 92 | # return it to the pool so that it can be reused. 93 | if self._con is not None: 94 | self._pool.cache(self._con) 95 | self._con = None 96 | 97 | def __getattr__(self, name): 98 | """Get the attribute with the given name.""" 99 | # All other attributes are the same. 100 | return getattr(self._con, name) 101 | 102 | def __del__(self): 103 | """Delete the pooled connection.""" 104 | self.close() 105 | 106 | 107 | class PooledPg: 108 | """A very simple PostgreSQL connection pool. 109 | 110 | After you have created the connection pool, 111 | you can get connections using getConnection(). 112 | """ 113 | 114 | version = __version__ 115 | 116 | def __init__(self, maxconnections, *args, **kwargs): 117 | """Set up the PostgreSQL connection pool. 118 | 119 | maxconnections: the number of connections cached in the pool 120 | args, kwargs: the parameters that shall be used to establish 121 | the PostgreSQL connections using pg.connect() 122 | """ 123 | # Since there is no connection level safety, we 124 | # build the pool using the synchronized queue class 125 | # that implements all the required locking semantics. 126 | from queue import Queue 127 | self._queue = Queue(maxconnections) 128 | # Establish all database connections (it would be better to 129 | # only establish a part of them now, and the rest on demand). 
130 | for _i in range(maxconnections): 131 | self.cache(PgConnection(*args, **kwargs)) 132 | 133 | def cache(self, con): 134 | """Add or return a connection to the pool.""" 135 | self._queue.put(con) 136 | 137 | def connection(self): 138 | """Get a connection from the pool.""" 139 | return PooledPgConnection(self, self._queue.get()) 140 | -------------------------------------------------------------------------------- /dbutils/steady_pg.py: -------------------------------------------------------------------------------- 1 | """SteadyPg - hardened classic PyGreSQL connections. 2 | 3 | Implements steady connections to a PostgreSQL database 4 | using the classic (not DB-API 2 compliant) PyGreSQL API. 5 | 6 | The connections are transparently reopened when they are 7 | closed or the database connection has been lost or when 8 | they are used more often than an optional usage limit. 9 | Only connections which have been marked as being in a database 10 | transaction with a begin() call will not be silently replaced. 11 | 12 | A typical situation where database connections are lost 13 | is when the database server or an intervening firewall is 14 | shutdown and restarted for maintenance reasons. In such a 15 | case, all database connections would become unusable, even 16 | though the database service may be already available again. 17 | 18 | The "hardened" connections provided by this module will 19 | make the database connections immediately available again. 20 | 21 | This results in a steady PostgreSQL connection that can be used 22 | by PooledPg or PersistentPg to create pooled or persistent 23 | connections to a PostgreSQL database in a threaded environment 24 | such as the application server of "Webware for Python." 25 | Note, however, that the connections themselves are not thread-safe. 
26 | 27 | For more information on PostgreSQL, see: 28 | https://www.postgresql.org/ 29 | For more information on PyGreSQL, see: 30 | http://www.pygresql.org 31 | For more information on Webware for Python, see: 32 | https://webwareforpython.github.io/w4py/ 33 | 34 | 35 | Usage: 36 | 37 | You can use the class SteadyPgConnection in the same way as you 38 | would use the class DB from the classic PyGreSQL API module db. 39 | The only difference is that you may specify a usage limit as the 40 | first parameter when you open a connection (set it to None 41 | if you prefer unlimited usage), and an optional list of commands 42 | that may serve to prepare the session as the second parameter, 43 | and you can specify whether it is allowed to close the connection 44 | (by default this is true). When the connection to the PostgreSQL 45 | database is lost or has been used too often, it will be automatically 46 | reset, without further notice. 47 | 48 | from dbutils.steady_pg import SteadyPgConnection 49 | db = SteadyPgConnection(10000, ["set datestyle to german"], 50 | host=..., dbname=..., user=..., ...) 51 | ... 52 | result = db.query('...') 53 | ... 54 | db.close() 55 | 56 | 57 | Ideas for improvement: 58 | 59 | * Alternatively to the maximum number of uses, 60 | implement a maximum time to live for connections. 61 | * Optionally log usage and loss of connection. 62 | 63 | 64 | Copyright, credits and license: 65 | 66 | * Contributed as supplement for Webware for Python and PyGreSQL 67 | by Christoph Zwerschke in September 2005 68 | 69 | Licensed under the MIT license. 70 | """ 71 | 72 | from contextlib import suppress 73 | 74 | from pg import DB as PgConnection # noqa: N811 75 | 76 | from . 
import __version__ 77 | 78 | 79 | class SteadyPgError(Exception): 80 | """General SteadyPg error.""" 81 | 82 | 83 | class InvalidConnectionError(SteadyPgError): 84 | """Database connection is invalid.""" 85 | 86 | 87 | # deprecated alias names for error classes 88 | InvalidConnection = InvalidConnectionError 89 | 90 | 91 | class SteadyPgConnection: 92 | """Class representing steady connections to a PostgreSQL database. 93 | 94 | Underlying the connection is a classic PyGreSQL pg API database 95 | connection which is reset if the connection is lost or used too often. 96 | Thus the resulting connection is steadier ("tough and self-healing"). 97 | 98 | If you want the connection to be persistent in a threaded environment, 99 | then you should not deal with this class directly, but use either the 100 | PooledPg module or the PersistentPg module to get the connections. 101 | """ 102 | 103 | version = __version__ 104 | 105 | def __init__( 106 | self, maxusage=None, setsession=None, closeable=True, 107 | *args, **kwargs): 108 | """Create a "tough" PostgreSQL connection. 109 | 110 | A hardened version of the DB wrapper class of PyGreSQL. 111 | 112 | maxusage: maximum usage limit for the underlying PyGreSQL connection 113 | (number of uses, 0 or None means unlimited usage) 114 | When this limit is reached, the connection is automatically reset. 115 | setsession: optional list of SQL commands that may serve to prepare 116 | the session, e.g. 
["set datestyle to ...", "set time zone ..."] 117 | closeable: if this is set to false, then closing the connection will 118 | be silently ignored, but by default the connection can be closed 119 | args, kwargs: the parameters that shall be used to establish 120 | the PostgreSQL connections with PyGreSQL using pg.DB() 121 | """ 122 | # basic initialization to make finalizer work 123 | self._con = None 124 | self._closed = True 125 | # proper initialization of the connection 126 | if maxusage is None: 127 | maxusage = 0 128 | if not isinstance(maxusage, int): 129 | raise TypeError("'maxusage' must be an integer value.") 130 | self._maxusage = maxusage 131 | self._setsession_sql = setsession 132 | self._closeable = closeable 133 | self._con = PgConnection(*args, **kwargs) 134 | self._transaction = False 135 | self._closed = False 136 | self._setsession() 137 | self._usage = 0 138 | 139 | def __enter__(self): 140 | """Enter the runtime context. This will start a transaction.""" 141 | self.begin() 142 | return self 143 | 144 | def __exit__(self, *exc): 145 | """Exit the runtime context. This will end the transaction.""" 146 | if exc[0] is None and exc[1] is None and exc[2] is None: 147 | self.commit() 148 | else: 149 | self.rollback() 150 | 151 | def _setsession(self): 152 | """Execute the SQL commands for session preparation.""" 153 | if self._setsession_sql: 154 | for sql in self._setsession_sql: 155 | self._con.query(sql) 156 | 157 | def _close(self): 158 | """Close the tough connection. 159 | 160 | You can always close a tough connection with this method, 161 | and it will not complain if you close it more than once. 162 | """ 163 | if not self._closed: 164 | with suppress(Exception): 165 | self._con.close() 166 | self._transaction = False 167 | self._closed = True 168 | 169 | def close(self): 170 | """Close the tough connection. 171 | 172 | You are allowed to close a tough connection by default, 173 | and it will not complain if you close it more than once. 
174 | 175 | You can disallow closing connections by setting 176 | the closeable parameter to something false. In this case, 177 | closing tough connections will be silently ignored. 178 | """ 179 | if self._closeable: 180 | self._close() 181 | elif self._transaction: 182 | self.reset() 183 | 184 | def reopen(self): 185 | """Reopen the tough connection. 186 | 187 | It will not complain if the connection cannot be reopened. 188 | """ 189 | try: 190 | self._con.reopen() 191 | except Exception: 192 | if self._transaction: 193 | self._transaction = False 194 | with suppress(Exception): 195 | self._con.query('rollback') 196 | else: 197 | self._transaction = False 198 | self._closed = False 199 | self._setsession() 200 | self._usage = 0 201 | 202 | def reset(self): 203 | """Reset the tough connection. 204 | 205 | If a reset is not possible, tries to reopen the connection. 206 | It will not complain if the connection is already closed. 207 | """ 208 | try: 209 | self._con.reset() 210 | self._transaction = False 211 | self._setsession() 212 | self._usage = 0 213 | except Exception: 214 | try: 215 | self.reopen() 216 | except Exception: 217 | with suppress(Exception): 218 | self.rollback() 219 | 220 | def begin(self, sql=None): 221 | """Begin a transaction.""" 222 | self._transaction = True 223 | try: 224 | begin = self._con.begin 225 | except AttributeError: 226 | return self._con.query(sql or 'begin') 227 | else: 228 | # use existing method if available 229 | return begin(sql=sql) if sql else begin() 230 | 231 | def end(self, sql=None): 232 | """Commit the current transaction.""" 233 | self._transaction = False 234 | try: 235 | end = self._con.end 236 | except AttributeError: 237 | return self._con.query(sql or 'end') 238 | else: 239 | return end(sql=sql) if sql else end() 240 | 241 | def commit(self, sql=None): 242 | """Commit the current transaction.""" 243 | self._transaction = False 244 | try: 245 | commit = self._con.commit 246 | except AttributeError: 247 | return 
self._con.query(sql or 'commit') 248 | else: 249 | return commit(sql=sql) if sql else commit() 250 | 251 | def rollback(self, sql=None): 252 | """Rollback the current transaction.""" 253 | self._transaction = False 254 | try: 255 | rollback = self._con.rollback 256 | except AttributeError: 257 | return self._con.query(sql or 'rollback') 258 | else: 259 | return rollback(sql=sql) if sql else rollback() 260 | 261 | def _get_tough_method(self, method): 262 | """Return a "tough" version of a connection class method. 263 | 264 | The tough version checks whether the connection is bad (lost) 265 | and automatically and transparently tries to reset the connection 266 | if this is the case (for instance, the database has been restarted). 267 | """ 268 | def tough_method(*args, **kwargs): 269 | transaction = self._transaction 270 | if not transaction: 271 | try: 272 | # check whether connection status is bad 273 | # or the connection has been used too often 274 | if not self._con.db.status or ( 275 | self._maxusage and self._usage >= self._maxusage): 276 | raise AttributeError 277 | except Exception: 278 | self.reset() # then reset the connection 279 | try: 280 | result = method(*args, **kwargs) # try connection method 281 | except Exception: # error in query 282 | if transaction: # inside a transaction 283 | self._transaction = False 284 | raise # propagate the error 285 | if self._con.db.status: # if it was not a connection problem 286 | raise # then propagate the error 287 | self.reset() # reset the connection 288 | result = method(*args, **kwargs) # and try one more time 289 | self._usage += 1 290 | return result 291 | return tough_method 292 | 293 | def __getattr__(self, name): 294 | """Inherit the members of the standard connection class. 295 | 296 | Some methods are made "tougher" than in the standard version. 
297 | """ 298 | if self._con: 299 | attr = getattr(self._con, name) 300 | if (name in ('query', 'get', 'insert', 'update', 'delete') 301 | or name.startswith('get_')): 302 | attr = self._get_tough_method(attr) 303 | return attr 304 | raise InvalidConnectionError 305 | 306 | def __del__(self): 307 | """Delete the steady connection.""" 308 | # builtins (including Exceptions) might not exist anymore 309 | try: # noqa: SIM105 310 | self._close() # make sure the connection is closed 311 | except: # noqa: E722, S110 312 | pass 313 | -------------------------------------------------------------------------------- /docs/changelog.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | Changelog for DBUtils 8 | 9 | 10 | 11 |
12 |

Changelog for DBUtils

13 | 14 |
15 |

3.1.1

16 |

DBUtils 3.1.1 was released on June 4, 2025.

17 |

Changes:

18 | 21 |
22 |
23 |

3.1.0

24 |

DBUtils 3.1.0 was released on March 17, 2024.

25 |

Changes:

26 | 30 |
31 |
32 |

3.0.3

33 |

DBUtils 3.0.3 was released on April 27, 2023.

34 |

Changes:

35 | 40 |
41 |
42 |

3.0.2

43 |

DBUtils 3.0.2 was released on January 14, 2022.

44 |

The optional iterator protocol on cursors is now supported.

45 |
46 |
47 |

3.0.1

48 |

DBUtils 3.0.1 was released on December 22, 2021.

49 |

It includes InterfaceError to the default list of exceptions 50 | for which the connection failover mechanism is applied. 51 | You can override this with the failures parameter.

52 |
53 |
54 |

3.0.0

55 |

DBUtils 3.0.0 was released on November 26, 2021.

56 |

It is intended to be used with Python versions 3.6 to 3.10.

57 |

Changes:

58 | 61 |
62 |
63 |

2.0.3

64 |

DBUtils 2.0.3 was released on November 26, 2021.

65 |

Changes:

66 | 69 |
70 |
71 |

2.0.2

72 |

DBUtils 2.0.2 was released on June 8, 2021.

73 |

Changes:

74 | 77 |
78 |
79 |

2.0.1

80 |

DBUtils 2.0.1 was released on April 8, 2021.

81 |

Changes:

82 | 85 |
86 |
87 |

2.0

88 |

DBUtils 2.0 was released on September 26, 2020.

89 |

It is intended to be used with Python versions 2.7 and 3.5 to 3.9.

90 |

Changes:

91 | 101 |
102 |
103 |

1.4

104 |

DBUtils 1.4 was released on September 26, 2020.

105 |

It is intended to be used with Python versions 2.7 and 3.5 to 3.9.

106 |

Improvements:

107 | 112 |
113 |
114 |

1.3

115 |

DBUtils 1.3 was released on March 3, 2018.

116 |

It is intended to be used with Python versions 2.6, 2.7 and 3.4 to 3.7.

117 |

Improvements:

118 | 121 |
122 |
123 |

1.2

124 |

DBUtils 1.2 was released on February 5, 2017.

125 |

It is intended to be used with Python versions 2.6, 2.7 and 3.0 to 3.6.

126 |
127 |
128 |

1.1.1

129 |

DBUtils 1.1.1 was released on February 4, 2017.

130 |

It is intended to be used with Python versions 2.3 to 2.7.

131 |

Improvements:

132 | 136 |

Bugfixes:

137 | 140 |
141 |
142 |

1.1

143 |

DBUtils 1.1 was released on August 14, 2011.

144 |

Improvements:

145 | 163 |

Bugfixes:

164 | 169 |
170 |
171 |

1.0

172 |

DBUtils 1.0 was released on November 29, 2008.

173 |

It is intended to be used with Python versions 2.2 to 2.6.

174 |

Changes:

175 | 193 |

Bugfixes and improvements:

194 | 202 |
203 |
204 |

0.9.4

205 |

DBUtils 0.9.4 was released on July 7, 2007.

206 |

This release fixes a problem in the destructor code and has been supplemented 207 | with a German User's Guide.

208 |

Again, please note that the dbapi parameter has been renamed to creator 209 | in the last release, since you can now pass custom creator functions 210 | for database connections instead of DB-API 2 modules.

211 |
212 |
213 |

0.9.3

214 |

DBUtils 0.9.3 was released on May 21, 2007.

215 |

Changes:

216 | 226 |
227 |
228 |

0.9.2

229 |

DBUtils 0.9.2 was released on September 22, 2006.

230 |

It is intended to be used with Python versions 2.2 to 2.5.

231 |

Changes:

232 | 236 |
237 |
238 |

0.9.1

239 |

DBUtils 0.9.1 was released on May 8, 2006.

240 |

It is intended to be used with Python versions 2.2 to 2.4.

241 |

Changes:

242 | 250 |
251 |
252 |

0.8.1 - 2005-09-13

253 |

DBUtils 0.8.1 was released on September 13, 2005.

254 |

It is intended to be used with Python versions 2.0 to 2.4.

255 |

This is the first public release of DBUtils.

256 |
257 |
258 | 259 | 260 | -------------------------------------------------------------------------------- /docs/changelog.rst: -------------------------------------------------------------------------------- 1 | Changelog for DBUtils 2 | +++++++++++++++++++++ 3 | 4 | 3.1.1 5 | ===== 6 | 7 | DBUtils 3.1.1 was released on June 4, 2025. 8 | 9 | Changes: 10 | 11 | * Support Python version 3.13. 12 | 13 | 3.1.0 14 | ===== 15 | 16 | DBUtils 3.1.0 was released on March 17, 2024. 17 | 18 | Changes: 19 | 20 | * Support Python version 3.12, cease support for Python 3.6. 21 | * Various small internal improvements and modernizations. 22 | 23 | 3.0.3 24 | ===== 25 | 26 | DBUtils 3.0.3 was released on April 27, 2023. 27 | 28 | Changes: 29 | 30 | * Support Python version 3.11. 31 | * Improve determination of DB API module if creator is specified. 32 | * Minor fixes and section an advanced usage in docs. 33 | 34 | 3.0.2 35 | ===== 36 | 37 | DBUtils 3.0.2 was released on January 14, 2022. 38 | 39 | The optional iterator protocol on cursors is now supported. 40 | 41 | 3.0.1 42 | ===== 43 | 44 | DBUtils 3.0.1 was released on December 22, 2021. 45 | 46 | It includes ``InterfaceError`` to the default list of exceptions 47 | for which the connection failover mechanism is applied. 48 | You can override this with the ``failures`` parameter. 49 | 50 | 3.0.0 51 | ===== 52 | 53 | DBUtils 3.0.0 was released on November 26, 2021. 54 | 55 | It is intended to be used with Python versions 3.6 to 3.10. 56 | 57 | Changes: 58 | 59 | * Cease support for Python 2 and 3.5, minor optimizations. 60 | 61 | 2.0.3 62 | ===== 63 | 64 | DBUtils 2.0.3 was released on November 26, 2021. 65 | 66 | Changes: 67 | 68 | * Support Python version 3.10. 69 | 70 | 2.0.2 71 | ===== 72 | 73 | DBUtils 2.0.2 was released on June 8, 2021. 74 | 75 | Changes: 76 | 77 | * Allow using context managers for pooled connections. 78 | 79 | 2.0.1 80 | ===== 81 | 82 | DBUtils 2.0.1 was released on April 8, 2021. 
83 | 84 | Changes: 85 | 86 | * Avoid "name Exception is not defined" when exiting. 87 | 88 | 2.0 89 | === 90 | 91 | DBUtils 2.0 was released on September 26, 2020. 92 | 93 | It is intended to be used with Python versions 2.7 and 3.5 to 3.9. 94 | 95 | Changes: 96 | 97 | * DBUtils does not act as a Webware plugin anymore, it is now just an ordinary 98 | Python package (of course it could be used as such also before). 99 | * The Webware ``Examples`` folder has been removed. 100 | * Folders, packages and modules have been renamed to lower-case. 101 | Particularly, you need to import ``dbutils`` instead of ``DBUtils`` now. 102 | * The internal naming conventions have also been changed to comply with PEP8. 103 | * The documentation has been adapted to reflect the changes in this version. 104 | * This changelog has been compiled from the former release notes. 105 | 106 | 1.4 107 | === 108 | 109 | DBUtils 1.4 was released on September 26, 2020. 110 | 111 | It is intended to be used with Python versions 2.7 and 3.5 to 3.9. 112 | 113 | Improvements: 114 | 115 | * The ``SteadyDB`` and ``SteadyPg`` classes only reconnect after the 116 | ``maxusage`` limit has been reached when the connection is not currently 117 | inside a transaction. 118 | 119 | 1.3 120 | === 121 | 122 | DBUtils 1.3 was released on March 3, 2018. 123 | 124 | It is intended to be used with Python versions 2.6, 2.7 and 3.4 to 3.7. 125 | 126 | Improvements: 127 | 128 | * This version now supports context handlers for connections and cursors. 129 | 130 | 1.2 131 | === 132 | 133 | DBUtils 1.2 was released on February 5, 2017. 134 | 135 | It is intended to be used with Python versions 2.6, 2.7 and 3.0 to 3.6. 136 | 137 | 1.1.1 138 | ===== 139 | 140 | DBUtils 1.1.1 was released on February 4, 2017. 141 | 142 | It is intended to be used with Python versions 2.3 to 2.7. 143 | 144 | Improvements: 145 | 146 | * Reopen ``SteadyDB`` connections when commit or rollback fails 147 | (suggested by Ben Hoyt). 
148 | 149 | Bugfixes: 150 | 151 | * Fixed a problem when running under Jython (reported by Vitaly Kruglikov). 152 | 153 | 1.1 154 | === 155 | 156 | DBUtils 1.1 was released on August 14, 2011. 157 | 158 | Improvements: 159 | 160 | * The transparent reopening of connections is actually an undesired behavior 161 | if it happens during database transactions. In these cases, the transaction 162 | should fail and the error be reported back to the application instead of the 163 | rest of the transaction being executed in a new connection and therefore in 164 | a new transaction. Therefore DBUtils now allows suspending the transparent 165 | reopening during transactions. All you need to do is indicate the beginning 166 | of a transaction by calling the ``begin()`` method of the connection. 167 | DBUtils makes sure that this method always exists, even if the database 168 | driver does not support it. 169 | * If the database driver supports a ``ping()`` method, then DBUtils can use it 170 | to check whether connections are alive instead of just trying to use the 171 | connection and reestablishing it in case it was dead. Since these checks are 172 | done at the expense of some performance, you have exact control when these 173 | are executed via the new ``ping`` parameter. 174 | * ``PooledDB`` has got another new parameter ``reset`` for controlling how 175 | connections are reset before being put back into the pool. 176 | 177 | Bugfixes: 178 | 179 | * Fixed propagation of error messages when the connection was lost. 180 | * Fixed an issue with the ``setoutputsize()`` cursor method. 181 | * Fixed some minor issues with the ``DBUtilsExample`` for Webware. 182 | 183 | 184 | 1.0 185 | === 186 | 187 | DBUtils 1.0 was released on November 29, 2008. 188 | 189 | It is intended to be used with Python versions 2.2 to 2.6. 
190 | 191 | Changes: 192 | 193 | * Added a ``failures`` parameter for configuring the exception classes for 194 | which the failover mechanisms is applied (as suggested by Matthew Harriger). 195 | * Added a ``closeable`` parameter for configuring whether connections can be 196 | closed (otherwise closing connections will be silently ignored). 197 | * It is now possible to override defaults via the ``creator.dbapi`` and 198 | ``creator.threadsafety`` attributes. 199 | * Added an alias method ``dedicated_connection`` as a shorthand for 200 | ``connection(shareable=False)``. 201 | * Added a version attribute to all exported classes. 202 | * Where the value ``0`` has the meaning "unlimited", parameters can now be also 203 | set to the value ``None`` instead. 204 | * It turned out that ``threading.local`` does not work properly with 205 | ``mod_wsgi``, so we use the Python implementation for thread-local data 206 | even when a faster ``threading.local`` implementation is available. 207 | A new parameter ``threadlocal`` allows you to pass an arbitrary class 208 | such as ``threading.local`` if you know it works in your environment. 209 | 210 | Bugfixes and improvements: 211 | 212 | * In some cases, when instance initialization failed or referenced objects 213 | were already destroyed, finalizers could throw exceptions or create infinite 214 | recursion (problem reported by Gregory Pinero and Jehiah Czebotar). 215 | * DBUtils now tries harder to find the underlying DB-API 2 module if only a 216 | connection creator function is specified. This had not worked before with 217 | the MySQLdb module (problem reported by Gregory Pinero). 218 | 219 | 0.9.4 220 | ===== 221 | 222 | DBUtils 0.9.4 was released on July 7, 2007. 223 | 224 | This release fixes a problem in the destructor code and has been supplemented 225 | with a German User's Guide. 
226 | 227 | Again, please note that the ``dbapi`` parameter has been renamed to ``creator`` 228 | in the last release, since you can now pass custom creator functions 229 | for database connections instead of DB-API 2 modules. 230 | 231 | 0.9.3 232 | ===== 233 | 234 | DBUtils 0.9.3 was released on May 21, 2007. 235 | 236 | Changes: 237 | 238 | * Support custom creator functions for database connections. 239 | These can now be used as the first parameter instead of an DB-API module 240 | (suggested by Ezio Vernacotola). 241 | * Added destructor for steady connections. 242 | * Use setuptools_ if available. 243 | * Some code cleanup. 244 | * Some fixes in the documentation. 245 | Added Chinese translation of the User's Guide, kindly contributed by gashero. 246 | 247 | .. _setuptools: https://github.com/pypa/setuptools 248 | 249 | 0.9.2 250 | ===== 251 | 252 | DBUtils 0.9.2 was released on September 22, 2006. 253 | 254 | It is intended to be used with Python versions 2.2 to 2.5. 255 | 256 | Changes: 257 | 258 | * Renamed ``SolidDB`` to ``SteadyDB`` to avoid confusion with the "solidDB" 259 | storage engine. Accordingly, renamed ``SolidPg`` to ``SteadyPg``. 260 | 261 | 0.9.1 262 | ===== 263 | 264 | DBUtils 0.9.1 was released on May 8, 2006. 265 | 266 | It is intended to be used with Python versions 2.2 to 2.4. 267 | 268 | Changes: 269 | 270 | * Added ``_closeable`` attribute and made persistent connections not closeable 271 | by default. This allows ``PersistentDB`` to be used in the same way as you 272 | would use ``PooledDB``. 273 | * Allowed arguments in the DB-API 2 ``cursor()`` method. MySQLdb is using this 274 | to specify cursor classes. (Suggested by Michael Palmer.) 275 | * Improved the documentation and added a User's Guide. 276 | 277 | 0.8.1 - 2005-09-13 278 | ================== 279 | 280 | DBUtils 0.8.1 was released on September 13, 2005. 281 | 282 | It is intended to be used with Python versions 2.0 to 2.4. 
283 | 284 | This is the first public release of DBUtils. 285 | -------------------------------------------------------------------------------- /docs/dependencies_db.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WebwareForPython/DBUtils/52db5032c9fb31341b34079f6ebacd11738a00d1/docs/dependencies_db.png -------------------------------------------------------------------------------- /docs/dependencies_pg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WebwareForPython/DBUtils/52db5032c9fb31341b34079f6ebacd11738a00d1/docs/dependencies_pg.png -------------------------------------------------------------------------------- /docs/doc.css: -------------------------------------------------------------------------------- 1 | /* 2 | Style sheet for DBUtils documentation pages. 3 | */ 4 | 5 | /* First import default style for pages created with Docutils: */ 6 | 7 | @import url(docutils.css); 8 | 9 | /* Customization for DBUtils goes here: */ 10 | 11 | p { 12 | margin-top: 6pt; 13 | margin-bottom: 6pt; 14 | text-align: justify; 15 | } 16 | li { 17 | margin-bottom: 6pt; 18 | } 19 | h1, h2 { 20 | color: #002352; 21 | } 22 | h3, h4 { 23 | color: #002352; 24 | } 25 | h1 { 26 | font-size: 18pt; 27 | } 28 | h2 { 29 | font-size: 16pt; 30 | } 31 | h3 { 32 | font-size: 14pt; 33 | } 34 | h1.title { 35 | padding: 4pt; 36 | margin-bottom: 12pt; 37 | text-align: center; 38 | border-bottom: 1pt solid #025; 39 | padding-bottom: 8pt; 40 | } 41 | .contents ul { 42 | list-style: none; 43 | margin-bottom: 24pt; 44 | padding-left: 0em; 45 | margin-left: 2em; 46 | } 47 | .contents ul li { 48 | font-size: 14pt; 49 | margin-bottom: 2pt; 50 | } 51 | .contents ul ul { 52 | list-style-type: none; 53 | margin-top: 1pt; 54 | margin-bottom: 1pt; 55 | padding-left: 0em; 56 | margin-left: 1.5em; 57 | } 58 | .contents ul ul li { 59 | font-size: 13pt; 60 | 
margin-bottom: 1pt; 61 | } 62 | .contents > p.topic-title { 63 | font-size: 24pt; 64 | } 65 | .warning { 66 | color: brown; 67 | } 68 | .admonition-title { 69 | background-color: #F5F5DC; 70 | padding: 1pt 3pt; 71 | } 72 | .admonition-title::before { 73 | content: "⚠"; 74 | margin-right: .5em; 75 | } 76 | -------------------------------------------------------------------------------- /docs/docutils.css: -------------------------------------------------------------------------------- 1 | /* CSS 3 style sheet for the output of Docutils 0.21 HTML writer. */ 2 | div.dedication,nav.contents{padding:0;font-style:italic}h1.title,table tr{text-align:left}.footnote,pre.code,pre.doctest-block,pre.literal-block,pre.math{overflow:auto}body{font-family:Georgia,serif;background-color:#fafaf6;font-size:1.2em;line-height:1.4;margin:auto}main{counter-reset:figure table}footer,header,main{padding:.5em 5%;background-color:#fefef8;max-width:100rem}.citation,.footnote,.topic,div.line-block,dl,h1,h2,h3,h4,h5,h6,li,ol,p,table,ul{margin-top:.5em;margin-bottom:.5em}.topic,blockquote,figure{margin:.5em 2%;padding-left:1em}dl>dd{margin-bottom:.5em}p:first-child{margin-top:0}p:last-child{margin-bottom:0}div.line-block div.line-block,dl.option-list,figure>img,pre.code,pre.doctest-block,pre.literal-block,pre.math{margin-left:calc(2% + 1em)}footer,header{font-size:smaller}h2,h3,h4,p.section-subtitle,p.sidebar-subtitle,p.sidebar-title,p.subtitle,p.topic-title{font-weight:400;font-style:italic;text-align:left}.sectnum{font-style:normal}h1.title{margin-top:2.4em;margin-bottom:2em;font-size:2.4em}h1+p.subtitle{margin-top:-2em;margin-bottom:2em;font-size:2em}section{margin-top:2em}.contents>p.topic-title,h2{font-size:2.2em}h2+p.section-subtitle{font-size:1.6em}h3{font-size:1.2em}h3+p.section-subtitle{font-size:1.1em}figure.marginal>figcaption,h4,p.section-subtitle{font-size:1em}div.dedication{margin-left:0;font-size:1.2em}div.dedication p.topic-title{display:none}.topic p.attribution,blockquote 
p.attribution{text-align:right}ul.auto-toc>li>p{padding-left:1em;text-indent:-1em}nav.contents ul{padding-left:1em}hr{border:0;border-top:1px solid #ccc;margin:1em 10%}ol,ul{padding-left:1.1em}dd{margin-left:1.5em}dd>dl:first-child,dd>ol:first-child,dd>ul:first-child{clear:left}dl.docinfo>dd,dl.field-list>dd,dl.option-list>dd{margin-left:4em}dl.field-list.narrow>dd{margin-left:3em}dl.field-list.run-in>dd p{display:block}dl.description>dt,dl.docinfo>dt,dl.field-list>dt{font-weight:400;font-style:italic}dl.description>dt{clear:left;float:left;margin:0;padding:0 .5em 0 0}dl.description>dd:after{display:block;content:"";clear:both}.citation-list,.footnote-list{display:contents}.citation{padding-left:1.5em}.citation .label{margin-left:-1.5em}figure{display:flex;flex-wrap:wrap;align-items:flex-start}figure.fullwidth>img,figure>img{margin:0 .5em .5em 0;padding:0}figcaption{font-size:.8em}.fullwidth>figcaption{font-size:inherit}figure.numbered>figcaption>p:before{counter-increment:figure;content:"Figure " counter(figure) ": "}table.booktabs{border-top:2px solid;border-bottom:2px solid}table.booktabs *{border:0}table.booktabs th{border-bottom:thin solid}table.numbered>caption:before{counter-increment:table;content:"Table " counter(table) ": "}.admonition,.system-message{border-style:solid;border-color:silver;border-width:thin;margin:1em 0;padding:.5em}.attention p.admonition-title,.caution p.admonition-title,.danger p.admonition-title,.warning p.admonition-title,div.error{color:maroon}code .comment,pre.code .comment{color:#5c6576}code .keyword,pre.code .keyword{color:#3b0d06;font-weight:700}code .literal.string,pre.code .literal.string{color:#0c5404}code .name.builtin,pre.code .name.builtin{color:#352b84}code .deleted,pre.code .deleted{background-color:#deb0a1}code .inserted,pre.code .inserted{background-color:#a3d289}.sans{font-family:"Gill Sans","Gill Sans MT",Calibri,"Lucida Sans","Noto 
Sans",sans-serif;letter-spacing:.02em}a{color:inherit}a:link,a:link:hover{text-decoration:underline}.backrefs a:link,.contents a:link,a.citation-reference:link,a.image-reference:link,a.toc-backref:link,a[href^="#system-message"],a[role=doc-backlink]:link,a[role=doc-noteref]:link{text-decoration:none}.contents>p.topic-title,.fullwidth,footer,h1,h2,h3,header,hr.docutils{clear:both}div.align-left,figure.align-left,img.align-left,svg.align-left,table.align-left,video.align-left{margin-left:0;padding-left:0;padding-right:.5em;clear:left;float:left}figure.align-left>img{margin-left:0;padding-left:0}div.align-right,img.align-right,svg.align-right,video.align-right{padding-left:.5em;clear:right;float:right}figure.align-right{clear:right;float:right}figure.align-right>img{justify-self:right;padding:0}table.align-right{margin-right:2.5%}figure.align-center{align-content:center;justify-content:center}figure.align-center>img{padding-left:0;justify-self:center}.admonition.marginal,.marginal,.topic.marginal,aside.sidebar{background-color:#efefea;box-sizing:border-box;margin-left:2%;margin-right:0;padding:.5em;font-size:.8em}aside.sidebar{background-color:inherit}.footnote{font-size:smaller}@media (min-width:35em){footer,header,main{padding:.5em calc(15% - 3rem);line-height:1.6}.admonition.marginal,.marginal,.topic.marginal,aside.sidebar{max-width:45%;float:right;clear:right}dl.docinfo>dd,dl.field-list>dd,dl.option-list>dd{margin-left:6em}}@media (min-width:65em){main,section{display:grid;grid-template-columns:[content] minmax(0,6fr) [margin] 3fr [end];grid-column-gap:calc(3em + 1%)}main>section,section>section{grid-column:1/end}footer,header,main{padding-right:5%}section>figure{display:contents}.citation.align-left,.footnote.align-left,figure>img,main>*,section>*{grid-column:content}.citation.align-left{font-size:1em;padding-left:1.5em}.citation.align-left .label{margin-left:-1.5em}figure>img{margin:.5em 
2%;padding-left:1em}.admonition.marginal,.citation,.footnote,.marginal,.topic.marginal,aside.sidebar,figcaption{grid-column:margin;width:auto;max-width:55em;margin:.5em 0;border:none;padding:0;font-size:.8em;text-align:initial;background-color:inherit}.admonition.marginal{padding:.5em}figure.marginal{display:block;margin:.5em 0}.citation,.footnote{padding-left:0}.citation .label,.footnote .label{margin-left:0}.fullwidth,.fullwidth figcaption,.fullwidth img,aside.system-message,div.abstract,div.dedication,dl.docinfo,h1.title,nav.contents,p.subtitle,pre{grid-column:content/end;margin-right:calc(10% - 3rem);max-width:55em}}@media (min-width:100em){footer,header,main{padding-left:30%}main>nav.contents{position:fixed;top:0;left:0;box-sizing:border-box;width:25%;height:100vh;margin:0;background-color:#fafaf6;padding:5.5em 2%;overflow:auto}main>nav.contents>*{padding-left:0}} -------------------------------------------------------------------------------- /docs/main.rst: -------------------------------------------------------------------------------- 1 | DBUtils User's Guide 2 | ++++++++++++++++++++ 3 | 4 | :Version: 3.1.1 5 | :Translations: English | German_ 6 | 7 | .. _German: main.de.html 8 | 9 | .. contents:: Contents 10 | 11 | 12 | Synopsis 13 | ======== 14 | 15 | DBUtils_ is a suite of Python modules allowing to connect in a safe and 16 | efficient way between a threaded Python_ application and a database. 17 | 18 | DBUtils has been originally written particularly for `Webware for Python`_ as 19 | the application and PyGreSQL_ as the adapter to a PostgreSQL_ database, but it 20 | can meanwhile be used for any other Python application and `DB-API 2`_ 21 | conformant database adapter. 22 | 23 | 24 | Modules 25 | ======= 26 | 27 | The DBUtils suite is realized as a Python package containing 28 | two subsets of modules, one for use with arbitrary DB-API 2 modules, 29 | the other one for use with the classic PyGreSQL module. 
30 | 31 | +------------------+------------------------------------------+ 32 | | Universal DB-API 2 variant | 33 | +==================+==========================================+ 34 | | steady_db | Hardened DB-API 2 connections | 35 | +------------------+------------------------------------------+ 36 | | pooled_db | Pooling for DB-API 2 connections | 37 | +------------------+------------------------------------------+ 38 | | persistent_db | Persistent DB-API 2 connections | 39 | +------------------+------------------------------------------+ 40 | | simple_pooled_db | Simple pooling for DB-API 2 | 41 | +------------------+------------------------------------------+ 42 | 43 | +------------------+------------------------------------------+ 44 | | Classic PyGreSQL variant | 45 | +==================+==========================================+ 46 | | steady_pg | Hardened classic PyGreSQL connections | 47 | +------------------+------------------------------------------+ 48 | | pooled_pg | Pooling for classic PyGreSQL connections | 49 | +------------------+------------------------------------------+ 50 | | persistent_pg | Persistent classic PyGreSQL connections | 51 | +------------------+------------------------------------------+ 52 | | simple_pooled_pg | Simple pooling for classic PyGreSQL | 53 | +------------------+------------------------------------------+ 54 | 55 | The dependencies of the modules in the universal DB-API 2 variant 56 | are as indicated in the following diagram: 57 | 58 | .. image:: dependencies_db.png 59 | 60 | The dependencies of the modules in the classic PyGreSQL variant 61 | are similar: 62 | 63 | .. 
image:: dependencies_pg.png 64 | 65 | 66 | Download 67 | ======== 68 | 69 | You can download the actual version of DBUtils from 70 | the Python Package Index at:: 71 | 72 | https://pypi.python.org/pypi/DBUtils 73 | 74 | The source code repository can be found here on GitHub:: 75 | 76 | https://github.com/WebwareForPython/DBUtils 77 | 78 | 79 | Installation 80 | ============ 81 | 82 | Installation 83 | ------------ 84 | The package can be installed in the usual way:: 85 | 86 | python setup.py install 87 | 88 | It is even easier to download and install the package in one go using `pip`_:: 89 | 90 | pip install DBUtils 91 | 92 | .. _pip: https://pip.pypa.io/ 93 | 94 | 95 | Requirements 96 | ============ 97 | 98 | DBUtils supports Python_ versions 3.7 to 3.13. 99 | 100 | The modules in the classic PyGreSQL variant need PyGreSQL_ version 4.0 101 | or above, while the modules in the universal DB-API 2 variant run with 102 | any Python `DB-API 2`_ compliant database interface module. 103 | 104 | 105 | Functionality 106 | ============= 107 | 108 | This section will refer to the names in the DB-API 2 variant only, 109 | but the same applies to the classic PyGreSQL variant. 110 | 111 | DBUtils installs itself as a package ``dbutils`` containing all the modules 112 | that are described in this guide. Each of these modules contains essentially 113 | one class with an analogous name that provides the corresponding functionality. 114 | For instance, the module ``dbutils.pooled_db`` contains the class ``PooledDB``. 115 | 116 | SimplePooledDB (simple_pooled_db) 117 | --------------------------------- 118 | The class ``SimplePooledDB`` in ``dbutils.simple_pooled_db`` is a very basic 119 | reference implementation of a pooled database connection. It is much less 120 | sophisticated than the regular ``pooled_db`` module and is particularly lacking 121 | the failover functionality. 
``dbutils.simple_pooled_db`` is essentially the 122 | same as the ``MiscUtils.DBPool`` module that is part of Webware for Python. 123 | You should consider it a demonstration of concept rather than something 124 | that should go into production. 125 | 126 | SteadyDBConnection (steady_db) 127 | ------------------------------ 128 | The class ``SteadyDBConnection`` in the module ``dbutils.steady_db`` implements 129 | "hardened" connections to a database, based on ordinary connections made by any 130 | DB-API 2 database module. A "hardened" connection will transparently reopen 131 | upon access when it has been closed or the database connection has been lost 132 | or when it is used more often than an optional usage limit. 133 | 134 | A typical example where this is needed is when the database has been 135 | restarted while your application is still running and has open connections 136 | to the database, or when your application accesses a remote database in 137 | a network that is separated by a firewall and the firewall has been 138 | restarted and lost its state. 139 | 140 | Usually, you will not use the ``steady_db`` module directly; it merely serves 141 | as a basis for the next two modules, ``persistent_db`` and ``Pooled_db``. 142 | 143 | PersistentDB (persistent_db) 144 | ---------------------------- 145 | The class ``PersistentDB`` in the module ``dbutils.persistent_db`` implements 146 | steady, thread-affine, persistent connections to a database, using any DB-API 2 147 | database module. "Thread-affine" and "persistent" means that the individual 148 | database connections stay assigned to the respective threads and will not be 149 | closed during the lifetime of the threads. 150 | 151 | The following diagram shows the connection layers involved when you 152 | are using ``persistent_db`` connections: 153 | 154 | .. 
image:: persistent.png 155 | 156 | Whenever a thread opens a database connection for the first time, a new 157 | connection to the database will be opened that will be used from now on 158 | for this specific thread. When the thread closes the database connection, 159 | it will still be kept open so that the next time when a connection is 160 | requested by the same thread, this already opened connection can be used. 161 | The connection will be closed automatically when the thread dies. 162 | 163 | In short: ``persistent_db`` tries to recycle database connections to 164 | increase the overall database access performance of your threaded application, 165 | but it makes sure that connections are never shared between threads. 166 | 167 | Therefore, ``persistent_db`` will work perfectly even if the underlying 168 | DB-API module is not thread-safe at the connection level, and it will 169 | avoid problems when other threads change the database session or perform 170 | transactions spreading over more than one SQL command. 171 | 172 | PooledDB (pooled_db) 173 | -------------------- 174 | The class ``PooledDB`` in the module ``dbutils.pooled_db`` implements a pool 175 | of steady, thread-safe cached connections to a database which are transparently 176 | reused, using any DB-API 2 database module. 177 | 178 | The following diagram shows the connection layers involved when you 179 | are using ``pooled_db`` connections: 180 | 181 | .. image:: pooled.png 182 | 183 | As the diagram indicates, ``pooled_db`` can share opened database connections 184 | between different threads. This will happen by default if you set up the 185 | connection pool with a positive value of ``maxshared`` and the underlying 186 | DB-API 2 is thread-safe at the connection level, but you can also request 187 | dedicated database connections that will not be shared between threads. 
188 | Besides the pool of shared connections, you can also set up a pool of 189 | at least ``mincached`` and at the most ``maxcached`` idle connections that 190 | will be used whenever a thread is requesting a dedicated database connection 191 | or the pool of shared connections is not yet full. When a thread closes a 192 | connection that is not shared anymore, it is returned back to the pool of 193 | idle connections so that it can be recycled again. 194 | 195 | If the underlying DB-API module is not thread-safe, thread locks will be 196 | used to ensure that the ``pooled_db`` connections are thread-safe. So you 197 | don't need to worry about that, but you should be careful to use dedicated 198 | connections whenever you change the database session or perform transactions 199 | spreading over more than one SQL command. 200 | 201 | Which one to use? 202 | ----------------- 203 | Both ``persistent_db`` and ``pooled_db`` serve the same purpose to improve 204 | the database access performance by recycling database connections, while 205 | preserving stability even if database connection will be disrupted. 206 | 207 | So which of these two modules should you use? From the above explanations 208 | it is clear that ``persistent_db`` will make more sense if your application 209 | keeps a constant number of threads which frequently use the database. In 210 | this case, you will always have the same amount of open database connections. 211 | However, if your application frequently starts and ends threads, then it 212 | will be better to use ``pooled_db``. The latter will also allow more 213 | fine-tuning, particularly if you are using a thread-safe DB-API 2 module. 214 | 215 | Since the interface of both modules is similar, you can easily switch from 216 | one to the other and check which one will suit better. 
217 | 218 | 219 | Usage 220 | ===== 221 | 222 | The usage of all the modules is similar, but there are also some differences 223 | in the initialization between the "Pooled" and "Persistent" variants and also 224 | between the universal DB-API 2 and the classic PyGreSQL variants. 225 | 226 | We will cover here only the ``persistent_db`` module and the more complex 227 | ``pooled_db`` module. For the details of the other modules, have a look 228 | at their module docstrings. Using the Python interpreter console, you can 229 | display the documentation of the ``pooled_db`` module as follows (this 230 | works analogously for the other modules):: 231 | 232 | help(pooled_db) 233 | 234 | PersistentDB (persistent_db) 235 | ---------------------------- 236 | In order to make use of the ``persistent_db`` module, you first need to set 237 | up a generator for your kind of database connections by creating an instance 238 | of ``persistent_db``, passing the following parameters: 239 | 240 | * ``creator``: either an arbitrary function returning new DB-API 2 241 | connection objects or a DB-API 2 compliant database module 242 | 243 | * ``maxusage``: the maximum number of reuses of a single connection 244 | (the default of ``0`` or ``None`` means unlimited reuse) 245 | 246 | Whenever the limit is reached, the connection will be reset. 247 | 248 | * ``setsession``: an optional list of SQL commands that may serve to 249 | prepare the session, e.g. 
``["set datestyle to german", ...]`` 250 | 251 | * ``failures``: an optional exception class or a tuple of exception classes 252 | for which the connection failover mechanism shall be applied, 253 | if the default (OperationalError, InterfaceError, InternalError) 254 | is not adequate for the used database module 255 | 256 | * ``ping``: an optional flag controlling when connections are checked 257 | with the ``ping()`` method if such a method is available 258 | (``0`` = ``None`` = never, ``1`` = default = whenever it is requested, 259 | ``2`` = when a cursor is created, ``4`` = when a query is executed, 260 | ``7`` = always, and all other bit combinations of these values) 261 | 262 | * ``closeable``: if this is set to true, then closing connections will 263 | be allowed, but by default this will be silently ignored 264 | 265 | * ``threadlocal``: an optional class for representing thread-local data 266 | that will be used instead of our Python implementation 267 | (threading.local is faster, but cannot be used in all cases) 268 | 269 | * The creator function or the connect function of the DB-API 2 compliant 270 | database module specified as the creator will receive any additional 271 | parameters such as the host, database, user, password etc. You may 272 | choose some or all of these parameters in your own creator function, 273 | allowing for sophisticated failover and load-balancing mechanisms. 
274 | 275 | For instance, if you are using ``pgdb`` as your DB-API 2 database module and 276 | want every connection to your local database ``mydb`` to be reused 1000 times:: 277 | 278 | import pgdb # import used DB-API 2 module 279 | from dbutils.persistent_db import PersistentDB 280 | persist = PersistentDB(pgdb, 1000, database='mydb') 281 | 282 | Once you have set up the generator with these parameters, you can request 283 | database connections of that kind:: 284 | 285 | db = persist.connection() 286 | 287 | You can use these connections just as if they were ordinary DB-API 2 288 | connections. Actually what you get is the hardened ``steady_db`` version of 289 | the underlying DB-API 2 connection. 290 | 291 | Closing a persistent connection with ``db.close()`` will be silently 292 | ignored since it would be reopened at the next usage anyway and 293 | contrary to the intent of having persistent connections. Instead, 294 | the connection will be automatically closed when the thread dies. 295 | You can change this behavior by setting the ``closeable`` parameter. 296 | 297 | .. warning:: 298 | Note that you need to explicitly start transactions by calling the 299 | ``begin()`` method. This ensures that the transparent reopening will be 300 | suspended until the end of the transaction, and that the connection 301 | will be rolled back before being reused by the same thread. 302 | 303 | By setting the ``threadlocal`` parameter to ``threading.local``, getting 304 | connections may become a bit faster, but this may not work in all 305 | environments (for instance, ``mod_wsgi`` is known to cause problems 306 | since it clears the ``threading.local`` data between requests). 
307 | 308 | PooledDB (pooled_db) 309 | -------------------- 310 | In order to make use of the ``pooled_db`` module, you first need to set up the 311 | database connection pool by creating an instance of ``pooled_db``, passing the 312 | following parameters: 313 | 314 | * ``creator``: either an arbitrary function returning new DB-API 2 315 | connection objects or a DB-API 2 compliant database module 316 | 317 | * ``mincached`` : the initial number of idle connections in the pool 318 | (the default of ``0`` means no connections are made at startup) 319 | 320 | * ``maxcached``: the maximum number of idle connections in the pool 321 | (the default value of ``0`` or ``None`` means unlimited pool size) 322 | 323 | * ``maxshared``: maximum number of shared connections allowed 324 | (the default value of ``0`` or ``None`` means all connections are dedicated) 325 | 326 | When this maximum number is reached, connections are shared if they 327 | have been requested as shareable. 328 | 329 | * ``maxconnections``: maximum number of connections generally allowed 330 | (the default value of ``0`` or ``None`` means any number of connections) 331 | 332 | * ``blocking``: determines behavior when exceeding the maximum 333 | 334 | If this is set to true, block and wait until the number of 335 | connections decreases, but by default an error will be reported. 336 | 337 | * ``maxusage``: maximum number of reuses of a single connection 338 | (the default of ``0`` or ``None`` means unlimited reuse) 339 | 340 | When this maximum usage number of the connection is reached, 341 | the connection is automatically reset (closed and reopened). 342 | 343 | * ``setsession``: an optional list of SQL commands that may serve to 344 | prepare the session, e.g. 
``["set datestyle to german", ...]`` 345 | 346 | * ``reset``: how connections should be reset when returned to the pool 347 | (``False`` or ``None`` to rollback transactions started with ``begin()``, 348 | the default value ``True`` always issues a rollback for safety's sake) 349 | 350 | * ``failures``: an optional exception class or a tuple of exception classes 351 | for which the connection failover mechanism shall be applied, 352 | if the default (OperationalError, InterfaceError, InternalError) 353 | is not adequate for the used database module 354 | 355 | * ``ping``: an optional flag controlling when connections are checked 356 | with the ``ping()`` method if such a method is available 357 | (``0`` = ``None`` = never, ``1`` = default = whenever fetched from the pool, 358 | ``2`` = when a cursor is created, ``4`` = when a query is executed, 359 | ``7`` = always, and all other bit combinations of these values) 360 | 361 | * The creator function or the connect function of the DB-API 2 compliant 362 | database module specified as the creator will receive any additional 363 | parameters such as the host, database, user, password etc. You may 364 | choose some or all of these parameters in your own creator function, 365 | allowing for sophisticated failover and load-balancing mechanisms. 366 | 367 | For instance, if you are using ``pgdb`` as your DB-API 2 database module and 368 | want a pool of at least five connections to your local database ``mydb``:: 369 | 370 | import pgdb # import used DB-API 2 module 371 | from dbutils.pooled_db import PooledDB 372 | pool = PooledDB(pgdb, 5, database='mydb') 373 | 374 | Once you have set up the connection pool you can request database connections 375 | from that pool:: 376 | 377 | db = pool.connection() 378 | 379 | You can use these connections just as if they were ordinary DB-API 2 380 | connections. Actually what you get is the hardened ``steady_db`` version of 381 | the underlying DB-API 2 connection. 
382 | 383 | Please note that the connection may be shared with other threads by default 384 | if you set a non-zero ``maxshared`` parameter and the DB-API 2 module allows 385 | this. If you want to have a dedicated connection, use:: 386 | 387 | db = pool.connection(shareable=False) 388 | 389 | Instead of this, you can also get a dedicated connection as follows:: 390 | 391 | db = pool.dedicated_connection() 392 | 393 | If you don't need it anymore, you should immediately return it to the 394 | pool with ``db.close()``. You can get another connection in the same way. 395 | 396 | ⚠ Warning: In a threaded environment, never do the following:: 397 | 398 | pool.connection().cursor().execute(...) 399 | 400 | This would release the connection too early for reuse which may be fatal 401 | if the connections are not thread-safe. Make sure that the connection 402 | object stays alive as long as you are using it, like that:: 403 | 404 | db = pool.connection() 405 | cur = db.cursor() 406 | cur.execute(...) 407 | res = cur.fetchone() 408 | cur.close() # or del cur 409 | db.close() # or del db 410 | 411 | You can also use context managers for simpler code:: 412 | 413 | with pool.connection() as db: 414 | with db.cursor() as cur: 415 | cur.execute(...) 416 | res = cur.fetchone() 417 | 418 | .. warning:: 419 | Note that you need to explicitly start transactions by calling the 420 | ``begin()`` method. This ensures that the connection will not be shared 421 | with other threads, that the transparent reopening will be suspended 422 | until the end of the transaction, and that the connection will be rolled 423 | back before being given back to the connection pool. 424 | 425 | 426 | Advanced Usage 427 | ============== 428 | Sometimes you may want to prepare connections before they are used by 429 | DBUtils, in ways that are not possible by just using the right parameters. 
430 | For instance, ``pyodbc`` may require to configure connections by calling 431 | the ``setencoding()`` method of the connection. You can do this by passing 432 | a modified ``connect()`` function to ``PersistentDB`` or ``PooledDB`` as 433 | ``creator`` (the first argument), like this:: 434 | 435 | from pyodbc import connect 436 | from dbutils.pooled_db import PooledDB 437 | 438 | def creator(): 439 | con = connect(...) 440 | con.setdecoding(...) 441 | return con 442 | 443 | creator.dbapi = pyodbc 444 | 445 | db_pool = PooledDB(creator, mincached=5) 446 | 447 | 448 | Notes 449 | ===== 450 | If you are using one of the popular object-relational mappers SQLObject_ 451 | or SQLAlchemy_, you won't need DBUtils, since they come with their own 452 | connection pools. SQLObject 2 (SQL-API) is actually borrowing some code 453 | from DBUtils to split the pooling out into a separate layer. 454 | 455 | Also note that when you are using a solution like the Apache webserver 456 | with mod_python_ or mod_wsgi_, then your Python code will be usually run 457 | in the context of the webserver's child processes. So if you are using 458 | the ``pooled_db`` module, and several of these child processes are running, 459 | you will have as much database connection pools. If these processes are 460 | running many threads, this may still be a reasonable approach, but if these 461 | processes don't spawn more than one worker thread, as in the case of Apache's 462 | "prefork" multi-processing module, this approach does not make sense. 463 | If you're running such a configuration, you should resort to a middleware 464 | for connection pooling that supports multi-processing, such as pgpool_ 465 | or pgbouncer_ for the PostgreSQL database. 466 | 467 | 468 | Future 469 | ====== 470 | Some ideas for future improvements: 471 | 472 | * Alternatively to the maximum number of uses of a connection, 473 | implement a maximum time to live for connections. 
474 | * Create modules ``monitor_db`` and ``monitor_pg`` that will run in a separate 475 | thread, monitoring the pool of the idle connections and maybe also the 476 | shared connections respectively the thread-affine connections. If a 477 | disrupted connection is detected, then it will be reestablished automatically 478 | by the monitoring thread. This will be useful in a scenario where a database 479 | powering a website is restarted during the night. Without the monitoring 480 | thread, the users would experience a slight delay in the next morning, 481 | because only then, the disrupted database connections will be detected and 482 | the pool will be rebuilt. With the monitoring thread, this will already 483 | happen during the night, shortly after the disruption. 484 | The monitoring thread could also be configured to generally recreate 485 | the connection pool every day shortly before the users arrive. 486 | * Optionally log usage, bad connections and exceeding of limits. 487 | 488 | 489 | Bug reports and feedback 490 | ======================== 491 | You can transmit bug reports, patches and feedback by creating issues_ or 492 | `pull requests`_ on the GitHub project page for DBUtils. 493 | 494 | .. _GitHub-Projektseite: https://github.com/WebwareForPython/DBUtils 495 | .. _Issues: https://github.com/WebwareForPython/DBUtils/issues 496 | .. _Pull Requests: https://github.com/WebwareForPython/DBUtils/pulls 497 | 498 | 499 | Links 500 | ===== 501 | Some links to related and alternative software: 502 | 503 | * DBUtils_ 504 | * Python_ 505 | * `Webware for Python`_ framework 506 | * Python `DB-API 2`_ 507 | * PostgreSQL_ database 508 | * PyGreSQL_ Python adapter for PostgreSQL 509 | * pgpool_ middleware for PostgreSQL connection pooling 510 | * pgbouncer_ lightweight PostgreSQL connection pooling 511 | * SQLObject_ object-relational mapper 512 | * SQLAlchemy_ object-relational mapper 513 | 514 | .. _DBUtils: https://github.com/WebwareForPython/DBUtils 515 | .. 
_Python: https://www.python.org 516 | .. _Webware for Python: https://webwareforpython.github.io/w4py/ 517 | .. _Webware for Python mailing list: https://lists.sourceforge.net/lists/listinfo/webware-discuss 518 | .. _DB-API 2: https://www.python.org/dev/peps/pep-0249/ 519 | .. _The Python DB-API: http://www.linuxjournal.com/article/2605 520 | .. _PostgresQL: https://www.postgresql.org/ 521 | .. _PyGreSQL: https://www.pygresql.org/ 522 | .. _SQLObject: http://www.sqlobject.org/ 523 | .. _SQLAlchemy: https://www.sqlalchemy.org 524 | .. _Apache: https://httpd.apache.org/ 525 | .. _mod_python: http://modpython.org/ 526 | .. _mod_wsgi: https://github.com/GrahamDumpleton/mod_wsgi 527 | .. _pgpool: https://www.pgpool.net/ 528 | .. _pgbouncer: https://pgbouncer.github.io/ 529 | 530 | 531 | Credits 532 | ======= 533 | 534 | :Author: `Christoph Zwerschke`_ 535 | 536 | :Contributions: DBUtils uses code, input and suggestions made by 537 | Ian Bicking, Chuck Esterbrook (Webware for Python), Dan Green (DBTools), 538 | Jay Love, Michael Palmer, Tom Schwaller, Geoffrey Talvola, 539 | Warren Smith (DbConnectionPool), Ezio Vernacotola, Jehiah Czebotar, 540 | Matthew Harriger, Gregory Piñero and Josef van Eenbergen. 541 | 542 | .. _Christoph Zwerschke: https://github.com/Cito 543 | 544 | 545 | Copyright and License 546 | ===================== 547 | 548 | Copyright © 2005-2025 by Christoph Zwerschke. 549 | All Rights Reserved. 550 | 551 | DBUtils is free and open source software, 552 | licensed under the `MIT license`__. 
553 | 554 | __ https://opensource.org/licenses/MIT 555 | -------------------------------------------------------------------------------- /docs/make.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3.12 2 | 3 | """Build HTML from reST files.""" 4 | 5 | from pathlib import Path 6 | 7 | from docutils.core import publish_file 8 | 9 | print("Creating the documentation...") 10 | 11 | for rst_file in Path().glob('*.rst'): 12 | rst_path = Path(rst_file) 13 | name = Path(rst_file).stem 14 | lang = Path(name).suffix 15 | if lang.startswith('.'): 16 | lang = lang[1:] 17 | if lang == 'zh': 18 | lang = 'zh_cn' 19 | else: 20 | lang = 'en' 21 | html_path = Path(name + '.html') 22 | print(name, lang) 23 | 24 | with rst_path.open(encoding='utf-8-sig') as source, \ 25 | html_path.open('w', encoding='utf-8') as destination: 26 | output = publish_file( 27 | writer_name='html5', source=source, destination=destination, 28 | enable_exit_status=True, 29 | settings_overrides={ 30 | "stylesheet_path": 'doc.css', 31 | "embed_stylesheet": False, 32 | "toc_backlinks": False, 33 | "language_code": lang, 34 | "exit_status_level": 2}) 35 | 36 | print("Done.") 37 | -------------------------------------------------------------------------------- /docs/persistent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WebwareForPython/DBUtils/52db5032c9fb31341b34079f6ebacd11738a00d1/docs/persistent.png -------------------------------------------------------------------------------- /docs/pooled.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WebwareForPython/DBUtils/52db5032c9fb31341b34079f6ebacd11738a00d1/docs/pooled.png -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | 
[build-system] 2 | build-backend = "setuptools.build_meta" 3 | requires = ["setuptools>=77"] 4 | 5 | [project] 6 | name = "DBUtils" 7 | version = "3.1.1" 8 | description = "Database connections for multi-threaded environments." 9 | license = "MIT" 10 | authors = [{name = "Christoph Zwerschke", email = "cito@online.de"}] 11 | requires-python = ">=3.7" 12 | classifiers = [ 13 | "Development Status :: 5 - Production/Stable", 14 | "Environment :: Web Environment", 15 | "Intended Audience :: Developers", 16 | "Operating System :: OS Independent", 17 | "Programming Language :: Python", 18 | "Programming Language :: Python :: 3", 19 | "Programming Language :: Python :: 3.7", 20 | "Programming Language :: Python :: 3.8", 21 | "Programming Language :: Python :: 3.9", 22 | "Programming Language :: Python :: 3.10", 23 | "Programming Language :: Python :: 3.11", 24 | "Programming Language :: Python :: 3.12", 25 | "Programming Language :: Python :: 3.13", 26 | "Topic :: Database", 27 | "Topic :: Internet :: WWW/HTTP :: Dynamic Content", 28 | "Topic :: Software Development :: Libraries :: Python Modules", 29 | ] 30 | [project.optional-dependencies] 31 | pg = ["PyGreSQL>=5"] 32 | docs = ["docutils"] 33 | tests = ["pytest>=7", "ruff"] 34 | 35 | [project.readme] 36 | file = "README.md" 37 | content-type = "text/markdown" 38 | 39 | [project.urls] 40 | Homepage = "https://webwareforpython.github.io/DBUtils/" 41 | Download = "https://pypi.org/project/DBUtils/" 42 | Documentation = "https://webwareforpython.github.io/DBUtils/main.html" 43 | Changelog = "https://webwareforpython.github.io/DBUtils/changelog.html" 44 | "Issue Tracker" = "https://github.com/WebwareForPython/DBUtils/issues" 45 | "Source Code" = "https://github.com/WebwareForPython/DBUtils" 46 | 47 | [tool.setuptools] 48 | packages = ["dbutils"] 49 | platforms = ["any"] 50 | include-package-data = false 51 | 52 | [tool.ruff] 53 | line-length = 79 54 | target-version = "py37" 55 | 56 | [tool.ruff.lint] 57 | select = [ 58 | 
"A", # flake8-builtins 59 | # "ANN", # flake8-annotations 60 | "ARG", # flake8-unused-arguments 61 | "B", # flake8-bugbear 62 | # "BLE", # flake8-blind-except 63 | "C4", # flake8-comprehensions 64 | "C90", # McCabe cyclomatic complexity 65 | "COM", # flake8-commas 66 | "D", # pydocstyle 67 | "DTZ", # flake8-datetimez 68 | "E", # pycodestyle 69 | # "EM", # flake8-errmsg 70 | "ERA", # eradicate 71 | "EXE", # flake8-executable 72 | "F", # Pyflakes 73 | # "FBT", # flake8-boolean-trap 74 | "G", # flake8-logging-format 75 | "I", # isort 76 | "ICN", # flake8-import-conventions 77 | "INP", # flake8-no-pep420 78 | "INT", # flake8-gettext 79 | "ISC", # flake8-implicit-str-concat 80 | "N", # pep8-naming 81 | "PGH", # pygrep-hooks 82 | "PIE", # flake8-pie 83 | "PL", # Pylint 84 | "PT", # flake8-pytest-style 85 | "PTH", # flake8-use-pathlib 86 | "PYI", # flake8-pyi 87 | # "Q", # flake8-quotes 88 | "RET", # flake8-return 89 | "RSE", # flake8-raise 90 | "RUF", # Ruff-specific rules 91 | "S", # flake8-bandit 92 | # "SLF", # flake8-self 93 | "SIM", # flake8-simplify 94 | "T10", # flake8-debugger 95 | "T20", # flake8-print 96 | "TCH", # flake8-type-checking 97 | "TID", # flake8-tidy-imports 98 | # "TRY", # tryceratops 99 | "UP", # pyupgrade 100 | "W", # pycodestyle 101 | "YTT", # flake8-2020 102 | ] 103 | # Note: use `ruff rule ...` to see explanations of rules 104 | ignore = [ 105 | "D203", # no blank line before class docstring 106 | "D213", # multi-line docstrings should not start at second line 107 | "RUF022", # __all__ can have custom order 108 | ] 109 | 110 | [tool.ruff.lint.mccabe] 111 | max-complexity = 30 112 | 113 | [tool.ruff.lint.flake8-quotes] 114 | inline-quotes = "double" 115 | 116 | [tool.ruff.lint.pylint] 117 | max-args = 12 118 | max-branches = 35 119 | max-statements = 95 120 | 121 | [tool.ruff.lint.per-file-ignores] 122 | "docs/*" = [ 123 | "INP001", # allow stand-alone scripts 124 | "T201", # allow print statements 125 | ] 126 | "tests/*" = [ 127 | "D", # no 
docstrings necessary here 128 | "PLR2004", # allow magic values 129 | "S101", # allow assert statements 130 | ] 131 | 132 | [tool.codespell] 133 | skip = '.git,.tox,.venv,*.de.html,*.de.rst,build,dist,local' 134 | quiet-level = 2 135 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | """The DBUtils tests package.""" 2 | 3 | # make sure the mock pg module is installed 4 | from . import mock_pg as pg # noqa: F401 5 | -------------------------------------------------------------------------------- /tests/mock_db.py: -------------------------------------------------------------------------------- 1 | """This module serves as a mock object for the DB-API 2 module""" 2 | 3 | import sys 4 | 5 | import pytest 6 | 7 | __all__ = ['dbapi'] 8 | 9 | 10 | threadsafety = 2 11 | 12 | 13 | @pytest.fixture 14 | def dbapi(): 15 | """Get mock DB API 2 module.""" 16 | mock_db = sys.modules[__name__] 17 | mock_db.threadsafety = 2 18 | return mock_db 19 | 20 | 21 | class Error(Exception): 22 | pass 23 | 24 | 25 | class DatabaseError(Error): 26 | pass 27 | 28 | 29 | class OperationalError(DatabaseError): 30 | pass 31 | 32 | 33 | class InterfaceError(DatabaseError): 34 | pass 35 | 36 | 37 | class InternalError(DatabaseError): 38 | pass 39 | 40 | 41 | class ProgrammingError(DatabaseError): 42 | pass 43 | 44 | 45 | def connect(database=None, user=None): 46 | return Connection(database, user) 47 | 48 | 49 | class Connection: 50 | 51 | has_ping = False 52 | num_pings = 0 53 | 54 | def __init__(self, database=None, user=None): 55 | self.database = database 56 | self.user = user 57 | self.valid = False 58 | if database == 'error': 59 | raise OperationalError 60 | self.open_cursors = 0 61 | self.num_uses = 0 62 | self.num_queries = 0 63 | self.num_pings = 0 64 | self.session = [] 65 | self.valid = True 66 | 67 | def close(self): 68 | if not self.valid: 69 | 
raise InternalError 70 | self.open_cursors = 0 71 | self.num_uses = 0 72 | self.num_queries = 0 73 | self.session = [] 74 | self.valid = False 75 | 76 | def commit(self): 77 | if not self.valid: 78 | raise InternalError 79 | self.session.append('commit') 80 | 81 | def rollback(self): 82 | if not self.valid: 83 | raise InternalError 84 | self.session.append('rollback') 85 | 86 | def ping(self): 87 | cls = self.__class__ 88 | cls.num_pings += 1 89 | if not cls.has_ping: 90 | raise AttributeError 91 | if not self.valid: 92 | raise OperationalError 93 | 94 | def cursor(self, name=None): 95 | if not self.valid: 96 | raise InternalError 97 | return Cursor(self, name) 98 | 99 | 100 | class Cursor: 101 | 102 | def __init__(self, con, name=None): 103 | self.con = con 104 | self.valid = False 105 | if name == 'error': 106 | raise OperationalError 107 | self.result = None 108 | self.inputsizes = [] 109 | self.outputsizes = {} 110 | con.open_cursors += 1 111 | self.valid = True 112 | 113 | def close(self): 114 | if not self.valid: 115 | raise InternalError 116 | self.con.open_cursors -= 1 117 | self.valid = False 118 | 119 | def execute(self, operation): 120 | if not self.valid or not self.con.valid: 121 | raise InternalError 122 | self.con.num_uses += 1 123 | if operation.startswith('select '): 124 | self.con.num_queries += 1 125 | self.result = operation[7:] 126 | elif operation.startswith('set '): 127 | self.con.session.append(operation[4:]) 128 | self.result = None 129 | elif operation == 'get sizes': 130 | self.result = (self.inputsizes, self.outputsizes) 131 | self.inputsizes = [] 132 | self.outputsizes = {} 133 | else: 134 | raise ProgrammingError 135 | 136 | def fetchone(self): 137 | if not self.valid: 138 | raise InternalError 139 | result = self.result 140 | self.result = None 141 | return result 142 | 143 | def callproc(self, procname): 144 | if not self.valid or not self.con.valid or not procname: 145 | raise InternalError 146 | self.con.num_uses += 1 147 | 148 | 
def setinputsizes(self, sizes): 149 | if not self.valid: 150 | raise InternalError 151 | self.inputsizes = sizes 152 | 153 | def setoutputsize(self, size, column=None): 154 | if not self.valid: 155 | raise InternalError 156 | self.outputsizes[column] = size 157 | 158 | def __del__(self): 159 | if self.valid: 160 | self.close() 161 | -------------------------------------------------------------------------------- /tests/mock_pg.py: -------------------------------------------------------------------------------- 1 | """This module serves as a mock object for the pg API module""" 2 | 3 | import sys 4 | 5 | sys.modules['pg'] = sys.modules[__name__] 6 | 7 | 8 | class Error(Exception): 9 | pass 10 | 11 | 12 | class DatabaseError(Error): 13 | pass 14 | 15 | 16 | class InternalError(DatabaseError): 17 | pass 18 | 19 | 20 | class ProgrammingError(DatabaseError): 21 | pass 22 | 23 | 24 | def connect(*args, **kwargs): 25 | return PgConnection(*args, **kwargs) 26 | 27 | 28 | class PgConnection: 29 | """The underlying pg API connection class.""" 30 | 31 | def __init__(self, dbname=None, user=None): 32 | self.db = dbname 33 | self.user = user 34 | self.num_queries = 0 35 | self.session = [] 36 | if dbname == 'error': 37 | self.status = False 38 | self.valid = False 39 | raise InternalError 40 | self.status = True 41 | self.valid = True 42 | 43 | def close(self): 44 | if not self.valid: 45 | raise InternalError 46 | self.num_queries = 0 47 | self.session = [] 48 | self.status = False 49 | self.valid = False 50 | 51 | def reset(self): 52 | self.num_queries = 0 53 | self.session = [] 54 | self.status = True 55 | self.valid = True 56 | 57 | def query(self, qstr): 58 | if not self.valid: 59 | raise InternalError 60 | if qstr in ('begin', 'end', 'commit', 'rollback'): 61 | self.session.append(qstr) 62 | return None 63 | if qstr.startswith('select '): 64 | self.num_queries += 1 65 | return qstr[7:] 66 | if qstr.startswith('set '): 67 | self.session.append(qstr[4:]) 68 | return None 69 
| raise ProgrammingError 70 | 71 | 72 | class DB: 73 | """Wrapper class for the pg API connection class.""" 74 | 75 | def __init__(self, *args, **kw): 76 | self.db = connect(*args, **kw) 77 | self.dbname = self.db.db 78 | self.__args = args, kw 79 | 80 | def __getattr__(self, name): 81 | if not self.db: 82 | raise AttributeError 83 | return getattr(self.db, name) 84 | 85 | def close(self): 86 | if not self.db: 87 | raise InternalError 88 | self.db.close() 89 | self.db = None 90 | 91 | def reopen(self): 92 | if self.db: 93 | self.close() 94 | try: 95 | self.db = connect(*self.__args[0], **self.__args[1]) 96 | except Exception: 97 | self.db = None 98 | raise 99 | 100 | def query(self, qstr): 101 | if not self.db: 102 | raise InternalError 103 | return self.db.query(qstr) 104 | 105 | def get_tables(self): 106 | if not self.db: 107 | raise InternalError 108 | return 'test' 109 | -------------------------------------------------------------------------------- /tests/test_persistent_db.py: -------------------------------------------------------------------------------- 1 | """Test the PersistentDB module. 2 | 3 | Note: 4 | We don't test performance here, so the test does not predicate 5 | whether PersistentDB actually will help in improving performance or not. 6 | We also assume that the underlying SteadyDB connections are tested. 
7 | 8 | Copyright and credit info: 9 | 10 | * This test was contributed by Christoph Zwerschke 11 | """ 12 | 13 | from queue import Empty, Queue 14 | from threading import Thread 15 | 16 | import pytest 17 | 18 | from dbutils.persistent_db import NotSupportedError, PersistentDB, local 19 | 20 | from .mock_db import dbapi # noqa: F401 21 | 22 | 23 | def test_version(): 24 | from dbutils import __version__, persistent_db 25 | assert persistent_db.__version__ == __version__ 26 | assert PersistentDB.version == __version__ 27 | 28 | 29 | @pytest.mark.parametrize("threadsafety", [None, 0]) 30 | def test_no_threadsafety(dbapi, threadsafety): # noqa: F811 31 | dbapi.threadsafety = threadsafety 32 | with pytest.raises(NotSupportedError): 33 | PersistentDB(dbapi) 34 | 35 | 36 | @pytest.mark.parametrize("closeable", [False, True]) 37 | def test_close(dbapi, closeable): # noqa: F811 38 | persist = PersistentDB(dbapi, closeable=closeable) 39 | db = persist.connection() 40 | assert db._con.valid is True 41 | db.close() 42 | assert closeable ^ db._con.valid 43 | db.close() 44 | assert closeable ^ db._con.valid 45 | db._close() 46 | assert db._con.valid is False 47 | db._close() 48 | assert db._con.valid is False 49 | 50 | 51 | def test_connection(dbapi): # noqa: F811 52 | persist = PersistentDB(dbapi) 53 | db = persist.connection() 54 | db_con = db._con 55 | assert db_con.database is None 56 | assert db_con.user is None 57 | db2 = persist.connection() 58 | assert db == db2 59 | db3 = persist.dedicated_connection() 60 | assert db == db3 61 | db3.close() 62 | db2.close() 63 | db.close() 64 | 65 | 66 | def test_threads(dbapi): # noqa: F811 67 | num_threads = 3 68 | persist = PersistentDB(dbapi, closeable=True) 69 | query_queue, result_queue = [], [] 70 | for _i in range(num_threads): 71 | query_queue.append(Queue(1)) 72 | result_queue.append(Queue(1)) 73 | 74 | def run_queries(idx): 75 | this_db = persist.connection() 76 | db = None 77 | while True: 78 | try: 79 | q = 
query_queue[idx].get(timeout=1) 80 | except Empty: 81 | q = None 82 | if not q: 83 | break 84 | db = persist.connection() 85 | if db != this_db: 86 | res = 'error - not persistent' 87 | elif q == 'ping': 88 | res = 'ok - thread alive' 89 | elif q == 'close': 90 | db.close() 91 | res = 'ok - connection closed' 92 | else: 93 | cursor = db.cursor() 94 | cursor.execute(q) 95 | res = cursor.fetchone() 96 | cursor.close() 97 | res = f'{idx}({db._usage}): {res}' 98 | result_queue[idx].put(res, timeout=1) 99 | if db: 100 | db.close() 101 | 102 | threads = [] 103 | for i in range(num_threads): 104 | thread = Thread(target=run_queries, args=(i,)) 105 | threads.append(thread) 106 | thread.start() 107 | for i in range(num_threads): 108 | query_queue[i].put('ping', timeout=1) 109 | for i in range(num_threads): 110 | r = result_queue[i].get(timeout=1) 111 | assert r == f'{i}(0): ok - thread alive' 112 | assert threads[i].is_alive() 113 | for i in range(num_threads): 114 | for j in range(i + 1): 115 | query_queue[i].put(f'select test{j}', timeout=1) 116 | r = result_queue[i].get(timeout=1) 117 | assert r == f'{i}({j + 1}): test{j}' 118 | query_queue[1].put('select test4', timeout=1) 119 | r = result_queue[1].get(timeout=1) 120 | assert r == '1(3): test4' 121 | query_queue[1].put('close', timeout=1) 122 | r = result_queue[1].get(timeout=1) 123 | assert r == '1(3): ok - connection closed' 124 | for j in range(2): 125 | query_queue[1].put(f'select test{j}', timeout=1) 126 | r = result_queue[1].get(timeout=1) 127 | assert r == f'1({j + 1}): test{j}' 128 | for i in range(num_threads): 129 | assert threads[i].is_alive() 130 | query_queue[i].put('ping', timeout=1) 131 | for i in range(num_threads): 132 | r = result_queue[i].get(timeout=1) 133 | assert r == f'{i}({i + 1}): ok - thread alive' 134 | assert threads[i].is_alive() 135 | for i in range(num_threads): 136 | query_queue[i].put(None, timeout=1) 137 | 138 | 139 | def test_maxusage(dbapi): # noqa: F811 140 | persist = 
PersistentDB(dbapi, 20) 141 | db = persist.connection() 142 | assert db._maxusage == 20 143 | for i in range(100): 144 | cursor = db.cursor() 145 | cursor.execute(f'select test{i}') 146 | r = cursor.fetchone() 147 | cursor.close() 148 | assert r == f'test{i}' 149 | assert db._con.valid is True 150 | j = i % 20 + 1 151 | assert db._usage == j 152 | assert db._con.num_uses == j 153 | assert db._con.num_queries == j 154 | 155 | 156 | def test_setsession(dbapi): # noqa: F811 157 | persist = PersistentDB(dbapi, 3, ('set datestyle',)) 158 | db = persist.connection() 159 | assert db._maxusage == 3 160 | assert db._setsession_sql == ('set datestyle',) 161 | assert db._con.session == ['datestyle'] 162 | cursor = db.cursor() 163 | cursor.execute('set test') 164 | cursor.fetchone() 165 | cursor.close() 166 | for _i in range(3): 167 | assert db._con.session == ['datestyle', 'test'] 168 | cursor = db.cursor() 169 | cursor.execute('select test') 170 | cursor.fetchone() 171 | cursor.close() 172 | assert db._con.session == ['datestyle'] 173 | 174 | 175 | def test_threadlocal(dbapi): # noqa: F811 176 | persist = PersistentDB(dbapi) 177 | assert isinstance(persist.thread, local) 178 | 179 | class Threadlocal: 180 | pass 181 | 182 | persist = PersistentDB(dbapi, threadlocal=Threadlocal) 183 | assert isinstance(persist.thread, Threadlocal) 184 | 185 | 186 | def test_ping_check(dbapi): # noqa: F811 187 | con_cls = dbapi.Connection 188 | con_cls.has_ping = True 189 | con_cls.num_pings = 0 190 | persist = PersistentDB(dbapi, 0, None, None, 0, True) 191 | db = persist.connection() 192 | assert db._con.valid is True 193 | assert con_cls.num_pings == 0 194 | db.close() 195 | db = persist.connection() 196 | assert db._con.valid is False 197 | assert con_cls.num_pings == 0 198 | persist = PersistentDB(dbapi, 0, None, None, 1, True) 199 | db = persist.connection() 200 | assert db._con.valid is True 201 | assert con_cls.num_pings == 1 202 | db.close() 203 | db = persist.connection() 204 | 
assert db._con.valid is True 205 | assert con_cls.num_pings == 2 206 | persist = PersistentDB(dbapi, 0, None, None, 2, True) 207 | db = persist.connection() 208 | assert db._con.valid is True 209 | assert con_cls.num_pings == 2 210 | db.close() 211 | db = persist.connection() 212 | assert db._con.valid is False 213 | assert con_cls.num_pings == 2 214 | cursor = db.cursor() 215 | assert db._con.valid is True 216 | assert con_cls.num_pings == 3 217 | cursor.execute('select test') 218 | assert db._con.valid is True 219 | assert con_cls.num_pings == 3 220 | persist = PersistentDB(dbapi, 0, None, None, 4, True) 221 | db = persist.connection() 222 | assert db._con.valid is True 223 | assert con_cls.num_pings == 3 224 | db.close() 225 | db = persist.connection() 226 | assert db._con.valid is False 227 | assert con_cls.num_pings == 3 228 | cursor = db.cursor() 229 | db._con.close() 230 | assert db._con.valid is False 231 | assert con_cls.num_pings == 3 232 | cursor.execute('select test') 233 | assert db._con.valid is True 234 | assert con_cls.num_pings == 4 235 | con_cls.has_ping = False 236 | con_cls.num_pings = 0 237 | 238 | 239 | def test_failed_transaction(dbapi): # noqa: F811 240 | persist = PersistentDB(dbapi) 241 | db = persist.connection() 242 | cursor = db.cursor() 243 | db._con.close() 244 | cursor.execute('select test') 245 | db.begin() 246 | db._con.close() 247 | with pytest.raises(dbapi.InternalError): 248 | cursor.execute('select test') 249 | cursor.execute('select test') 250 | db.begin() 251 | db.cancel() 252 | db._con.close() 253 | cursor.execute('select test') 254 | 255 | 256 | def test_context_manager(dbapi): # noqa: F811 257 | persist = PersistentDB(dbapi) 258 | with persist.connection() as db: 259 | with db.cursor() as cursor: 260 | cursor.execute('select test') 261 | r = cursor.fetchone() 262 | assert r == 'test' 263 | -------------------------------------------------------------------------------- /tests/test_persistent_pg.py: 
-------------------------------------------------------------------------------- 1 | """Test the PersistentPg module. 2 | 3 | Note: 4 | We don't test performance here, so the test does not predicate 5 | whether PersistentPg actually will help in improving performance or not. 6 | We also assume that the underlying SteadyPg connections are tested. 7 | 8 | Copyright and credit info: 9 | 10 | * This test was contributed by Christoph Zwerschke 11 | """ 12 | 13 | from queue import Empty, Queue 14 | from threading import Thread 15 | 16 | import pg 17 | import pytest 18 | 19 | from dbutils.persistent_pg import PersistentPg 20 | 21 | 22 | def test_version(): 23 | from dbutils import __version__, persistent_pg 24 | assert persistent_pg.__version__ == __version__ 25 | assert PersistentPg.version == __version__ 26 | 27 | 28 | @pytest.mark.parametrize("closeable", [False, True]) 29 | def test_close(closeable): 30 | persist = PersistentPg(closeable=closeable) 31 | db = persist.connection() 32 | assert db._con.db 33 | assert db._con.valid is True 34 | db.close() 35 | assert closeable ^ (db._con.db is not None and db._con.valid) 36 | db.close() 37 | assert closeable ^ (db._con.db is not None and db._con.valid) 38 | db._close() 39 | assert not db._con.db 40 | db._close() 41 | assert not db._con.db 42 | 43 | 44 | def test_threads(): 45 | num_threads = 3 46 | persist = PersistentPg() 47 | query_queue, result_queue = [], [] 48 | for _i in range(num_threads): 49 | query_queue.append(Queue(1)) 50 | result_queue.append(Queue(1)) 51 | 52 | def run_queries(idx): 53 | this_db = persist.connection().db 54 | db = None 55 | while True: 56 | try: 57 | q = query_queue[idx].get(timeout=1) 58 | except Empty: 59 | q = None 60 | if not q: 61 | break 62 | db = persist.connection() 63 | if db.db != this_db: 64 | res = 'error - not persistent' 65 | elif q == 'ping': 66 | res = 'ok - thread alive' 67 | elif q == 'close': 68 | db.db.close() 69 | res = 'ok - connection closed' 70 | else: 71 | res = 
db.query(q) 72 | res = f'{idx}({db._usage}): {res}' 73 | result_queue[idx].put(res, timeout=1) 74 | if db: 75 | db.close() 76 | 77 | threads = [] 78 | for i in range(num_threads): 79 | thread = Thread(target=run_queries, args=(i,)) 80 | threads.append(thread) 81 | thread.start() 82 | for i in range(num_threads): 83 | query_queue[i].put('ping', timeout=1) 84 | for i in range(num_threads): 85 | r = result_queue[i].get(timeout=1) 86 | assert r == f'{i}(0): ok - thread alive' 87 | assert threads[i].is_alive() 88 | for i in range(num_threads): 89 | for j in range(i + 1): 90 | query_queue[i].put(f'select test{j}', timeout=1) 91 | r = result_queue[i].get(timeout=1) 92 | assert r == f'{i}({j + 1}): test{j}' 93 | query_queue[1].put('select test4', timeout=1) 94 | r = result_queue[1].get(timeout=1) 95 | assert r == '1(3): test4' 96 | query_queue[1].put('close', timeout=1) 97 | r = result_queue[1].get(timeout=1) 98 | assert r == '1(3): ok - connection closed' 99 | for j in range(2): 100 | query_queue[1].put(f'select test{j}', timeout=1) 101 | r = result_queue[1].get(timeout=1) 102 | assert r == f'1({j + 1}): test{j}' 103 | for i in range(num_threads): 104 | assert threads[i].is_alive() 105 | query_queue[i].put('ping', timeout=1) 106 | for i in range(num_threads): 107 | r = result_queue[i].get(timeout=1) 108 | assert r == f'{i}({i + 1}): ok - thread alive' 109 | assert threads[i].is_alive() 110 | for i in range(num_threads): 111 | query_queue[i].put(None, timeout=1) 112 | 113 | 114 | def test_maxusage(): 115 | persist = PersistentPg(20) 116 | db = persist.connection() 117 | assert db._maxusage == 20 118 | for i in range(100): 119 | r = db.query(f'select test{i}') 120 | assert r == f'test{i}' 121 | assert db.db.status 122 | j = i % 20 + 1 123 | assert db._usage == j 124 | assert db.num_queries == j 125 | 126 | 127 | def test_setsession(): 128 | persist = PersistentPg(3, ('set datestyle',)) 129 | db = persist.connection() 130 | assert db._maxusage == 3 131 | assert 
db._setsession_sql == ('set datestyle',) 132 | assert db.db.session == ['datestyle'] 133 | db.query('set test') 134 | for _i in range(3): 135 | assert db.db.session == ['datestyle', 'test'] 136 | db.query('select test') 137 | assert db.db.session == ['datestyle'] 138 | 139 | 140 | def test_failed_transaction(): 141 | persist = PersistentPg() 142 | db = persist.connection() 143 | db._con.close() 144 | assert db.query('select test') == 'test' 145 | db.begin() 146 | db._con.close() 147 | with pytest.raises(pg.InternalError): 148 | db.query('select test') 149 | assert db.query('select test') == 'test' 150 | db.begin() 151 | assert db.query('select test') == 'test' 152 | db.rollback() 153 | db._con.close() 154 | assert db.query('select test') == 'test' 155 | 156 | 157 | def test_context_manager(): 158 | persist = PersistentPg() 159 | with persist.connection() as db: 160 | db.query('select test') 161 | assert db.num_queries == 1 162 | -------------------------------------------------------------------------------- /tests/test_pooled_pg.py: -------------------------------------------------------------------------------- 1 | """Test the PooledPg module. 2 | 3 | Note: 4 | We don't test performance here, so the test does not predicate 5 | whether PooledPg actually will help in improving performance or not. 6 | We also assume that the underlying SteadyPg connections are tested. 
7 | 8 | Copyright and credit info: 9 | 10 | * This test was contributed by Christoph Zwerschke 11 | """ 12 | 13 | from queue import Empty, Queue 14 | from threading import Thread 15 | 16 | import pg # noqa: F401 17 | import pytest 18 | 19 | from dbutils.pooled_pg import ( 20 | InvalidConnectionError, 21 | PooledPg, 22 | TooManyConnectionsError, 23 | ) 24 | from dbutils.steady_pg import SteadyPgConnection 25 | 26 | 27 | def test_version(): 28 | from dbutils import __version__, pooled_pg 29 | assert pooled_pg.__version__ == __version__ 30 | assert PooledPg.version == __version__ 31 | 32 | 33 | def test_create_connection(): 34 | pool = PooledPg( 35 | 1, 1, 0, False, None, None, False, 36 | 'PooledPgTestDB', user='PooledPgTestUser') 37 | assert hasattr(pool, '_cache') 38 | assert pool._cache.qsize() == 1 39 | assert hasattr(pool, '_maxusage') 40 | assert pool._maxusage is None 41 | assert hasattr(pool, '_setsession') 42 | assert pool._setsession is None 43 | assert hasattr(pool, '_reset') 44 | assert not pool._reset 45 | db_con = pool._cache.get(0) 46 | pool._cache.put(db_con, 0) 47 | assert isinstance(db_con, SteadyPgConnection) 48 | db = pool.connection() 49 | assert pool._cache.qsize() == 0 50 | assert hasattr(db, '_con') 51 | assert db._con == db_con 52 | assert hasattr(db, 'query') 53 | assert hasattr(db, 'num_queries') 54 | assert db.num_queries == 0 55 | assert hasattr(db, '_maxusage') 56 | assert db._maxusage == 0 57 | assert hasattr(db, '_setsession_sql') 58 | assert db._setsession_sql is None 59 | assert hasattr(db, 'dbname') 60 | assert db.dbname == 'PooledPgTestDB' 61 | assert hasattr(db, 'user') 62 | assert db.user == 'PooledPgTestUser' 63 | db.query('select test') 64 | assert db.num_queries == 1 65 | pool = PooledPg(1) 66 | db = pool.connection() 67 | assert hasattr(db, 'dbname') 68 | assert db.dbname is None 69 | assert hasattr(db, 'user') 70 | assert db.user is None 71 | assert hasattr(db, 'num_queries') 72 | assert db.num_queries == 0 73 | pool = 
PooledPg(0, 0, 0, False, 3, ('set datestyle',)) 74 | assert pool._maxusage == 3 75 | assert pool._setsession == ('set datestyle',) 76 | db = pool.connection() 77 | assert db._maxusage == 3 78 | assert db._setsession_sql == ('set datestyle',) 79 | 80 | 81 | def test_close_connection(): 82 | pool = PooledPg( 83 | 0, 1, 0, False, None, None, False, 84 | 'PooledPgTestDB', user='PooledPgTestUser') 85 | db = pool.connection() 86 | assert hasattr(db, '_con') 87 | db_con = db._con 88 | assert isinstance(db_con, SteadyPgConnection) 89 | assert hasattr(pool, '_cache') 90 | assert pool._cache.qsize() == 0 91 | assert db.num_queries == 0 92 | db.query('select test') 93 | assert db.num_queries == 1 94 | db.close() 95 | with pytest.raises(InvalidConnectionError): 96 | assert db.num_queries 97 | db = pool.connection() 98 | assert hasattr(db, 'dbname') 99 | assert db.dbname == 'PooledPgTestDB' 100 | assert hasattr(db, 'user') 101 | assert db.user == 'PooledPgTestUser' 102 | assert db.num_queries == 1 103 | db.query('select test') 104 | assert db.num_queries == 2 105 | db = pool.connection() 106 | assert pool._cache.qsize() == 1 107 | assert pool._cache.get(0) == db_con 108 | assert db 109 | del db 110 | 111 | 112 | def test_min_max_cached(): 113 | pool = PooledPg(3) 114 | assert hasattr(pool, '_cache') 115 | assert pool._cache.qsize() == 3 116 | cache = [pool.connection() for _i in range(3)] 117 | assert pool._cache.qsize() == 0 118 | for _i in range(3): 119 | cache.pop().close() 120 | assert pool._cache.qsize() == 3 121 | for _i in range(6): 122 | cache.append(pool.connection()) 123 | assert pool._cache.qsize() == 0 124 | for _i in range(6): 125 | cache.pop().close() 126 | assert pool._cache.qsize() == 6 127 | pool = PooledPg(3, 4) 128 | assert hasattr(pool, '_cache') 129 | assert pool._cache.qsize() == 3 130 | cache = [pool.connection() for _i in range(3)] 131 | assert pool._cache.qsize() == 0 132 | for _i in range(3): 133 | cache.pop().close() 134 | assert pool._cache.qsize() 
== 3 135 | for _i in range(6): 136 | cache.append(pool.connection()) 137 | assert pool._cache.qsize() == 0 138 | for _i in range(6): 139 | cache.pop().close() 140 | assert pool._cache.qsize() == 4 141 | pool = PooledPg(3, 2) 142 | assert hasattr(pool, '_cache') 143 | assert pool._cache.qsize() == 3 144 | cache = [pool.connection() for _i in range(4)] 145 | assert pool._cache.qsize() == 0 146 | for _i in range(4): 147 | cache.pop().close() 148 | assert pool._cache.qsize() == 3 149 | pool = PooledPg(2, 5) 150 | assert hasattr(pool, '_cache') 151 | assert pool._cache.qsize() == 2 152 | cache = [pool.connection() for _i in range(10)] 153 | assert pool._cache.qsize() == 0 154 | for _i in range(10): 155 | cache.pop().close() 156 | assert pool._cache.qsize() == 5 157 | 158 | 159 | def test_max_connections(): 160 | pool = PooledPg(1, 2, 3) 161 | assert pool._cache.qsize() == 1 162 | cache = [pool.connection() for _i in range(3)] 163 | assert pool._cache.qsize() == 0 164 | with pytest.raises(TooManyConnectionsError): 165 | pool.connection() 166 | pool = PooledPg(0, 1, 1, False) 167 | assert pool._blocking == 0 168 | assert pool._cache.qsize() == 0 169 | db = pool.connection() 170 | assert pool._cache.qsize() == 0 171 | with pytest.raises(TooManyConnectionsError): 172 | pool.connection() 173 | assert db 174 | del db 175 | assert cache 176 | del cache 177 | pool = PooledPg(1, 2, 1) 178 | assert pool._cache.qsize() == 1 179 | cache = [pool.connection()] 180 | assert pool._cache.qsize() == 0 181 | cache.append(pool.connection()) 182 | assert pool._cache.qsize() == 0 183 | with pytest.raises(TooManyConnectionsError): 184 | pool.connection() 185 | pool = PooledPg(3, 2, 1, False) 186 | assert pool._cache.qsize() == 3 187 | cache = [pool.connection() for _i in range(3)] 188 | assert len(cache) == 3 189 | assert pool._cache.qsize() == 0 190 | with pytest.raises(TooManyConnectionsError): 191 | pool.connection() 192 | pool = PooledPg(1, 1, 1, True) 193 | assert pool._blocking == 1 194 
| assert pool._cache.qsize() == 1 195 | db = pool.connection() 196 | assert pool._cache.qsize() == 0 197 | 198 | def connection(): 199 | pool.connection().query('set thread') 200 | 201 | thread = Thread(target=connection) 202 | thread.start() 203 | thread.join(0.1) 204 | assert thread.is_alive() 205 | assert pool._cache.qsize() == 0 206 | session = db._con.session 207 | assert session == [] 208 | del db 209 | thread.join(0.1) 210 | assert not thread.is_alive() 211 | assert pool._cache.qsize() == 1 212 | db = pool.connection() 213 | assert pool._cache.qsize() == 0 214 | assert session == ['thread'] 215 | assert db 216 | del db 217 | 218 | 219 | def test_one_thread_two_connections(): 220 | pool = PooledPg(2) 221 | db1 = pool.connection() 222 | for _i in range(5): 223 | db1.query('select test') 224 | db2 = pool.connection() 225 | assert db1 != db2 226 | assert db1._con != db2._con 227 | for _i in range(7): 228 | db2.query('select test') 229 | assert db1.num_queries == 5 230 | assert db2.num_queries == 7 231 | del db1 232 | db1 = pool.connection() 233 | assert db1 != db2 234 | assert db1._con != db2._con 235 | assert hasattr(db1, 'query') 236 | for _i in range(3): 237 | db1.query('select test') 238 | assert db1.num_queries == 8 239 | db2.query('select test') 240 | assert db2.num_queries == 8 241 | 242 | 243 | def test_three_threads_two_connections(): 244 | pool = PooledPg(2, 2, 2, True) 245 | queue = Queue(3) 246 | 247 | def connection(): 248 | queue.put(pool.connection(), timeout=1) 249 | 250 | for _i in range(3): 251 | Thread(target=connection).start() 252 | db1 = queue.get(timeout=1) 253 | db2 = queue.get(timeout=1) 254 | db1_con = db1._con 255 | db2_con = db2._con 256 | assert db1 != db2 257 | assert db1_con != db2_con 258 | with pytest.raises(Empty): 259 | queue.get(timeout=0.1) 260 | del db1 261 | db1 = queue.get(timeout=1) 262 | assert db1 != db2 263 | assert db1._con != db2._con 264 | assert db1._con == db1_con 265 | 266 | 267 | def test_reset_transaction(): 
268 | pool = PooledPg(1) 269 | db = pool.connection() 270 | db.begin() 271 | con = db._con 272 | assert con._transaction 273 | db.query('select test') 274 | assert con.num_queries == 1 275 | db.close() 276 | assert pool.connection()._con is con 277 | assert not con._transaction 278 | assert con.session == ['begin', 'rollback'] 279 | assert con.num_queries == 1 280 | pool = PooledPg(1, reset=1) 281 | db = pool.connection() 282 | db.begin() 283 | con = db._con 284 | assert con._transaction 285 | assert con.session == ['rollback', 'begin'] 286 | db.query('select test') 287 | assert con.num_queries == 1 288 | db.close() 289 | assert pool.connection()._con is con 290 | assert not con._transaction 291 | assert con.session == ['rollback', 'begin', 'rollback', 'rollback'] 292 | assert con.num_queries == 1 293 | pool = PooledPg(1, reset=2) 294 | db = pool.connection() 295 | db.begin() 296 | con = db._con 297 | assert con._transaction 298 | assert con.session == ['begin'] 299 | db.query('select test') 300 | assert con.num_queries == 1 301 | db.close() 302 | assert pool.connection()._con is con 303 | assert not con._transaction 304 | assert con.session == [] 305 | assert con.num_queries == 0 306 | 307 | 308 | def test_context_manager(): 309 | pool = PooledPg(1, 1, 1) 310 | with pool.connection() as db: 311 | db_con = db._con._con 312 | db.query('select test') 313 | assert db_con.num_queries == 1 314 | with pytest.raises(TooManyConnectionsError): 315 | pool.connection() 316 | with pool.connection() as db: 317 | db_con = db._con._con 318 | db.query('select test') 319 | assert db_con.num_queries == 2 320 | with pytest.raises(TooManyConnectionsError): 321 | pool.connection() 322 | -------------------------------------------------------------------------------- /tests/test_simple_pooled_db.py: -------------------------------------------------------------------------------- 1 | """Test the SimplePooledDB module. 
2 | 3 | Note: 4 | We don't test performance here, so the test does not predicate 5 | whether SimplePooledDB actually will help in improving performance or not. 6 | We also do not test any real world DB-API 2 module, we just 7 | mock the basic connection functionality of an arbitrary module. 8 | 9 | Copyright and credit info: 10 | 11 | * This test was contributed by Christoph Zwerschke 12 | """ 13 | 14 | from queue import Empty, Queue 15 | from threading import Thread 16 | 17 | import pytest 18 | 19 | from dbutils import simple_pooled_db 20 | 21 | from . import mock_db as dbapi 22 | 23 | 24 | def my_db_pool(threadsafety, max_connections): 25 | """Get simple PooledDB connection.""" 26 | dbapi_threadsafety = dbapi.threadsafety 27 | dbapi.threadsafety = threadsafety 28 | try: 29 | return simple_pooled_db.PooledDB( 30 | dbapi, max_connections, 31 | 'SimplePooledDBTestDB', 'SimplePooledDBTestUser') 32 | finally: 33 | dbapi.threadsafety = dbapi_threadsafety 34 | 35 | 36 | def test_version(): 37 | from dbutils import __version__ 38 | assert simple_pooled_db.__version__ == __version__ 39 | assert simple_pooled_db.PooledDB.version == __version__ 40 | 41 | 42 | @pytest.mark.parametrize("threadsafety", [None, -1, 0, 4]) 43 | def test_no_threadsafety(threadsafety): 44 | with pytest.raises(simple_pooled_db.NotSupportedError): 45 | my_db_pool(threadsafety, 1) 46 | 47 | 48 | @pytest.mark.parametrize("threadsafety", [1, 2, 3]) 49 | def test_create_connection(threadsafety): 50 | dbpool = my_db_pool(threadsafety, 1) 51 | db = dbpool.connection() 52 | assert hasattr(db, 'cursor') 53 | assert hasattr(db, 'open_cursors') 54 | assert db.open_cursors == 0 55 | assert hasattr(db, 'database') 56 | assert db.database == 'SimplePooledDBTestDB' 57 | assert hasattr(db, 'user') 58 | assert db.user == 'SimplePooledDBTestUser' 59 | cursor = db.cursor() 60 | assert cursor is not None 61 | assert db.open_cursors == 1 62 | del cursor 63 | 64 | 65 | @pytest.mark.parametrize("threadsafety", [1, 2, 3]) 
66 | def test_close_connection(threadsafety): 67 | db_pool = my_db_pool(threadsafety, 1) 68 | db = db_pool.connection() 69 | assert db.open_cursors == 0 70 | cursor1 = db.cursor() 71 | assert cursor1 is not None 72 | assert db.open_cursors == 1 73 | db.close() 74 | assert not hasattr(db, 'open_cursors') 75 | db = db_pool.connection() 76 | assert hasattr(db, 'database') 77 | assert db.database == 'SimplePooledDBTestDB' 78 | assert hasattr(db, 'user') 79 | assert db.user == 'SimplePooledDBTestUser' 80 | assert db.open_cursors == 1 81 | cursor2 = db.cursor() 82 | assert cursor2 is not None 83 | assert db.open_cursors == 2 84 | del cursor2 85 | del cursor1 86 | 87 | 88 | @pytest.mark.parametrize("threadsafety", [1, 2, 3]) 89 | def test_two_connections(threadsafety): 90 | db_pool = my_db_pool(threadsafety, 2) 91 | db1 = db_pool.connection() 92 | cursors1 = [db1.cursor() for _i_ in range(5)] 93 | db2 = db_pool.connection() 94 | assert db1 != db2 95 | cursors2 = [db2.cursor() for _i in range(7)] 96 | assert db1.open_cursors == 5 97 | assert db2.open_cursors == 7 98 | db1.close() 99 | db1 = db_pool.connection() 100 | assert db1 != db2 101 | assert hasattr(db1, 'cursor') 102 | for _i in range(3): 103 | cursors1.append(db1.cursor()) 104 | assert db1.open_cursors == 8 105 | cursors2.append(db2.cursor()) 106 | assert db2.open_cursors == 8 107 | del cursors2 108 | del cursors1 109 | 110 | 111 | def test_threadsafety_1(): 112 | db_pool = my_db_pool(1, 2) 113 | queue = Queue(3) 114 | 115 | def connection(): 116 | queue.put(db_pool.connection()) 117 | 118 | threads = [Thread(target=connection).start() for _i in range(3)] 119 | assert len(threads) == 3 120 | db1 = queue.get(timeout=1) 121 | db2 = queue.get(timeout=1) 122 | assert db1 != db2 123 | assert db1._con != db2._con 124 | with pytest.raises(Empty): 125 | queue.get(timeout=0.1) 126 | db2.close() 127 | db3 = queue.get(timeout=1) 128 | assert db1 != db3 129 | assert db1._con != db3._con 130 | 131 | 132 | 
@pytest.mark.parametrize("threadsafety", [2, 3]) 133 | def test_threadsafety_2(threadsafety): 134 | dbpool = my_db_pool(threadsafety, 2) 135 | db1 = dbpool.connection() 136 | db2 = dbpool.connection() 137 | cursors = [dbpool.connection().cursor() for _i in range(100)] 138 | assert db1.open_cursors == 50 139 | assert db2.open_cursors == 50 140 | assert cursors 141 | del cursors 142 | -------------------------------------------------------------------------------- /tests/test_simple_pooled_pg.py: -------------------------------------------------------------------------------- 1 | """Test the SimplePooledPg module. 2 | 3 | Note: 4 | We don't test performance here, so the test does not predicate 5 | whether SimplePooledPg actually will help in improving performance or not. 6 | 7 | 8 | Copyright and credit info: 9 | 10 | * This test was contributed by Christoph Zwerschke 11 | """ 12 | 13 | from queue import Empty, Queue 14 | from threading import Thread 15 | 16 | import pg # noqa: F401 17 | import pytest 18 | 19 | from dbutils import simple_pooled_pg 20 | 21 | 22 | def my_db_pool(max_connections): 23 | """Get simple PooledPg connection.""" 24 | return simple_pooled_pg.PooledPg( 25 | max_connections, 'SimplePooledPgTestDB', 'SimplePooledPgTestUser') 26 | 27 | 28 | def test_version(): 29 | from dbutils import __version__ 30 | assert simple_pooled_pg.__version__ == __version__ 31 | assert simple_pooled_pg.PooledPg.version == __version__ 32 | 33 | 34 | def test_create_connection(): 35 | db_pool = my_db_pool(1) 36 | db = db_pool.connection() 37 | assert hasattr(db, 'query') 38 | assert hasattr(db, 'num_queries') 39 | assert db.num_queries == 0 40 | assert hasattr(db, 'dbname') 41 | assert db.dbname == 'SimplePooledPgTestDB' 42 | assert hasattr(db, 'user') 43 | assert db.user == 'SimplePooledPgTestUser' 44 | db.query('select 1') 45 | assert db.num_queries == 1 46 | 47 | 48 | def test_close_connection(): 49 | db_pool = my_db_pool(1) 50 | db = db_pool.connection() 51 | assert 
db.num_queries == 0 52 | db.query('select 1') 53 | assert db.num_queries == 1 54 | db.close() 55 | assert not hasattr(db, 'num_queries') 56 | db = db_pool.connection() 57 | assert hasattr(db, 'dbname') 58 | assert db.dbname == 'SimplePooledPgTestDB' 59 | assert hasattr(db, 'user') 60 | assert db.user == 'SimplePooledPgTestUser' 61 | assert db.num_queries == 1 62 | db.query('select 1') 63 | assert db.num_queries == 2 64 | 65 | 66 | def test_two_connections(): 67 | db_pool = my_db_pool(2) 68 | db1 = db_pool.connection() 69 | for _i in range(5): 70 | db1.query('select 1') 71 | db2 = db_pool.connection() 72 | assert db1 != db2 73 | assert db1._con != db2._con 74 | for _i in range(7): 75 | db2.query('select 1') 76 | assert db1.num_queries == 5 77 | assert db2.num_queries == 7 78 | db1.close() 79 | db1 = db_pool.connection() 80 | assert db1 != db2 81 | assert db1._con != db2._con 82 | assert hasattr(db1, 'query') 83 | for _i in range(3): 84 | db1.query('select 1') 85 | assert db1.num_queries == 8 86 | db2.query('select 1') 87 | assert db2.num_queries == 8 88 | 89 | 90 | def test_threads(): 91 | db_pool = my_db_pool(2) 92 | queue = Queue(3) 93 | 94 | def connection(): 95 | queue.put(db_pool.connection()) 96 | 97 | threads = [Thread(target=connection).start() for _i in range(3)] 98 | assert len(threads) == 3 99 | db1 = queue.get(timeout=1) 100 | db2 = queue.get(timeout=1) 101 | assert db1 != db2 102 | assert db1._con != db2._con 103 | with pytest.raises(Empty): 104 | queue.get(timeout=0.1) 105 | db2.close() 106 | db3 = queue.get(timeout=1) 107 | assert db1 != db3 108 | assert db1._con != db3._con 109 | -------------------------------------------------------------------------------- /tests/test_steady_db.py: -------------------------------------------------------------------------------- 1 | """Test the SteadyDB module. 2 | 3 | Note: 4 | We do not test any real DB-API 2 module, but we just 5 | mock the basic DB-API 2 connection functionality. 
6 | 7 | Copyright and credit info: 8 | 9 | * This test was contributed by Christoph Zwerschke 10 | """ 11 | 12 | import pytest 13 | 14 | from dbutils.steady_db import SteadyDBConnection, SteadyDBCursor 15 | from dbutils.steady_db import connect as steady_db_connect 16 | 17 | from . import mock_db as dbapi 18 | 19 | 20 | def test_version(): 21 | from dbutils import __version__, steady_db 22 | assert steady_db.__version__ == __version__ 23 | assert steady_db.SteadyDBConnection.version == __version__ 24 | 25 | 26 | def test_mocked_connection(): 27 | db = dbapi.connect( 28 | 'SteadyDBTestDB', user='SteadyDBTestUser') 29 | db.__class__.has_ping = False 30 | db.__class__.num_pings = 0 31 | assert hasattr(db, 'database') 32 | assert db.database == 'SteadyDBTestDB' 33 | assert hasattr(db, 'user') 34 | assert db.user == 'SteadyDBTestUser' 35 | assert hasattr(db, 'cursor') 36 | assert hasattr(db, 'close') 37 | assert hasattr(db, 'open_cursors') 38 | assert hasattr(db, 'num_uses') 39 | assert hasattr(db, 'num_queries') 40 | assert hasattr(db, 'session') 41 | assert hasattr(db, 'valid') 42 | assert db.valid 43 | assert db.open_cursors == 0 44 | for _i in range(3): 45 | cursor = db.cursor() 46 | assert db.open_cursors == 1 47 | cursor.close() 48 | assert db.open_cursors == 0 49 | cursor = [] 50 | for i in range(3): 51 | cursor.append(db.cursor()) 52 | assert db.open_cursors == i + 1 53 | del cursor 54 | assert db.open_cursors == 0 55 | cursor = db.cursor() 56 | assert hasattr(cursor, 'execute') 57 | assert hasattr(cursor, 'fetchone') 58 | assert hasattr(cursor, 'callproc') 59 | assert hasattr(cursor, 'close') 60 | assert hasattr(cursor, 'valid') 61 | assert cursor.valid 62 | assert db.open_cursors == 1 63 | for i in range(3): 64 | assert db.num_uses == i 65 | assert db.num_queries == i 66 | cursor.execute(f'select test{i}') 67 | assert cursor.fetchone() == f'test{i}' 68 | assert cursor.valid 69 | assert db.open_cursors == 1 70 | for _i in range(4): 71 | cursor.callproc('test') 
72 | cursor.close() 73 | assert not cursor.valid 74 | assert db.open_cursors == 0 75 | assert db.num_uses == 7 76 | assert db.num_queries == 3 77 | with pytest.raises(dbapi.InternalError): 78 | cursor.close() 79 | with pytest.raises(dbapi.InternalError): 80 | cursor.execute('select test') 81 | assert db.valid 82 | assert not db.__class__.has_ping 83 | assert db.__class__.num_pings == 0 84 | with pytest.raises(AttributeError): 85 | db.ping() 86 | assert db.__class__.num_pings == 1 87 | db.__class__.has_ping = True 88 | assert db.ping() is None 89 | assert db.__class__.num_pings == 2 90 | db.close() 91 | assert not db.valid 92 | assert db.num_uses == 0 93 | assert db.num_queries == 0 94 | with pytest.raises(dbapi.InternalError): 95 | db.close() 96 | with pytest.raises(dbapi.InternalError): 97 | db.cursor() 98 | with pytest.raises(dbapi.OperationalError): 99 | db.ping() 100 | assert db.__class__.num_pings == 3 101 | db.__class__.has_ping = False 102 | db.__class__.num_pings = 0 103 | 104 | 105 | def test_broken_connection(): 106 | with pytest.raises(TypeError): 107 | SteadyDBConnection(None) 108 | with pytest.raises(TypeError): 109 | SteadyDBCursor(None) 110 | db = steady_db_connect(dbapi, database='ok') 111 | for _i in range(3): 112 | db.close() 113 | del db 114 | with pytest.raises(dbapi.OperationalError): 115 | steady_db_connect(dbapi, database='error') 116 | db = steady_db_connect(dbapi, database='ok') 117 | cursor = db.cursor() 118 | for _i in range(3): 119 | cursor.close() 120 | cursor = db.cursor('ok') 121 | for _i in range(3): 122 | cursor.close() 123 | with pytest.raises(dbapi.OperationalError): 124 | db.cursor('error') 125 | 126 | 127 | @pytest.mark.parametrize("closeable", [False, True]) 128 | def test_close(closeable): 129 | db = steady_db_connect(dbapi, closeable=closeable) 130 | assert db._con.valid 131 | db.close() 132 | assert closeable ^ db._con.valid 133 | db.close() 134 | assert closeable ^ db._con.valid 135 | db._close() 136 | assert not 
db._con.valid 137 | db._close() 138 | assert not db._con.valid 139 | 140 | 141 | def test_connection(): # noqa: PLR0915 142 | db = steady_db_connect( 143 | dbapi, 0, None, None, None, True, 144 | 'SteadyDBTestDB', user='SteadyDBTestUser') 145 | assert isinstance(db, SteadyDBConnection) 146 | assert hasattr(db, '_con') 147 | assert hasattr(db, '_usage') 148 | assert db._usage == 0 149 | assert hasattr(db._con, 'valid') 150 | assert db._con.valid 151 | assert hasattr(db._con, 'cursor') 152 | assert hasattr(db._con, 'close') 153 | assert hasattr(db._con, 'open_cursors') 154 | assert hasattr(db._con, 'num_uses') 155 | assert hasattr(db._con, 'num_queries') 156 | assert hasattr(db._con, 'session') 157 | assert hasattr(db._con, 'database') 158 | assert db._con.database == 'SteadyDBTestDB' 159 | assert hasattr(db._con, 'user') 160 | assert db._con.user == 'SteadyDBTestUser' 161 | assert hasattr(db, 'cursor') 162 | assert hasattr(db, 'close') 163 | assert db._con.open_cursors == 0 164 | for _i in range(3): 165 | cursor = db.cursor() 166 | assert db._con.open_cursors == 1 167 | cursor.close() 168 | assert db._con.open_cursors == 0 169 | cursor = [] 170 | for i in range(3): 171 | cursor.append(db.cursor()) 172 | assert db._con.open_cursors == i + 1 173 | del cursor 174 | assert db._con.open_cursors == 0 175 | cursor = db.cursor() 176 | assert hasattr(cursor, 'execute') 177 | assert hasattr(cursor, 'fetchone') 178 | assert hasattr(cursor, 'callproc') 179 | assert hasattr(cursor, 'close') 180 | assert hasattr(cursor, 'valid') 181 | assert cursor.valid 182 | assert db._con.open_cursors == 1 183 | for i in range(3): 184 | assert db._usage == i 185 | assert db._con.num_uses == i 186 | assert db._con.num_queries == i 187 | cursor.execute(f'select test{i}') 188 | assert cursor.fetchone() == f'test{i}' 189 | assert cursor.valid 190 | assert db._con.open_cursors == 1 191 | for _i in range(4): 192 | cursor.callproc('test') 193 | cursor.close() 194 | assert not cursor.valid 195 | 
assert db._con.open_cursors == 0 196 | assert db._usage == 7 197 | assert db._con.num_uses == 7 198 | assert db._con.num_queries == 3 199 | cursor.close() 200 | cursor.execute('select test8') 201 | assert cursor.valid 202 | assert db._con.open_cursors == 1 203 | assert cursor.fetchone() == 'test8' 204 | assert db._usage == 8 205 | assert db._con.num_uses == 8 206 | assert db._con.num_queries == 4 207 | assert db._con.valid 208 | db.close() 209 | assert not db._con.valid 210 | assert db._con.open_cursors == 0 211 | assert db._usage == 8 212 | assert db._con.num_uses == 0 213 | assert db._con.num_queries == 0 214 | with pytest.raises(dbapi.InternalError): 215 | db._con.close() 216 | db.close() 217 | with pytest.raises(dbapi.InternalError): 218 | db._con.cursor() 219 | cursor = db.cursor() 220 | assert db._con.valid 221 | cursor.execute('select test11') 222 | assert cursor.fetchone() == 'test11' 223 | cursor.execute('select test12') 224 | assert cursor.fetchone() == 'test12' 225 | cursor.callproc('test') 226 | assert db._usage == 3 227 | assert db._con.num_uses == 3 228 | assert db._con.num_queries == 2 229 | cursor2 = db.cursor() 230 | assert db._con.open_cursors == 2 231 | cursor2.execute('select test13') 232 | assert cursor2.fetchone() == 'test13' 233 | assert db._con.num_queries == 3 234 | db.close() 235 | assert db._con.open_cursors == 0 236 | assert db._con.num_queries == 0 237 | cursor = db.cursor() 238 | assert cursor.valid 239 | cursor.callproc('test') 240 | cursor._cursor.valid = False 241 | assert not cursor.valid 242 | with pytest.raises(dbapi.InternalError): 243 | cursor._cursor.callproc('test') 244 | cursor.callproc('test') 245 | assert cursor.valid 246 | cursor._cursor.callproc('test') 247 | assert db._usage == 2 248 | assert db._con.num_uses == 3 249 | db._con.valid = cursor._cursor.valid = False 250 | cursor.callproc('test') 251 | assert cursor.valid 252 | assert db._usage == 1 253 | assert db._con.num_uses == 1 254 | cursor.execute('set this') 255 | 
db.commit() 256 | cursor.execute('set that') 257 | db.rollback() 258 | assert db._con.session == ['this', 'commit', 'that', 'rollback'] 259 | 260 | 261 | def test_connection_context_handler(): 262 | db = steady_db_connect( 263 | dbapi, 0, None, None, None, True, 264 | 'SteadyDBTestDB', user='SteadyDBTestUser') 265 | assert db._con.session == [] 266 | with db as con: 267 | con.cursor().execute('select test') 268 | assert db._con.session == ['commit'] 269 | try: 270 | with db as con: 271 | con.cursor().execute('error') 272 | except dbapi.ProgrammingError: 273 | error = True 274 | else: 275 | error = False 276 | assert error 277 | assert db._con.session == ['commit', 'rollback'] 278 | 279 | 280 | def test_cursor_context_handler(): 281 | db = steady_db_connect( 282 | dbapi, 0, None, None, None, True, 283 | 'SteadyDBTestDB', user='SteadyDBTestUser') 284 | assert db._con.open_cursors == 0 285 | with db.cursor() as cursor: 286 | assert db._con.open_cursors == 1 287 | cursor.execute('select test') 288 | assert cursor.fetchone() == 'test' 289 | assert db._con.open_cursors == 0 290 | 291 | 292 | def test_cursor_as_iterator_provided(): 293 | db = steady_db_connect( 294 | dbapi, 0, None, None, None, True, 295 | 'SteadyDBTestDB', user='SteadyDBTestUser') 296 | assert db._con.open_cursors == 0 297 | cursor = db.cursor() 298 | assert db._con.open_cursors == 1 299 | cursor.execute('select test') 300 | _cursor = cursor._cursor 301 | try: 302 | assert not hasattr(_cursor, 'iter') 303 | _cursor.__iter__ = lambda: ['test-iter'] 304 | assert list(iter(cursor)) == ['test'] 305 | finally: 306 | del _cursor.__iter__ 307 | cursor.close() 308 | assert db._con.open_cursors == 0 309 | 310 | 311 | def test_cursor_as_iterator_created(): 312 | db = steady_db_connect( 313 | dbapi, 0, None, None, None, True, 314 | 'SteadyDBTestDB', user='SteadyDBTestUser') 315 | assert db._con.open_cursors == 0 316 | cursor = db.cursor() 317 | assert db._con.open_cursors == 1 318 | cursor.execute('select test') 
319 | assert list(iter(cursor)) == ['test'] 320 | cursor.close() 321 | assert db._con.open_cursors == 0 322 | 323 | 324 | def test_connection_creator_function(): 325 | db1 = steady_db_connect( 326 | dbapi, 0, None, None, None, True, 327 | 'SteadyDBTestDB', user='SteadyDBTestUser') 328 | db2 = steady_db_connect( 329 | dbapi.connect, 0, None, None, None, True, 330 | 'SteadyDBTestDB', user='SteadyDBTestUser') 331 | assert db1.dbapi() == db2.dbapi() 332 | assert db1.threadsafety() == db2.threadsafety() 333 | assert db1._creator == db2._creator 334 | assert db1._args == db2._args 335 | assert db1._kwargs == db2._kwargs 336 | db2.close() 337 | db1.close() 338 | 339 | 340 | def test_connection_maxusage(): 341 | db = steady_db_connect(dbapi, 10) 342 | cursor = db.cursor() 343 | for i in range(100): 344 | cursor.execute(f'select test{i}') 345 | r = cursor.fetchone() 346 | assert r == f'test{i}' 347 | assert db._con.valid 348 | j = i % 10 + 1 349 | assert db._usage == j 350 | assert db._con.num_uses == j 351 | assert db._con.num_queries == j 352 | assert db._con.open_cursors == 1 353 | db.begin() 354 | for i in range(100): 355 | cursor.callproc('test') 356 | assert db._con.valid 357 | if i == 49: 358 | db.commit() 359 | j = i % 10 + 1 if i > 49 else i + 11 360 | assert db._usage == j 361 | assert db._con.num_uses == j 362 | j = 0 if i > 49 else 10 363 | assert db._con.num_queries == j 364 | for i in range(10): 365 | if i == 7: 366 | db._con.valid = cursor._cursor.valid = False 367 | cursor.execute(f'select test{i}') 368 | r = cursor.fetchone() 369 | assert r == f'test{i}' 370 | j = i % 7 + 1 371 | assert db._usage == j 372 | assert db._con.num_uses == j 373 | assert db._con.num_queries == j 374 | for i in range(10): 375 | if i == 5: 376 | db._con.valid = cursor._cursor.valid = False 377 | cursor.callproc('test') 378 | j = (i + (3 if i < 5 else -5)) % 10 + 1 379 | assert db._usage == j 380 | assert db._con.num_uses == j 381 | j = 3 if i < 5 else 0 382 | assert 
db._con.num_queries == j 383 | db.close() 384 | cursor.execute('select test1') 385 | assert cursor.fetchone() == 'test1' 386 | assert db._usage == 1 387 | assert db._con.num_uses == 1 388 | assert db._con.num_queries == 1 389 | 390 | 391 | def test_connection_setsession(): 392 | db = steady_db_connect(dbapi, 3, ('set time zone', 'set datestyle')) 393 | assert hasattr(db, '_usage') 394 | assert db._usage == 0 395 | assert hasattr(db._con, 'open_cursors') 396 | assert db._con.open_cursors == 0 397 | assert hasattr(db._con, 'num_uses') 398 | assert db._con.num_uses == 2 399 | assert hasattr(db._con, 'num_queries') 400 | assert db._con.num_queries == 0 401 | assert hasattr(db._con, 'session') 402 | assert tuple(db._con.session) == ('time zone', 'datestyle') 403 | for _i in range(11): 404 | db.cursor().execute('select test') 405 | assert db._con.open_cursors == 0 406 | assert db._usage == 2 407 | assert db._con.num_uses == 4 408 | assert db._con.num_queries == 2 409 | assert db._con.session == ['time zone', 'datestyle'] 410 | db.cursor().execute('set test') 411 | assert db._con.open_cursors == 0 412 | assert db._usage == 3 413 | assert db._con.num_uses == 5 414 | assert db._con.num_queries == 2 415 | assert db._con.session == ['time zone', 'datestyle', 'test'] 416 | db.cursor().execute('select test') 417 | assert db._con.open_cursors == 0 418 | assert db._usage == 1 419 | assert db._con.num_uses == 3 420 | assert db._con.num_queries == 1 421 | assert db._con.session == ['time zone', 'datestyle'] 422 | db.cursor().execute('set test') 423 | assert db._con.open_cursors == 0 424 | assert db._usage == 2 425 | assert db._con.num_uses == 4 426 | assert db._con.num_queries == 1 427 | assert db._con.session == ['time zone', 'datestyle', 'test'] 428 | db.cursor().execute('select test') 429 | assert db._con.open_cursors == 0 430 | assert db._usage == 3 431 | assert db._con.num_uses == 5 432 | assert db._con.num_queries == 2 433 | assert db._con.session == ['time zone', 
'datestyle', 'test'] 434 | db.close() 435 | db.cursor().execute('set test') 436 | assert db._con.open_cursors == 0 437 | assert db._usage == 1 438 | assert db._con.num_uses == 3 439 | assert db._con.num_queries == 0 440 | assert db._con.session == ['time zone', 'datestyle', 'test'] 441 | db.close() 442 | db.cursor().execute('select test') 443 | assert db._con.open_cursors == 0 444 | assert db._usage == 1 445 | assert db._con.num_uses == 3 446 | assert db._con.num_queries == 1 447 | assert db._con.session == ['time zone', 'datestyle'] 448 | 449 | 450 | def test_connection_failures(): 451 | db = steady_db_connect(dbapi) 452 | db.close() 453 | db.cursor() 454 | db = steady_db_connect(dbapi, failures=dbapi.InternalError) 455 | db.close() 456 | db.cursor() 457 | db = steady_db_connect(dbapi, failures=dbapi.OperationalError) 458 | db.close() 459 | with pytest.raises(dbapi.InternalError): 460 | db.cursor() 461 | db = steady_db_connect(dbapi, failures=( 462 | dbapi.OperationalError, dbapi.InterfaceError)) 463 | db.close() 464 | with pytest.raises(dbapi.InternalError): 465 | db.cursor() 466 | db = steady_db_connect(dbapi, failures=( 467 | dbapi.OperationalError, dbapi.InterfaceError, dbapi.InternalError)) 468 | db.close() 469 | db.cursor() 470 | 471 | 472 | def test_connection_failure_error(): 473 | db = steady_db_connect(dbapi) 474 | cursor = db.cursor() 475 | db.close() 476 | cursor.execute('select test') 477 | cursor = db.cursor() 478 | db.close() 479 | with pytest.raises(dbapi.ProgrammingError): 480 | cursor.execute('error') 481 | 482 | 483 | def test_connection_set_sizes(): 484 | db = steady_db_connect(dbapi) 485 | cursor = db.cursor() 486 | cursor.execute('get sizes') 487 | result = cursor.fetchone() 488 | assert result == ([], {}) 489 | cursor.setinputsizes([7, 42, 6]) 490 | cursor.setoutputsize(9) 491 | cursor.setoutputsize(15, 3) 492 | cursor.setoutputsize(42, 7) 493 | cursor.execute('get sizes') 494 | result = cursor.fetchone() 495 | assert result == ([7, 42, 6], 
{None: 9, 3: 15, 7: 42}) 496 | cursor.execute('get sizes') 497 | result = cursor.fetchone() 498 | assert result == ([], {}) 499 | cursor.setinputsizes([6, 42, 7]) 500 | cursor.setoutputsize(7) 501 | cursor.setoutputsize(15, 3) 502 | cursor.setoutputsize(42, 9) 503 | db.close() 504 | cursor.execute('get sizes') 505 | result = cursor.fetchone() 506 | assert result == ([6, 42, 7], {None: 7, 3: 15, 9: 42}) 507 | 508 | 509 | def test_connection_ping_check(): 510 | con_cls = dbapi.Connection 511 | con_cls.has_ping = False 512 | con_cls.num_pings = 0 513 | db = steady_db_connect(dbapi) 514 | db.cursor().execute('select test') 515 | assert con_cls.num_pings == 0 516 | db.close() 517 | db.cursor().execute('select test') 518 | assert con_cls.num_pings == 0 519 | assert db._ping_check() is None 520 | assert con_cls.num_pings == 1 521 | db = steady_db_connect(dbapi, ping=7) 522 | db.cursor().execute('select test') 523 | assert con_cls.num_pings == 2 524 | db.close() 525 | db.cursor().execute('select test') 526 | assert con_cls.num_pings == 2 527 | assert db._ping_check() is None 528 | assert con_cls.num_pings == 2 529 | con_cls.has_ping = True 530 | db = steady_db_connect(dbapi) 531 | db.cursor().execute('select test') 532 | assert con_cls.num_pings == 2 533 | db.close() 534 | db.cursor().execute('select test') 535 | assert con_cls.num_pings == 2 536 | assert db._ping_check() 537 | assert con_cls.num_pings == 3 538 | db = steady_db_connect(dbapi, ping=1) 539 | db.cursor().execute('select test') 540 | assert con_cls.num_pings == 3 541 | db.close() 542 | db.cursor().execute('select test') 543 | assert con_cls.num_pings == 3 544 | assert db._ping_check() 545 | assert con_cls.num_pings == 4 546 | db.close() 547 | assert db._ping_check() 548 | assert con_cls.num_pings == 5 549 | db = steady_db_connect(dbapi, ping=7) 550 | db.cursor().execute('select test') 551 | assert con_cls.num_pings == 7 552 | db.close() 553 | db.cursor().execute('select test') 554 | assert con_cls.num_pings == 
9 555 | db = steady_db_connect(dbapi, ping=3) 556 | assert con_cls.num_pings == 9 557 | db.cursor() 558 | assert con_cls.num_pings == 10 559 | db.close() 560 | cursor = db.cursor() 561 | assert con_cls.num_pings == 11 562 | cursor.execute('select test') 563 | assert con_cls.num_pings == 11 564 | db = steady_db_connect(dbapi, ping=5) 565 | assert con_cls.num_pings == 11 566 | db.cursor() 567 | assert con_cls.num_pings == 11 568 | db.close() 569 | cursor = db.cursor() 570 | assert con_cls.num_pings == 11 571 | cursor.execute('select test') 572 | assert con_cls.num_pings == 12 573 | db.close() 574 | cursor = db.cursor() 575 | assert con_cls.num_pings == 12 576 | cursor.execute('select test') 577 | assert con_cls.num_pings == 13 578 | db = steady_db_connect(dbapi, ping=7) 579 | assert con_cls.num_pings == 13 580 | db.cursor() 581 | assert con_cls.num_pings == 14 582 | db.close() 583 | cursor = db.cursor() 584 | assert con_cls.num_pings == 15 585 | cursor.execute('select test') 586 | assert con_cls.num_pings == 16 587 | db.close() 588 | cursor = db.cursor() 589 | assert con_cls.num_pings == 17 590 | cursor.execute('select test') 591 | assert con_cls.num_pings == 18 592 | db.close() 593 | cursor.execute('select test') 594 | assert con_cls.num_pings == 20 595 | con_cls.has_ping = False 596 | con_cls.num_pings = 0 597 | 598 | 599 | def test_begin_transaction(): 600 | db = steady_db_connect(dbapi, database='ok') 601 | cursor = db.cursor() 602 | cursor.close() 603 | cursor.execute('select test12') 604 | assert cursor.fetchone() == 'test12' 605 | db.begin() 606 | cursor = db.cursor() 607 | cursor.close() 608 | with pytest.raises(dbapi.InternalError): 609 | cursor.execute('select test12') 610 | cursor.execute('select test12') 611 | assert cursor.fetchone() == 'test12' 612 | db.close() 613 | db.begin() 614 | with pytest.raises(dbapi.InternalError): 615 | cursor.execute('select test12') 616 | cursor.execute('select test12') 617 | assert cursor.fetchone() == 'test12' 618 | 
db.begin() 619 | with pytest.raises(dbapi.ProgrammingError): 620 | cursor.execute('error') 621 | cursor.close() 622 | cursor.execute('select test12') 623 | assert cursor.fetchone() == 'test12' 624 | 625 | 626 | def test_with_begin_extension(): 627 | db = steady_db_connect(dbapi, database='ok') 628 | db._con._begin_called_with = None 629 | 630 | def begin(a, b=None, c=7): 631 | db._con._begin_called_with = (a, b, c) 632 | 633 | db._con.begin = begin 634 | db.begin(42, 6) 635 | cursor = db.cursor() 636 | cursor.execute('select test13') 637 | assert cursor.fetchone() == 'test13' 638 | assert db._con._begin_called_with == (42, 6, 7) 639 | 640 | 641 | def test_cancel_transaction(): 642 | db = steady_db_connect(dbapi, database='ok') 643 | cursor = db.cursor() 644 | db.begin() 645 | cursor.execute('select test14') 646 | assert cursor.fetchone() == 'test14' 647 | db.cancel() 648 | cursor.execute('select test14') 649 | assert cursor.fetchone() == 'test14' 650 | 651 | 652 | def test_with_cancel_extension(): 653 | db = steady_db_connect(dbapi, database='ok') 654 | db._con._cancel_called = None 655 | 656 | def cancel(): 657 | db._con._cancel_called = 'yes' 658 | 659 | db._con.cancel = cancel 660 | db.begin() 661 | cursor = db.cursor() 662 | cursor.execute('select test15') 663 | assert cursor.fetchone() == 'test15' 664 | db.cancel() 665 | assert db._con._cancel_called == 'yes' 666 | 667 | 668 | def test_reset_transaction(): 669 | db = steady_db_connect(dbapi, database='ok') 670 | db.begin() 671 | assert not db._con.session 672 | db.close() 673 | assert not db._con.session 674 | db = steady_db_connect(dbapi, database='ok', closeable=False) 675 | db.begin() 676 | assert not db._con.session 677 | db.close() 678 | assert db._con.session == ['rollback'] 679 | 680 | 681 | def test_commit_error(): 682 | db = steady_db_connect(dbapi, database='ok') 683 | db.begin() 684 | assert not db._con.session 685 | assert db._con.valid 686 | db.commit() 687 | assert db._con.session == ['commit'] 
688 | assert db._con.valid 689 | db.begin() 690 | db._con.valid = False 691 | con = db._con 692 | with pytest.raises(dbapi.InternalError): 693 | db.commit() 694 | assert not db._con.session 695 | assert db._con.valid 696 | assert con is not db._con 697 | db.begin() 698 | assert not db._con.session 699 | assert db._con.valid 700 | db.commit() 701 | assert db._con.session == ['commit'] 702 | assert db._con.valid 703 | 704 | 705 | def test_rollback_error(): 706 | db = steady_db_connect(dbapi, database='ok') 707 | db.begin() 708 | assert not db._con.session 709 | assert db._con.valid 710 | db.rollback() 711 | assert db._con.session == ['rollback'] 712 | assert db._con.valid 713 | db.begin() 714 | db._con.valid = False 715 | con = db._con 716 | with pytest.raises(dbapi.InternalError): 717 | db.rollback() 718 | assert not db._con.session 719 | assert db._con.valid 720 | assert con is not db._con 721 | db.begin() 722 | assert not db._con.session 723 | assert db._con.valid 724 | db.rollback() 725 | assert db._con.session == ['rollback'] 726 | assert db._con.valid 727 | -------------------------------------------------------------------------------- /tests/test_steady_pg.py: -------------------------------------------------------------------------------- 1 | """Test the SteadyPg module. 2 | 3 | Note: 4 | We do not test the real PyGreSQL module, but we just 5 | mock the basic connection functionality of that module. 6 | We assume that the PyGreSQL module will detect lost 7 | connections correctly and set the status flag accordingly. 
8 | 9 | Copyright and credit info: 10 | 11 | * This test was contributed by Christoph Zwerschke 12 | """ 13 | 14 | import sys 15 | 16 | import pg 17 | import pytest 18 | 19 | from dbutils.steady_pg import SteadyPgConnection 20 | 21 | 22 | def test_version(): 23 | from dbutils import __version__, steady_pg 24 | assert steady_pg.__version__ == __version__ 25 | assert steady_pg.SteadyPgConnection.version == __version__ 26 | 27 | 28 | def test_mocked_connection(): 29 | db_cls = pg.DB 30 | db = db_cls( 31 | 'SteadyPgTestDB', user='SteadyPgTestUser') 32 | assert hasattr(db, 'db') 33 | assert hasattr(db.db, 'status') 34 | assert db.db.status 35 | assert hasattr(db.db, 'query') 36 | assert hasattr(db.db, 'close') 37 | assert not hasattr(db.db, 'reopen') 38 | assert hasattr(db, 'reset') 39 | assert hasattr(db.db, 'num_queries') 40 | assert hasattr(db.db, 'session') 41 | assert not hasattr(db.db, 'get_tables') 42 | assert hasattr(db.db, 'db') 43 | assert db.db.db == 'SteadyPgTestDB' 44 | assert hasattr(db.db, 'user') 45 | assert db.db.user == 'SteadyPgTestUser' 46 | assert hasattr(db, 'query') 47 | assert hasattr(db, 'close') 48 | assert hasattr(db, 'reopen') 49 | assert hasattr(db, 'reset') 50 | assert hasattr(db, 'num_queries') 51 | assert hasattr(db, 'session') 52 | assert hasattr(db, 'get_tables') 53 | assert hasattr(db, 'dbname') 54 | assert db.dbname == 'SteadyPgTestDB' 55 | assert hasattr(db, 'user') 56 | assert db.user == 'SteadyPgTestUser' 57 | for i in range(3): 58 | assert db.num_queries == i 59 | assert db.query(f'select test{i}') == f'test{i}' 60 | assert db.db.status 61 | db.reopen() 62 | assert db.db.status 63 | assert db.num_queries == 0 64 | assert db.query('select test4') == 'test4' 65 | assert db.get_tables() == 'test' 66 | db.close() 67 | try: 68 | status = db.db.status 69 | except AttributeError: 70 | status = False 71 | assert not status 72 | with pytest.raises(pg.InternalError): 73 | db.close() 74 | with pytest.raises(pg.InternalError): 75 | 
db.query('select test') 76 | with pytest.raises(pg.InternalError): 77 | db.get_tables() 78 | 79 | 80 | def test_broken_connection(): 81 | with pytest.raises(TypeError): 82 | SteadyPgConnection('wrong') 83 | db = SteadyPgConnection(dbname='ok') 84 | internal_error_cls = sys.modules[db._con.__module__].InternalError 85 | for _i in range(3): 86 | db.close() 87 | del db 88 | with pytest.raises(internal_error_cls): 89 | SteadyPgConnection(dbname='error') 90 | 91 | 92 | @pytest.mark.parametrize("closeable", [False, True]) 93 | def test_close(closeable): 94 | db = SteadyPgConnection(closeable=closeable) 95 | assert db._con.db 96 | assert db._con.valid is True 97 | db.close() 98 | assert closeable ^ (db._con.db is not None and db._con.valid) 99 | db.close() 100 | assert closeable ^ (db._con.db is not None and db._con.valid) 101 | db._close() 102 | assert not db._con.db 103 | db._close() 104 | assert not db._con.db 105 | 106 | 107 | def test_connection(): 108 | db = SteadyPgConnection( 109 | 0, None, 1, 'SteadyPgTestDB', user='SteadyPgTestUser') 110 | assert hasattr(db, 'db') 111 | assert hasattr(db, '_con') 112 | assert db.db == db._con.db 113 | assert hasattr(db, '_usage') 114 | assert db._usage == 0 115 | assert hasattr(db.db, 'status') 116 | assert db.db.status 117 | assert hasattr(db.db, 'query') 118 | assert hasattr(db.db, 'close') 119 | assert not hasattr(db.db, 'reopen') 120 | assert hasattr(db.db, 'reset') 121 | assert hasattr(db.db, 'num_queries') 122 | assert hasattr(db.db, 'session') 123 | assert hasattr(db.db, 'db') 124 | assert db.db.db == 'SteadyPgTestDB' 125 | assert hasattr(db.db, 'user') 126 | assert db.db.user == 'SteadyPgTestUser' 127 | assert not hasattr(db.db, 'get_tables') 128 | assert hasattr(db, 'query') 129 | assert hasattr(db, 'close') 130 | assert hasattr(db, 'reopen') 131 | assert hasattr(db, 'reset') 132 | assert hasattr(db, 'num_queries') 133 | assert hasattr(db, 'session') 134 | assert hasattr(db, 'dbname') 135 | assert db.dbname == 
'SteadyPgTestDB' 136 | assert hasattr(db, 'user') 137 | assert db.user == 'SteadyPgTestUser' 138 | assert hasattr(db, 'get_tables') 139 | for i in range(3): 140 | assert db._usage == i 141 | assert db.num_queries == i 142 | assert db.query(f'select test{i}') == f'test{i}' 143 | assert db.db.status 144 | assert db.get_tables() == 'test' 145 | assert db.db.status 146 | assert db._usage == 4 147 | assert db.num_queries == 3 148 | db.reopen() 149 | assert db.db.status 150 | assert db._usage == 0 151 | assert db.num_queries == 0 152 | assert db.query('select test') == 'test' 153 | assert db.db.status 154 | assert hasattr(db._con, 'status') 155 | assert db._con.status 156 | assert hasattr(db._con, 'close') 157 | assert hasattr(db._con, 'query') 158 | db.close() 159 | try: 160 | status = db.db.status 161 | except AttributeError: 162 | status = False 163 | assert not status 164 | assert hasattr(db._con, 'close') 165 | assert hasattr(db._con, 'query') 166 | internal_error_cls = sys.modules[db._con.__module__].InternalError 167 | with pytest.raises(internal_error_cls): 168 | db._con.close() 169 | with pytest.raises(internal_error_cls): 170 | db._con.query('select test') 171 | assert db.query('select test') == 'test' 172 | assert db.db.status 173 | assert db._usage == 1 174 | assert db.num_queries == 1 175 | db.db.status = False 176 | assert not db.db.status 177 | assert db.query('select test') == 'test' 178 | assert db.db.status 179 | assert db._usage == 1 180 | assert db.num_queries == 1 181 | db.db.status = False 182 | assert not db.db.status 183 | assert db.get_tables() == 'test' 184 | assert db.db.status 185 | assert db._usage == 1 186 | assert db.num_queries == 0 187 | 188 | 189 | def test_connection_context_handler(): 190 | db = SteadyPgConnection( 191 | 0, None, 1, 'SteadyPgTestDB', user='SteadyPgTestUser') 192 | assert db.session == [] 193 | with db: 194 | db.query('select test') 195 | assert db.session == ['begin', 'commit'] 196 | try: 197 | with db: 198 | 
db.query('error') 199 | except pg.ProgrammingError: 200 | error = True 201 | else: 202 | error = False 203 | assert error 204 | assert db._con.session == ['begin', 'commit', 'begin', 'rollback'] 205 | 206 | 207 | def test_connection_maxusage(): 208 | db = SteadyPgConnection(10) 209 | for i in range(100): 210 | r = db.query(f'select test{i}') 211 | assert r == f'test{i}' 212 | assert db.db.status 213 | j = i % 10 + 1 214 | assert db._usage == j 215 | assert db.num_queries == j 216 | db.begin() 217 | for i in range(100): 218 | r = db.get_tables() 219 | assert r == 'test' 220 | assert db.db.status 221 | if i == 49: 222 | db.commit() 223 | j = i % 10 + 1 if i > 49 else i + 11 224 | assert db._usage == j 225 | j = 0 if i > 49 else 10 226 | assert db.num_queries == j 227 | for i in range(10): 228 | if i == 7: 229 | db.db.status = False 230 | r = db.query(f'select test{i}') 231 | assert r == f'test{i}' 232 | j = i % 7 + 1 233 | assert db._usage == j 234 | assert db.num_queries == j 235 | for i in range(10): 236 | if i == 5: 237 | db.db.status = False 238 | r = db.get_tables() 239 | assert r == 'test' 240 | j = (i + (3 if i < 5 else -5)) % 10 + 1 241 | assert db._usage == j 242 | j = 3 if i < 5 else 0 243 | assert db.num_queries == j 244 | db.close() 245 | assert db.query('select test1') == 'test1' 246 | assert db._usage == 1 247 | assert db.num_queries == 1 248 | db.reopen() 249 | assert db._usage == 0 250 | assert db.num_queries == 0 251 | assert db.query('select test2') == 'test2' 252 | assert db._usage == 1 253 | assert db.num_queries == 1 254 | 255 | 256 | def test_connection_setsession(): 257 | db = SteadyPgConnection(3, ('set time zone', 'set datestyle')) 258 | assert hasattr(db, 'num_queries') 259 | assert db.num_queries == 0 260 | assert hasattr(db, 'session') 261 | assert tuple(db.session) == ('time zone', 'datestyle') 262 | for _i in range(11): 263 | db.query('select test') 264 | assert db.num_queries == 2 265 | assert db.session == ['time zone', 'datestyle'] 
266 | db.query('set test') 267 | assert db.num_queries == 2 268 | assert db.session == ['time zone', 'datestyle', 'test'] 269 | db.query('select test') 270 | assert db.num_queries == 1 271 | assert db.session == ['time zone', 'datestyle'] 272 | db.close() 273 | db.query('set test') 274 | assert db.num_queries == 0 275 | assert db.session == ['time zone', 'datestyle', 'test'] 276 | 277 | 278 | @pytest.mark.parametrize("closeable", [False, True]) 279 | def test_begin(closeable): 280 | db = SteadyPgConnection(closeable=closeable) 281 | db.begin() 282 | assert db.session == ['begin'] 283 | db.query('select test') 284 | assert db.num_queries == 1 285 | db.close() 286 | db.query('select test') 287 | assert db.num_queries == 1 288 | db.begin() 289 | assert db.session == ['begin'] 290 | db.db.close() 291 | with pytest.raises(pg.InternalError): 292 | db.query('select test') 293 | assert db.num_queries == 0 294 | db.query('select test') 295 | assert db.num_queries == 1 296 | assert db.begin('select sql:begin') == 'sql:begin' 297 | assert db.num_queries == 2 298 | 299 | 300 | @pytest.mark.parametrize("closeable", [False, True]) 301 | def test_end(closeable): 302 | db = SteadyPgConnection(closeable=closeable) 303 | db.begin() 304 | db.query('select test') 305 | db.end() 306 | assert db.session == ['begin', 'end'] 307 | db.db.close() 308 | db.query('select test') 309 | assert db.num_queries == 1 310 | assert db.begin('select sql:end') == 'sql:end' 311 | assert db.num_queries == 2 312 | db.begin() 313 | db.query('select test') 314 | db.commit() 315 | assert db.session == ['begin', 'commit'] 316 | db.db.close() 317 | db.query('select test') 318 | assert db.num_queries == 1 319 | assert db.begin('select sql:commit') == 'sql:commit' 320 | assert db.num_queries == 2 321 | db.begin() 322 | db.query('select test') 323 | db.rollback() 324 | assert db.session == ['begin', 'rollback'] 325 | db.db.close() 326 | db.query('select test') 327 | assert db.num_queries == 1 328 | assert 
db.begin('select sql:rollback') == 'sql:rollback' 329 | assert db.num_queries == 2 330 | -------------------------------------------------------------------------------- /tests/test_threading_local.py: -------------------------------------------------------------------------------- 1 | """Test the ThreadingLocal module.""" 2 | 3 | from threading import Thread 4 | 5 | from dbutils.persistent_db import local 6 | 7 | 8 | def test_getattr(): 9 | my_data = local() 10 | my_data.number = 42 11 | assert my_data.number == 42 12 | 13 | 14 | def test_dict(): 15 | my_data = local() 16 | my_data.number = 42 17 | assert my_data.__dict__ == {'number': 42} 18 | my_data.__dict__.setdefault('widgets', []) 19 | assert my_data.widgets == [] 20 | 21 | 22 | def test_threadlocal(): 23 | def f(): 24 | items = sorted(my_data.__dict__.items()) 25 | log.append(items) 26 | my_data.number = 11 27 | log.append(my_data.number) 28 | my_data = local() 29 | my_data.number = 42 30 | log = [] 31 | thread = Thread(target=f) 32 | thread.start() 33 | thread.join() 34 | assert log == [[], 11] 35 | assert my_data.number == 42 36 | 37 | 38 | def test_subclass(): 39 | 40 | class MyLocal(local): 41 | number = 2 42 | initialized = 0 43 | 44 | def __init__(self, **kw): 45 | if self.initialized: 46 | raise SystemError 47 | self.initialized = 1 48 | self.__dict__.update(kw) 49 | 50 | def squared(self): 51 | return self.number ** 2 52 | 53 | my_data = MyLocal(color='red') 54 | assert my_data.number == 2 55 | assert my_data.color == 'red' 56 | del my_data.color 57 | assert my_data.squared() == 4 58 | 59 | def f(): 60 | items = sorted(my_data.__dict__.items()) 61 | log.append(items) 62 | my_data.number = 7 63 | log.append(my_data.number) 64 | 65 | log = [] 66 | thread = Thread(target=f) 67 | thread.start() 68 | thread.join() 69 | assert log == [[('color', 'red'), ('initialized', 1)], 7] 70 | assert my_data.number == 2 71 | assert not hasattr(my_data, 'color') 72 | 73 | class MyLocal(local): 74 | __slots__ = 
('number',) 75 | 76 | my_data = MyLocal() 77 | my_data.number = 42 78 | my_data.color = 'red' 79 | thread = Thread(target=f) 80 | thread.start() 81 | thread.join() 82 | assert my_data.number == 7 83 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = py3{9,10,11,12,13}, ruff, manifest, docs, spell 3 | 4 | [testenv:py37] 5 | skip_install = true 6 | deps = dist/dbutils-3.1.1-py3-none-any.whl[tests] 7 | commands = pytest {posargs} 8 | 9 | [testenv:py38] 10 | skip_install = true 11 | deps = dist/dbutils-3.1.1-py3-none-any.whl[tests] 12 | commands = pytest {posargs} 13 | 14 | [testenv] 15 | setenv = 16 | PYTHONPATH = {toxinidir} 17 | extras = tests 18 | commands = pytest {posargs} 19 | 20 | [testenv:spell] 21 | basepython = python3.12 22 | deps = codespell 23 | commands = codespell . 24 | 25 | [testenv:ruff] 26 | basepython = python3.12 27 | deps = ruff 28 | commands = ruff check . 29 | 30 | [testenv:manifest] 31 | basepython = python3.12 32 | deps = check-manifest 33 | commands = check-manifest -v 34 | 35 | [testenv:docs] 36 | basepython = python3.12 37 | extras = docs 38 | changedir = docs 39 | commands = python make.py 40 | --------------------------------------------------------------------------------