├── .github └── workflows │ └── tox.yml ├── .gitignore ├── LICENSE ├── MANIFEST.in ├── README.rst ├── docker-compose.yml ├── example ├── __init__.py └── simple_web_server.py ├── hasql ├── __init__.py ├── aiopg.py ├── aiopg_sa.py ├── asyncpg.py ├── asyncpgsa.py ├── asyncsqlalchemy.py ├── balancer_policy │ ├── __init__.py │ ├── base.py │ ├── greedy.py │ ├── random_weighted.py │ └── round_robin.py ├── base.py ├── metrics.py ├── psycopg3.py ├── py.typed └── utils.py ├── pylama.ini ├── pytest.ini ├── resources ├── diagram.svg └── logo.svg ├── setup.cfg ├── setup.py ├── tests ├── __init__.py ├── conftest.py ├── mocks │ ├── __init__.py │ └── pool_manager.py ├── test_aiopg.py ├── test_aiopg_sa.py ├── test_asyncpg.py ├── test_asyncsqlalchemy.py ├── test_balancer_policy.py ├── test_base_pool_manager.py ├── test_metrics.py ├── test_psycopg3.py ├── test_trouble.py └── test_utils.py └── tox.ini /.github/workflows/tox.yml: -------------------------------------------------------------------------------- 1 | name: tox 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | branches: [ master ] 8 | 9 | jobs: 10 | lint: 11 | 12 | runs-on: ubuntu-latest 13 | 14 | strategy: 15 | matrix: 16 | linter: 17 | - lint 18 | - checkdoc 19 | - mypy 20 | 21 | steps: 22 | - uses: actions/checkout@v2 23 | - name: tox ${{ matrix.linter }} 24 | uses: docker://snakepacker/python:all 25 | env: 26 | TOXENV: ${{ matrix.linter }} 27 | with: 28 | args: tox 29 | 30 | build: 31 | needs: lint 32 | runs-on: ubuntu-latest 33 | 34 | services: 35 | postgres: 36 | image: mdillon/postgis:11-alpine 37 | ports: 38 | - 5432:5432 39 | env: 40 | POSTGRES_USER: test 41 | POSTGRES_PASSWORD: test 42 | POSTGRES_DB: test 43 | 44 | strategy: 45 | fail-fast: false 46 | 47 | matrix: 48 | toxenv: 49 | - py38 50 | - py39 51 | - py310 52 | - py311 53 | 54 | steps: 55 | - uses: actions/checkout@v2 56 | 57 | - name: tox ${{ matrix.toxenv }} 58 | uses: docker://snakepacker/python:all 59 | env: 60 | FORCE_COLOR: 1 61 | 
TOXENV: ${{ matrix.toxenv }} 62 | PG_DSN: postgres://test:test@postgres:5432/test 63 | COVERALLS_REPO_TOKEN: ${{ secrets.COVERALLS_REPO_TOKEN }} 64 | with: 65 | args: /bin/bash -c "wait-for-port postgres:5432 && tox" 66 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | .vscode/ 3 | .DS_Store 4 | 5 | build/ 6 | env/ 7 | __pycache__/ 8 | .pytest_cache/ 9 | dist/ 10 | *.egg-info/ 11 | *.ipynb 12 | .ipynb_checkpoints/ 13 | version.py 14 | 15 | *.log 16 | .coverage 17 | .tox/ 18 | htmlcov/ 19 | /.venv* 20 | 21 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 
25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. 
You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. 
(Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-exclude tests * 2 | recursive-exclude __pycache__ * 3 | exclude .* 4 | 5 | include README.rst 6 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | .. image:: https://raw.githubusercontent.com/aiokitchen/hasql/master/resources/logo.svg 2 | :width: 365 3 | :height: 265 4 | 5 | hasql 6 | ===== 7 | 8 | ``hasql`` is a library for acquiring actual connections to masters and replicas 9 | in high available PostgreSQL clusters. 10 | 11 | .. 
image:: https://raw.githubusercontent.com/aiokitchen/hasql/master/resources/diagram.svg 12 | 13 | Features 14 | ======== 15 | 16 | * completely asynchronous api 17 | * automatic detection of the host role in the cluster 18 | * health-checks for each host and automatic traffic outage for 19 | unavailable hosts 20 | * autodetection of hosts role changes, in case replica 21 | host will be promoted to master 22 | * different policies for load balancing 23 | * support for ``asyncpg``, ``psycopg3``, ``aiopg``, ``sqlalchemy`` and ``asyncpgsa`` 24 | 25 | 26 | Usage 27 | ===== 28 | 29 | Some useful examples 30 | 31 | Creating connection pool 32 | ************************ 33 | 34 | When acquiring a connection, the connection object of the used driver is 35 | returned (``aiopg.connection.Connection`` for **aiopg** and 36 | ``asyncpg.pool.PoolConnectionProxy`` for **asyncpg** and **asyncpgsa**) 37 | 38 | 39 | Database URL specification rules 40 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 41 | 42 | * Multiple hosts should be passed comma separated 43 | 44 | * multihost example: 45 | 46 | * ``postgresql://db1,db2,db3/`` 47 | * split result: 48 | 49 | * ``postgresql://db1:5432/`` 50 | * ``postgresql://db2:5432/`` 51 | * ``postgresql://db3:5432/`` 52 | * The non-default port for each host might be passed after hostnames. e.g. 53 | 54 | * multihost example: 55 | 56 | * ``postgresql://db1:1234,db2:5678,db3/`` 57 | * split result: 58 | 59 | * ``postgresql://db1:1234/`` 60 | * ``postgresql://db2:5678/`` 61 | * ``postgresql://db3:5432/`` 62 | * The special case for non-default port for all hosts 63 | 64 | * multihost example: 65 | 66 | * ``postgresql://db1,db2,db3:6432/`` 67 | * split result: 68 | 69 | * ``postgresql://db1:6432/`` 70 | * ``postgresql://db2:6432/`` 71 | * ``postgresql://db3:6432/`` 72 | 73 | 74 | For ``aiopg`` or ``aiopg.sa`` 75 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 76 | 77 | **aiopg** must be installed as a requirement. 78 | 79 | Code example using ``aiopg``: 80 | 81 | ..
code-block:: python 82 | 83 | from hasql.aiopg import PoolManager 84 | 85 | hosts = ",".join([ 86 | "master-host:5432", 87 | "replica-host-1:5432", 88 | "replica-host-2:5432", 89 | ]) 90 | 91 | multihost_dsn = f"postgresql://user:password@{hosts}/dbname" 92 | 93 | async def create_pool(dsn) -> PoolManager: 94 | pool = PoolManager(multihost_dsn) 95 | 96 | # Waiting for 1 master and 1 replica will be available 97 | await pool.ready(masters_count=1, replicas_count=1) 98 | return pool 99 | 100 | Code example using ``aiopg.sa``: 101 | 102 | .. code-block:: python 103 | 104 | from hasql.aiopg_sa import PoolManager 105 | 106 | hosts = ",".join([ 107 | "master-host:5432", 108 | "replica-host-1:5432", 109 | "replica-host-2:5432", 110 | ]) 111 | 112 | multihost_dsn = f"postgresql://user:password@{hosts}/dbname" 113 | 114 | async def create_pool(dsn) -> PoolManager: 115 | pool = PoolManager(multihost_dsn) 116 | 117 | # Waiting for 1 master and 1 replica will be available 118 | await pool.ready(masters_count=1, replicas_count=1) 119 | return pool 120 | 121 | For ``asyncpg`` 122 | ~~~~~~~~~~~~~~~ 123 | 124 | **asyncpg** must be installed as a requirement 125 | 126 | .. code-block:: python 127 | 128 | from hasql.asyncpg import PoolManager 129 | 130 | hosts = ",".join([ 131 | "master-host:5432", 132 | "replica-host-1:5432", 133 | "replica-host-2:5432", 134 | ]) 135 | 136 | multihost_dsn = f"postgresql://user:password@{hosts}/dbname" 137 | 138 | async def create_pool(dsn) -> PoolManager: 139 | pool = PoolManager(multihost_dsn) 140 | 141 | # Waiting for 1 master and 1 replica will be available 142 | await pool.ready(masters_count=1, replicas_count=1) 143 | return pool 144 | 145 | For ``sqlalchemy`` 146 | ~~~~~~~~~~~~~~~~~~ 147 | 148 | **sqlalchemy[asyncio] & asyncpg** must be installed as requirements 149 | 150 | .. 
code-block:: python 151 | 152 | from hasql.asyncsqlalchemy import PoolManager 153 | 154 | hosts = ",".join([ 155 | "master-host:5432", 156 | "replica-host-1:5432", 157 | "replica-host-2:5432", 158 | ]) 159 | 160 | multihost_dsn = f"postgresql://user:password@{hosts}/dbname" 161 | 162 | 163 | async def create_pool(dsn) -> PoolManager: 164 | pool = PoolManager( 165 | multihost_dsn, 166 | 167 | # Use master for acquire_replica, if no replicas available 168 | fallback_master=True, 169 | 170 | # You can pass pool-specific options 171 | pool_factory_kwargs=dict( 172 | pool_size=10, 173 | max_overflow=5 174 | ) 175 | ) 176 | 177 | # Waiting for 1 master and 1 replica will be available 178 | await pool.ready(masters_count=1, replicas_count=1) 179 | return pool 180 | 181 | 182 | For ``asyncpgsa`` 183 | ~~~~~~~~~~~~~~~~~ 184 | 185 | **asyncpgsa** must be installed as a requirement 186 | 187 | .. code-block:: python 188 | 189 | from hasql.asyncpgsa import PoolManager 190 | 191 | hosts = ",".join([ 192 | "master-host:5432", 193 | "replica-host-1:5432", 194 | "replica-host-2:5432", 195 | ]) 196 | 197 | multihost_dsn = f"postgresql://user:password@{hosts}/dbname" 198 | 199 | async def create_pool(dsn) -> PoolManager: 200 | pool = PoolManager(multihost_dsn) 201 | 202 | # Waiting for 1 master and 1 replica will be available 203 | await asyncio.gather( 204 | pool.wait_masters_ready(1), 205 | pool.wait_replicas_ready(1) 206 | ) 207 | return pool 208 | 209 | 210 | For ``psycopg3`` 211 | ~~~~~~~~~~~~~~~~ 212 | 213 | **psycopg3** must be installed as a requirement (package name is `psycopg`) 214 | 215 | .. 
code-block:: python 216 | 217 | from hasql.psycopg3 import PoolManager 218 | 219 | 220 | hosts = ",".join([ 221 | "master-host:5432", 222 | "replica-host-1:5432", 223 | "replica-host-2:5432", 224 | ]) 225 | multihost_dsn = f"postgresql://user:password@{hosts}/dbname" 226 | 227 | async def create_pool(dsn) -> PoolManager: 228 | pool = PoolManager(multihost_dsn) 229 | 230 | # Waiting for 1 master and 1 replica will be available 231 | await pool.ready(masters_count=1, replicas_count=1) 232 | return pool 233 | 234 | 235 | Acquiring connections 236 | ********************* 237 | 238 | Connections should be acquired with async context manager: 239 | 240 | Acquiring master connection 241 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~ 242 | 243 | .. code-block:: python 244 | 245 | async def do_something(): 246 | pool = await create_pool(multihost_dsn) 247 | async with pool.acquire(read_only=False) as connection: 248 | ... 249 | 250 | or 251 | 252 | .. code-block:: python 253 | 254 | async def do_something(): 255 | pool = await create_pool(multihost_dsn) 256 | async with pool.acquire_master() as connection: 257 | ... 258 | 259 | Acquiring replica connection 260 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 261 | 262 | .. code-block:: python 263 | 264 | async def do_something(): 265 | pool = await create_pool(multihost_dsn) 266 | async with pool.acquire(read_only=True) as connection: 267 | ... 268 | 269 | or 270 | 271 | .. code-block:: python 272 | 273 | async def do_something(): 274 | pool = await create_pool(multihost_dsn) 275 | async with pool.acquire_replica() as connection: 276 | ... 277 | 278 | Without context manager (really not recommended) 279 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 280 | 281 | .. code-block:: python 282 | 283 | async def do_something(): 284 | pool = await create_pool(multihost_dsn) 285 | connection = await pool.acquire(read_only=False) 286 | await pool.release(connection) 287 | 288 | or more useful 289 | 290 | .. 
code-block:: python 291 | 292 | async def do_something(): 293 | pool = await create_pool(multihost_dsn) 294 | try: 295 | connection = await pool.acquire(read_only=False) 296 | finally: 297 | await pool.release(connection) 298 | 299 | How it works? 300 | ============= 301 | 302 | For each host from dsn string, a connection pool is created. From each pool one 303 | connection is reserved, which is used to check the availability of the host and 304 | its role. The minimum and maximum number of connections in the pool increases 305 | by 1 (to reserve a system connection). 306 | 307 | For each pool a background task is created, in which the host availability and 308 | its role (master or replica) is checked once every `refresh_delay` second. 309 | 310 | When switching hosts roles, hasql detects this with a slight delay. 311 | 312 | For PostgreSQL, when switching the master, all connections to all hosts are 313 | broken (the details of implementing PostgreSQL). 314 | 315 | If there are no available hosts, the methods acquire(), acquire_master(), and 316 | acquire_replica() wait until the host with the desired role startup. 317 | 318 | Overview 319 | ======== 320 | 321 | * hasql.base.BasePoolManager 322 | * ``__init__(dsn, acquire_timeout, refresh_delay, refresh_timeout, fallback_master, master_as_replica_weight, balancer_policy, pool_factory_kwargs)``: 323 | 324 | * ``dsn: str`` - Connection string used by the connection. 325 | 326 | * ``acquire_timeout: Union[int, float]`` - Default timeout (in seconds) 327 | for connection operations. 1 sec by default. 328 | 329 | * ``refresh_delay: Union[int, float]`` - Delay time (in seconds) 330 | between host polls. 1 sec by default. 331 | 332 | * ``refresh_timeout: Union[int, float]`` - Timeout (in seconds) for 333 | trying to connect and get the host role. 1 sec by default. 334 | 335 | * ``fallback_master: bool`` - Use connections from master if replicas 336 | are missing. False by default. 
337 | 338 | * ``master_as_replica_weight: float`` - Probability of using the master 339 | as a replica (from 0. to 1.; 0. - master is not used as a replica; 340 | 1. - master can be used as a replica). 341 | 342 | * ``balancer_policy: type`` - Connection pool balancing policy 343 | (`hasql.balancer_policy.GreedyBalancerPolicy`, 344 | `hasql.balancer_policy.RandomWeightedBalancerPolicy` or 345 | `hasql.balancer_policy.RoundRobinBalancerPolicy`). 346 | 347 | * ``stopwatch_window_size: int`` - Window size for calculating the 348 | median response time of each pool. 349 | 350 | * ``pool_factory_kwargs: Optional[dict]`` - Connection pool creation 351 | parameters that are passed to pool factory. 352 | 353 | * ``get_pool_freesize(pool)`` 354 | Getting the number of free connections in the connection pool. Returns 355 | number of free connections in the connection pool. 356 | 357 | * ``pool`` - Pool for which you to be getting the number of 358 | free connections. 359 | 360 | * coroutine async-with ``acquire_from_pool(pool, **kwargs)`` 361 | Acquire a connection from pool. Returns connection to the database. 362 | 363 | * ``pool`` - Pool from which you to be acquiring the connection. 364 | 365 | * ``kwargs`` - Arguments to be passed to the pool acquire() method. 366 | 367 | * coroutine ``release_to_pool(connection, pool, **kwargs)`` 368 | A coroutine that reverts connection conn to pool for future recycling. 369 | 370 | * ``connection`` - Connection to be released. 371 | 372 | * ``pool`` - Pool to which you are returning the connection. 373 | 374 | * ``kwargs`` - Arguments to be passed to the pool release() method. 375 | 376 | * ``is_connection_closed(connection)`` 377 | Returns True if connection is closed. 378 | 379 | * ``get_last_response_time(pool)`` 380 | Returns database host last response time (in seconds). 381 | 382 | * coroutine async-with 383 | ``acquire(read_only, fallback_master, timeout, **kwargs)`` 384 | Acquire a connection from free pool. 
385 | 386 | * ``readonly: bool`` - ``True`` if need return connection to replica, 387 | ``False`` - to master. False by default. 388 | 389 | * ``fallback_master: Optional[bool]`` - Use connections from master 390 | if replicas are missing. If None, then the default value is used. 391 | 392 | * ``master_as_replica_weight: float`` - Probability of using the master 393 | as a replica (from 0. to 1.; 0. - master is not used as a replica; 394 | 1. - master can be used as a replica). 395 | 396 | * ``timeout: Union[int, float]`` - Timeout (in seconds) for connection 397 | operations. 398 | 399 | * ``kwargs`` - Arguments to be passed to the pool acquire() method. 400 | 401 | * coroutine async-with ``acquire_master(timeout, **kwargs)`` 402 | Acquire a connection from free master pool. 403 | Equivalent ``acquire(read_only=False)`` 404 | 405 | * ``timeout: Union[int, float]`` - Timeout (in seconds) for 406 | connection operations. 407 | 408 | * ``kwargs`` - Arguments to be passed to the pool acquire() method. 409 | 410 | * coroutine async-with 411 | ``acquire_replica(fallback_master, timeout, **kwargs)`` 412 | Acquire a connection from free replica pool. 413 | Equivalent ``acquire(read_only=True)`` 414 | 415 | * ``fallback_master: Optional[bool]`` - Use connections from master if 416 | replicas are missing. If None, then the default value is used. 417 | 418 | * ``master_as_replica_weight: float`` - Probability of using the master 419 | as a replica (from 0. to 1.; 0. - master is not used as a replica; 420 | 1. - master can be used as a replica). 421 | 422 | * ``timeout: Union[int, float]`` - Timeout (in seconds) for connection 423 | operations. 424 | 425 | * ``kwargs`` - Arguments to be passed to the pool acquire() method. 426 | 427 | * coroutine ``release(connection, **kwargs)`` 428 | A coroutine that reverts connection conn to pool for future recycling. 429 | 430 | * ``connection`` - Connection to be released.
431 | * ``kwargs`` - Arguments to be passed to the pool release() method. 432 | 433 | * coroutine ``close()`` 434 | Close pool. Mark all pool connections to be closed on getting back to 435 | pool. Closed pool doesn’t allow to acquire new connections. 436 | 437 | * coroutine ``terminate()`` 438 | Terminate pool. Close pool with instantly closing all acquired 439 | connections also. 440 | 441 | * coroutine ``wait_next_pool_check(timeout)`` 442 | Waiting for the next step to update host roles. 443 | 444 | * coroutine ``ready(masters_count, replicas_count, timeout)`` 445 | Waiting for a connection to the database hosts. If masters_count is 446 | ``None`` and replicas_count is None, then connection to all hosts 447 | is expected. 448 | 449 | * ``masters_count: Optional[int]`` - Minimum number of master hosts. 450 | ``None`` by default. 451 | 452 | * ``replicas_count: Optional[int]`` - Minimum number of replica hosts. 453 | ``None`` by default. 454 | 455 | * ``timeout: Union[int, float]`` - Timeout for database connections. 456 | 10 seconds by default. 457 | 458 | * coroutine ``wait_all_ready()`` 459 | Waiting to connect to all database hosts. 460 | 461 | * coroutine ``wait_masters_ready(masters_count)`` 462 | Waiting for connection to the specified number of 463 | database master servers. 464 | 465 | * ``masters_count: int`` - Minimum number of master hosts. 466 | 467 | * coroutine ``wait_replicas_ready(replicas_count)`` 468 | Waiting for connection to the specified number of 469 | database replica servers. 470 | 471 | * ``replicas_count: int`` - Minimum number of replica hosts. 472 | 473 | * coroutine ``get_pool(read_only, fallback_master)`` 474 | Returns connection pool with the maximum number of free connections. 475 | 476 | * ``readonly: bool`` - True if need return replica pool, 477 | ``False`` - master pool. 478 | 479 | * ``fallback_master: Optional[bool]`` - Returns master pool if 480 | replicas are missing. False by default.
481 | 482 | * coroutine ``get_master_pools()`` 483 | Returns a list of all master pools. 484 | 485 | * coroutine ``get_replica_pools(fallback_master)`` 486 | Returns a list of all replica pools. 487 | 488 | * ``fallback_master: Optional[bool]`` - Returns a list of all master 489 | pools if replicas are missing. False by default. 490 | 491 | * ``pool_is_master(pool)`` 492 | Returns True if connection is master. 493 | 494 | * ``pool_is_replica(pool)`` 495 | Returns True if connection is replica. 496 | 497 | * ``register_connection(connection, pool)`` 498 | Match connection with the pool from which it was taken. 499 | It is necessary for the release() method to work correctly. 500 | 501 | * ``hasql.aiopg.PoolManager`` 502 | 503 | * ``hasql.aiopg_sa.PoolManager`` 504 | 505 | * ``hasql.asyncpg.PoolManager`` 506 | 507 | * ``hasql.asyncpgsa.PoolManager`` 508 | 509 | * ``hasql.psycopg3.PoolManager`` 510 | 511 | Balancer policies 512 | ================= 513 | 514 | * ``hasql.balancer_policy.GreedyBalancerPolicy`` 515 | Chooses pool with the most free connections. If there are several such pools, 516 | a random one is taken. 517 | 518 | * ``hasql.balancer_policy.RandomWeightedBalancerPolicy`` 519 | Chooses random pool according to their weights. The weight is inversely 520 | proportional to the response time of the database of the respective pool 521 | (faster response - higher weight). 
522 | 523 | * ``hasql.balancer_policy.RoundRobinBalancerPolicy`` 524 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | 3 | services: 4 | postgres: 5 | restart: always 6 | image: mdillon/postgis:11-alpine 7 | expose: 8 | - 5432 9 | environment: 10 | POSTGRES_USER: test 11 | POSTGRES_PASSWORD: test 12 | POSTGRES_DB: test 13 | 14 | test: 15 | image: snakepacker/python:all 16 | working_dir: /mnt 17 | command: > 18 | bash -c ' 19 | pip install -U pip tox && wait-for-port postgres:5432 && tox -r 20 | ' 21 | environment: 22 | PG_DSN: postgres://test:test@postgres:5432/test 23 | volumes: 24 | - .:/mnt 25 | depends_on: 26 | - postgres 27 | -------------------------------------------------------------------------------- /example/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiokitchen/hasql/04574ca482a79188c197812b594a9208926cbeca/example/__init__.py -------------------------------------------------------------------------------- /example/simple_web_server.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from dataclasses import asdict 3 | 4 | import aiohttp.web 5 | from aiohttp.web_urldispatcher import View 6 | from aiomisc import entrypoint 7 | from aiomisc.service.aiohttp import AIOHTTPService 8 | 9 | from hasql.aiopg import PoolManager 10 | 11 | parser = argparse.ArgumentParser() 12 | group = parser.add_argument_group('HTTP options') 13 | 14 | group.add_argument("-l", "--address", default="::", 15 | help="Listen HTTP address") 16 | group.add_argument("-p", "--port", type=int, default=8080, 17 | help="Listen HTTP port") 18 | 19 | group.add_argument("--dsn", type=str, help="DSN to connect") 20 | group.add_argument("--pg-maxsize", type=int, help="PG pool max size") 21 | 
class REST(AIOHTTPService):
    """aiomisc AIOHTTP service exposing /master, /replica and /metrics."""

    async def create_application(self) -> aiohttp.web.Application:
        """Build the aiohttp application and attach a ready PoolManager."""
        app = aiohttp.web.Application()

        app.add_routes([
            aiohttp.web.get('/master', MasterHandler),
            aiohttp.web.get('/replica', ReplicaHandler),
            aiohttp.web.get('/metrics', MetricsHandler),
        ])
        # ``arguments`` is the module-level argparse namespace parsed in
        # the ``__main__`` block of this script.
        pool_manager: PoolManager = PoolManager(
            arguments.dsn,
            pool_factory_kwargs=dict(
                maxsize=arguments.pg_maxsize,
                minsize=arguments.pg_minsize
            )
        )

        # Block until at least 1 master and 1 replica are available.
        await pool_manager.ready(masters_count=1, replicas_count=1)
        app['pool'] = pool_manager

        return app
# Package metadata for hasql.

__version_info__ = (0, 8, 0)
__version__ = ".".join(str(part) for part in __version_info__)

package_info = (
    "hasql is a module for acquiring actual connections with masters "
    "and replicas"
)

authors = (
    ("Vladislav Bakaev", "vlad@bakaev.tech"),
    ("Dmitry Orlov", "me@mosquito.su"),
    ("Pavel Mosein", "me@pavkazzz.ru"),
)

authors_email = ", ".join(address for _, address in authors)

__license__ = "Apache 2"
__author__ = ", ".join(
    f"{name} <{address}>" for name, address in authors
)

__maintainer__ = __author__

__all__ = (
    "__author__",
    "__license__",
    "__maintainer__",
    "__version__",
)
    def _driver_metrics(self) -> Sequence[DriverMetrics]:
        """Snapshot per-pool connection statistics for every live pool.

        Entries in ``self.pools`` that are still ``None`` (pool not yet
        created) are skipped by the ``if p`` filter.
        """
        return [
            DriverMetrics(
                # ``or 0`` guards against a falsy maxsize value.
                max=p.maxsize or 0,
                min=p.minsize,
                idle=p.freesize,
                used=p.size - p.freesize,
                # NOTE(review): reads aiopg's private ``_dsn`` attribute --
                # confirm this survives aiopg upgrades.
                host=Dsn.parse(str(p._dsn)).netloc,
            )
            for p in self.pools
            if p
        ]
    def _prepare_pool_factory_kwargs(self, kwargs: dict) -> dict:
        """Pad the asyncpg pool size limits before pool creation.

        One extra connection is added on top of the user-supplied
        ``min_size``/``max_size`` (defaults 1/10) -- presumably reserved
        for the manager's own role-check queries; TODO confirm.

        Note: ``kwargs`` is mutated in place and also returned.
        """
        kwargs["min_size"] = kwargs.get("min_size", 1) + 1
        kwargs["max_size"] = kwargs.get("max_size", 10) + 1
        return kwargs
class PoolManager(AsyncPgPoolManager):
    """asyncpgsa flavour of the asyncpg pool manager.

    Only the pool construction differs: pools are created through
    ``asyncpgsa.create_pool`` instead of ``asyncpg.create_pool``.
    """

    async def _pool_factory(self, dsn: Dsn):
        """Create an asyncpgsa pool for *dsn*."""
        factory_kwargs = self.pool_factory_kwargs
        pool = await asyncpgsa.create_pool(str(dsn), **factory_kwargs)
        return pool
15 | def get_pool_freesize(self, pool: AsyncEngine): 16 | queue_pool: QueuePool = pool.sync_engine.pool 17 | return queue_pool.size() - queue_pool.checkedout() 18 | 19 | def acquire_from_pool(self, pool: AsyncEngine, **kwargs): 20 | return pool.connect() 21 | 22 | async def release_to_pool( # type: ignore 23 | self, 24 | connection: AsyncConnection, 25 | _: AsyncEngine, 26 | **kwargs 27 | ): 28 | await connection.close() 29 | 30 | async def _is_master(self, connection: AsyncConnection): 31 | result = await connection.scalar( 32 | sa.text("SHOW transaction_read_only"), 33 | ) == "off" 34 | await connection.execute(sa.text('COMMIT')) 35 | return result 36 | 37 | async def _pool_factory(self, dsn: Dsn): 38 | # TODO: Add support of psycopg3 after release of sqlalchemy 2.0 39 | d = str(dsn).replace("postgresql", "postgresql+asyncpg") 40 | return create_async_engine(d, **self.pool_factory_kwargs) 41 | 42 | def _prepare_pool_factory_kwargs(self, kwargs: dict) -> dict: 43 | kwargs["pool_size"] = kwargs.get("pool_size", 1) + 1 44 | return kwargs 45 | 46 | async def _close(self, pool: AsyncEngine): 47 | await pool.dispose() 48 | 49 | async def _terminate(self, pool: AsyncEngine): 50 | loop = asyncio.get_running_loop() 51 | await loop.run_in_executor(None, pool.sync_engine.dispose) 52 | 53 | def is_connection_closed(self, connection: AsyncConnection): 54 | return connection.closed 55 | 56 | def host(self, pool: AsyncEngine): 57 | return pool.sync_engine.url.host 58 | 59 | def _driver_metrics(self) -> Sequence[DriverMetrics]: 60 | return [ 61 | DriverMetrics( 62 | max=p.sync_engine.pool.size(), 63 | min=0, 64 | idle=p.sync_engine.pool.checkedin(), 65 | used=p.sync_engine.pool.checkedout(), 66 | host=p.sync_engine.url.host, 67 | ) 68 | for p in self.pools 69 | if p 70 | ] 71 | 72 | 73 | __all__ = ("PoolManager",) 74 | -------------------------------------------------------------------------------- /hasql/balancer_policy/__init__.py: 
class BaseBalancerPolicy(AbstractBalancerPolicy):
    """Common plumbing shared by the concrete balancer policies."""

    def __init__(self, pool_manager: BasePoolManager):
        # Deliberately does not call super().__init__(): the abstract
        # base raises NotImplementedError from its constructor.
        self._pool_manager = pool_manager

    async def get_pool(
        self,
        read_only: bool,
        fallback_master: bool = False,
        master_as_replica_weight: Optional[float] = None,
    ) -> Any:
        """Validate the arguments, roll the master-as-replica dice and
        delegate the actual pool selection to ``_get_pool``."""
        if master_as_replica_weight is not None and not read_only:
            raise ValueError(
                "Field master_as_replica_weight is used only when "
                "read_only is True",
            )

        # random.random() is consumed only when a weight was supplied,
        # exactly as before.
        choose_master_as_replica = (
            master_as_replica_weight is not None
            and 0 < random.random() <= master_as_replica_weight
        )

        return await self._get_pool(
            read_only=read_only,
            fallback_master=fallback_master or choose_master_as_replica,
            choose_master_as_replica=choose_master_as_replica,
        )

    @abstractmethod
    async def _get_pool(
        self,
        read_only: bool,
        fallback_master: bool = False,
        choose_master_as_replica: bool = False,
    ):
        """Return a concrete pool; implemented by each policy."""
class GreedyBalancerPolicy(BaseBalancerPolicy):
    """Pick the pool with the most free connections, ties broken randomly."""

    async def _get_pool(
        self,
        read_only: bool,
        fallback_master: bool = False,
        choose_master_as_replica: bool = False,
    ):
        manager = self._pool_manager
        candidates = []

        if read_only:
            replicas = await manager.get_replica_pools(
                fallback_master=fallback_master,
            )
            candidates.extend(replicas)

        # Masters participate for write traffic, or when the caller
        # rolled "use a master as a replica" and a master exists.
        include_masters = not read_only or (
            choose_master_as_replica and manager.master_pool_count > 0
        )
        if include_masters:
            candidates.extend(await manager.get_master_pools())

        freesize = manager.get_pool_freesize
        best = max(freesize(pool) for pool in candidates)
        ties = [pool for pool in candidates if freesize(pool) == best]
        return random.choice(ties)
): 32 | candidates.extend(await self._pool_manager.get_master_pools()) 33 | 34 | choiced_index = self._weighted_choice( 35 | self._normalize_times( 36 | self._reflect_times( 37 | self._get_response_times(candidates), 38 | ), 39 | ), 40 | ) 41 | 42 | return candidates[choiced_index] 43 | 44 | def _get_response_times(self, pools: list) -> Iterable[Optional[float]]: 45 | for pool in pools: 46 | yield self._pool_manager.get_last_response_time(pool) 47 | 48 | @staticmethod 49 | def _reflect_times( 50 | times: Iterable[Optional[float]], 51 | ) -> Iterable[float]: 52 | list_times = [value or 0 for value in times] 53 | sum_time = sum(list_times) 54 | yield from map(lambda x: sum_time - x + MACHINE_EPSILON, list_times) 55 | 56 | @staticmethod 57 | def _normalize_times(times: Iterable[float]) -> Iterable[float]: 58 | list_times = list(times) 59 | sum_time = sum(list_times) 60 | yield from map(lambda x: sum_time / x, list_times) 61 | 62 | @staticmethod 63 | def _weighted_choice(probability_distribution: Iterable[float]) -> int: 64 | rand = random.random() 65 | prefix_sum: float = 0.0 66 | 67 | length = 0 68 | for i, p in enumerate(probability_distribution): 69 | length += 1 70 | prefix_sum += p 71 | if rand <= prefix_sum: 72 | return i 73 | return length - 1 74 | 75 | 76 | __all__ = ["RandomWeightedBalancerPolicy"] 77 | -------------------------------------------------------------------------------- /hasql/balancer_policy/round_robin.py: -------------------------------------------------------------------------------- 1 | from collections import defaultdict 2 | from types import MappingProxyType 3 | from typing import NamedTuple, Optional 4 | 5 | from hasql.balancer_policy.base import BaseBalancerPolicy 6 | 7 | 8 | class PoolOptions(NamedTuple): 9 | read_only: bool 10 | choose_master_as_replica: bool 11 | 12 | 13 | class RoundRobinBalancerPolicy(BaseBalancerPolicy): 14 | def __init__(self, pool_manager): 15 | super().__init__(pool_manager) 16 | self._indexes = 
    async def _get_pool(
        self,
        read_only: bool,
        fallback_master: Optional[bool] = None,
        choose_master_as_replica: bool = False,
    ):
        """Pick the next pool in round-robin order for the requested role.

        Keeps an independent cursor per (read_only,
        choose_master_as_replica) combination in ``self._indexes``.
        """
        if read_only:
            if self._pool_manager.replica_pool_count == 0:
                if fallback_master:
                    # Degrade the request to a plain master acquisition.
                    read_only = False
                    choose_master_as_replica = False
                    if self._pool_manager.master_pool_count == 0:
                        await self._pool_manager.wait_masters_ready(1)
                else:
                    await self._pool_manager.wait_replicas_ready(1)
        else:
            if self._pool_manager.master_pool_count == 0:
                await self._pool_manager.wait_masters_ready(1)

        pool_options = PoolOptions(read_only, choose_master_as_replica)
        # Every reachable combination is registered in __init__;
        # (read_only=False, choose_master_as_replica=True) is rejected
        # upstream by BaseBalancerPolicy.get_pool.
        assert pool_options in self._choose_predicates

        predicate = self._choose_predicates[pool_options]
        start_index = self._indexes[pool_options]

        # Scan all pool slots once, starting just after the last hit.
        pools = self._pool_manager.pools
        for offset in range(len(pools)):
            index = (start_index + offset) % len(pools)
            current_pool = pools[index]
            if current_pool is not None and predicate(current_pool):
                self._indexes[pool_options] = (index + 1) % len(pools)
                return current_pool
        # NOTE(review): falls through to an implicit None when no pool
        # currently matches the predicate -- confirm callers tolerate it.
class PoolAcquireContext(AsyncContextManager):
    """Deferred connection acquisition returned by ``BasePoolManager.acquire``.

    Usable two ways:

    * ``conn = await manager.acquire(...)`` -- goes through
      ``acquire_from_pool_connection``; the connection is registered as
      unmanaged and must be returned via ``manager.release(conn)``.
    * ``async with manager.acquire(...) as conn`` -- goes through
      ``__aenter__``/``__aexit__``; release happens automatically through
      the underlying pool's own context manager.
    """

    def __init__(
        self,
        pool_manager: "BasePoolManager",
        read_only: bool,
        fallback_master: Optional[bool],
        master_as_replica_weight: Optional[float],
        timeout: float,
        metrics: CalculateMetrics,
        **kwargs,
    ):
        self.pool_manager = pool_manager
        self.read_only = read_only
        self.fallback_master = fallback_master
        self.master_as_replica_weight = master_as_replica_weight
        self.timeout = timeout
        # Extra kwargs are forwarded verbatim to the driver's acquire().
        self.kwargs = kwargs
        self.pool = None       # chosen by the balancer on first use
        self.context = None    # driver-level context (async-with path only)
        self.metrics = metrics

    async def acquire_from_pool_connection(self):
        """``await``-path: pick a pool, take a connection, register it."""
        async def execute():
            # Balancer choice and driver acquire are both timed for metrics.
            with self.metrics.with_get_pool():
                self.pool = await self.pool_manager.balancer.get_pool(
                    read_only=self.read_only,
                    fallback_master=self.fallback_master,
                    master_as_replica_weight=self.master_as_replica_weight,
                )

            with self.metrics.with_acquire(self.pool_manager.host(self.pool)):
                return await self.pool_manager.acquire_from_pool(
                    self.pool, **self.kwargs,
                )

        # The whole pick+acquire sequence shares one timeout budget.
        self.conn = await asyncio.wait_for(execute(), timeout=self.timeout)
        self.metrics.add_connection(self.pool_manager.host(self.pool))
        # Remember which pool owns this connection so release() can
        # hand it back to the right place.
        self.pool_manager.register_connection(self.conn, self.pool)
        return self.conn

    async def __aenter__(self):
        """async-with path: mirrors ``acquire_from_pool_connection`` but
        delegates lifetime to the driver context instead of registering
        the connection as unmanaged."""
        async def go():
            with self.metrics.with_get_pool():
                self.pool = await self.pool_manager.balancer.get_pool(
                    read_only=self.read_only,
                    fallback_master=self.fallback_master,
                    master_as_replica_weight=self.master_as_replica_weight,
                )
            with self.metrics.with_acquire(self.pool_manager.host(self.pool)):
                self.context = self.pool_manager.acquire_from_pool(
                    self.pool,
                    **self.kwargs,
                )
            return await self.context.__aenter__()

        self.conn = await asyncio.wait_for(go(), timeout=self.timeout)
        self.metrics.add_connection(self.pool_manager.host(self.pool))
        return self.conn

    async def __aexit__(self, *exc):
        # NOTE(review): assumes __aenter__ completed (self.context set);
        # a timeout inside __aenter__ never reaches __aexit__.
        self.metrics.remove_connection(self.pool_manager.host(self.pool))
        await self.context.__aexit__(*exc)
        del self.conn

    def __await__(self):
        # Makes ``await manager.acquire(...)`` work without async-with.
        return self.acquire_from_pool_connection().__await__()
    @property
    def dsn(self) -> List[Dsn]:
        # Per-host DSNs produced by split_dsn() in __init__.
        return self._dsn
    def acquire(
        self,
        read_only: bool = False,
        fallback_master: Optional[bool] = None,
        master_as_replica_weight: Optional[float] = None,
        timeout: Optional[float] = None,
        **kwargs,
    ):
        """Return a :class:`PoolAcquireContext` for one connection.

        The context is lazy: no pool is chosen and no connection taken
        until it is awaited or entered with ``async with``.

        :param read_only: acquire from a replica instead of the master.
        :param fallback_master: use a master when no replica is up;
            defaults to the manager-wide setting.
        :param master_as_replica_weight: probability in [0; 1] of serving
            a read-only request from a master; only valid with
            ``read_only=True``.
        :param timeout: overall acquire timeout; defaults to the
            manager-wide setting.
        :raises ValueError: on an invalid parameter combination.
        """
        if fallback_master is None:
            fallback_master = self._fallback_master

        if not read_only and master_as_replica_weight is not None:
            raise ValueError(
                "Field master_as_replica_weight is used only when "
                "read_only is True",
            )
        if (
            master_as_replica_weight is not None and
            not (0. <= master_as_replica_weight <= 1)
        ):
            raise ValueError(
                "Field master_as_replica_weight must belong "
                "to the segment [0; 1]",
            )

        if read_only:
            # Fall back to the manager-wide weight when none was given.
            if master_as_replica_weight is None:
                master_as_replica_weight = self._master_as_replica_weight

        if timeout is None:
            timeout = self._acquire_timeout

        ctx = PoolAcquireContext(
            pool_manager=self,
            read_only=read_only,
            fallback_master=fallback_master,
            master_as_replica_weight=master_as_replica_weight,
            timeout=timeout,
            metrics=self._metrics,
            **kwargs,
        )

        return ctx
# --- BasePoolManager (continued): lifecycle and background monitoring -----

async def close(self):
    """Gracefully close all pools; tolerates per-pool failures."""
    self._closing = True
    await self._clear()
    await asyncio.gather(
        *[self._close(pool) for pool in self._pools if pool is not None],
        return_exceptions=True,
    )
    self._closing = False
    self._closed = True

async def terminate(self):
    """Forcefully terminate all pools (no graceful drain)."""
    self._closing = True
    await self._clear()
    for pool in self._pools:
        if pool is None:
            continue
        await self._terminate(pool)
    self._closing = False
    self._closed = True

async def wait_next_pool_check(self, timeout: int = 10):
    """Block until every DSN has completed a fresh role check."""
    tasks = [self._wait_checking_pool(dsn) for dsn in self._dsn]
    await asyncio.wait_for(asyncio.gather(*tasks), timeout=timeout)

async def _wait_checking_pool(self, dsn: Dsn):
    # Wait for two notifications so that at least one *complete* check
    # cycle happens strictly after this call started.
    async with self._dsn_check_cond[dsn]:
        for _ in range(2):
            await self._dsn_check_cond[dsn].wait()

async def ready(
    self,
    masters_count: Optional[int] = None,
    replicas_count: Optional[int] = None,
    timeout: int = 10,
):
    """Wait until the requested number of masters/replicas is available.

    With both counts omitted, waits until every DSN has a resolved role.

    :raises ValueError: if exactly one count is given, or a count is
        negative.
    :raises asyncio.TimeoutError: if readiness is not reached in time.
    """
    if (
        (masters_count is not None and replicas_count is None) or
        (masters_count is None and replicas_count is not None)
    ):
        raise ValueError(
            "Arguments master_count and replicas_count "
            "should both be either None or not None",
        )

    if masters_count is not None and masters_count < 0:
        raise ValueError("masters_count shouldn't be negative")
    if replicas_count is not None and replicas_count < 0:
        raise ValueError("replicas_count shouldn't be negative")

    if masters_count is None and replicas_count is None:
        await asyncio.wait_for(self.wait_all_ready(), timeout=timeout)
        return

    assert isinstance(masters_count, int)
    assert isinstance(replicas_count, int)

    await asyncio.wait_for(
        asyncio.gather(
            self.wait_masters_ready(masters_count),
            self.wait_replicas_ready(replicas_count),
        ),
        timeout=timeout,
    )

async def wait_all_ready(self):
    """Wait until every configured DSN has been classified at least once."""
    for dsn in self._dsn:
        await self._dsn_ready_event[dsn].wait()

async def wait_masters_ready(self, masters_count: int):
    """Wait until at least *masters_count* master pools are known."""
    def predicate():
        return self.master_pool_count >= masters_count

    async with self._master_cond:
        await self._master_cond.wait_for(predicate)

async def wait_replicas_ready(self, replicas_count: int):
    """Wait until at least *replicas_count* replica pools are known."""
    def predicate():
        return self.replica_pool_count >= replicas_count

    async with self._replica_cond:
        await self._replica_cond.wait_for(predicate)

async def get_master_pools(self) -> List:
    """Return known master pools, waiting for the first one if needed."""
    if not self._master_pool_set:
        async with self._master_cond:
            await self._master_cond.wait()
    return list(self._master_pool_set)

async def get_replica_pools(self, fallback_master: bool = False) -> List:
    """Return known replica pools; optionally fall back to masters."""
    if not self._replica_pool_set:
        if fallback_master:
            return await self.get_master_pools()
        async with self._replica_cond:
            await self._replica_cond.wait()
    return list(self._replica_pool_set)

def pool_is_master(self, pool) -> bool:
    return pool in self._master_pool_set

def pool_is_replica(self, pool) -> bool:
    return pool in self._replica_pool_set

def register_connection(self, connection, pool):
    # Track connections handed out to callers so release() can find the
    # owning pool later.
    self._unmanaged_connections[connection] = pool

def get_last_response_time(self, pool) -> Optional[float]:
    """Median response time of recent role checks for *pool*, if any."""
    return self._stopwatch.get_time(pool)

def _prepare_pool_factory_kwargs(self, kwargs: dict) -> dict:
    # Subclass hook: adjust factory kwargs (default: pass through).
    return kwargs

async def _clear(self):
    """Cancel background tasks and release all tracked connections."""
    self._balancer = None
    if self._refresh_role_tasks is not None:
        for refresh_role_task in self._refresh_role_tasks:
            refresh_role_task.cancel()

        await asyncio.gather(
            *self._refresh_role_tasks,
            return_exceptions=True,
        )

        self._refresh_role_tasks = None

    release_tasks = []
    for connection in self._unmanaged_connections:
        release_tasks.append(self.release(connection))

    await asyncio.gather(*release_tasks, return_exceptions=True)

    self._unmanaged_connections.clear()
    self._master_pool_set.clear()
    self._replica_pool_set.clear()

async def _check_pool_task(self, index: int):
    """Background task: keep the role of one DSN's pool up to date."""
    logger.debug("Starting pool task")
    dsn = self._dsn[index]
    censored_dsn = str(dsn.with_(password="******"))
    pool = await self._wait_creating_pool(dsn)
    self._pools[index] = pool

    logger.debug("Setting dsn=%r event", censored_dsn)
    sys_connection = None
    while not self._closing:
        try:
            # Do not use ``async with self.acquire_from_pool(pool)`` here
            # because of the large acquire timeout.
            logger.debug(
                "Acquiring connection for checking dsn=%r", censored_dsn,
            )
            sys_connection = await asyncio.wait_for(
                self.acquire_from_pool(pool), timeout=self._refresh_timeout,
            )

            logger.debug("Checking dsn=%r", censored_dsn)
            await self._periodic_pool_check(pool, dsn, sys_connection)
        except asyncio.TimeoutError:
            logger.warning(
                "Creating system connection failed for dsn=%r",
                censored_dsn,
            )
        except asyncio.CancelledError as cancelled_error:
            if self._closing:
                raise cancelled_error from None
            logger.warning(
                "Cancelled error for dsn=%r",
                censored_dsn,
                exc_info=True,
            )
            self._remove_pool_from_master_set(pool, dsn)
            self._remove_pool_from_replica_set(pool, dsn)
        except Exception:
            logger.warning(
                "Database is not available with exception for dsn=%r",
                censored_dsn,
                exc_info=True,
            )
            self._remove_pool_from_master_set(pool, dsn)
            self._remove_pool_from_replica_set(pool, dsn)
        finally:
            if sys_connection is not None:
                # FIX: the specific CancelledError handler must come first.
                # Previously ``except (Exception, asyncio.CancelledError)``
                # preceded it, making the CancelledError branch unreachable
                # and swallowing cancellation during shutdown.
                try:
                    await self.release_to_pool(sys_connection, pool)
                except asyncio.CancelledError as cancelled_error:
                    if self._closing:
                        raise cancelled_error from None
                    logger.warning(
                        "Release connection to pool with "
                        "Cancelled error for dsn=%r",
                        censored_dsn,
                        exc_info=True,
                    )
                except Exception:
                    logger.warning(
                        "Release connection to pool with "
                        "exception for dsn=%r",
                        censored_dsn,
                        exc_info=True,
                    )
                sys_connection = None
            await self._notify_about_pool_has_checked(dsn)

        await asyncio.sleep(self._refresh_delay)

async def _wait_creating_pool(self, dsn: Dsn):
    # Retry pool creation until it succeeds or the manager is closing.
    while not self._closing:
        try:
            return await asyncio.wait_for(
                self._pool_factory(dsn),
                timeout=self._refresh_timeout,
            )
        except Exception:
            logger.warning(
                "Creating pool failed with exception for dsn=%s",
                dsn.with_(password="******"),
                exc_info=True,
            )
            await asyncio.sleep(self._refresh_delay)

async def _periodic_pool_check(self, pool, dsn: Dsn, sys_connection):
    # Loop forever (until close) re-checking the pool's role; on timeout
    # the pool is considered unavailable and dropped from both role sets.
    while not self._closing:
        try:
            await asyncio.wait_for(
                self._refresh_pool_role(pool, dsn, sys_connection),
                timeout=self._refresh_timeout,
            )
            await self._notify_about_pool_has_checked(dsn)
        except asyncio.TimeoutError:
            logger.warning(
                "Periodic pool check failed for dsn=%s",
                dsn.with_(password="******"),
            )
            self._remove_pool_from_master_set(pool, dsn)
            self._remove_pool_from_replica_set(pool, dsn)
            await self._notify_about_pool_has_checked(dsn)

        await asyncio.sleep(self._refresh_delay)

async def _notify_about_pool_has_checked(self, dsn: Dsn):
    async with self._dsn_check_cond[dsn]:
        self._dsn_check_cond[dsn].notify_all()

async def _add_pool_to_master_set(self, pool, dsn: Dsn):
    if pool in self._master_pool_set:
        return

    self._master_pool_set.add(pool)
    logger.debug(
        "Pool %s has been added to master set",
        dsn.with_(password="******"),
    )
    async with self._master_cond:
        self._master_cond.notify_all()

async def _add_pool_to_replica_set(self, pool, dsn: Dsn):
    if pool in self._replica_pool_set:
        return
    self._replica_pool_set.add(pool)
    logger.debug(
        "Pool %s has been added to replica set",
        dsn.with_(password="******"),
    )
    async with self._replica_cond:
        self._replica_cond.notify_all()

def _remove_pool_from_master_set(self, pool, dsn: Dsn):
    if pool in self._master_pool_set:
        self._master_pool_set.remove(pool)
        logger.debug(
            "Pool %s has been removed from master set",
            dsn.with_(password="******"),
        )

def _remove_pool_from_replica_set(self, pool, dsn: Dsn):
    if pool in self._replica_pool_set:
        self._replica_pool_set.remove(pool)
        logger.debug(
            "Pool %s has been removed from replica set",
            dsn.with_(password="******"),
        )

async def _refresh_pool_role(self, pool, dsn: Dsn, sys_connection):
    # Classify the pool as master/replica and time the check so the
    # balancer can use the response time.
    with self._stopwatch(pool):
        is_master = await self._is_master(sys_connection)
        if is_master:
            await self._add_pool_to_master_set(pool, dsn)
            self._remove_pool_from_replica_set(pool, dsn)
        else:
            await self._add_pool_to_replica_set(pool, dsn)
            self._remove_pool_from_master_set(pool, dsn)
        self._dsn_ready_event[dsn].set()

def __iter__(self):
    # Masters first, then replicas.
    return chain(iter(self._master_pool_set), iter(self._replica_pool_set))

async def __aenter__(self):
    await self.ready()
    return self

async def __aexit__(self, exc_type, exc_val, exc_tb):
    await self.close()


__all__ = ("BasePoolManager", "AbstractBalancerPolicy")
/hasql/metrics.py: -------------------------------------------------------------------------------- 1 | import time 2 | from collections import defaultdict 3 | from contextlib import contextmanager 4 | from dataclasses import dataclass, field 5 | from typing import Dict, Sequence 6 | 7 | 8 | @dataclass(frozen=True) 9 | class DriverMetrics: 10 | max: int 11 | min: int 12 | idle: int 13 | used: int 14 | host: str 15 | 16 | 17 | @dataclass(frozen=True) 18 | class HasqlMetrics: 19 | pool: int 20 | pool_time: float 21 | acquire: Dict[str, int] 22 | acquire_time: Dict[str, float] 23 | add_connections: Dict[str, int] 24 | remove_connections: Dict[str, int] 25 | 26 | 27 | @dataclass 28 | class CalculateMetrics: 29 | _pool: int = 0 30 | _pool_time: float = 0. 31 | _acquire: Dict[str, int] = field(default_factory=lambda: defaultdict(int)) 32 | _acquire_time: Dict[str, float] = field( 33 | default_factory=lambda: defaultdict(int) 34 | ) 35 | _add_connections: Dict[str, int] = field(default_factory=dict) 36 | _remove_connections: Dict[str, int] = field(default_factory=dict) 37 | 38 | def metrics(self) -> HasqlMetrics: 39 | return HasqlMetrics( 40 | pool=self._pool, 41 | pool_time=self._pool_time, 42 | acquire=self._acquire, 43 | acquire_time=self._acquire_time, 44 | add_connections=self._add_connections, 45 | remove_connections=self._remove_connections, 46 | ) 47 | 48 | @contextmanager 49 | def with_get_pool(self): 50 | self._pool += 1 51 | tt = time.monotonic() 52 | yield 53 | self._pool_time += time.monotonic() - tt 54 | 55 | @contextmanager 56 | def with_acquire(self, pool: str): 57 | self._acquire[pool] += 1 58 | tt = time.monotonic() 59 | yield 60 | self._acquire_time[pool] += time.monotonic() - tt 61 | 62 | def add_connection(self, dsn: str): 63 | self._add_connections[dsn] = ( 64 | self._add_connections.get(dsn, 0) + 1 65 | ) 66 | 67 | def remove_connection(self, dsn: str): 68 | self._remove_connections[dsn] = ( 69 | self._remove_connections.get(dsn, 0) + 1 70 | ) 71 | 
@dataclass(frozen=True)
class Metrics:
    # Combined snapshot: per-driver pool stats plus hasql's own counters.
    drivers: Sequence[DriverMetrics]
    hasql: HasqlMetrics


# ===== hasql/psycopg3.py ==================================================
from typing import Optional, Sequence

from psycopg import AsyncConnection, errors
from psycopg.conninfo import conninfo_to_dict
from psycopg_pool import AsyncConnectionPool

from .base import BasePoolManager
from .metrics import DriverMetrics
from .utils import Dsn


class PoolAcquireContext:
    """Single-use acquire helper over ``AsyncConnectionPool``.

    Supports both ``async with ctx as conn:`` (auto putconn on exit) and
    ``conn = await ctx`` (caller must return the connection manually).
    """

    __slots__ = ("timeout", "connection", "done", "pool")

    def __init__(
        self,
        pool: AsyncConnectionPool,
        timeout: Optional[float] = None,
    ):
        self.pool = pool
        self.timeout = timeout
        self.connection = None
        self.done = False

    async def __aenter__(self):
        # Guard against re-entering the same (single-use) context.
        if self.connection is not None or self.done:
            raise errors.InterfaceError("a connection is already acquired")
        self.connection = await self.pool.getconn(self.timeout)
        return self.connection

    async def __aexit__(self, *exc):
        self.done = True
        con = self.connection
        self.connection = None
        await self.pool.putconn(con)

    def __await__(self):
        # NOTE(review): this path does not set self.connection/done, so the
        # caller owns the connection and must release it explicitly.
        return self.pool.getconn(self.timeout).__await__()


class PoolManager(BasePoolManager):
    """psycopg3 (``psycopg_pool``) backend for :class:`BasePoolManager`."""

    pools: Sequence[AsyncConnectionPool]

    def __init__(self, dsn: str, **kwargs):
        pool_factory_kwargs = kwargs.pop("pool_factory_kwargs", {})
        # Unbounded client wait queue; acquire timeouts are enforced by
        # hasql itself. NOTE(review): this overrides any caller-supplied
        # ``max_waiting`` value.
        pool_factory_kwargs["max_waiting"] = -1
        super().__init__(
            dsn,
            pool_factory_kwargs=pool_factory_kwargs, **kwargs
        )

    def get_pool_freesize(self, pool: AsyncConnectionPool):
        # Number of idle connections available in the pool.
        return pool.get_stats()["pool_available"]

    def acquire_from_pool(self, pool: AsyncConnectionPool, **kwargs):
        return PoolAcquireContext(pool, **kwargs)

    async def release_to_pool(
        self,
        connection: AsyncConnection,
        pool: AsyncConnectionPool,
        **kwargs
    ):
        return await pool.putconn(connection)

    async def _is_master(self, connection: AsyncConnection):
        # A master reports transaction_read_only = off.
        async with connection.cursor() as cur:
            await cur.execute("SHOW transaction_read_only")
            return (await cur.fetchone())[0] == "off"  # type: ignore

    async def _pool_factory(self, dsn: Dsn) -> AsyncConnectionPool:
        pool = AsyncConnectionPool(
            str(dsn), **self.pool_factory_kwargs
        )
        # Wait until the pool reaches min_size (fails fast on bad DSNs).
        await pool.wait()
        return pool

    def _prepare_pool_factory_kwargs(self, kwargs: dict) -> dict:
        # +1 slot reserves a connection for the manager's own role-check
        # task (see BasePoolManager._check_pool_task) on top of the
        # caller-requested sizes.
        kwargs["min_size"] = kwargs.get("min_size", 1) + 1
        kwargs["max_size"] = kwargs.get("max_size", 10) + 1
        return kwargs

    async def _close(self, pool: AsyncConnectionPool):
        await pool.close()

    async def _terminate(self, pool: AsyncConnectionPool):
        # psycopg_pool has no hard-kill API; closing is handled by _close.
        pass

    def is_connection_closed(self, connection):
        return connection.closed

    def host(self, pool: AsyncConnectionPool):
        return conninfo_to_dict(pool.conninfo)["host"]

    def _driver_metrics(self) -> Sequence[DriverMetrics]:
        # Collect psycopg_pool stats, tagging each with its host.
        stats = [
            {
                **p.get_stats(),
                "host": self.host(p)
            }
            for p in self.pools
            if p
        ]
        return [
            DriverMetrics(
                min=stat["pool_min"],
                max=stat["pool_max"],
                idle=stat["pool_available"],
                used=stat["pool_size"],
                host=stat["host"],
            ) for stat in stats
        ]


__all__ = ("PoolManager",)
# (hasql/py.typed is an empty PEP 561 marker file.)
# ===== hasql/utils.py =====================================================
import io
import re
import statistics
import time
from collections import defaultdict, deque
from contextlib import contextmanager
from typing import (
    Any, DefaultDict, Deque, Dict, Generator, Iterable, List, Optional,
    Tuple, Union,
)
from urllib.parse import unquote, urlencode


def host_is_ipv6_address(netloc: str) -> bool:
    """Heuristic: a netloc with more than one colon is an IPv6 address."""
    return netloc.count(":") > 1


class Dsn:
    """Parsed, immutable representation of a PostgreSQL DSN."""

    __slots__ = (
        "_netloc", "_user", "_password", "_dbname", "_kwargs",
        "_scheme", "_compiled_dsn",
    )

    # FIX(reconstruction): the repository dump stripped the ``<name>`` parts
    # of the named groups; they are restored here to match groupdict() usage
    # in ``parse`` (scheme/user/password/netloc/path/query/fragment).
    URL_EXP = re.compile(
        r"^(?P<scheme>[^\:]+):\/\/"
        r"((((?P<user>[^:^@]+))?"
        r"((\:(?P<password>[^@]+)?))?\@)?"
        r"(?P<netloc>([^\/^\?]+|\[([^\/]+)\])))?"
        r"(((?P<path>\/[^\?]*)?"
        r"(\?(?P<query>[^\#]+)?)?"
        r"(\#(?P<fragment>.*))?)?)?$",
    )

    def __init__(
        self,
        netloc: str,
        user: Optional[str] = None,
        password: Optional[str] = None,
        dbname: Optional[str] = None,
        scheme: str = "postgresql",
        **kwargs: Any,
    ):
        self._netloc = netloc
        self._user = user
        self._password = password
        self._dbname = dbname
        self._kwargs = kwargs
        self._scheme = scheme
        self._compiled_dsn = self._compile_dsn()

    @classmethod
    def parse(cls, dsn: str) -> "Dsn":
        """Parse a DSN string.

        :raises ValueError: when the string does not match ``URL_EXP``.
        """
        match = cls.URL_EXP.match(dsn)

        if match is None:
            raise ValueError("Bad DSN")

        groupdict = match.groupdict()
        scheme = groupdict["scheme"]
        user = groupdict.get("user")
        password = groupdict.get("password")
        netloc: str = groupdict["netloc"]
        dbname = (groupdict.get("path") or "").lstrip("/")
        query = groupdict.get("query") or ""

        params = {}
        for item in query.split("&"):
            if not item:
                continue
            key, value = item.split("=", 1)
            params[key] = unquote(value)

        return cls(
            scheme=scheme,
            netloc=netloc,
            user=user,
            password=password,
            dbname=dbname,
            **params
        )

    def _compile_dsn(self) -> str:
        # Assemble the canonical string form once, at construction time.
        with io.StringIO() as fp:
            fp.write(self._scheme)
            fp.write("://")

            if self._user is not None:
                fp.write(self._user)

            if self._password is not None:
                fp.write(":")
                fp.write(self._password)

            if self._user is not None or self._password is not None:
                fp.write("@")

            fp.write(self._netloc)

            if self._dbname is not None:
                fp.write("/")
                fp.write(self._dbname)

            if self._kwargs:
                fp.write("?")
                fp.write(urlencode(self._kwargs, safe="/~.\"'"))

            return fp.getvalue()

    def with_(
        self,
        netloc: Optional[str] = None,
        user: Optional[str] = None,
        password: Optional[str] = None,
        dbname: Optional[str] = None,
    ) -> "Dsn":
        """Return a copy with the given fields replaced.

        Passing ``None`` keeps the current value (fields cannot be unset).
        """
        params = {
            "netloc": netloc if netloc is not None else self._netloc,
            "user": user if user is not None else self._user,
            "password": password if password is not None else self._password,
            "dbname": dbname if dbname is not None else self._dbname,
            # FIX: preserve the original scheme; previously it silently
            # reverted to the default "postgresql".
            "scheme": self._scheme,
            **self._kwargs,
        }
        return self.__class__(**params)

    def __str__(self) -> str:
        return self._compiled_dsn

    def __eq__(self, other: Any) -> bool:
        return str(self) == str(other)

    def __hash__(self) -> int:
        return hash(str(self))

    @property
    def netloc(self) -> str:
        return self._netloc

    @property
    def user(self) -> Optional[str]:
        return self._user

    @property
    def password(self) -> Optional[str]:
        return self._password

    @property
    def dbname(self) -> Optional[str]:
        return self._dbname

    @property
    def params(self) -> Dict[str, str]:
        return self._kwargs

    @property
    def scheme(self) -> str:
        return self._scheme

    @property
    def compiled_dsn(self) -> str:
        return self._compiled_dsn


def split_dsn(dsn: Union[Dsn, str], default_port: int = 5432) -> List[Dsn]:
    """Expand a multi-host DSN into one single-host Dsn per host.

    Port resolution: every host keeps its explicit port; if only the last
    host carries a port (libpq multi-host shorthand) that port applies to
    all hosts; otherwise missing ports default to *default_port*.
    Duplicates are removed while preserving order.
    """
    if not isinstance(dsn, Dsn):
        dsn = Dsn.parse(dsn)

    host_port_pairs: List[Tuple[str, Optional[int]]] = []
    port_count = 0
    port: Optional[int]
    for host in dsn.netloc.split(","):
        if ":" in host:
            host, port_str = host.rsplit(":", 1)
            port = int(port_str)
            port_count += 1
        else:
            # FIX: dropped the no-op ``host = host`` assignment.
            port = None
        host_port_pairs.append((host, port))

    def deduplicate(dsns: Iterable[Dsn]) -> List[Dsn]:
        seen = set()
        result = []
        for candidate in dsns:
            if candidate in seen:
                continue
            result.append(candidate)
            seen.add(candidate)
        return result

    if port_count == len(host_port_pairs):
        # Every host has an explicit port.
        return deduplicate(
            dsn.with_(netloc=f"{host}:{port}")
            for host, port in host_port_pairs
        )

    if port_count == 1 and host_port_pairs[-1][1] is not None:
        # Only the last host has a port: it applies to every host.
        port = host_port_pairs[-1][1]
        return deduplicate(
            dsn.with_(netloc=f"{host}:{port}")
            for host, _ in host_port_pairs
        )

    return deduplicate(
        dsn.with_(netloc=f"{host}:{port or default_port}")
        for host, port in host_port_pairs
    )


class Stopwatch:
    """Tracks median duration of recent timings per object."""

    def __init__(self, window_size: int):
        # Sliding window of the last *window_size* durations per object.
        self._times: DefaultDict[Any, Deque] = defaultdict(
            lambda: deque(maxlen=window_size),
        )
        # FIX: the cached median is a float (statistics.median), the
        # annotation previously said ``Optional[int]``.
        self._cache: Dict[Any, Optional[float]] = {}

    def get_time(self, obj: Any) -> Optional[float]:
        """Median of recorded durations for *obj*, or None if none yet."""
        if obj not in self._times:
            return None
        if self._cache.get(obj) is None:
            self._cache[obj] = statistics.median(self._times[obj])
        return self._cache[obj]

    @contextmanager
    def __call__(self, obj: Any) -> Generator[None, None, None]:
        start_at = time.monotonic()
        yield
        self._times[obj].append(time.monotonic() - start_at)
        # Invalidate the cached median; recomputed lazily on next get_time.
        self._cache[obj] = None


__all__ = ("Dsn", "split_dsn", "Stopwatch", "host_is_ipv6_address")

# --- Non-Python residue from this chunk of the repository dump, preserved
# --- as comments so the block stays valid Python. ------------------------
# pylama.ini:
#   [pylama]
#   linters = mccabe,pycodestyle,pyflakes
#   skip = venv/*
#   [pylama:pycodestyle]
#   max_line_length = 80
#   show-pep8 = True
#   show-source = True
# pytest.ini:
#   [pytest]
#   filterwarnings =
#       ignore::DeprecationWarning
#   python_classes = TestSuite*
# resources/logo.svg: (binary/asset reference)
# setup.cfg:
#   [mypy]  (strictness flags: follow_imports=silent, strict_optional=True,
#            warn_redundant_casts/unused_configs/unused_ignores=True, ...)
#   [mypy-tests.*]
#   ignore_errors = True
# setup.py (head; the call continues in the next chunk):
#   import os
#   from importlib.machinery import SourceFileLoader
#   from setuptools import setup, find_packages
#   module = SourceFileLoader(
#       "version", os.path.join("hasql", "__init__.py")
).load_module()
# NOTE(review): SourceFileLoader.load_module() (opened in the previous
# chunk) is deprecated; exec_module() is the modern replacement — confirm
# before changing.

setup(
    name="hasql",
    version=module.__version__,
    author=module.__author__,
    author_email=module.authors_email,
    license=module.__license__,
    description=module.package_info,
    # NOTE(review): the file handle from open() is never closed explicitly.
    long_description=open("README.rst").read(),

    platforms="all",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Framework :: AsyncIO",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Natural Language :: Russian",
        "Operating System :: MacOS",
        "Operating System :: Microsoft",
        "Operating System :: POSIX",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Programming Language :: Python",
    ],
    packages=find_packages(exclude=["tests", "example"]),
    package_data={'hasql': ['py.typed']},
    # Core package has zero hard dependencies; drivers are opt-in extras.
    install_requires=[],
    extras_require={
        "aiopg": [
            "aiopg"
        ],
        "aiopg_sa": [
            "aiopg[sa]"
        ],
        "asyncpg": [
            "asyncpg"
        ],
        "asyncpgsa": [
            "asyncpgsa"
        ],
        "psycopg": [
            "psycopg[pool]>=3,<4"
        ],
        "test": [
            "async_timeout",
            "psycopg[pool]>=3.0,<4",
            "aiopg[sa]~=1.4.0",
            "asyncpg~=0.29.0",
            "pytest~=6.2.5",
            "pytest-cov~=3.0.0",
            "aiomisc~=15.2.4",
            "mock~=4.0.1",
            "sqlalchemy[asyncio]~=1.4.27",
        ],
        # NOTE(review): "develop" pins asyncpg~=0.27.0 while "test" pins
        # ~=0.29.0 — confirm whether this divergence is intentional.
        "develop": [
            "async_timeout",
            "psycopg[pool]>=3.0,<4",
            "aiopg[sa]~=1.4.0",
            "asyncpg~=0.27.0",
            "pytest~=6.2.5",
            "pytest-cov~=3.0.0",
            "pylama~=7.7.1",
            "aiomisc~=15.2.4",
            "mock~=4.0.3",
            "sqlalchemy[asyncio]~=1.4.27",
            "black~=21.9b0",
            "tox~=3.24",
            "twine",
            "wheel",
            "types-psycopg2",
        ],
    },
    project_urls={
        "Source": "https://github.com/aiokitchen/hasql",
        "Tracker": "https://github.com/aiokitchen/hasql/issues",
        "Documentation": "https://github.com/aiokitchen/hasql/blob/master/README.rst",
    },
)


# ===== tests/__init__.py (empty package marker) ===========================

# ===== tests/conftest.py ==================================================
import asyncio
import os
from contextlib import asynccontextmanager

import aiomisc
import pytest


@pytest.fixture(autouse=True)
def aiomisc_test_timeout():
    # Hard cap (seconds) for every aiomisc-driven test in the suite.
    return 5


class UnavailableDbServer(aiomisc.service.TCPServer):
    # Fake "database" that accepts TCP connections but never speaks the
    # postgres protocol: reads and discards input, then closes.
    async def handle_client(
        self,
        reader: asyncio.StreamReader,
        writer: asyncio.StreamWriter,
    ):
        while await reader.read(65534):
            pass
        writer.close()
        await writer.wait_closed()


@pytest.fixture
def db_server_port(aiomisc_unused_port_factory) -> int:
    return aiomisc_unused_port_factory()


@pytest.fixture
def services(db_server_port, localhost):
    # The unavailable-server service is currently disabled.
    return []  # [UnavailableDbServer(port=db_server_port, address=localhost)]


@pytest.fixture(scope="session")
def pg_dsn() -> str:
    # Real database DSN; overridable via the PG_DSN environment variable.
    return os.environ.get(
        "PG_DSN",
        "postgres://test:test@localhost:5432/test",
    )


@asynccontextmanager
async def setup_aiopg(pg_dsn):
    # Build an aiopg-backed PoolManager and guarantee cleanup.
    from hasql.aiopg import PoolManager

    pool = PoolManager(dsn=pg_dsn, fallback_master=True)
    yield pool
    await pool.close()


@asynccontextmanager
async
def setup_aiopgsa(pg_dsn): 55 | from hasql.aiopg_sa import PoolManager 56 | 57 | pool = PoolManager(dsn=pg_dsn, fallback_master=True) 58 | yield pool 59 | await pool.close() 60 | 61 | 62 | @asynccontextmanager 63 | async def setup_asyncpg(pg_dsn): 64 | from hasql.asyncpg import PoolManager 65 | 66 | pool = PoolManager(dsn=pg_dsn, fallback_master=True) 67 | yield pool 68 | await pool.close() 69 | 70 | 71 | @asynccontextmanager 72 | async def setup_asyncsqlalchemy(pg_dsn): 73 | from hasql.asyncsqlalchemy import PoolManager 74 | 75 | pool = PoolManager(dsn=pg_dsn, fallback_master=True) 76 | yield pool 77 | await pool.close() 78 | 79 | 80 | @asynccontextmanager 81 | async def setup_psycopg3(pg_dsn): 82 | from hasql.psycopg3 import PoolManager 83 | 84 | pool = PoolManager(dsn=pg_dsn, fallback_master=True) 85 | yield pool 86 | await pool.close() 87 | -------------------------------------------------------------------------------- /tests/mocks/__init__.py: -------------------------------------------------------------------------------- 1 | from .pool_manager import TestPoolManager 2 | 3 | 4 | __all__ = [ 5 | "TestPoolManager", 6 | ] 7 | -------------------------------------------------------------------------------- /tests/mocks/pool_manager.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from typing import Any, Sequence 3 | 4 | import mock 5 | 6 | from hasql.base import BasePoolManager 7 | from hasql.metrics import DriverMetrics 8 | from hasql.utils import Dsn 9 | 10 | 11 | class TestConnection: 12 | def __init__(self, pool: "TestPool"): 13 | self._pool = pool 14 | self._is_closed = False 15 | self.close = mock.AsyncMock(side_effect=self.close) 16 | self.terminate = mock.Mock(side_effect=self.terminate) 17 | 18 | @property 19 | def is_closed(self): 20 | return self._is_closed 21 | 22 | async def is_master(self): 23 | if not self._pool.is_running: 24 | raise ConnectionRefusedError 25 | if 
self._pool.is_behind_firewall: 26 | await asyncio.sleep(100) 27 | return self._pool.is_master 28 | 29 | async def close(self): 30 | self._is_closed = True 31 | 32 | def terminate(self): 33 | self._is_closed = True 34 | 35 | 36 | class PoolAcquireContext: 37 | def __init__(self, pool): 38 | self.pool = pool 39 | self.connection = None 40 | 41 | async def acquire_connection(self): 42 | self.connection = await self.pool.free.get() 43 | self.pool.used.add(self.connection) 44 | return self.connection 45 | 46 | async def __aenter__(self): 47 | return await self.acquire_connection() 48 | 49 | async def __aexit__(self, *exc): 50 | self.pool.used.remove(self.connection) 51 | self.pool.free.put_nowait(self.connection) 52 | 53 | def __await__(self): 54 | return self.acquire_connection().__await__() 55 | 56 | 57 | class TestPool: 58 | def __init__(self, dsn: str, maxsize: int = 10): 59 | self.dsn = dsn 60 | self.is_master = dsn == "postgresql://test:test@master:5432/test" 61 | self.is_running = True 62 | self.is_behind_firewall = False 63 | self.used = set() 64 | self.free = asyncio.LifoQueue() 65 | self.connections = [TestConnection(self) for _ in range(maxsize)] 66 | for conn in self.connections: 67 | self.free.put_nowait(conn) 68 | 69 | @property 70 | def freesize(self): 71 | return self.free.qsize() 72 | 73 | def set_master(self, is_master: bool): 74 | self.is_master = is_master 75 | 76 | def behind_firewall(self, is_behind_firewall: bool): 77 | self.is_behind_firewall = is_behind_firewall 78 | 79 | def shutdown(self): 80 | self.is_running = False 81 | 82 | def startup(self): 83 | self.is_master = False 84 | self.is_running = True 85 | 86 | def acquire(self, **kwargs): 87 | return PoolAcquireContext(self) 88 | 89 | async def release(self, conn: TestConnection, **kwargs): 90 | self.used.remove(conn) 91 | await self.free.put(conn) 92 | 93 | async def close(self): 94 | for conn in self.connections: 95 | await conn.close() 96 | 97 | def terminate(self): 98 | for conn in 
self.connections: 99 | conn.terminate() 100 | 101 | 102 | class TestPoolManager(BasePoolManager): 103 | def get_pool_freesize(self, pool: TestPool): 104 | return pool.freesize 105 | 106 | def acquire_from_pool(self, pool: TestPool, **kwargs): 107 | return pool.acquire(**kwargs) 108 | 109 | async def release_to_pool( 110 | self, connection: TestConnection, pool: TestPool, **kwargs 111 | ): 112 | await pool.release(connection, **kwargs) 113 | 114 | async def _is_master(self, connection: TestConnection): 115 | return await connection.is_master() 116 | 117 | async def _pool_factory(self, dsn: Dsn): 118 | return TestPool(str(dsn)) 119 | 120 | async def _close(self, pool: TestPool): 121 | await pool.close() 122 | 123 | async def _terminate(self, pool: TestPool): 124 | loop = asyncio.get_running_loop() 125 | await loop.run_in_executor(None, pool.terminate) 126 | 127 | def is_connection_closed(self, connection: TestConnection): 128 | return connection.is_closed 129 | 130 | def metrics(self) -> Sequence[DriverMetrics]: 131 | return [] 132 | 133 | def host(self, pool: Any): 134 | return "test-host:5432" 135 | 136 | def _driver_metrics(self) -> Sequence[DriverMetrics]: 137 | return [] 138 | 139 | 140 | __all__ = ("TestPoolManager",) 141 | -------------------------------------------------------------------------------- /tests/test_aiopg.py: -------------------------------------------------------------------------------- 1 | import mock 2 | import pytest 3 | from aiopg import Connection 4 | 5 | from hasql.aiopg import PoolManager 6 | from hasql.metrics import DriverMetrics 7 | 8 | 9 | @pytest.fixture 10 | async def pool_manager(pg_dsn): 11 | pg_pool = PoolManager( 12 | dsn=pg_dsn, 13 | fallback_master=True, 14 | pool_factory_kwargs={"minsize": 10, "maxsize": 10}, 15 | ) 16 | try: 17 | yield pg_pool 18 | finally: 19 | await pg_pool.close() 20 | 21 | 22 | async def test_acquire_with_context(pool_manager): 23 | async with pool_manager.acquire_master() as conn: 24 | assert 
isinstance(conn, Connection)
        async with conn.cursor() as cursor:
            await cursor.execute("SELECT 1")
            assert await cursor.fetchall() == [(1,)]


async def test_acquire_without_context(pool_manager):
    """Master connection can also be acquired by a plain ``await``."""
    conn = await pool_manager.acquire_master()
    assert isinstance(conn, Connection)
    async with conn.cursor() as cursor:
        await cursor.execute("SELECT 1")
        assert await cursor.fetchall() == [(1,)]


async def test_close(pool_manager):
    """Closing the manager closes the underlying aiopg pool."""
    aiopg_pool = await pool_manager.balancer.get_pool(read_only=False)
    await pool_manager.close()
    assert aiopg_pool.closed


async def test_release(pool_manager):
    """Acquire/release round-trips restore the pool's free-slot count."""
    aiopg_pool = await pool_manager.balancer.get_pool(read_only=False)
    assert pool_manager.get_pool_freesize(aiopg_pool) == 10
    conn = await pool_manager.acquire_master()
    assert pool_manager.get_pool_freesize(aiopg_pool) == 9
    await pool_manager.release(conn)
    assert pool_manager.get_pool_freesize(aiopg_pool) == 10


async def test_is_connection_closed(pool_manager):
    async with pool_manager.acquire_master() as conn:
        assert not pool_manager.is_connection_closed(conn)
        await conn.close()
        assert pool_manager.is_connection_closed(conn)


async def test_driver_context_metrics(pool_manager, pg_dsn):
    """Driver metrics while a connection is held inside a context manager."""
    async with pool_manager.acquire_master():
        assert pool_manager.metrics().drivers == [
            DriverMetrics(max=11, min=11, idle=9, used=2, host=mock.ANY)
        ]


async def test_driver_metrics(pool_manager, pg_dsn):
    """Driver metrics with a connection held outside any context manager."""
    _ = await pool_manager.acquire_master()
    assert pool_manager.metrics().drivers == [
        DriverMetrics(max=11, min=11, idle=9, used=2, host=mock.ANY)
    ]
--------------------------------------------------------------------------------
/tests/test_aiopg_sa.py:
--------------------------------------------------------------------------------
import mock
import pytest
from aiopg.sa import SAConnection

from hasql.aiopg_sa import PoolManager
from hasql.metrics import DriverMetrics


@pytest.fixture
async def pool_manager(pg_dsn):
    pg_pool = PoolManager(dsn=pg_dsn, fallback_master=True)
    try:
        await pg_pool.ready()
        yield pg_pool
    finally:
        await pg_pool.close()


async def test_acquire_with_context(pool_manager):
    """acquire_master() used as an async context yields an SAConnection."""
    async with pool_manager.acquire_master() as conn:
        assert isinstance(conn, SAConnection)
        cursor = await conn.execute("SELECT 1")
        assert await cursor.fetchall() == [(1,)]


async def test_acquire_without_context(pool_manager):
    conn = await pool_manager.acquire_master()
    assert isinstance(conn, SAConnection)
    cursor = await conn.execute("SELECT 1")
    assert await cursor.fetchall() == [(1,)]


async def test_metrics(pool_manager):
    async with pool_manager.acquire_master():
        assert pool_manager.metrics().drivers == [
            DriverMetrics(max=11, min=2, idle=0, used=2, host=mock.ANY)
        ]
--------------------------------------------------------------------------------
/tests/test_asyncpg.py:
--------------------------------------------------------------------------------
import mock
import pytest
from asyncpg import Connection

from hasql.asyncpg import PoolManager
from hasql.metrics import DriverMetrics


@pytest.fixture
async def pool_manager(pg_dsn):
    pg_pool = PoolManager(
        dsn=pg_dsn,
        fallback_master=True,
        pool_factory_kwargs={"min_size": 10, "max_size": 10},
    )
    try:
        await pg_pool.ready()
        yield pg_pool
    finally:
        await pg_pool.close()


async def test_acquire_with_context(pool_manager):
    async with pool_manager.acquire_master() as conn:
        assert isinstance(conn, Connection)
        assert await conn.fetch("SELECT 1") == [(1,)]


async def test_acquire_without_context(pool_manager):
    conn =
await pool_manager.acquire_master()
    assert isinstance(conn, Connection)
    assert await conn.fetch("SELECT 1") == [(1,)]


async def test_close(pool_manager):
    # Relies on asyncpg's private ``_closed`` flag to confirm the close.
    asyncpg_pool = await pool_manager.balancer.get_pool(read_only=False)
    await pool_manager.close()
    assert asyncpg_pool._closed


async def test_terminate(pool_manager):
    asyncpg_pool = await pool_manager.balancer.get_pool(read_only=False)
    await pool_manager.terminate()
    assert asyncpg_pool._closed


async def test_release(pool_manager):
    """Acquire/release round-trips restore the pool's free-slot count."""
    asyncpg_pool = await pool_manager.balancer.get_pool(read_only=False)
    assert pool_manager.get_pool_freesize(asyncpg_pool) == 10
    conn = await pool_manager.acquire_master()
    assert pool_manager.get_pool_freesize(asyncpg_pool) == 9
    await pool_manager.release(conn)
    assert pool_manager.get_pool_freesize(asyncpg_pool) == 10


async def test_metrics(pool_manager):
    async with pool_manager.acquire_master():
        assert pool_manager.metrics().drivers == [
            DriverMetrics(max=11, min=11, idle=9, used=2, host=mock.ANY)
        ]
--------------------------------------------------------------------------------
/tests/test_asyncsqlalchemy.py:
--------------------------------------------------------------------------------
import mock
import pytest
import sqlalchemy as sa
from sqlalchemy.ext.asyncio import AsyncConnection, AsyncEngine

from hasql.asyncsqlalchemy import PoolManager
from hasql.metrics import DriverMetrics


@pytest.fixture
async def pool_manager(pg_dsn):
    pg_pool = PoolManager(
        dsn=pg_dsn,
        fallback_master=True,
        pool_factory_kwargs={"pool_size": 10},
    )
    try:
        await pg_pool.ready()
        yield pg_pool
        pass  # NOTE(review): dead statement left from editing; harmless
    finally:
        await pg_pool.close()


async def test_acquire_with_context(pool_manager):
    async with pool_manager.acquire_master() as conn:
        assert isinstance(conn, AsyncConnection)
        assert await conn.scalar(sa.text("SELECT 1")) == 1


async def test_acquire_without_context(pool_manager):
    conn = await pool_manager.acquire_master()
    assert isinstance(conn, AsyncConnection)
    assert await conn.scalar(sa.text("SELECT 1")) == 1


async def test_close(pool_manager):
    """close() returns every checked-out connection to the engine."""
    sqlalchemy_pool: AsyncEngine = await pool_manager.balancer.get_pool(
        read_only=False,
    )
    assert sqlalchemy_pool.sync_engine.pool.checkedout() > 0
    await pool_manager.close()
    assert sqlalchemy_pool.sync_engine.pool.checkedout() == 0


async def test_terminate(pool_manager):
    # Observes termination via the sync pool's overflow counter.
    sqlalchemy_pool: AsyncEngine = await pool_manager.balancer.get_pool(
        read_only=False,
    )
    assert sqlalchemy_pool.sync_engine.pool.overflow() == -10
    await pool_manager.terminate()
    assert sqlalchemy_pool.sync_engine.pool.overflow() == -11


async def test_release(pool_manager):
    sqlalchemy_pool = await pool_manager.balancer.get_pool(read_only=False)
    assert pool_manager.get_pool_freesize(sqlalchemy_pool) == 10
    conn = await pool_manager.acquire_master()
    assert pool_manager.get_pool_freesize(sqlalchemy_pool) == 9
    await pool_manager.release(conn)
    assert pool_manager.get_pool_freesize(sqlalchemy_pool) == 10


async def test_is_connection_closed(pool_manager):
    async with pool_manager.acquire_master() as conn:
        assert not pool_manager.is_connection_closed(conn)
        await conn.close()
        assert pool_manager.is_connection_closed(conn)


async def test_metrics(pool_manager):
    async with pool_manager.acquire_master():
        assert pool_manager.metrics().drivers == [
            DriverMetrics(max=11, min=0, idle=0, used=2, host=mock.ANY)
        ]
--------------------------------------------------------------------------------
/tests/test_balancer_policy.py:
--------------------------------------------------------------------------------
import asyncio

import pytest
from async_timeout import timeout

from hasql.balancer_policy import (
    GreedyBalancerPolicy,
    RandomWeightedBalancerPolicy,
    RoundRobinBalancerPolicy,
)
from tests.mocks import TestPoolManager

# Every test below runs once per balancing policy implementation.
balancer_policies = pytest.mark.parametrize(
    "balancer_policy",
    [
        GreedyBalancerPolicy,
        RandomWeightedBalancerPolicy,
        RoundRobinBalancerPolicy,
    ],
)


@pytest.fixture
def make_dsn():
    """Factory for a multi-host DSN with one master and N replicas."""

    def make(replicas_count: int):
        dsn = "postgresql://test:test@master:5432"
        replica_hosts = [f"replica{i}" for i in range(1, replicas_count + 1)]
        if replica_hosts:
            dsn += "," + ",".join(replica_hosts)
        return dsn + "/test"

    return make


@pytest.fixture
def make_pool_manager(make_dsn):
    """Factory for a mock-backed TestPoolManager with short refresh timings."""

    async def make(balancer_policy, replicas_count: int = 2):
        pool_manager = TestPoolManager(
            dsn=make_dsn(replicas_count),
            balancer_policy=balancer_policy,
            refresh_timeout=0.2,
            refresh_delay=0.1,
            acquire_timeout=0.1,
        )
        return pool_manager

    return make


@balancer_policies
async def test_acquire_master(make_pool_manager, balancer_policy):
    pool_manager = await make_pool_manager(balancer_policy)
    async with timeout(1):
        async with pool_manager.acquire_master() as conn:
            assert await conn.is_master()


@balancer_policies
async def test_acquire_replica(make_pool_manager, balancer_policy):
    pool_manager = await make_pool_manager(balancer_policy)
    async with timeout(1):
        async with pool_manager.acquire_replica() as conn:
            assert not await conn.is_master()


@balancer_policies
async def test_acquire_replica_with_fallback_master(
    make_pool_manager,
    balancer_policy,
):
    # No replicas configured: fallback_master must hand out the master.
    pool_manager = await make_pool_manager(balancer_policy, replicas_count=0)
    async with timeout(1):
        async with pool_manager.acquire_replica(fallback_master=True) as conn:
            assert await conn.is_master()


@balancer_policies
async def test_acquire_master_as_replica(make_pool_manager, balancer_policy):
    # Weight 1.0 means the master always participates in replica balancing.
    pool_manager = await make_pool_manager(balancer_policy, replicas_count=0)
    async with timeout(1):
        async with pool_manager.acquire_replica(
            master_as_replica_weight=1.0,
        ) as conn:
            assert await conn.is_master()


@balancer_policies
async def test_dont_acquire_master_as_replica(
    make_pool_manager,
    balancer_policy,
):
    # Weight 0.0 excludes the master; with no replicas, acquire times out.
    pool_manager = await make_pool_manager(balancer_policy, replicas_count=0)
    with pytest.raises(asyncio.TimeoutError):
        async with pool_manager.acquire_replica(master_as_replica_weight=0.0):
            pass
--------------------------------------------------------------------------------
/tests/test_base_pool_manager.py:
--------------------------------------------------------------------------------
import asyncio
from asyncio import CancelledError
from contextlib import ExitStack
from typing import Optional
from unittest.mock import patch, AsyncMock

import pytest
from async_timeout import timeout as timeout_context

from hasql.base import BasePoolManager
from tests.mocks import TestPoolManager


@pytest.fixture
def dsn():
    return "postgresql://test:test@master,replica1,replica2/test"


@pytest.fixture
async def pool_manager(dsn):
    pool_manager = TestPoolManager(dsn, refresh_timeout=0.2, refresh_delay=0.1)
    try:
        yield pool_manager
    finally:
        await pool_manager.close()


def pool_is_master(pool_manager: BasePoolManager, pool):
    """Assert the manager classifies *pool* as the master, not a replica."""
    assert pool_manager.pool_is_master(pool)
    assert not pool_manager.pool_is_replica(pool)


def
pool_is_replica(pool_manager: BasePoolManager, pool):
    """Assert the manager classifies *pool* as a replica, not the master."""
    assert pool_manager.pool_is_replica(pool)
    assert not pool_manager.pool_is_master(pool)


async def test_wait_next_pool_check(pool_manager: BasePoolManager):
    """A shutdown master is dropped only after the next role-refresh cycle."""
    await pool_manager.ready()
    master_pool = await pool_manager.balancer.get_pool(read_only=False)
    master_pool.shutdown()
    assert pool_manager.master_pool_count == 1
    await pool_manager.wait_next_pool_check()
    assert pool_manager.master_pool_count == 0


async def test_ready_all_hosts(pool_manager: BasePoolManager):
    await pool_manager.ready()
    assert len(pool_manager.dsn) == pool_manager.available_pool_count


async def test_ready_min_count_hosts(pool_manager: BasePoolManager):
    """ready() with explicit counts succeeds once just the master is back."""
    await pool_manager.ready()
    replica_pools = await pool_manager.get_replica_pools()
    for replica_pool in replica_pools:
        replica_pool.shutdown()
    master_pool = await pool_manager.balancer.get_pool(read_only=False)
    master_pool.shutdown()
    await pool_manager.wait_next_pool_check()
    assert pool_manager.master_pool_count == 0
    assert pool_manager.replica_pool_count == 0
    master_pool.startup()
    master_pool.set_master(True)
    await pool_manager.ready(masters_count=1, replicas_count=0)
    assert pool_manager.master_pool_count == 1
    assert pool_manager.replica_pool_count == 0


@pytest.mark.parametrize(
    ["masters_count", "replicas_count"],
    [
        [-1, 5],
        [2, -10],
        [1, None],
        [None, 2],
    ],
)
async def test_ready_with_invalid_arguments(
    pool_manager: BasePoolManager,
    masters_count: Optional[int],
    replicas_count: Optional[int],
):
    # Negative counts or a mixed explicit/None pair must be rejected.
    with pytest.raises(ValueError):
        await pool_manager.ready(masters_count, replicas_count)


async def test_wait_db_restart(pool_manager: BasePoolManager):
    """A restarted former master rejoins as a replica (see TestPool.startup)."""
    await pool_manager.ready()
    master_pool = await pool_manager.balancer.get_pool(read_only=False)
    assert pool_manager.pool_is_master(master_pool)
    master_pool.shutdown()
    await pool_manager.wait_next_pool_check()
    assert pool_manager.master_pool_count == 0
    master_pool.startup()
    await pool_manager.wait_next_pool_check()
    assert pool_manager.master_pool_count == 0
    assert pool_manager.pool_is_replica(master_pool)


async def test_master_shutdown(pool_manager: BasePoolManager):
    await pool_manager.ready()
    master_pool = await pool_manager.balancer.get_pool(read_only=False)
    assert pool_manager.pool_is_master(master_pool)
    master_pool.shutdown()
    await pool_manager.wait_next_pool_check()
    assert pool_manager.master_pool_count == 0


async def test_replica_shutdown(pool_manager: BasePoolManager):
    await pool_manager.ready()
    replica_pool = await pool_manager.balancer.get_pool(read_only=True)
    assert pool_manager.pool_is_replica(replica_pool)
    assert pool_manager.replica_pool_count == 2
    replica_pool.shutdown()
    await pool_manager.wait_next_pool_check()
    assert pool_manager.replica_pool_count == 1


async def test_change_master(pool_manager: BasePoolManager):
    """A failover (roles swapped) is picked up by the next refresh."""
    await pool_manager.ready()
    master_pool = await pool_manager.balancer.get_pool(read_only=False)
    replica_pool = await pool_manager.balancer.get_pool(read_only=True)
    pool_is_master(pool_manager, master_pool)
    pool_is_replica(pool_manager, replica_pool)
    master_pool.set_master(False)
    replica_pool.set_master(True)
    await pool_manager.wait_next_pool_check()
    pool_is_master(pool_manager, replica_pool)
    pool_is_replica(pool_manager, master_pool)


async def test_define_roles(pool_manager: BasePoolManager):
    await pool_manager.ready()
    master_pool = await pool_manager.balancer.get_pool(read_only=False)
    replica_pool = await
pool_manager.balancer.get_pool(read_only=True)
    pool_is_master(pool_manager, master_pool)
    pool_is_replica(pool_manager, replica_pool)


async def test_acquire_master_and_release(pool_manager: BasePoolManager):
    """Plain acquire/release round-trip restores the master pool's freesize."""
    await pool_manager.ready()
    master_pool = await pool_manager.balancer.get_pool(read_only=False)
    init_freesize = pool_manager.get_pool_freesize(master_pool)
    connection = await pool_manager.acquire_master()
    assert pool_manager.get_pool_freesize(master_pool) + 1 == init_freesize
    assert connection in master_pool.used
    await pool_manager.release(connection)
    assert connection not in master_pool.used
    assert pool_manager.get_pool_freesize(master_pool) == init_freesize


async def test_acquire_with_context(pool_manager: BasePoolManager):
    """The async-context form releases the connection on exit."""
    await pool_manager.ready()
    master_pool = await pool_manager.balancer.get_pool(read_only=False)
    init_freesize = pool_manager.get_pool_freesize(master_pool)
    async with pool_manager.acquire_master() as connection:
        assert pool_manager.get_pool_freesize(master_pool) + 1 == init_freesize
        assert connection in master_pool.used
    assert connection not in master_pool.used
    assert pool_manager.get_pool_freesize(master_pool) == init_freesize


async def test_acquire_replica_with_fallback_master_is_true(
    pool_manager: BasePoolManager,
):
    # With all replicas down, fallback_master serves from the master pool.
    await pool_manager.ready()
    master_pool = await pool_manager.balancer.get_pool(read_only=False)
    replica_pools = await pool_manager.get_replica_pools()
    for replica_pool in replica_pools:
        assert pool_manager.pool_is_replica(replica_pool)
        replica_pool.shutdown()
    await pool_manager.wait_next_pool_check()
    assert pool_manager.replica_pool_count == 0
    async with timeout_context(1):
        async with pool_manager.acquire_replica(
            fallback_master=True,
        ) as connection:
            assert connection in master_pool.used


async def test_acquire_replica_with_fallback_master_is_false(
    pool_manager: BasePoolManager,
):
    # Without the fallback, acquiring a replica must hang until timeout.
    await pool_manager.ready()
    replica_pools = await pool_manager.get_replica_pools()
    for replica_pool in replica_pools:
        assert pool_manager.pool_is_replica(replica_pool)
        replica_pool.shutdown()
    await pool_manager.wait_next_pool_check()
    assert pool_manager.replica_pool_count == 0
    with pytest.raises(asyncio.TimeoutError):
        async with timeout_context(1):
            await pool_manager.acquire_replica(fallback_master=False)


async def test_close(pool_manager: BasePoolManager):
    """close() empties both role sets and gracefully closes every connection."""
    await pool_manager.ready()
    assert pool_manager.master_pool_count > 0
    assert pool_manager.replica_pool_count > 0
    await pool_manager.close()
    assert pool_manager.master_pool_count == 0
    assert pool_manager.replica_pool_count == 0
    for pool in pool_manager:
        assert pool is not None
        assert all(
            pool_manager.is_connection_closed(conn) for conn in pool.connections
        )
        assert all(conn.close.call_count == 1 for conn in pool.connections)


async def test_terminate(pool_manager: BasePoolManager):
    """terminate() uses the abrupt path (terminate) on every connection."""
    await pool_manager.ready()
    assert pool_manager.master_pool_count > 0
    assert pool_manager.replica_pool_count > 0
    await pool_manager.terminate()
    assert pool_manager.master_pool_count == 0
    assert pool_manager.replica_pool_count == 0
    for pool in pool_manager:
        assert pool is not None
        assert all(
            pool_manager.is_connection_closed(conn) for conn in pool.connections
        )
        assert all(conn.terminate.call_count == 1 for conn in pool.connections)


async def test_master_behind_firewall(pool_manager: BasePoolManager):
    """A hanging (firewalled) master is dropped and later re-added."""
    await pool_manager.ready()
    assert pool_manager.master_pool_count == 1
    master_pool = (await pool_manager.get_master_pools())[0]
    master_pool.behind_firewall(True)
    await pool_manager.wait_next_pool_check()
    assert pool_manager.master_pool_count == 0
    master_pool.behind_firewall(False)
    await pool_manager.wait_next_pool_check()
    assert pool_manager.master_pool_count == 1


async def test_replica_behind_firewall(pool_manager: BasePoolManager):
    """Each replica is dropped while firewalled and restored afterwards."""
    await pool_manager.ready()
    replica_pool_count = 2
    assert pool_manager.replica_pool_count == replica_pool_count
    replica_pools = await pool_manager.get_replica_pools()
    for replica_pool in replica_pools:
        replica_pool.behind_firewall(True)
        await pool_manager.wait_next_pool_check()
        assert pool_manager.replica_pool_count == replica_pool_count - 1
        replica_pool.behind_firewall(False)
        await pool_manager.wait_next_pool_check()
        assert pool_manager.replica_pool_count == replica_pool_count


async def test_check_pool_canceled_error_while_releasing_connection(
    pool_manager: BasePoolManager
):
    """A CancelledError raised during release must not kill the refresh tasks."""
    await pool_manager.ready()
    master_pool = await pool_manager.balancer.get_pool(read_only=False)

    with ExitStack() as stack:
        # Force the role check to fail, then make the subsequent release
        # raise CancelledError — the background tasks must survive both.
        for conn in master_pool.connections:
            stack.enter_context(
                patch.object(
                    conn, 'is_master', AsyncMock(side_effect=Exception)
                )
            )
        stack.enter_context(
            patch.object(
                master_pool, 'release', AsyncMock(side_effect=CancelledError)
            )
        )
        await asyncio.sleep(1)
        for task in pool_manager._refresh_role_tasks:
            assert not task.done()
--------------------------------------------------------------------------------
/tests/test_metrics.py:
--------------------------------------------------------------------------------
import pytest
from mock import mock

from hasql.metrics import HasqlMetrics
from tests.conftest import (
    setup_aiopg,
    setup_aiopgsa,
    setup_asyncpg,
    setup_asyncsqlalchemy,
setup_psycopg3,
)


@pytest.mark.parametrize(
    "pool_manager_factory",
    [
        (setup_aiopg),
        (setup_aiopgsa),
        (setup_asyncpg),
        (setup_asyncsqlalchemy),
        (setup_psycopg3),
    ],
)
async def test_hasql_context_metrics(pool_manager_factory, pg_dsn):
    """hasql-level counters for the async-context acquire path, per driver."""
    async with pool_manager_factory(pg_dsn) as pool_manager:
        async with pool_manager.acquire_master():
            metrics = pool_manager.metrics().hasql
            assert metrics == HasqlMetrics(
                pool=1,
                acquire={pool_manager.host(pool_manager.pools[0]): 1},
                pool_time=mock.ANY,
                acquire_time=mock.ANY,
                add_connections=mock.ANY,
                remove_connections=mock.ANY,
            )
            # Inside the context: one add, no removes yet.
            assert list(metrics.add_connections.values()) == [1]
            assert metrics.remove_connections == {}

        metrics = pool_manager.metrics().hasql
        assert metrics == HasqlMetrics(
            pool=1,
            acquire={pool_manager.host(pool_manager.pools[0]): 1},
            pool_time=mock.ANY,
            acquire_time=mock.ANY,
            add_connections=mock.ANY,
            remove_connections=mock.ANY,
        )
        # After the context exits the release is counted as a remove.
        assert list(metrics.add_connections.values()) == [1]
        assert list(metrics.remove_connections.values()) == [1]


@pytest.mark.parametrize(
    "pool_manager_factory",
    [
        (setup_aiopg),
        (setup_aiopgsa),
        (setup_asyncpg),
        (setup_asyncsqlalchemy),
        (setup_psycopg3),
    ],
)
async def test_hasql_metrics(pool_manager_factory, pg_dsn):
    """Same counters for the explicit acquire/release path."""
    async with pool_manager_factory(pg_dsn) as pool_manager:
        _conn = await pool_manager.acquire_master()
        metrics = pool_manager.metrics().hasql
        assert metrics == HasqlMetrics(
            pool=1,
            acquire={pool_manager.host(pool_manager.pools[0]): 1},
            pool_time=mock.ANY,
            acquire_time=mock.ANY,
            add_connections=mock.ANY,
            remove_connections=mock.ANY,
        )
        assert list(metrics.add_connections.values()) == [1]
        assert metrics.remove_connections == {}

        await pool_manager.release(connection=_conn)

        metrics = pool_manager.metrics().hasql
        assert metrics == HasqlMetrics(
            pool=1,
            acquire={pool_manager.host(pool_manager.pools[0]): 1},
            pool_time=mock.ANY,
            acquire_time=mock.ANY,
            add_connections=mock.ANY,
            remove_connections=mock.ANY,
        )
        assert list(metrics.add_connections.values()) == [1]
        assert list(metrics.remove_connections.values()) == [1]


@pytest.mark.parametrize(
    "pool_manager_factory",
    [
        (setup_aiopg),
        (setup_aiopgsa),
        (setup_asyncpg),
        (setup_asyncsqlalchemy),
        (setup_psycopg3),
    ],
)
async def test_hasql_close_metrics(pool_manager_factory, pg_dsn):
    """close() counts the still-held connection as removed."""
    async with pool_manager_factory(pg_dsn) as pool_manager:
        _ = await pool_manager.acquire_master()
        await pool_manager.close()

        metrics = pool_manager.metrics().hasql
        assert metrics == HasqlMetrics(
            pool=1,
            acquire={pool_manager.host(pool_manager.pools[0]): 1},
            pool_time=mock.ANY,
            acquire_time=mock.ANY,
            add_connections=mock.ANY,
            remove_connections=mock.ANY,
        )
        assert list(metrics.add_connections.values()) == [1]
        assert list(metrics.remove_connections.values()) == [1]
--------------------------------------------------------------------------------
/tests/test_psycopg3.py:
--------------------------------------------------------------------------------
import asyncio
from contextlib import AsyncExitStack

import mock
import pytest
from psycopg import AsyncConnection
from psycopg_pool import TooManyRequests

from hasql.metrics import DriverMetrics
from hasql.psycopg3 import PoolManager


@pytest.fixture
def pool_size() -> int:
    return 10


@pytest.fixture
async def pool_manager(pg_dsn, pool_size):
    pg_pool = PoolManager(
        dsn=pg_dsn,
        fallback_master=True,
acquire_timeout=1,
        pool_factory_kwargs={"min_size": pool_size, "max_size": pool_size},
    )
    try:
        await pg_pool.ready()
        yield pg_pool
    finally:
        await pg_pool.close()


async def test_acquire_with_context(pool_manager):
    async with pool_manager.acquire_master() as conn:
        assert isinstance(conn, AsyncConnection)
        async with conn.cursor() as cursor:
            await cursor.execute("SELECT 1")
            assert await cursor.fetchall() == [(1,)]


async def test_acquire_without_context(pool_manager):
    conn = await pool_manager.acquire_master()
    assert isinstance(conn, AsyncConnection)
    async with conn.cursor() as cursor:
        await cursor.execute("SELECT 1")
        assert await cursor.fetchall() == [(1,)]


async def test_close(pool_manager):
    aiopg_pool = await pool_manager.balancer.get_pool(read_only=False)
    await pool_manager.close()
    assert aiopg_pool.closed


async def test_release(pool_manager):
    """Acquire/release round-trips restore the pool's free-slot count."""
    aiopg_pool = await pool_manager.balancer.get_pool(read_only=False)
    assert pool_manager.get_pool_freesize(aiopg_pool) == 10
    conn = await pool_manager.acquire_master()
    assert pool_manager.get_pool_freesize(aiopg_pool) == 9
    await pool_manager.release(conn)
    assert pool_manager.get_pool_freesize(aiopg_pool) == 10


async def test_is_connection_closed(pool_manager):
    async with pool_manager.acquire_master() as conn:
        assert not pool_manager.is_connection_closed(conn)
        await conn.close()
        assert pool_manager.is_connection_closed(conn)


async def test_acquire_with_timeout_context(pool_manager, pool_size):
    """Exhausting the pool raises TooManyRequests; release fully recovers it."""
    conns = []
    for _ in range(pool_size):
        conns.append(await pool_manager.acquire_master())

    with pytest.raises(TooManyRequests):
        await pool_manager.acquire_master()

    for conn in conns:
        await pool_manager.release(conn)
    conns.clear()

    for pool in pool_manager.pools:
        assert pool_manager.get_pool_freesize(pool) == pool_size

    for _ in range(pool_size):
        async with pool_manager.acquire_master() as conn:
            pass


async def test_acquire_with_timeout_context2(pool_manager, pool_size):
    """Concurrent waiters all time out cleanly without corrupting the pool."""
    async with AsyncExitStack() as stack:
        for _ in range(pool_size):
            await stack.enter_async_context(pool_manager.acquire_master())

        async def wait_for_smth():
            with pytest.raises(TooManyRequests):
                async with pool_manager.acquire_master():
                    pass

        await asyncio.gather(*[wait_for_smth() for _ in range(pool_size)])

    for pool in pool_manager.pools:
        assert pool_manager.get_pool_freesize(pool) == pool_size

    async with AsyncExitStack() as stack:
        await stack.enter_async_context(pool_manager.acquire_master())


async def test_metrics(pool_manager):
    async with pool_manager.acquire_master():
        assert pool_manager.metrics().drivers == [
            DriverMetrics(max=11, min=11, idle=9, used=11, host=mock.ANY)
        ]
--------------------------------------------------------------------------------
/tests/test_trouble.py:
--------------------------------------------------------------------------------
import asyncio
from unittest import mock

import pytest
from async_timeout import timeout

from tests.conftest import (
    setup_aiopg,
    setup_aiopgsa,
    setup_asyncpg,
    setup_asyncsqlalchemy,
    setup_psycopg3,
)


@pytest.mark.parametrize(
    "pool_manager_factory",
    [
        (setup_aiopg),
        (setup_aiopgsa),
        (setup_asyncpg),
        (setup_asyncsqlalchemy),
        (setup_psycopg3),
    ],
)
async def test_unavailable_db(pool_manager_factory, localhost, db_server_port):
    """Creating and tearing down a manager against a dead DB must not hang."""
    async with timeout(1):
        pg_dsn = f"postgres://pg:pg@{localhost}:{db_server_port}/pg"
        async with pool_manager_factory(pg_dsn):
            pass


@pytest.mark.parametrize( 34 | "pool_manager_factory,name", 35 | [ 36 | (setup_aiopg, "aiopg"), 37 | (setup_aiopgsa, "aiopg_sa"), 38 | (setup_asyncpg, "asyncpg"), 39 | (setup_asyncsqlalchemy, "asyncsqlalchemy"), 40 | (setup_psycopg3, "psycopg3"), 41 | ], 42 | ) 43 | async def test_catch_cancelled_error(pool_manager_factory, pg_dsn, name): 44 | async with pool_manager_factory(pg_dsn) as pool_manager: 45 | await pool_manager.ready() 46 | assert pool_manager.available_pool_count > 0 47 | with mock.patch( 48 | f"hasql.{name}.PoolManager._is_master", 49 | side_effect=asyncio.CancelledError(), 50 | ): 51 | await pool_manager.wait_next_pool_check() 52 | assert pool_manager.available_pool_count == 0 53 | await pool_manager.wait_next_pool_check() 54 | assert pool_manager.available_pool_count > 0 55 | -------------------------------------------------------------------------------- /tests/test_utils.py: -------------------------------------------------------------------------------- 1 | from io import StringIO 2 | from typing import Iterable, Optional, Union 3 | 4 | import pytest 5 | 6 | from hasql.utils import Dsn, host_is_ipv6_address, split_dsn 7 | 8 | 9 | FORMAT_DSN_TEST_CASES = [ 10 | [ 11 | "localhost", 12 | 5432, 13 | None, 14 | None, 15 | None, 16 | "postgresql://localhost:5432", 17 | ], 18 | [ 19 | "localhost", 20 | 5432, 21 | "user", 22 | None, 23 | None, 24 | "postgresql://user@localhost:5432", 25 | ], 26 | [ 27 | "localhost", 28 | 5432, 29 | "user", 30 | "pwd", 31 | None, 32 | "postgresql://user:pwd@localhost:5432", 33 | ], 34 | [ 35 | "localhost", 36 | 5432, 37 | None, 38 | None, 39 | "testdb", 40 | "postgresql://localhost:5432/testdb", 41 | ], 42 | [ 43 | "localhost", 44 | "5432", 45 | "user", 46 | None, 47 | "testdb", 48 | "postgresql://user@localhost:5432/testdb", 49 | ], 50 | [ 51 | "localhost", 52 | 5432, 53 | "user", 54 | "pwd", 55 | "testdb", 56 | "postgresql://user:pwd@localhost:5432/testdb", 57 | ], 58 | ] 59 | 60 | 61 | @pytest.mark.parametrize( 62 | 
["host", "port", "user", "password", "dbname", "expected_result"], 63 | FORMAT_DSN_TEST_CASES, 64 | ) 65 | def test_format_dsn( 66 | host: str, 67 | port: Union[str, int], 68 | user: Optional[str], 69 | password: Optional[str], 70 | dbname: Optional[str], 71 | expected_result: str, 72 | ): 73 | result_dsn = Dsn( 74 | netloc=f"{host}:{port}", 75 | user=user, 76 | password=password, 77 | dbname=dbname, 78 | ) 79 | assert str(result_dsn) == expected_result 80 | 81 | 82 | def build_url(*, host, user="", password="", dbname="test"): 83 | with StringIO() as fp: 84 | fp.write("postgresql://") 85 | 86 | if user: 87 | fp.write(user) 88 | if password: 89 | fp.write(":") 90 | fp.write(password) 91 | if user or password: 92 | fp.write("@") 93 | fp.write(host) 94 | if dbname: 95 | fp.write("/") 96 | fp.write(dbname) 97 | 98 | return fp.getvalue() 99 | 100 | 101 | def make_examples(): 102 | cases = [ 103 | dict(), 104 | dict(user="test"), 105 | dict(password="secret"), 106 | dict(user="test", password="secret"), 107 | ] 108 | 109 | hosts_cases = [ 110 | ["host1,host2", ["host1:5432", "host2:5432"]], 111 | ["host1:6432,host2", ["host1:6432", "host2:5432"]], 112 | ["host1,host2:6432", ["host1:6432", "host2:6432"]], 113 | ["host1,host2,host3", ["host1:5432", "host2:5432", "host3:5432"]], 114 | ["host1:6432,host2,host3", ["host1:6432", "host2:5432", "host3:5432"]], 115 | ["host1,host2:6432,host3", ["host1:5432", "host2:6432", "host3:5432"]], 116 | ["host1,host2,host3:6432", ["host1:6432", "host2:6432", "host3:6432"]], 117 | ] 118 | 119 | for case in cases: 120 | for (hosts, expected) in hosts_cases: 121 | yield [ 122 | build_url(host=hosts, **case), 123 | [build_url(host=host, **case) for host in expected], 124 | ] 125 | 126 | 127 | MULTI_DSN_PORT_CASES = list(make_examples()) 128 | 129 | 130 | @pytest.mark.parametrize( 131 | ["dsn", "expected_dsns"], 132 | MULTI_DSN_PORT_CASES, 133 | ids=[dsn for dsn, _ in MULTI_DSN_PORT_CASES], 134 | ) 135 | def test_multi_dsn_port(dsn: str, 
expected_dsns: Iterable[str]): 136 | for host_dsn, expected in zip(split_dsn(Dsn.parse(dsn)), expected_dsns): 137 | assert str(host_dsn) == expected 138 | 139 | 140 | def test_replace_dsn_params(): 141 | dsn = Dsn( 142 | netloc="localhost:5432", 143 | user="user", 144 | password="password", 145 | dbname="testdb", 146 | ) 147 | replaced_dsn = dsn.with_(password="***") 148 | assert str(replaced_dsn) == "postgresql://user:***@localhost:5432/testdb" 149 | 150 | 151 | def test_split_single_host_dsn(): 152 | source_dsn = "postgresql://user:pwd@localhost:5432/testdb" 153 | result_dsn = split_dsn(source_dsn) 154 | assert len(result_dsn) == 1 155 | assert str(result_dsn[0]) == source_dsn 156 | 157 | 158 | def test_split_single_host_dsn_without_port(): 159 | source_dsn = "postgresql://user:pwd@localhost/testdb" 160 | result_dsn = split_dsn(source_dsn, default_port=1) 161 | assert len(result_dsn) == 1 162 | assert str(result_dsn[0]) == "postgresql://user:pwd@localhost:1/testdb" 163 | 164 | 165 | def test_split_multi_host_dsn(): 166 | hosts = ",".join(["master:5432", "replica:5432", "replica:6432"]) 167 | source_dsn = f"postgresql://user:pwd@{hosts}/testdb" 168 | result_dsn = split_dsn(source_dsn) 169 | assert len(result_dsn) == 3 170 | master_dsn, fst_replica_dsn, snd_replica_dsn = result_dsn 171 | assert str(master_dsn) == "postgresql://user:pwd@master:5432/testdb" 172 | assert str(fst_replica_dsn) == "postgresql://user:pwd@replica:5432/testdb" 173 | assert str(snd_replica_dsn) == "postgresql://user:pwd@replica:6432/testdb" 174 | 175 | 176 | def test_split_dsn_skip_same_addreses(): 177 | source_dsn = "postgresql://user:pwd@localhost:5432,localhost:5432/testdb" 178 | result_dsn = split_dsn(source_dsn) 179 | assert len(result_dsn) == 1 180 | assert str(result_dsn[0]) == "postgresql://user:pwd@localhost:5432/testdb" 181 | 182 | 183 | def test_split_dsn_with_default_port(): 184 | source_dsn = "postgresql://user:pwd@master:6432,replica/testdb" 185 | result_dsn = 
split_dsn(source_dsn, default_port=15432) 186 | assert len(result_dsn) == 2 187 | master_dsn, replica_dsn = result_dsn 188 | assert str(master_dsn) == "postgresql://user:pwd@master:6432/testdb" 189 | assert str(replica_dsn) == "postgresql://user:pwd@replica:15432/testdb" 190 | 191 | 192 | @pytest.mark.parametrize( 193 | ["hosts_count"], 194 | [[1024]], 195 | ) 196 | def test_split_large_dsn(hosts_count: int): 197 | hosts = [f"host-{i}" for i in range(hosts_count)] 198 | large_dsn = "postgresql://user:pwd@" + ",".join(hosts) + "/testdb" 199 | result_dsn = split_dsn(large_dsn, default_port=5432) 200 | for i, dsn in enumerate(result_dsn): 201 | assert str(dsn) == f"postgresql://user:pwd@host-{i}:5432/testdb" 202 | 203 | 204 | def test_split_dsn_with_params(): 205 | dsn = ( 206 | "postgresql://user:password@master:5432,replica:5432/testdb?" 207 | "sslmode=verify-full&sslcert=/root/.postgresql/aa/postgresql.crt&" 208 | "sslkey=/root/.postgresql/aa/postgresql.key" 209 | ) 210 | expected_master_dsn = ( 211 | "postgresql://user:password@master:5432/testdb?" 212 | "sslmode=verify-full&sslcert=/root/.postgresql/aa/postgresql.crt&" 213 | "sslkey=/root/.postgresql/aa/postgresql.key" 214 | ) 215 | expected_replica_dsn = ( 216 | "postgresql://user:password@replica:5432/testdb?" 
217 | "sslmode=verify-full&sslcert=/root/.postgresql/aa/postgresql.crt&" 218 | "sslkey=/root/.postgresql/aa/postgresql.key" 219 | ) 220 | master_dsn, replica_dsn = split_dsn(dsn) 221 | assert str(master_dsn) == expected_master_dsn 222 | assert str(replica_dsn) == expected_replica_dsn 223 | 224 | 225 | def test_replace_dsn_part(): 226 | dsn = "postgresql://user:password@localhost:5432/testdb" 227 | expected_dsn = "postgresql://user:***@localhost:5432/testdb" 228 | result_dsn, *_ = split_dsn(dsn) 229 | dsn_with_hidden_password = result_dsn.with_(password="***") 230 | assert str(dsn_with_hidden_password) == expected_dsn 231 | 232 | 233 | @pytest.mark.parametrize( 234 | ["host", "expected_result"], 235 | [ 236 | ["yandex.ru", False], 237 | ["127.0.0.1", False], 238 | ["2001:DB8:3C4D:7777:260:3EFF:FE15:9501", True], 239 | ["2001:dead:beef::1", True], 240 | ], 241 | ) 242 | def test_host_is_ipv6_address(host: str, expected_result: bool): 243 | result = host_is_ipv6_address(host) 244 | assert result == expected_result 245 | 246 | 247 | def test_ipv6_host_in_dsn(): 248 | dsn = ( 249 | "postgresql://" 250 | "user:password@[" 251 | "2001:DB8:3C4D:7777:260:3EFF:FE15:9501" 252 | "]:5432/testdb" 253 | ) 254 | result_dsn, *_ = split_dsn(dsn) 255 | assert str(result_dsn) == dsn 256 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = lint,mypy,py3{8,9,10,11}{,-uvloop} 3 | 4 | [testenv] 5 | passenv = COVERALLS_*, FORCE_COLOR, PG_DSN 6 | allowlist_externals = coveralls 7 | 8 | extras = 9 | develop 10 | 11 | commands= 12 | pytest -vv \ 13 | --cov=hasql --cov-report=term-missing \ 14 | --doctest-modules \ 15 | --aiomisc-test-timeout=30 \ 16 | tests 17 | - coveralls 18 | 19 | [testenv:lint] 20 | allowlist_externals = pylama 21 | deps = 22 | pyflakes==2.4.0 23 | pylama 24 | 25 | commands= 26 | pylama -o pylama.ini hasql tests 27 | 28 | 
[testenv:checkdoc]
deps =
    collective.checkdocs
    pygments

commands =
    python setup.py checkdocs

[testenv:mypy]
allowlist_externals = mypy
basepython = python3.10
usedevelop = true

deps =
    mypy==1.5.1

commands =
    mypy --install-types --non-interactive hasql tests
--------------------------------------------------------------------------------