├── .gitignore ├── Caching.ipynb ├── PITCHME.md ├── Pooling.ipynb ├── README.md ├── breath.gif ├── bulk.jpg ├── but-wait.jpg ├── cache.gif ├── cache.jpg ├── complex_query.png ├── cookie.gif ├── dogpile.jpg ├── gevent_flow_control.png ├── green.png ├── lying.jpg ├── marvin.JPG ├── pool.jpg ├── presentation.md ├── presentation.pdf ├── rich.gif ├── run_away.gif └── thanks.gif /.gitignore: -------------------------------------------------------------------------------- 1 | .ipynb_checkpoints 2 | -------------------------------------------------------------------------------- /Caching.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Intro to SQLAlchemy and Caching\n", 8 | "The following code was written to use the pgbench database. In this case it was created with the following command: `pgbench -i -s 400 pgbench`. The timings in this file were from a Macbook Pro 2017 with an i5 running at 2.5Ghz and 8GB of RAM." 
9 | ] 10 | }, 11 | { 12 | "cell_type": "code", 13 | "execution_count": 1, 14 | "metadata": {}, 15 | "outputs": [], 16 | "source": [ 17 | "from sqlalchemy import MetaData, create_engine\n", 18 | "from sqlalchemy.ext.automap import automap_base\n", 19 | "metadata = MetaData()\n", 20 | "Base = automap_base()\n", 21 | "engine = create_engine('postgresql+psycopg2://jasonmyers@localhost:5432/pgbench')\n", 22 | "Base.prepare(engine, reflect=True)" 23 | ] 24 | }, 25 | { 26 | "cell_type": "code", 27 | "execution_count": 2, 28 | "metadata": {}, 29 | "outputs": [], 30 | "source": [ 31 | "Accounts = Base.classes.pgbench_accounts" 32 | ] 33 | }, 34 | { 35 | "cell_type": "code", 36 | "execution_count": 3, 37 | "metadata": {}, 38 | "outputs": [], 39 | "source": [ 40 | "from sqlalchemy.orm import Session\n", 41 | "session = Session(engine)" 42 | ] 43 | }, 44 | { 45 | "cell_type": "code", 46 | "execution_count": 4, 47 | "metadata": {}, 48 | "outputs": [], 49 | "source": [ 50 | "from sqlalchemy import func\n", 51 | "query = session.query(Accounts.bid, func.count(1)).group_by(Accounts.bid).limit(5000)" 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": 5, 57 | "metadata": {}, 58 | "outputs": [], 59 | "source": [ 60 | "def execute_query():\n", 61 | " query.all()" 62 | ] 63 | }, 64 | { 65 | "cell_type": "code", 66 | "execution_count": 6, 67 | "metadata": {}, 68 | "outputs": [ 69 | { 70 | "name": "stdout", 71 | "output_type": "stream", 72 | "text": [ 73 | "None\n", 74 | "CPU times: user 3.08 ms, sys: 1.54 ms, total: 4.62 ms\n", 75 | "Wall time: 24.9 s\n" 76 | ] 77 | } 78 | ], 79 | "source": [ 80 | "%%time\n", 81 | "print(execute_query())" 82 | ] 83 | }, 84 | { 85 | "cell_type": "code", 86 | "execution_count": 7, 87 | "metadata": {}, 88 | "outputs": [], 89 | "source": [ 90 | "import threading\n", 91 | "def async_creation_runner(cache, somekey, creator, mutex):\n", 92 | " def runner():\n", 93 | " try:\n", 94 | " value = creator()\n", 95 | " cache.set(somekey, 
value)\n", 96 | " finally:\n", 97 | " mutex.release()\n", 98 | "\n", 99 | " thread = threading.Thread(target=runner)\n", 100 | " thread.start()" 101 | ] 102 | }, 103 | { 104 | "cell_type": "code", 105 | "execution_count": 8, 106 | "metadata": {}, 107 | "outputs": [], 108 | "source": [ 109 | "from dogpile.cache.util import sha1_mangle_key\n", 110 | "def unicode_sha1_mangle_key(key):\n", 111 | " return sha1_mangle_key(key.encode('ascii', 'ignore'))\n", 112 | "\n", 113 | "\n", 114 | "def mangle_key(key):\n", 115 | " prefix, key = key.split(':', 1)\n", 116 | " base = 'cookie:cache:'\n", 117 | " if prefix:\n", 118 | " base += '{}'.format(prefix)\n", 119 | " else:\n", 120 | " raise ValueError(key)\n", 121 | " value = '{}:{}'.format(base, unicode_sha1_mangle_key(key))\n", 122 | " return value" 123 | ] 124 | }, 125 | { 126 | "cell_type": "code", 127 | "execution_count": 9, 128 | "metadata": {}, 129 | "outputs": [], 130 | "source": [ 131 | "import sys\n", 132 | "from dogpile.cache import make_region\n", 133 | "regions = {}\n", 134 | "\n", 135 | "regions['default'] = make_region(async_creation_runner=async_creation_runner,\n", 136 | " key_mangler=mangle_key).configure(\n", 137 | " 'dogpile.cache.redis',\n", 138 | " arguments={\n", 139 | " 'host': 'localhost',\n", 140 | " 'port': 6379,\n", 141 | " 'db': 0,\n", 142 | " 'redis_expiration_time': 60*60*2, # 2 hours\n", 143 | " 'distributed_lock': True,\n", 144 | " 'lock_timeout': 120,\n", 145 | " 'lock_sleep': 5\n", 146 | " }\n", 147 | ")\n" 148 | ] 149 | }, 150 | { 151 | "cell_type": "code", 152 | "execution_count": 10, 153 | "metadata": {}, 154 | "outputs": [], 155 | "source": [ 156 | "def _key_from_query(query, qualifier=None):\n", 157 | " stmt = query.with_labels().statement\n", 158 | " compiled = stmt.compile()\n", 159 | " params = compiled.params\n", 160 | "\n", 161 | " return \" \".join([str(compiled)] +\n", 162 | " [str(params[k]) for k in sorted(params)])" 163 | ] 164 | }, 165 | { 166 | "cell_type": "code", 167 | 
"execution_count": 11, 168 | "metadata": {}, 169 | "outputs": [], 170 | "source": [ 171 | "from sqlalchemy.orm.query import Query\n", 172 | "from dogpile.cache.api import NO_VALUE\n", 173 | "\n", 174 | "\n", 175 | "class CachingQuery(Query):\n", 176 | "\n", 177 | " def __init__(self, regions, *args, **kw):\n", 178 | " self.cache_regions = regions\n", 179 | " self.saved_to_cache = False\n", 180 | " Query.__init__(self, *args, **kw)\n", 181 | "\n", 182 | " def __iter__(self):\n", 183 | " if hasattr(self, '_cache_region'):\n", 184 | " return self.get_value(\n", 185 | " createfunc=lambda: list(Query.__iter__(self)))\n", 186 | " else:\n", 187 | " return Query.__iter__(self)\n", 188 | " \n", 189 | " def _get_cache_plus_key(self):\n", 190 | " dogpile_region = self.cache_regions[self._cache_region.region]\n", 191 | " if self._cache_region.cache_key:\n", 192 | " key = self._cache_region.cache_key\n", 193 | " else:\n", 194 | " key = _key_from_query(self)\n", 195 | " return dogpile_region, key\n", 196 | " \n", 197 | " def get_value(self, merge=True, createfunc=None,\n", 198 | " expiration_time=None, ignore_expiration=False):\n", 199 | " dogpile_region, cache_key = self._get_cache_plus_key()\n", 200 | "\n", 201 | " assert not ignore_expiration or not createfunc, \\\n", 202 | " \"Can't ignore expiration and also provide createfunc\"\n", 203 | "\n", 204 | " if ignore_expiration or not createfunc:\n", 205 | " cached_value = dogpile_region.get(\n", 206 | " cache_key,\n", 207 | " expiration_time=expiration_time,\n", 208 | " ignore_expiration=ignore_expiration\n", 209 | " )\n", 210 | " else:\n", 211 | " try:\n", 212 | " cached_value = dogpile_region.get_or_create(\n", 213 | " cache_key,\n", 214 | " createfunc,\n", 215 | " expiration_time=expiration_time\n", 216 | " )\n", 217 | " except ConnectionError:\n", 218 | " logger.error('Cannot connect to query caching backend!')\n", 219 | " cached_value = createfunc()\n", 220 | " if cached_value is NO_VALUE:\n", 221 | " raise 
KeyError(cache_key)\n", 222 | " if merge:\n", 223 | " cached_value = self.merge_result(cached_value, load=False)\n", 224 | " return cached_value\n", 225 | " \n", 226 | " def set_value(self, value):\n", 227 | " dogpile_region, cache_key = self._get_cache_plus_key()\n", 228 | " try:\n", 229 | " dogpile_region.set(cache_key, value)\n", 230 | " self.saved_to_cache = True\n", 231 | " except ConnectionError:\n", 232 | " logger.error('Cannot connect to query caching backend!')" 233 | ] 234 | }, 235 | { 236 | "cell_type": "code", 237 | "execution_count": 12, 238 | "metadata": {}, 239 | "outputs": [], 240 | "source": [ 241 | "from sqlalchemy.orm.interfaces import MapperOption\n", 242 | "\n", 243 | "class FromCache(MapperOption):\n", 244 | " \"\"\"Specifies that a Query should load results from a cache.\"\"\"\n", 245 | "\n", 246 | " propagate_to_loaders = False\n", 247 | "\n", 248 | " def __init__(self, region=\"default\", cache_key=None, cache_prefix=None):\n", 249 | " self.region = region\n", 250 | " self.cache_key = cache_key\n", 251 | " self.cache_prefix = cache_prefix\n", 252 | "\n", 253 | " def process_query(self, query):\n", 254 | " query._cache_region = self" 255 | ] 256 | }, 257 | { 258 | "cell_type": "code", 259 | "execution_count": 13, 260 | "metadata": {}, 261 | "outputs": [], 262 | "source": [ 263 | "def query_callable(regions, query_cls=CachingQuery):\n", 264 | " def query(*arg, **kw):\n", 265 | " return query_cls(regions, *arg, **kw)\n", 266 | " return query" 267 | ] 268 | }, 269 | { 270 | "cell_type": "code", 271 | "execution_count": 14, 272 | "metadata": {}, 273 | "outputs": [], 274 | "source": [ 275 | "from sqlalchemy.orm import sessionmaker\n", 276 | "\n", 277 | "def init_caching_session(engine=None):\n", 278 | " if not engine:\n", 279 | " return\n", 280 | "\n", 281 | " return sessionmaker(\n", 282 | " bind=engine, autoflush=False, autocommit=False,\n", 283 | " query_cls=query_callable(regions)\n", 284 | " )" 285 | ] 286 | }, 287 | { 288 | "cell_type": 
"code", 289 | "execution_count": 15, 290 | "metadata": {}, 291 | "outputs": [], 292 | "source": [ 293 | "CachingSession = init_caching_session(engine)\n", 294 | "caching_session=CachingSession()" 295 | ] 296 | }, 297 | { 298 | "cell_type": "code", 299 | "execution_count": 16, 300 | "metadata": {}, 301 | "outputs": [], 302 | "source": [ 303 | "query = caching_session.query(Accounts.bid, func.count(1)).group_by(Accounts.bid).limit(5000).options(\n", 304 | " FromCache('default'))" 305 | ] 306 | }, 307 | { 308 | "cell_type": "code", 309 | "execution_count": 17, 310 | "metadata": {}, 311 | "outputs": [ 312 | { 313 | "name": "stdout", 314 | "output_type": "stream", 315 | "text": [ 316 | "None\n", 317 | "CPU times: user 10.4 ms, sys: 7.3 ms, total: 17.7 ms\n", 318 | "Wall time: 24.8 s\n" 319 | ] 320 | } 321 | ], 322 | "source": [ 323 | "%%time\n", 324 | "print(execute_query())" 325 | ] 326 | }, 327 | { 328 | "cell_type": "code", 329 | "execution_count": 18, 330 | "metadata": {}, 331 | "outputs": [ 332 | { 333 | "name": "stdout", 334 | "output_type": "stream", 335 | "text": [ 336 | "None\n", 337 | "CPU times: user 3.5 ms, sys: 1.62 ms, total: 5.11 ms\n", 338 | "Wall time: 4.32 ms\n" 339 | ] 340 | } 341 | ], 342 | "source": [ 343 | "%%time\n", 344 | "print(execute_query())" 345 | ] 346 | } 347 | ], 348 | "metadata": { 349 | "kernelspec": { 350 | "display_name": "Python 3", 351 | "language": "python", 352 | "name": "python3" 353 | }, 354 | "language_info": { 355 | "codemirror_mode": { 356 | "name": "ipython", 357 | "version": 3 358 | }, 359 | "file_extension": ".py", 360 | "mimetype": "text/x-python", 361 | "name": "python", 362 | "nbconvert_exporter": "python", 363 | "pygments_lexer": "ipython3", 364 | "version": "3.6.4" 365 | } 366 | }, 367 | "nbformat": 4, 368 | "nbformat_minor": 2 369 | } 370 | -------------------------------------------------------------------------------- /PITCHME.md: -------------------------------------------------------------------------------- 1 | 
# Wicked Production SQLAlchemy 2 | 3 | --- 4 | 5 | ![](cache.jpg) 6 | 7 | --- 8 | 9 | ![](lying.jpg) 10 | 11 | --- 12 | # Lying 13 | --- 14 | 15 | ```sql 16 | SELECT titel, 2011-Jahr AS alt, 'Jahre alt' AS Text 17 | FROM buch 18 | WHERE jahr > 1997 19 | ORDER BY alt DESC, titel 20 | 21 | SELECT B.buchid, B.titel, V.name, V.ort, B.jahr 22 | FROM buch B NATURAL JOIN verlag V 23 | WHERE V.name='Springer' AND B.jahr>=1990 24 | ORDER BY V.ort 25 | 26 | SELECT DISTINCT A.nachname, A.vornamen, A.autorid 27 | FROM autor A NATURAL JOIN buch_aut BA 28 | NATURAL JOIN buch_sw BS NATURAL JOIN schlagwort SW 29 | WHERE SW.schlagwort = 'Datenbank' 30 | ORDER BY A.nachname 31 | 32 | SELECT buchid, titel, jahr 33 | FROM buch 34 | WHERE jahr=(SELECT MIN(jahr) FROM buch) 35 | ``` 36 | --- 37 | 38 | ![](run_away.gif) 39 | 40 | --- 41 | 42 | # “Parallel” Queries... 43 | ### sort of... 44 | ### not really... 45 | # it's lying 46 | 47 | --- 48 | # gevent 49 | 50 | * Fast event loop 51 | * Lightweight execution units 52 | * Monkey patching utility 53 | 54 | --- 55 | 56 | ![Fit](gevent_flow_control.png) 57 | 58 | --- 59 | # Coloring PostgreSQL Green 60 | ```python 61 | def gevent_wait_callback(conn, timeout=None): 62 | """A wait callback useful to allow gevent to work with Psycopg.""" 63 | while True: 64 | state = conn.poll() 65 | if state == extensions.POLL_OK: 66 | break 67 | elif state == extensions.POLL_READ: 68 | wait_read(conn.fileno(), timeout=timeout) 69 | elif state == extensions.POLL_WRITE: 70 | wait_write(conn.fileno(), timeout=timeout) 71 | else: 72 | raise psycopg2.OperationalError( 73 | "Bad result from poll: %r" % state) 74 | ``` 75 | --- 76 | # Coloring PostgreSQL Green 77 | ```python 78 | def make_psycopg_green(): 79 | """Configure Psycopg to be used with gevent in non-blocking way.""" 80 | if not hasattr(extensions, 'set_wait_callback'): 81 | raise ImportError( 82 | "support for coroutines not available in this Psycopg version (%s)" 83 | % psycopg2.__version__) 84 | 85 | 
extensions.set_wait_callback(gevent_wait_callback) 86 | ``` 87 | --- 88 | # Building a Query Pool (__init__) 89 | ```python 90 | import gevent 91 | from gevent.queue import JoinableQueue, Queue 92 | class QueryPool(object): 93 | def __init__(self, queries, pool_size=5): 94 | self.queries = queries 95 | self.POOL_MAX = pool_size 96 | self.tasks = JoinableQueue() 97 | self.output_queue = Queue() 98 | ``` 99 | 100 | --- 101 | # Building a Query Pool (work) 102 | ```python 103 | def __query(self, query): 104 | conn = engine.connect() 105 | results = conn.execute(query).fetchall() 106 | return results 107 | ``` 108 | --- 109 | 110 | # Building a Query Pool (Executor) 111 | ```python 112 | def executor(self, number): 113 | while not self.tasks.empty(): 114 | query = self.tasks.get() 115 | try: 116 | results = self.__query(query) 117 | self.output_queue.put(results) 118 | except Exception as exc_info: 119 | print(exc_info) 120 | print('Query failed :(') 121 | self.tasks.task_done() 122 | ``` 123 | --- 124 | # Building a Query Pool (Overseer) 125 | ```python 126 | def overseer(self): 127 | for query in self.queries: 128 | self.tasks.put(query) 129 | ``` 130 | --- 131 | # Building a Query Pool (runner) 132 | ```python 133 | def run(self): 134 | self.running = [] 135 | gevent.spawn(self.overseer).join() 136 | for i in range(self.POOL_MAX): 137 | runner = gevent.spawn(self.executor, i) 138 | runner.start() 139 | self.running.append(runner) 140 | 141 | self.tasks.join() 142 | for runner in self.running: 143 | runner.kill() 144 | return [x for x in self.output_queue] 145 | ``` 146 | 147 | --- 148 | # Queries 149 | 150 | ```python 151 | query1 = select([pgbench_tellers]) 152 | query2 = select([pgbench_accounts.c.bid, func.count(1)]).group_by(pgbench_accounts.c.bid) 153 | query3 = select([pgbench_branches]) 154 | query4 = select([pgbench_accounts.c.bid]).distinct() 155 | query5 = select([pgbench_accounts]).limit(1000) 156 | query6 = select([pgbench_accounts.c.bid, func.count(1)] 
157 | ).group_by(pgbench_accounts.c.bid).limit(5000) 158 | 159 | queries = [query1, query2, query3, query4, query5, query6] 160 | ``` 161 | --- 162 | # Putting it all together 163 | 164 | ```python 165 | make_psycopg_green() 166 | results = QueryPool(queries).run() 167 | ``` 168 | 169 | --- 170 | 171 | # Well... is it worth it... 172 | ### Executing the 6 queries 173 | 174 | * 57s: Serially 175 | * 49.7s: Query pool 2 workers 176 | * 31.4s: Query pool 3 workers 177 | * 30s: Query pool 5 workers 178 | * 27.5s: Query pool with 6 workers 179 | 180 | --- 181 | 182 | ![Fit](complex_query.png) 183 | 184 | --- 185 | 186 | ![](dogpile.jpg) 187 | 188 | --- 189 | # Dogpile (regions) 190 | ```python 191 | regions = {} 192 | 193 | regions['default'] = make_region(async_creation_runner=async_creation_runner, 194 | key_mangler=mangle_key).configure( 195 | 'dogpile.cache.redis', 196 | arguments={ 197 | 'host': redis_host, 198 | 'port': redis_port, 199 | 'db': settings.CACHES['default']['OPTIONS']['DB'], 200 | 'redis_expiration_time': 60*60*2, # 2 hours 201 | 'distributed_lock': True, 202 | 'lock_timeout': 120, 203 | 'lock_sleep': 5 204 | } 205 | ) 206 | ``` 207 | --- 208 | # Dogpile (Dealing with Locking) 209 | ```python 210 | def async_creation_runner(cache, somekey, creator, mutex): 211 | def runner(): 212 | try: 213 | value = creator() 214 | cache.set(somekey, value) 215 | finally: 216 | mutex.release() 217 | 218 | thread = threading.Thread(target=runner) 219 | thread.start() 220 | ``` 221 | --- 222 | # Dogpile (Cache Keys) 223 | 224 | ```python 225 | def unicode_sha1_mangle_key(key): 226 | return sha1_mangle_key(clean_unicode(key)) 227 | 228 | 229 | def mangle_key(key): 230 | prefix, key = key.split(':', 1) 231 | base = 'cookie:cache:' 232 | if prefix: 233 | base += '{}'.format(prefix) 234 | else: 235 | raise ValueError(key) 236 | return '{}:{}'.format(base, unicode_sha1_mangle_key(key)) 237 | ``` 238 | --- 239 | 240 | # CachingQuery (__init__, __iter__) 241 | 242 | ```python 
243 | class CachingQuery(Query): 244 | 245 | def __init__(self, regions, *args, **kw): 246 | self.cache_regions = regions 247 | self.saved_to_cache = False 248 | Query.__init__(self, *args, **kw) 249 | 250 | def __iter__(self): 251 | if hasattr(self, '_cache_region'): 252 | return self.get_value( 253 | createfunc=lambda: list(Query.__iter__(self))) 254 | else: 255 | return Query.__iter__(self) 256 | ``` 257 | --- 258 | # CachingQuery (regions) 259 | 260 | ```python 261 | def _get_cache_plus_key(self): 262 | dogpile_region = self.cache_regions[self._cache_region.region] 263 | if self._cache_region.cache_key: 264 | key = self._cache_region.cache_key 265 | else: 266 | key = _key_from_query(self) 267 | return dogpile_region, key 268 | ``` 269 | --- 270 | # CachingQuery (Getter) 271 | 272 | ```python 273 | def get_value(self, merge=True, createfunc=None, 274 | expiration_time=None, ignore_expiration=False): 275 | dogpile_region, cache_key = self._get_cache_plus_key() 276 | 277 | assert not ignore_expiration or not createfunc, \ 278 | "Can't ignore expiration and also provide createfunc" 279 | 280 | if ignore_expiration or not createfunc: 281 | cached_value = dogpile_region.get( 282 | cache_key, 283 | expiration_time=expiration_time, 284 | ignore_expiration=ignore_expiration 285 | ) 286 | ``` 287 | --- 288 | # CachingQuery (Getter - cont) 289 | 290 | ```python 291 | else: 292 | try: 293 | cached_value = dogpile_region.get_or_create( 294 | cache_key, 295 | createfunc, 296 | expiration_time=expiration_time 297 | ) 298 | except ConnectionError: 299 | logger.error('Cannot connect to query caching backend!') 300 | cached_value = createfunc() 301 | if cached_value is NO_VALUE: 302 | raise KeyError(cache_key) 303 | if merge: 304 | cached_value = self.merge_result(cached_value, load=False) 305 | return cached_value 306 | ``` 307 | --- 308 | # CachingQuery (Setter) 309 | 310 | ```python 311 | def set_value(self, value): 312 | dogpile_region, cache_key = self._get_cache_plus_key() 
313 | try: 314 | dogpile_region.set(cache_key, value) 315 | self.saved_to_cache = True 316 | except ConnectionError: 317 | logger.error('Cannot connect to query caching backend!') 318 | 319 | ``` 320 | --- 321 | # CachingQuery (Key Generator) 322 | 323 | ```python 324 | def _key_from_query(query, qualifier=None): 325 | stmt = query.with_labels().statement 326 | compiled = stmt.compile() 327 | params = compiled.params 328 | 329 | return " ".join([clean_unicode(compiled)] + 330 | [clean_unicode(params[k]) for k in sorted(params)]) 331 | ``` 332 | --- 333 | # SQLAlchemy Options (FromQuery) 334 | ```python 335 | class FromCache(MapperOption): 336 | """Specifies that a Query should load results from a cache.""" 337 | 338 | propagate_to_loaders = False 339 | 340 | def __init__(self, region="default", cache_key=None, cache_prefix=None): 341 | self.region = region 342 | self.cache_key = cache_key 343 | self.cache_prefix = cache_prefix 344 | 345 | def process_query(self, query): 346 | query._cache_region = self 347 | ``` 348 | --- 349 | # Callable 350 | ```python 351 | def query_callable(regions, query_cls=CachingQuery): 352 | def query(*arg, **kw): 353 | return query_cls(regions, *arg, **kw) 354 | return query 355 | ``` 356 | --- 357 | # Putting it together (Session) 358 | 359 | ```python 360 | def init_caching_session(engine=None): 361 | if not engine: 362 | return 363 | 364 | return sessionmaker( 365 | bind=engine, autoflush=False, autocommit=False, 366 | query_cls=query_callable(regions) 367 | ) 368 | 369 | CachingSession = init_caching_session(engine) 370 | caching_session=CachingSession() 371 | ``` 372 | --- 373 | # Putting it together (Query) 374 | 375 | ```python 376 | query = caching_session.query(Accounts.bid, func.count(1) 377 | ).group_by(Accounts.bid).limit(5000).options( 378 | FromCache('default')) 379 | ``` 380 | --- 381 | # Well... is it worth it... 
382 | ### Executing the query 383 | 384 | * 24.9s: Uncached 385 | 386 | * 24.9s: Initial run of caching_query 387 | * 4.32 ms: Second run of caching_query 388 | -------------------------------------------------------------------------------- /Pooling.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Intro to SQLAlchemy and Gevent\n", 8 | "The following code was written to use the pgbench database. In this case it was created with the following command: pgbench -i -s 400 pgbench. The timings in this file were from a Retina Macbook Pro 2017 with an i5 running at 2.5Ghz and 8GB of RAM." 9 | ] 10 | }, 11 | { 12 | "cell_type": "code", 13 | "execution_count": 1, 14 | "metadata": {}, 15 | "outputs": [], 16 | "source": [ 17 | "from sqlalchemy import MetaData, create_engine\n", 18 | "metadata = MetaData()\n", 19 | "engine = create_engine('postgresql+psycopg2://jasonmyers@localhost:5432/pgbench')" 20 | ] 21 | }, 22 | { 23 | "cell_type": "code", 24 | "execution_count": 2, 25 | "metadata": {}, 26 | "outputs": [], 27 | "source": [ 28 | "from sqlalchemy import Table\n", 29 | "pgbench_accounts = Table('pgbench_accounts', metadata, autoload=True, autoload_with=engine)\n", 30 | "pgbench_tellers = Table('pgbench_tellers', metadata, autoload=True, autoload_with=engine)\n", 31 | "pgbench_branches = Table('pgbench_branches', metadata, autoload=True, autoload_with=engine)" 32 | ] 33 | }, 34 | { 35 | "cell_type": "code", 36 | "execution_count": 4, 37 | "metadata": {}, 38 | "outputs": [], 39 | "source": [ 40 | "from sqlalchemy import select, func" 41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": 5, 46 | "metadata": {}, 47 | "outputs": [], 48 | "source": [ 49 | "query1 = select([pgbench_tellers])\n", 50 | "query2 = select([pgbench_accounts.c.bid, func.count(1)]).group_by(pgbench_accounts.c.bid)\n", 51 | "query3 = 
select([pgbench_branches])\n", 52 | "query4 = select([pgbench_accounts.c.bid]).distinct()\n", 53 | "query5 = select([pgbench_accounts]).limit(1000)\n", 54 | "query6 = select([pgbench_accounts.c.bid, func.count(1)]\n", 55 | " ).group_by(pgbench_accounts.c.bid).limit(5000)\n", 56 | "\n", 57 | "queries = [query1, query2, query3, query4, query5, query6]" 58 | ] 59 | }, 60 | { 61 | "cell_type": "code", 62 | "execution_count": 6, 63 | "metadata": {}, 64 | "outputs": [], 65 | "source": [ 66 | "def execute_six_queries():\n", 67 | " results = {}\n", 68 | " for idx, query in enumerate(queries):\n", 69 | " conn = engine.connect()\n", 70 | " results[idx] = conn.execute(query).fetchall()" 71 | ] 72 | }, 73 | { 74 | "cell_type": "code", 75 | "execution_count": 8, 76 | "metadata": {}, 77 | "outputs": [ 78 | { 79 | "name": "stdout", 80 | "output_type": "stream", 81 | "text": [ 82 | "CPU times: user 11.4 ms, sys: 3.65 ms, total: 15 ms\n", 83 | "Wall time: 57 s\n" 84 | ] 85 | } 86 | ], 87 | "source": [ 88 | "%%time \n", 89 | "execute_six_queries()" 90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": 10, 95 | "metadata": {}, 96 | "outputs": [], 97 | "source": [ 98 | "import psycopg2\n", 99 | "from psycopg2 import extensions\n", 100 | "\n", 101 | "from gevent.socket import wait_read, wait_write\n", 102 | "\n", 103 | "def make_psycopg_green():\n", 104 | " \"\"\"Configure Psycopg to be used with gevent in non-blocking way.\"\"\"\n", 105 | " if not hasattr(extensions, 'set_wait_callback'):\n", 106 | " raise ImportError(\n", 107 | " \"support for coroutines not available in this Psycopg version (%s)\"\n", 108 | " % psycopg2.__version__)\n", 109 | "\n", 110 | " extensions.set_wait_callback(gevent_wait_callback)\n", 111 | "\n", 112 | "def gevent_wait_callback(conn, timeout=None):\n", 113 | " \"\"\"A wait callback useful to allow gevent to work with Psycopg.\"\"\"\n", 114 | " while True:\n", 115 | " state = conn.poll()\n", 116 | " if state == extensions.POLL_OK:\n", 117 | 
" break\n", 118 | " elif state == extensions.POLL_READ:\n", 119 | " wait_read(conn.fileno(), timeout=timeout)\n", 120 | " elif state == extensions.POLL_WRITE:\n", 121 | " wait_write(conn.fileno(), timeout=timeout)\n", 122 | " else:\n", 123 | " raise psycopg2.OperationalError(\n", 124 | " \"Bad result from poll: %r\" % state)" 125 | ] 126 | }, 127 | { 128 | "cell_type": "code", 129 | "execution_count": 12, 130 | "metadata": {}, 131 | "outputs": [], 132 | "source": [ 133 | "import gevent\n", 134 | "from gevent.queue import JoinableQueue, Queue\n", 135 | "\n", 136 | "class QueryPool(object):\n", 137 | " def __init__(self, queries, pool_size=5):\n", 138 | " self.queries = queries\n", 139 | " self.POOL_MAX = pool_size\n", 140 | " self.tasks = JoinableQueue()\n", 141 | " self.output_queue = Queue()\n", 142 | "\n", 143 | " def __query(self, query):\n", 144 | " conn = engine.connect()\n", 145 | " results = conn.execute(query).fetchall()\n", 146 | " return results\n", 147 | "\n", 148 | " def executor(self, number):\n", 149 | " while not self.tasks.empty():\n", 150 | " query = self.tasks.get()\n", 151 | " try:\n", 152 | " results = self.__query(query)\n", 153 | " self.output_queue.put(results)\n", 154 | " except Exception as exc_info:\n", 155 | " print(exc_info)\n", 156 | " self.tasks.task_done()\n", 157 | "\n", 158 | " def overseer(self):\n", 159 | " for query in self.queries:\n", 160 | " self.tasks.put(query)\n", 161 | "\n", 162 | " def run(self):\n", 163 | " self.running = []\n", 164 | " gevent.spawn(self.overseer).join()\n", 165 | " for i in range(self.POOL_MAX):\n", 166 | " runner = gevent.spawn(self.executor, i)\n", 167 | " runner.start()\n", 168 | " self.running.append(runner)\n", 169 | "\n", 170 | " self.tasks.join()\n", 171 | " for runner in self.running:\n", 172 | " runner.kill()" 173 | ] 174 | }, 175 | { 176 | "cell_type": "code", 177 | "execution_count": 13, 178 | "metadata": {}, 179 | "outputs": [], 180 | "source": [ 181 | "make_psycopg_green()" 182 | ] 183 | }, 
184 | { 185 | "cell_type": "code", 186 | "execution_count": 14, 187 | "metadata": {}, 188 | "outputs": [ 189 | { 190 | "name": "stdout", 191 | "output_type": "stream", 192 | "text": [ 193 | "CPU times: user 15.9 ms, sys: 5.24 ms, total: 21.1 ms\n", 194 | "Wall time: 30 s\n" 195 | ] 196 | } 197 | ], 198 | "source": [ 199 | "%%time\n", 200 | "QueryPool(queries).run()" 201 | ] 202 | }, 203 | { 204 | "cell_type": "code", 205 | "execution_count": 15, 206 | "metadata": {}, 207 | "outputs": [ 208 | { 209 | "name": "stdout", 210 | "output_type": "stream", 211 | "text": [ 212 | "CPU times: user 15.2 ms, sys: 4.79 ms, total: 20 ms\n", 213 | "Wall time: 31.4 s\n" 214 | ] 215 | } 216 | ], 217 | "source": [ 218 | "%%time\n", 219 | "QueryPool(queries, 3).run()" 220 | ] 221 | }, 222 | { 223 | "cell_type": "code", 224 | "execution_count": 16, 225 | "metadata": {}, 226 | "outputs": [ 227 | { 228 | "name": "stdout", 229 | "output_type": "stream", 230 | "text": [ 231 | "CPU times: user 13.3 ms, sys: 3.76 ms, total: 17 ms\n", 232 | "Wall time: 49.7 s\n" 233 | ] 234 | } 235 | ], 236 | "source": [ 237 | "%%time\n", 238 | "QueryPool(queries, 2).run()" 239 | ] 240 | }, 241 | { 242 | "cell_type": "code", 243 | "execution_count": 17, 244 | "metadata": {}, 245 | "outputs": [ 246 | { 247 | "name": "stdout", 248 | "output_type": "stream", 249 | "text": [ 250 | "CPU times: user 14.7 ms, sys: 4.75 ms, total: 19.5 ms\n", 251 | "Wall time: 27.5 s\n" 252 | ] 253 | } 254 | ], 255 | "source": [ 256 | "%%time\n", 257 | "QueryPool(queries, 6).run()" 258 | ] 259 | }, 260 | { 261 | "cell_type": "code", 262 | "execution_count": null, 263 | "metadata": {}, 264 | "outputs": [], 265 | "source": [] 266 | } 267 | ], 268 | "metadata": { 269 | "kernelspec": { 270 | "display_name": "Python 3", 271 | "language": "python", 272 | "name": "python3" 273 | }, 274 | "language_info": { 275 | "codemirror_mode": { 276 | "name": "ipython", 277 | "version": 3 278 | }, 279 | "file_extension": ".py", 280 | "mimetype": 
"text/x-python", 281 | "name": "python", 282 | "nbconvert_exporter": "python", 283 | "pygments_lexer": "ipython3", 284 | "version": "3.6.4" 285 | } 286 | }, 287 | "nbformat": 4, 288 | "nbformat_minor": 2 289 | } 290 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # wicked-sqlalchemy 2 | -------------------------------------------------------------------------------- /breath.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jasonamyers/wicked-sqlalchemy/61d8704691190496ffc9fe00e685744441cdb22e/breath.gif -------------------------------------------------------------------------------- /bulk.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jasonamyers/wicked-sqlalchemy/61d8704691190496ffc9fe00e685744441cdb22e/bulk.jpg -------------------------------------------------------------------------------- /but-wait.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jasonamyers/wicked-sqlalchemy/61d8704691190496ffc9fe00e685744441cdb22e/but-wait.jpg -------------------------------------------------------------------------------- /cache.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jasonamyers/wicked-sqlalchemy/61d8704691190496ffc9fe00e685744441cdb22e/cache.gif -------------------------------------------------------------------------------- /cache.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jasonamyers/wicked-sqlalchemy/61d8704691190496ffc9fe00e685744441cdb22e/cache.jpg -------------------------------------------------------------------------------- /complex_query.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/jasonamyers/wicked-sqlalchemy/61d8704691190496ffc9fe00e685744441cdb22e/complex_query.png -------------------------------------------------------------------------------- /cookie.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jasonamyers/wicked-sqlalchemy/61d8704691190496ffc9fe00e685744441cdb22e/cookie.gif -------------------------------------------------------------------------------- /dogpile.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jasonamyers/wicked-sqlalchemy/61d8704691190496ffc9fe00e685744441cdb22e/dogpile.jpg -------------------------------------------------------------------------------- /gevent_flow_control.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jasonamyers/wicked-sqlalchemy/61d8704691190496ffc9fe00e685744441cdb22e/gevent_flow_control.png -------------------------------------------------------------------------------- /green.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jasonamyers/wicked-sqlalchemy/61d8704691190496ffc9fe00e685744441cdb22e/green.png -------------------------------------------------------------------------------- /lying.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jasonamyers/wicked-sqlalchemy/61d8704691190496ffc9fe00e685744441cdb22e/lying.jpg -------------------------------------------------------------------------------- /marvin.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jasonamyers/wicked-sqlalchemy/61d8704691190496ffc9fe00e685744441cdb22e/marvin.JPG 
-------------------------------------------------------------------------------- /pool.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jasonamyers/wicked-sqlalchemy/61d8704691190496ffc9fe00e685744441cdb22e/pool.jpg -------------------------------------------------------------------------------- /presentation.md: -------------------------------------------------------------------------------- 1 | # Wicked Production SQLAlchemy 2 | 3 | --- 4 | 5 | # Lying 6 | 7 | --- 8 | 9 | # Caching 10 | 11 | --- 12 | ![](lying.jpg) 13 | 14 | --- 15 | 16 | 17 | ``` 18 | SELECT titel, 2011-Jahr AS alt, 'Jahre alt' AS Text 19 | FROM buch 20 | WHERE jahr > 1997 21 | ORDER BY alt DESC, titel 22 | 23 | SELECT B.buchid, B.titel, V.name, V.ort, B.jahr 24 | FROM buch B NATURAL JOIN verlag V 25 | WHERE V.name='Springer' AND B.jahr>=1990 26 | ORDER BY V.ort 27 | 28 | SELECT DISTINCT A.nachname, A.vornamen, A.autorid 29 | FROM autor A NATURAL JOIN buch_aut BA 30 | NATURAL JOIN buch_sw BS NATURAL JOIN schlagwort SW 31 | WHERE SW.schlagwort = 'Datenbank' 32 | ORDER BY A.nachname 33 | 34 | SELECT buchid, titel, jahr 35 | FROM buch 36 | WHERE jahr=(SELECT MIN(jahr) FROM buch) 37 | ``` 38 | --- 39 | 40 | ![fit](run_away.gif) 41 | 42 | --- 43 | 44 | ![fit](marvin.JPG) 45 | 46 | --- 47 | 48 | # “Parallel” Queries... 49 | ## sort of... 50 | ### not really... 
51 | 52 | --- 53 | # gevent 54 | 55 | * Fast event loop 56 | * Lightweight execution units 57 | * Monkey patching utility 58 | 59 | --- 60 | 61 | ![Fit](gevent_flow_control.png) 62 | 63 | --- 64 | 65 | ![fit](green.png) 66 | 67 | --- 68 | 69 | # Coloring PostgreSQL Green 70 | ``` 71 | def gevent_wait_callback(conn, timeout=None): 72 | """A wait callback useful to allow gevent to work with Psycopg.""" 73 | while True: 74 | state = conn.poll() 75 | if state == extensions.POLL_OK: 76 | break 77 | elif state == extensions.POLL_READ: 78 | wait_read(conn.fileno(), timeout=timeout) 79 | elif state == extensions.POLL_WRITE: 80 | wait_write(conn.fileno(), timeout=timeout) 81 | else: 82 | raise psycopg2.OperationalError( 83 | "Bad result from poll: %r" % state) 84 | ``` 85 | --- 86 | 87 | # Coloring PostgreSQL Green 88 | ``` 89 | def make_psycopg_green(): 90 | """Configure Psycopg to be used with gevent in non-blocking way.""" 91 | if not hasattr(extensions, 'set_wait_callback'): 92 | raise ImportError( 93 | "support for coroutines not available in this Psycopg version (%s)" 94 | % psycopg2.__version__) 95 | 96 | extensions.set_wait_callback(gevent_wait_callback) 97 | ``` 98 | 99 | --- 100 | 101 | ![fit](pool.jpg) 102 | 103 | --- 104 | # Building a Query Pool (\_\_init\_\_) 105 | ``` 106 | import gevent 107 | from gevent.queue import JoinableQueue, Queue 108 | 109 | class QueryPool(object): 110 | def __init__(self, queries, pool_size=5): 111 | self.queries = queries 112 | self.POOL_MAX = pool_size 113 | self.tasks = JoinableQueue() 114 | self.output_queue = Queue() 115 | ``` 116 | 117 | --- 118 | # Building a Query Pool (work) 119 | ``` 120 | def __query(self, query): 121 | conn = engine.connect() 122 | results = conn.execute(query).fetchall() 123 | return results 124 | ``` 125 | --- 126 | 127 | # Building a Query Pool (Executor) 128 | ``` 129 | def executor(self, number): 130 | while not self.tasks.empty(): 131 | query = self.tasks.get() 132 | try: 133 | results = 
self.__query(query) 134 | self.output_queue.put(results) 135 | except Exception as exc_info: 136 | print exc_info 137 | print 'Query failed :(' 138 | self.tasks.task_done() 139 | ``` 140 | --- 141 | # Building a Query Pool (Overseer) 142 | ``` 143 | def overseer(self): 144 | for query in self.queries: 145 | self.tasks.put(query) 146 | ``` 147 | --- 148 | # Building a Query Pool (runner) 149 | ``` 150 | def run(self): 151 | self.running = [] 152 | gevent.spawn(self.overseer).join() 153 | for i in range(self.POOL_MAX): 154 | runner = gevent.spawn(self.executor, i) 155 | runner.start() 156 | self.running.append(runner) 157 | 158 | self.tasks.join() 159 | for runner in self.running: 160 | runner.kill() 161 | return [x for x in self.output_queue] 162 | ``` 163 | 164 | --- 165 | # Queries 166 | 167 | ``` 168 | query1 = select([pgbench_tellers]) 169 | query2 = select([pgbench_accounts.c.bid, func.count(1)]).group_by(pgbench_accounts.c.bid) 170 | query3 = select([pgbench_branches]) 171 | query4 = select([pgbench_accounts.c.bid]).distinct() 172 | query5 = select([pgbench_accounts]).limit(1000) 173 | query6 = select([pgbench_accounts.c.bid, func.count(1)] 174 | ).group_by(pgbench_accounts.c.bid).limit(5000) 175 | 176 | queries = [query1, query2, query3, query4, query5, query6] 177 | ``` 178 | --- 179 | # Putting it all together 180 | 181 | ``` 182 | make_psycopg_green() 183 | results = QueryPool(queries).run() 184 | ``` 185 | 186 | --- 187 | 188 | # Well... is it worth it... 
189 | ### Executing the 6 queries 190 | 191 | * 57s: Serially 192 | * 49.7s: Query pool 2 workers 193 | * 31.4s: Query pool 3 workers 194 | * 30s: Query pool 5 workers 195 | * 27.5s: Query pool with 6 workers 196 | 197 | --- 198 | ![fit](rich.gif) 199 | 200 | --- 201 | 202 | ![fit](complex_query.png) 203 | 204 | --- 205 | 206 | ![fit](dogpile.jpg) 207 | 208 | --- 209 | # Dogpile (regions) 210 | ``` 211 | regions = {} 212 | 213 | regions['default'] = make_region(async_creation_runner=async_creation_runner, 214 | key_mangler=mangle_key).configure( 215 | 'dogpile.cache.redis', 216 | arguments={ 217 | 'host': redis_host, 218 | 'port': redis_port, 219 | 'db': 0, 220 | 'redis_expiration_time': 60*60*2, # 2 hours 221 | 'distributed_lock': True, 222 | 'lock_timeout': 120, 223 | 'lock_sleep': 5 224 | } 225 | ) 226 | ``` 227 | --- 228 | # Dogpile (Creating cache objects) 229 | ``` 230 | def async_creation_runner(cache, somekey, creator, mutex): 231 | def runner(): 232 | try: 233 | value = creator() 234 | cache.set(somekey, value) 235 | finally: 236 | mutex.release() 237 | 238 | thread = threading.Thread(target=runner) 239 | thread.start() 240 | ``` 241 | --- 242 | # Dogpile (Building Cache Keys) 243 | 244 | ``` 245 | def unicode_sha1_mangle_key(key): 246 | return sha1_mangle_key(key.encode('ascii', 'ignore')) 247 | 248 | 249 | def mangle_key(key): 250 | prefix, key = key.split(':', 1) 251 | base = 'cookie:cache:' 252 | if prefix: 253 | base += '{}'.format(prefix) 254 | else: 255 | raise ValueError(key) 256 | return '{}:{}'.format(base, unicode_sha1_mangle_key(key)) 257 | ``` 258 | --- 259 | 260 | ![](cache.gif) 261 | 262 | --- 263 | # CachingQuery (\_\_init\_\_, \_\_iter\_\_) 264 | 265 | ``` 266 | class CachingQuery(Query): 267 | 268 | def __init__(self, regions, *args, **kw): 269 | self.cache_regions = regions 270 | self.saved_to_cache = False 271 | Query.__init__(self, *args, **kw) 272 | 273 | def __iter__(self): 274 | if hasattr(self, '_cache_region'): 275 | return 
self.get_value( 276 | createfunc=lambda: list(Query.__iter__(self))) 277 | else: 278 | return Query.__iter__(self) 279 | ``` 280 | --- 281 | # CachingQuery (regions) 282 | 283 | ``` 284 | def _get_cache_plus_key(self): 285 | dogpile_region = self.cache_regions[self._cache_region.region] 286 | if self._cache_region.cache_key: 287 | key = self._cache_region.cache_key 288 | else: 289 | key = _key_from_query(self) 290 | return dogpile_region, key 291 | ``` 292 | --- 293 | # CachingQuery (Getter) 294 | 295 | ``` 296 | def get_value(self, merge=True, createfunc=None, 297 | expiration_time=None, ignore_expiration=False): 298 | dogpile_region, cache_key = self._get_cache_plus_key() 299 | 300 | assert not ignore_expiration or not createfunc, \ 301 | "Can't ignore expiration and also provide createfunc" 302 | 303 | if ignore_expiration or not createfunc: 304 | cached_value = dogpile_region.get( 305 | cache_key, 306 | expiration_time=expiration_time, 307 | ignore_expiration=ignore_expiration 308 | ) 309 | ``` 310 | --- 311 | # CachingQuery (Getter - cont) 312 | 313 | ``` 314 | else: 315 | try: 316 | cached_value = dogpile_region.get_or_create( 317 | cache_key, 318 | createfunc, 319 | expiration_time=expiration_time 320 | ) 321 | except ConnectionError: 322 | logger.error('Cannot connect to query caching backend!') 323 | cached_value = createfunc() 324 | if cached_value is NO_VALUE: 325 | raise KeyError(cache_key) 326 | if merge: 327 | cached_value = self.merge_result(cached_value, load=False) 328 | return cached_value 329 | ``` 330 | --- 331 | # CachingQuery (Setter) 332 | 333 | ``` 334 | def set_value(self, value): 335 | dogpile_region, cache_key = self._get_cache_plus_key() 336 | try: 337 | dogpile_region.set(cache_key, value) 338 | self.saved_to_cache = True 339 | except ConnectionError: 340 | logger.error('Cannot connect to query caching backend!') 341 | 342 | ``` 343 | --- 344 | # CachingQuery (Key Generator) 345 | 346 | ``` 347 | def _key_from_query(query, 
qualifier=None): 348 | stmt = query.with_labels().statement 349 | compiled = stmt.compile() 350 | params = compiled.params 351 | 352 | return " ".join([str(compiled)] + 353 | [str(params[k]) for k in sorted(params)]) 354 | ``` 355 | --- 356 | 357 | ![fit](breath.gif) 358 | 359 | --- 360 | # SQLAlchemy Options (FromQuery) 361 | ``` 362 | class FromCache(MapperOption): 363 | """Specifies that a Query should load results from a cache.""" 364 | 365 | propagate_to_loaders = False 366 | 367 | def __init__(self, region="default", cache_key=None, cache_prefix=None): 368 | self.region = region 369 | self.cache_key = cache_key 370 | self.cache_prefix = cache_prefix 371 | 372 | def process_query(self, query): 373 | query._cache_region = self 374 | ``` 375 | --- 376 | # Callable 377 | ``` 378 | def query_callable(regions, query_cls=CachingQuery): 379 | def query(*arg, **kw): 380 | return query_cls(regions, *arg, **kw) 381 | return query 382 | ``` 383 | --- 384 | # Putting it together (Session) 385 | 386 | ``` 387 | def init_caching_session(engine=None): 388 | if not engine: 389 | return 390 | 391 | return sessionmaker( 392 | bind=engine, autoflush=False, autocommit=False, 393 | query_cls=query_callable(regions) 394 | ) 395 | 396 | CachingSession = init_caching_session(engine) 397 | caching_session=CachingSession() 398 | ``` 399 | --- 400 | # Putting it together (Query) 401 | 402 | ``` 403 | query = caching_session.query(Accounts.bid, func.count(1) 404 | ).group_by(Accounts.bid).limit(5000).options( 405 | FromCache('default')) 406 | ``` 407 | --- 408 | # Well... is it worth it... 
409 | ### Executing the query 410 | 411 | * 24.9s: Uncached 412 | * 24.9s: Initial run of caching_query 413 | * 4.32 ms: Second run of caching_query 414 | 415 | --- 416 | 417 | ![fit](but-wait.jpg) 418 | 419 | --- 420 | 421 | # Baked queries 422 | 423 | ![inline](cookie.gif) 424 | 425 | --- 426 | 427 | # Bulk Operations 428 | 429 | ![inline](bulk.jpg) 430 | 431 | --- 432 | 433 | ![inline](thanks.gif) 434 | 435 | ``` 436 | http://github.com/jasonamyers/wicked-sqlalchemy 437 | ``` 438 | -------------------------------------------------------------------------------- /presentation.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jasonamyers/wicked-sqlalchemy/61d8704691190496ffc9fe00e685744441cdb22e/presentation.pdf -------------------------------------------------------------------------------- /rich.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jasonamyers/wicked-sqlalchemy/61d8704691190496ffc9fe00e685744441cdb22e/rich.gif -------------------------------------------------------------------------------- /run_away.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jasonamyers/wicked-sqlalchemy/61d8704691190496ffc9fe00e685744441cdb22e/run_away.gif -------------------------------------------------------------------------------- /thanks.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jasonamyers/wicked-sqlalchemy/61d8704691190496ffc9fe00e685744441cdb22e/thanks.gif --------------------------------------------------------------------------------