├── .circleci └── config.yml ├── .codeclimate.yml ├── .github └── workflows │ └── codeql-analysis.yml ├── .gitignore ├── CHANGELOG.md ├── LICENSE ├── README.md ├── examples ├── simple_loop.py └── wait_for_tokens.py ├── limitlion ├── __init__.py ├── running_counter.py ├── throttle.lua └── throttle.py ├── pyproject.toml ├── requirements.txt ├── requirements_test.txt ├── setup.cfg ├── setup.py └── tests ├── conftest.py ├── test_running_counter.py └── test_throttle.py /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2.1 2 | 3 | executors: 4 | python: 5 | parameters: 6 | python_version: 7 | type: string 8 | docker: 9 | - image: python:<< parameters.python_version >> 10 | - image: redis:3.2 11 | 12 | jobs: 13 | test: 14 | parameters: 15 | python_version: 16 | type: string 17 | downgrade_redis: 18 | type: boolean 19 | default: false 20 | working_directory: /home/ubuntu/ 21 | executor: 22 | name: python 23 | python_version: << parameters.python_version >> 24 | steps: 25 | - checkout 26 | - run: 27 | name: Prepare environment 28 | command: | 29 | apt-get update 30 | apt-get install -y lua5.1 luarocks 31 | pip install flake8 flake8-docstrings flake8-polyfill pep8 pep8-naming isort 32 | pip install --no-deps -r requirements.txt 33 | pip install -r requirements_test.txt 34 | luarocks install luacheck 35 | - when: 36 | condition: << parameters.downgrade_redis >> 37 | steps: 38 | - run: 39 | name: Downgrade Redis package 40 | command: pip install redis==2.10.6 41 | - run: 42 | name: Linting 43 | command: | 44 | flake8 45 | isort -rc -c . 46 | luacheck --max-cyclomatic-complexity 11 --globals redis ARGV KEYS -r limitlion 47 | - run: 48 | name: Run tests 49 | command: | 50 | PYTHONPATH=. pytest --cov=limitlion --cov-report=xml 51 | 52 | workflows: 53 | workflow: 54 | jobs: 55 | - test: 56 | matrix: 57 | parameters: 58 | python_version: ["3.8", "3.9", "3.10"] 59 | downgrade_redis: [true, false] 60 | 61 | 62 | -------------------------------------------------------------------------------- /.codeclimate.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | checks: 3 | argument-count: 4 | config: 5 | threshold: 5 6 | plugins: 7 | SonarPython: 8 | enabled: true 9 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | name: "CodeQL" 7 | 8 | on: 9 | push: 10 | branches: [master] 11 | pull_request: 12 | # The branches below must be a subset of the branches above 13 | branches: [master] 14 | schedule: 15 | - cron: '0 21 * * 6' 16 | 17 | jobs: 18 | analyze: 19 | name: Analyze 20 | runs-on: ubuntu-latest 21 | 22 | strategy: 23 | fail-fast: false 24 | matrix: 25 | # Override automatic language detection by changing the below list 26 | # Supported options are ['csharp', 'cpp', 'go', 'java', 'javascript', 'python'] 27 | language: ['python'] 28 | # Learn more... 
29 | # https://docs.github.com/en/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#overriding-automatic-language-detection 30 | 31 | steps: 32 | - name: Checkout repository 33 | uses: actions/checkout@v2 34 | with: 35 | # We must fetch at least the immediate parents so that if this is 36 | # a pull request then we can checkout the head. 37 | fetch-depth: 2 38 | 39 | # If this run was triggered by a pull request event, then checkout 40 | # the head of the pull request instead of the merge commit. 41 | - run: git checkout HEAD^2 42 | if: ${{ github.event_name == 'pull_request' }} 43 | 44 | # Initializes the CodeQL tools for scanning. 45 | - name: Initialize CodeQL 46 | uses: github/codeql-action/init@v1 47 | with: 48 | languages: ${{ matrix.language }} 49 | # If you wish to specify custom queries, you can do so here or in a config file. 50 | # By default, queries listed here will override any specified in a config file. 51 | # Prefix the list here with "+" to use these queries and those in the config file. 52 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 53 | 54 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 55 | # If this step fails, then you should remove it and run the build manually (see below) 56 | - name: Autobuild 57 | uses: github/codeql-action/autobuild@v1 58 | 59 | # ℹ️ Command-line programs to run using the OS shell. 60 | # 📚 https://git.io/JvXDl 61 | 62 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 63 | # and modify them (or add more) to build your code if your project 64 | # uses a compiled language 65 | 66 | #- run: | 67 | # make bootstrap 68 | # make release 69 | 70 | - name: Perform CodeQL Analysis 71 | uses: github/codeql-action/analyze@v1 72 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | MANIFEST 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | .pytest_cache/ 49 | 50 | # Translations 51 | *.mo 52 | *.pot 53 | 54 | # Django stuff: 55 | *.log 56 | local_settings.py 57 | db.sqlite3 58 | 59 | # Flask stuff: 60 | instance/ 61 | .webassets-cache 62 | 63 | # Scrapy stuff: 64 | .scrapy 65 | 66 | # Sphinx documentation 67 | docs/_build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # Jupyter Notebook 73 | .ipynb_checkpoints 74 | 75 | # pyenv 76 | .python-version 77 | 78 | # celery beat schedule file 79 | celerybeat-schedule 80 | 81 | # SageMath parsed files 82 | *.sage.py 83 | 84 | # Environments 85 | .env 86 | .venv 87 | env/ 88 | venv/ 89 | ENV/ 90 | env.bak/ 91 | venv.bak/ 92 | 93 | # Spyder project settings 94 | .spyderproject 95 | .spyproject 96 | 97 | # Rope project settings 98 | .ropeproject 99 | 100 | # mkdocs documentation 101 | /site 102 | 103 | # mypy 104 | .mypy_cache/ 105 | 106 | .idea 107 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## Version 1.0.0 4 | 5 | ### Added 6 | 7 | * Running counter 8 | 9 | ## Changed 10 | 11 | * Removed Python 2.7 support -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Close.io 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | [![CircleCI](https://circleci.com/gh/closeio/limitlion.svg?style=svg)](https://circleci.com/gh/closeio/limitlion) 3 | # LimitLion 4 | 5 | A token bucket rate limiting throttle using Redis as the backend. Inspired by 6 | Stripe's [Scaling your API with rate limiters](https://stripe.com/blog/rate-limiters) 7 | blog post. Can be used to control processing rates from one to many processes. 8 | Potential implementations include protecting databases from high processing rates, 9 | orchestrating queue consumer processes, or enforcing HTTP request rate limits. 
10 | 
11 | Install with: `pip install limitlion`
12 | 
13 | The following is a simple example of a throttle named `test` that allows `5` requests per second (RPS) with
14 | a burst factor of `2` using an `8` second window and requesting `1` token (default)
15 | for each unit of work. Look in the `examples` directory for more.
16 | 
17 | ```py
18 | redis = redis.Redis('localhost', 6379)
19 | throttle_configure(redis)
20 | while True:
21 |     allowed, tokens, sleep = throttle('test', 5, 2, 8)
22 |     if allowed:
23 |         print('Do work here')
24 |     else:
25 |         print('Sleeping {}'.format(sleep))
26 |         time.sleep(sleep)
27 | ```
28 | 
29 | ## Design
30 | The rate limiting logic uses a classic token bucket algorithm but is implemented
31 | entirely as a Lua Redis script. It leverages the Redis [TIME](https://redis.io/commands/time)
32 | command, which ensures fair microsecond resolution across all callers independent
33 | of the caller's clock. Note that buckets start and end on whole seconds.
34 | 
35 | Redis 3.2+ is required because `replicate_commands()` is used to support using
36 | the `TIME` command in a Lua script.
37 | 
38 | ## Configuring
39 | Default values for RPS, burst factor and window size are supplied to the throttle
40 | Lua script. The Lua script creates a `throttle:[throttle name]:knobs` hash with
41 | these values if it does not yet exist in Redis. The script then uses the values
42 | in that `knobs` hash for the token bucket calculations. Each call also sets the
43 | TTL for the `knobs` key to 7 days so it will remain in Redis as long as the
44 | throttle has been active in the last week.
45 | 
46 | Since these settings are stored in Redis, a separate process can be used to adjust
47 | them on the fly. This could be as simple as manually issuing a Redis command to
48 | change the RPS, or a more sophisticated process that polls Prometheus metrics to
49 | determine the current load on your database and adjusts the RPS accordingly.
50 | 
51 | # Running Counter
52 | The running counter is another small but useful tool that keeps track of counts
53 | in Redis for specified time windows. These counts can then be used to make
54 | decisions on limiting or failing processes as well as for diagnostics. Check out
55 | [`running_counter.py`](limitlion/running_counter.py) for details.
--------------------------------------------------------------------------------
/examples/simple_loop.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """
3 | Throttle example.
4 | 
5 | Simple loop using a throttle with 5 RPS, burst of 4 and 2 second window. Run
6 | multiple of these to get an idea how it limits all processes to 5 RPS after
7 | the burst tokens are consumed.
8 | """
9 | 
10 | import datetime
11 | import time
12 | 
13 | import redis
14 | 
15 | from limitlion import throttle, throttle_configure
16 | 
17 | redis = redis.Redis('localhost', 6379)
18 | 
19 | throttle_configure(redis)
20 | 
21 | i = 0
22 | while True:
23 |     allowed, tokens, sleep = throttle('test_simple', 5, 4, 2)
24 |     if allowed:
25 |         i += 1
26 |         print(
27 |             '{}-{} Work number {}'.format(datetime.datetime.now(), tokens, i)
28 |         )
29 |     else:
30 |         print('Sleeping {}'.format(sleep))
31 |         time.sleep(sleep)
32 |         i = 0
33 | 
--------------------------------------------------------------------------------
/examples/wait_for_tokens.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """
3 | Waiting throttle example.
4 | 5 | Uses the throttle_wait helper that will sleep until work is allowed. 6 | """ 7 | 8 | import datetime 9 | 10 | import redis 11 | 12 | from limitlion import throttle_configure, throttle_wait 13 | 14 | redis = redis.Redis('localhost', 6379) 15 | 16 | throttle_configure(redis) 17 | throttle = throttle_wait('test_wait', rps=5) 18 | 19 | while True: 20 | # May sleep forever if tokens never become available 21 | throttle() 22 | print('{} Doing work'.format(datetime.datetime.now())) 23 | -------------------------------------------------------------------------------- /limitlion/__init__.py: -------------------------------------------------------------------------------- 1 | """LimitLion package.""" 2 | 3 | from .running_counter import RunningCounter 4 | from .throttle import * 5 | 6 | __all__ = [ 7 | 'RunningCounter', 8 | 'throttle', 9 | 'throttle_configure', 10 | 'throttle_delete', 11 | 'throttle_get', 12 | 'throttle_reset', 13 | 'throttle_set', 14 | 'throttle_wait', 15 | 'THROTTLE_BURST_DEFAULT', 16 | 'THROTTLE_WINDOW_DEFAULT', 17 | 'THROTTLE_REQUESTED_TOKENS_DEFAULT', 18 | ] 19 | -------------------------------------------------------------------------------- /limitlion/running_counter.py: -------------------------------------------------------------------------------- 1 | import itertools 2 | import time 3 | from collections import namedtuple 4 | from distutils.version import LooseVersion 5 | 6 | import pkg_resources 7 | 8 | REDIS_PY_VERSION = pkg_resources.get_distribution("redis").version 9 | IS_REDIS_PY_2 = LooseVersion(REDIS_PY_VERSION) < LooseVersion("3") 10 | 11 | 12 | BucketCount = namedtuple('BucketCount', ['bucket', 'count']) 13 | 14 | 15 | class RunningCounter: 16 | """ 17 | A running counter keeps counts per interval for a specified number of 18 | buckets 19 | 20 | Buckets are addressed using the first epoch second for that interval 21 | calculated as follows: 22 | 23 | floor(epoch seconds / interval). 24 | 25 | For example, if using 1 hour intervals the bucket id for 2/19/19 01:23:09Z 26 | would be floor(1550539389 / (60 * 60)) = 430705. This bucket id is used to 27 | generate a Redis key with the following format: 28 | [key prefix]:[key]:[bucket id]. 29 | 30 | A group name can be provided to keep track of the list of counters in named 31 | group. 32 | 33 | Summing up all bucket values for the RunningCounter's window gives the total 34 | count. 35 | 36 | """ 37 | 38 | def __init__( 39 | self, 40 | redis, 41 | interval, 42 | num_buckets, 43 | name=None, 44 | name_prefix='rc', 45 | group_name=None, 46 | ): 47 | """ 48 | Inits RunningCounter class. 49 | 50 | Args: 51 | redis: Redis client instance. 52 | interval (int): How many seconds are collected in each bucket. 53 | num_buckets (int): How many buckets to keep. 54 | name (string): Optional; Name of this running counter. 55 | name_prefix (string): Optional; Prepended to name to generate Redis 56 | key. Name xor group_name must be set. 57 | group_name (string): Optional; Keep track of keys if group name is 58 | specified. Name xor group_name must be set. 59 | """ 60 | if (name is None) == (group_name is None): 61 | raise ValueError('Either name xor group must be set in __init__') 62 | self.redis = redis 63 | self.interval = interval 64 | self.num_buckets = num_buckets 65 | self.name = name 66 | self.name_prefix = name_prefix 67 | self.group_name = group_name 68 | 69 | @property 70 | def window(self): 71 | """ 72 | Running counter window. 73 | 74 | Returns: 75 | Integer seconds for window of Running Counter. 
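For example, interval=60 with num_buckets=60 gives a 3600 second (one hour) window.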
76 | """ 77 | return self.interval * self.num_buckets 78 | 79 | def _key(self, name, bucket): 80 | if self.group_name: 81 | return '{}:{}:{}:{}'.format( 82 | self.name_prefix, self.group_name, name, bucket 83 | ) 84 | else: 85 | return '{}:{}:{}'.format(self.name_prefix, name, bucket) 86 | 87 | def _group_key(self): 88 | """ 89 | Redis key with names of all counters from a group. 90 | """ 91 | assert self.group_name is not None 92 | return '{}:{}:{}'.format( 93 | self.name_prefix, self.group_name, 'group_keys' 94 | ) 95 | 96 | def _get_name(self, name): 97 | if self.name: 98 | if name and self.name != name: 99 | raise ValueError( 100 | 'Cannot specify different name when already set in __init__' 101 | ) 102 | return self.name 103 | else: 104 | if name is None: 105 | raise ValueError('Name not specified') 106 | return name 107 | 108 | def _get_buckets(self, recent_buckets=None, now=None): 109 | """ 110 | Get all buckets in the running counter's window, or only the most 111 | recent_buckets. 112 | """ 113 | now = now or time.time() 114 | current_bucket = int(now) // self.interval 115 | if recent_buckets is None: 116 | oldest_bucket = current_bucket - self.num_buckets 117 | else: 118 | if recent_buckets > self.num_buckets: 119 | raise ValueError( 120 | 'recent_buckets must be less or equal to num_buckets ' 121 | 'in __init__' 122 | ) 123 | oldest_bucket = current_bucket - recent_buckets 124 | buckets = range(current_bucket, oldest_bucket, -1) 125 | return buckets 126 | 127 | def buckets_counts(self, name=None, recent_buckets=None, now=None): 128 | """ 129 | Get RunningCounter buckets with counts. Missing buckets are filled 130 | with 0. Most recent buckets are first. 131 | 132 | Args: 133 | name: Optional; Must be provided if not provided to __init__(). 134 | recent_buckets: Optional; Number of most recent buckets to consider. 135 | now: Optional; Specify time to ensure consistency across multiple 136 | calls. 137 | 138 | Returns: 139 | List of BucketCount. 140 | """ 141 | if not now: 142 | now = time.time() 143 | name = self._get_name(name) 144 | 145 | buckets = self._get_buckets(recent_buckets=recent_buckets, now=now) 146 | 147 | results = self.redis.mget( 148 | map(lambda bucket: self._key(name, bucket), buckets) 149 | ) 150 | 151 | counts = [0 if v is None else float(v) for v in results] 152 | 153 | buckets_counts = [ 154 | BucketCount(bv[0], bv[1]) for bv in zip(buckets, counts) 155 | ] 156 | return buckets_counts 157 | 158 | def count(self, name=None, recent_buckets=None, now=None): 159 | """ 160 | Get total count for counter. 161 | 162 | Args: 163 | name: Optional; Must be provided if not provided to __init__(). 164 | recent_buckets: Optional; Number of most recent buckets to consider. 165 | now: Optional; Specify time to ensure consistency across multiple 166 | calls. 167 | 168 | Returns: 169 | Sum of all buckets. 170 | """ 171 | name = self._get_name(name) 172 | return sum( 173 | [ 174 | bv.count 175 | for bv in self.buckets_counts( 176 | name=name, recent_buckets=recent_buckets, now=now 177 | ) 178 | ] 179 | ) 180 | 181 | def inc(self, increment=1, name=None): 182 | """ 183 | Update rate counter. 184 | 185 | Args: 186 | increment: Float of value to add to bucket. 187 | name: Optional; Must be provided if not provided to __init__(). 188 | 189 | """ 190 | 191 | # If more consistent time is needed across calling 192 | # processes, this method could be converted into a 193 | # Lua script to use Redis server time. 
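# Note: the throttle implemented in throttle.lua already takes that approach,
# calling the Redis TIME command so every caller shares the Redis server's clock.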
194 | now = time.time() 195 | 196 | name = self._get_name(name) 197 | 198 | bucket = int(now) // self.interval 199 | bucket_key = self._key(name, bucket) 200 | expire = self.num_buckets * self.interval + 15 201 | 202 | pipeline = self.redis.pipeline() 203 | pipeline.incrbyfloat(bucket_key, increment) 204 | pipeline.expire(bucket_key, expire) 205 | if self.group_name is not None: 206 | group_key = self._group_key() 207 | if IS_REDIS_PY_2: 208 | pipeline.zadd(group_key, name, now) 209 | else: 210 | pipeline.zadd(group_key, {name: now}) 211 | pipeline.expire(group_key, expire) 212 | # Trim zset to keys used within window so 213 | # it doesn't grow uncapped. 214 | pipeline.zremrangebyscore(group_key, '-inf', now - self.window - 1) 215 | pipeline.execute() 216 | 217 | def group(self): 218 | """ 219 | Get all counter names in a group. 220 | 221 | Returns: 222 | List of counter names 223 | """ 224 | group_key = self._group_key() 225 | pipeline = self.redis.pipeline() 226 | # Trim zset keys so we don't look for values 227 | # that won't exist anyway 228 | pipeline.zremrangebyscore( 229 | group_key, '-inf', time.time() - self.window - 1 230 | ) 231 | pipeline.zrange(group_key, 0, -1) 232 | results = pipeline.execute() 233 | return [v.decode() if isinstance(v, bytes) else v for v in results[1]] 234 | 235 | def group_counts(self, recent_buckets=None): 236 | """ 237 | Get count for each counter in group. 238 | 239 | Args: 240 | recent_buckets: Optional; Number of most recent buckets to consider. 241 | 242 | Returns: 243 | Dictionary of {[couter name], [count]} 244 | """ 245 | values = {} 246 | # Ensure consistent time across all keys in group 247 | now = time.time() 248 | # Could do this in a pipeline but if a group is huge 249 | # it might be better to do them one at a time 250 | for name in self.group(): 251 | values[name] = self.count( 252 | name, recent_buckets=recent_buckets, now=now 253 | ) 254 | 255 | return values 256 | 257 | def group_buckets_counts(self, recent_buckets=None): 258 | """ 259 | Get count for each counter and bucket in group. 260 | 261 | Args: 262 | recent_buckets: Optional; Number of most recent buckets to consider. 263 | 264 | Returns: 265 | Dictionary of {[counter name], [BucketCount]} 266 | """ 267 | values = {} 268 | now = time.time() 269 | for name in self.group(): 270 | values[name] = self.buckets_counts( 271 | name, recent_buckets=recent_buckets, now=now 272 | ) 273 | 274 | return values 275 | 276 | def delete(self, name=None): 277 | """ 278 | Remove a counter. 279 | 280 | Args: 281 | name: Optional; Must be provided if not provided to __init__(). 282 | """ 283 | name = self._get_name(name) 284 | buckets = self._get_buckets(now=time.time()) 285 | counter_keys = [self._key(name, bucket) for bucket in buckets] 286 | 287 | pipeline = self.redis.pipeline() 288 | pipeline.delete(*counter_keys) 289 | if self.group_name: 290 | pipeline.zrem(self._group_key(), name) 291 | pipeline.execute() 292 | 293 | def delete_group(self): 294 | """ 295 | Remove all counters in a group. 
A group_name must be provided to 296 | __init__() 297 | """ 298 | now = time.time() 299 | all_counters = self.group() 300 | buckets = self._get_buckets(now=now) 301 | counter_keys = [ 302 | self._key(key, bucket) 303 | for key, bucket in itertools.product(all_counters, buckets) 304 | ] 305 | self.redis.delete(self._group_key(), *counter_keys) 306 | -------------------------------------------------------------------------------- /limitlion/throttle.lua: -------------------------------------------------------------------------------- 1 | -- This script implements a per second token bucket rate limiting algorithm. It is 2 | -- based on Stripe's published script. 3 | -- KEYS = {} ARGV = { throttle knob name, default rate (rps), default burst multiplier, 4 | -- default rate limit window in seconds, requested tokens, knobs_ttl } 5 | -- Returns: allowed (1=allowed, 0=not allowed), 6 | -- tokens left, 7 | -- decimal seconds left in this window 8 | 9 | -- Using the time command requires this script to be replicated via commands 10 | redis.replicate_commands() 11 | 12 | local function check_bucket(bucket_key, rate, burst, window, 13 | now, requested_tokens) 14 | -- Checks bucket to see if a request would be allowed. 15 | -- 16 | -- Args: 17 | -- bucket_key: Redis key name of bucket 18 | -- rate: Request rate per second 19 | -- burst: Burst multiplier 20 | -- window: Number of seconds in window 21 | -- now: Current second since epoch 22 | -- requested_tokens: Number of tokens requested 23 | -- 24 | -- Returns: 25 | -- allowed: 1 if this request should allowed, otherwise 0 26 | -- refreshed: Window start time, whole seconds since epoch 27 | -- filled_tokens: How many tokens in bucket 28 | 29 | -- Maximum size of bucket 30 | local capacity = math.ceil(rate * burst * window) 31 | 32 | local last_tokens = tonumber(redis.call("hget", bucket_key, "tokens")) 33 | if last_tokens == nil then 34 | last_tokens = capacity 35 | end 36 | 37 | local last_refreshed = tonumber(redis.call("hget", bucket_key, "refreshed")) 38 | if last_refreshed == nil then 39 | last_refreshed = 0 40 | end 41 | 42 | -- Calculate how many new tokens should be added, can be zero 43 | local age = math.max(0, now-last_refreshed) 44 | -- Whole windows that have elapsed 45 | local elapsed_windows = math.floor(age / window) 46 | local add_tokens = math.ceil(elapsed_windows * rate * window) 47 | 48 | -- Fill bucket with new tokens 49 | local filled_tokens = math.min(capacity, last_tokens + add_tokens) 50 | 51 | -- Determine if this request is going to be allowed 52 | local allowed 53 | if filled_tokens >= requested_tokens then 54 | allowed = 1 55 | else 56 | allowed = 0 57 | end 58 | 59 | local refreshed 60 | if add_tokens > 0 and last_refreshed == 0 then 61 | -- Adding tokens to a new bucket. 62 | refreshed = now 63 | elseif add_tokens > 0 and last_refreshed ~= 0 then 64 | -- Add tokens to an existing bucket. 
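-- Illustrative example (values chosen here for clarity, not from the original
-- script): with window = 5, last_refreshed = 100 and now = 112, age is 12 and
-- elapsed_windows is 2, so refreshed becomes 100 + 2 * 5 = 110, the start of
-- the whole window that now falls in.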
65 | refreshed = last_refreshed + elapsed_windows * window 66 | else 67 | -- Don't change refreshed time if we haven't added new tokens 68 | refreshed = last_refreshed 69 | end 70 | 71 | return {allowed, refreshed, filled_tokens} 72 | end 73 | 74 | local function update_bucket(bucket_key, allowed, refreshed, 75 | filled_tokens, ttl, requested_tokens) 76 | -- Updates bucket token count, last refreshed time, and TTL 77 | -- 78 | -- Args: 79 | -- bucket_key: Redis key name of bucket 80 | -- allowed: 1 if this request will be allowed, otherwise 0 81 | -- refreshed: Window start time, whole seconds since epoch 82 | -- filled_tokens: How many tokens in bucket 83 | -- ttl: Redis key expiration 84 | -- 85 | -- Returns: 86 | -- new_tokens: Current number of tokens in bucket 87 | 88 | local new_tokens = filled_tokens 89 | if allowed == 1 then 90 | new_tokens = math.max(0, filled_tokens - requested_tokens) 91 | end 92 | 93 | redis.call("hmset", bucket_key, "tokens", new_tokens, "refreshed", refreshed) 94 | redis.call("expire", bucket_key, ttl) 95 | return new_tokens 96 | end 97 | 98 | local name = ARGV[1] 99 | local default_rps = ARGV[2] 100 | local default_burst = ARGV[3] 101 | local default_window = ARGV[4] 102 | local requested_tokens = tonumber(ARGV[5]) 103 | local knobs_ttl = tonumber(ARGV[6]) 104 | local rps 105 | local burst 106 | local window 107 | 108 | -- Lookup throttle knob settings 109 | local knobs_key = name .. ":knobs" 110 | -- Use 111 | -- HMSET rps burst window 112 | -- to manually override the setting for any throttle. 113 | local knobs = redis.call("HMGET", knobs_key, "rps", "burst", "window") 114 | if knobs[1] == false then 115 | -- Use defaults if knobs hash is not found 116 | rps = tonumber(default_rps) 117 | burst = tonumber(default_burst) 118 | window = tonumber(default_window) 119 | else 120 | rps = tonumber(knobs[1]) 121 | burst = tonumber(knobs[2]) 122 | window = tonumber(knobs[3]) 123 | -- Set knobs hash expiration if knobs_ttl is specified 124 | if knobs_ttl > 0 then 125 | redis.call("EXPIRE", knobs_key, knobs_ttl) 126 | end 127 | end 128 | 129 | -- Use redis server time so it is consistent across callers 130 | -- The following line gets replaced before loading this script during tests 131 | -- so that we can freeze the time values. See throttle.py. 
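-- For reference, throttle_configure(testing=True) in throttle.py swaps the line
-- below for code that reads the keys "frozen_second" and "frozen_microsecond"
-- via MGET when "frozen_second" exists, and falls back to redis.call("time")
-- otherwise.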
132 | local time = redis.call("time") 133 | local now = tonumber(time[1]) 134 | 135 | -- Keep the hash around for twice the useful burst time to reduce unnecessary expires 136 | local ttl = math.floor(burst * window * 2) 137 | 138 | local tokens 139 | local seconds_left 140 | local allowed 141 | if rps == 0 then 142 | -- rps = 0 always results in a denied request with a full window sleep 143 | seconds_left = window 144 | tokens = 0 145 | allowed = 0 146 | elseif rps == -1 then 147 | -- rps = -1 always results in an allowed 148 | seconds_left = 0 149 | tokens = 1 150 | allowed = 1 151 | else 152 | -- Check bucket to determine if work is allowed 153 | local rate = check_bucket(name, rps, burst, window, now, requested_tokens) 154 | tokens = update_bucket(name, rate[1], rate[2], rate[3], ttl, requested_tokens) 155 | allowed = rate[1] 156 | -- Calculate decimal seconds left in the window 157 | local diff = math.max(0, now - tonumber(rate[2])) 158 | seconds_left = (window - diff - 1) + (1000000 - tonumber(time[2])) / 1000000 159 | end 160 | 161 | -- string.format is necessary for seconds_left because Lua to Redis number 162 | -- conversion automatically casts numbers to integers which would drop the microseconds 163 | return {allowed, tokens, string.format("%.6f", seconds_left)} 164 | -------------------------------------------------------------------------------- /limitlion/throttle.py: -------------------------------------------------------------------------------- 1 | """Token bucket throttle backed by Redis.""" 2 | 3 | import time 4 | 5 | import pkg_resources 6 | 7 | KEY_FORMAT = 'throttle:{}' 8 | 9 | # throttle knob defaults 10 | THROTTLE_BURST_DEFAULT = 1 11 | THROTTLE_WINDOW_DEFAULT = 5 12 | THROTTLE_REQUESTED_TOKENS_DEFAULT = 1 13 | 14 | # The default is to extend a throttle's knob settings TTL out 15 | # 7 days each time the throttle is used. 16 | DEFAULT_KNOBS_TTL = 60 * 60 * 24 * 7 17 | 18 | throttle_script = None 19 | redis = None 20 | 21 | 22 | def _validate_throttle(key, params): 23 | check_values_pipe = redis.pipeline() 24 | for param, param_name in params: 25 | if param is not None: 26 | # Throttle values can only be positive floats 27 | try: 28 | assert float(param) >= 0 29 | except (ValueError, AssertionError): 30 | raise ValueError( 31 | '"{}" is not a valid throttle value. Throttle values must ' 32 | 'be positive floats.'.format(param) 33 | ) 34 | else: 35 | check_values_pipe.hexists(key, param_name) 36 | if not all(check_values_pipe.execute()): 37 | raise IndexError( 38 | "Throttle knob {} doesn't exist or is invalid".format(key) 39 | ) 40 | 41 | 42 | def _verify_configured(): 43 | if not redis or not throttle_script: 44 | raise RuntimeError('Throttle is not configured') 45 | 46 | 47 | def throttle( 48 | name, 49 | rps, 50 | burst=THROTTLE_BURST_DEFAULT, 51 | window=THROTTLE_WINDOW_DEFAULT, 52 | requested_tokens=THROTTLE_REQUESTED_TOKENS_DEFAULT, 53 | knobs_ttl=DEFAULT_KNOBS_TTL, 54 | ): 55 | """ 56 | Throttle that allows orchestration of distributed workers. 57 | 58 | Args: 59 | name: Name of throttle. Used as part of the Redis key. 60 | rps: Default requests per second allowed by this throttle 61 | burst: Default burst multiplier 62 | window: Default limit window in seconds 63 | requested_tokens: Number of tokens required for this work request 64 | knobs_ttl: Throttle's knob TTL value (0 disables setting TTL) 65 | 66 | Returns: 67 | allowed: True if work is allowed 68 | tokens: Number of tokens left in throttle bucket 69 | sleep: Seconds before next limit window starts. 
If work is 70 | not allowed you should sleep this many seconds. (float) 71 | 72 | The first use of a throttle will set the default values in redis for 73 | rps, burst, and window. Subsequent calls will use the values stored in 74 | Redis. This allows changes to the throttle knobs to be made on the fly by 75 | simply changing the values stored in redis. 76 | 77 | See throttle_set function to set the throttle. 78 | 79 | Setting RPS to 0 causes all work requests to be denied and a full sleep. 80 | Setting RPS to -1 causes all work requests to be allowed. 81 | 82 | """ 83 | 84 | _verify_configured() 85 | allowed, tokens, sleep = throttle_script( 86 | keys=[], 87 | args=[ 88 | KEY_FORMAT.format(name), 89 | rps, 90 | burst, 91 | window, 92 | requested_tokens, 93 | knobs_ttl, 94 | ], 95 | ) 96 | # Converting the string sleep to a float causes floating point rounding 97 | # issues that limits having true microsecond resolution for the sleep 98 | # value. 99 | return allowed == 1, int(tokens), float(sleep) 100 | 101 | 102 | def throttle_configure(redis_instance, testing=False): 103 | """Register Lua throttle script in Redis.""" 104 | 105 | global redis, throttle_script 106 | redis = redis_instance 107 | 108 | lua_script = pkg_resources.resource_string( 109 | __name__, 'throttle.lua' 110 | ).decode() 111 | 112 | # Modify scripts when testing so time can be frozen 113 | if testing: 114 | lua_script = lua_script.replace( 115 | 'local time = redis.call("time")', 116 | 'local time\n' 117 | 'if redis.call("exists", "frozen_second") == 1 then\n' 118 | ' time = redis.call("mget", "frozen_second", "frozen_microsecond")\n' # noqa: E501 119 | 'else\n' 120 | ' time = redis.call("time")\n' 121 | 'end', 122 | ) 123 | throttle_script = redis.register_script(lua_script) 124 | 125 | 126 | def throttle_delete(name): 127 | """Delete Redis throttle data.""" 128 | 129 | _verify_configured() 130 | key = KEY_FORMAT.format(name) 131 | pipeline = redis.pipeline() 132 | pipeline.delete(key) 133 | pipeline.delete(key + ':knobs') 134 | pipeline.execute() 135 | 136 | 137 | def throttle_get(name): 138 | """ 139 | Get throttle values from redis. 140 | 141 | Returns: (tokens, refreshed, rps, burst, window) 142 | 143 | """ 144 | 145 | key = KEY_FORMAT.format(name) + ':knobs' 146 | 147 | # Get each value in hashes individually in case they don't exist 148 | get_values_pipe = redis.pipeline() 149 | key = KEY_FORMAT.format(name) 150 | get_values_pipe.hget(key, 'tokens') 151 | get_values_pipe.hget(key, 'refreshed') 152 | 153 | key = KEY_FORMAT.format(name) + ':knobs' 154 | get_values_pipe.hget(key, 'rps') 155 | get_values_pipe.hget(key, 'burst') 156 | get_values_pipe.hget(key, 'window') 157 | 158 | values = get_values_pipe.execute() 159 | return values 160 | 161 | 162 | def throttle_reset(name): 163 | """Reset throttle settings.""" 164 | 165 | _verify_configured() 166 | key = KEY_FORMAT.format(name) + ':knobs' 167 | redis.delete(key) 168 | 169 | 170 | def throttle_set(name, rps=None, burst=None, window=None, knobs_ttl=None): 171 | """ 172 | Adjust throttle values in redis. 
173 | 174 | If knobs_ttl is used here the throttle() call needs to be called 175 | with knobs_ttl=0 so the ttl isn't also set in the Lua script 176 | """ 177 | 178 | _verify_configured() 179 | key = KEY_FORMAT.format(name) + ':knobs' 180 | 181 | params = [(rps, 'rps'), (burst, 'burst'), (window, 'window')] 182 | _validate_throttle(key, params) 183 | 184 | set_values_pipe = redis.pipeline() 185 | for param, param_name in params: 186 | if param is not None: 187 | set_values_pipe.hset(key, param_name, param) 188 | 189 | if knobs_ttl: 190 | set_values_pipe.expire(key, knobs_ttl) 191 | 192 | set_values_pipe.execute() 193 | 194 | 195 | def throttle_wait(name, *args, **kwargs): 196 | """Sleeps time specified by throttle if needed. 197 | 198 | This will wait potentially forever to get permission to do work 199 | 200 | Usage: 201 | throttle = throttle_wait('name', rps=123) 202 | for ...: 203 | throttle() 204 | do_work() 205 | """ 206 | 207 | max_wait = kwargs.pop('max_wait', None) 208 | 209 | def throttle_func(requested_tokens=1): 210 | start_time = time.time() 211 | allowed, tokens, sleep = throttle( 212 | name, *args, requested_tokens=requested_tokens, **kwargs 213 | ) 214 | while not allowed: 215 | if max_wait is not None and time.time() - start_time > max_wait: 216 | break 217 | time.sleep(sleep) 218 | allowed, tokens, sleep = throttle( 219 | name, *args, requested_tokens=requested_tokens, **kwargs 220 | ) 221 | return allowed, tokens, sleep 222 | 223 | return throttle_func 224 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.black] 2 | skip-string-normalization = true 3 | line-length = 79 4 | exclude = ''' 5 | /( 6 | \.git 7 | | \.venv 8 | | ui 9 | )/ 10 | ''' 11 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | redis==3.5.3 2 | freezefrog==0.4.1 -------------------------------------------------------------------------------- /requirements_test.txt: -------------------------------------------------------------------------------- 1 | pytest 2 | coverage 3 | pytest-cov 4 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [tool:pytest] 2 | testpaths=tests 3 | 4 | [flake8] 5 | exclude=build,dist,docs,sql_migrations,venv,.tox,.eggs,venv3 6 | ignore=D1,D200,D202,D204,D205,D40,D413,E127,E128,E226,F403,F405,I100,N806 7 | import-order-style=google 8 | max-complexity=12 9 | max-line-length = 80 10 | 11 | [isort] 12 | skip=.tox,venv,venv3 13 | not_skip= 14 | __init__.py 15 | known_first_party = limitlion 16 | known_tests=tests 17 | sections=FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,TESTS,LOCALFOLDER 18 | default_section=THIRDPARTY 19 | use_parentheses=true 20 | multi_line_output=5 21 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | """LimitLion setup.""" 2 | 3 | from setuptools import setup 4 | 5 | install_requires = ['redis>=2'] 6 | 7 | tests_require = install_requires + ['pytest', 'pytest-cov'] 8 | 9 | with open("README.md", "r") as fh: 10 | long_description = fh.read() 11 | 12 | setup( 13 | name='limitlion', 14 | version='1.0.0', 15 | url='http://github.com/closeio/limitlion', 16 
| description='Close LimitLion', 17 | long_description=long_description, 18 | long_description_content_type="text/markdown", 19 | platforms='any', 20 | classifiers=[ 21 | 'Intended Audience :: Developers', 22 | 'License :: OSI Approved :: MIT License', 23 | 'Operating System :: OS Independent', 24 | 'Programming Language :: Python :: 3', 25 | 'Programming Language :: Python :: 3.5', 26 | 'Programming Language :: Python :: 3.6', 27 | 'Topic :: Software Development :: Libraries :: Python Modules', 28 | ], 29 | packages=[ 30 | 'limitlion', 31 | ], 32 | package_data={'limitlion': ['*.lua']}, 33 | install_requires=install_requires, 34 | tests_require=tests_require, 35 | ) 36 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import redis as redis_client 3 | 4 | import limitlion 5 | 6 | REDIS_HOST = 'localhost' 7 | REDIS_PORT = 6379 8 | REDIS_DB = 1 9 | 10 | 11 | @pytest.fixture 12 | def redis(): 13 | client = redis_client.Redis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB) 14 | client.flushdb() 15 | yield client 16 | client.flushdb() 17 | 18 | 19 | @pytest.fixture 20 | def limitlion_fixture(redis): 21 | limitlion.throttle_configure(redis, True) 22 | -------------------------------------------------------------------------------- /tests/test_running_counter.py: -------------------------------------------------------------------------------- 1 | """LimitLion tests.""" 2 | import datetime 3 | import time 4 | 5 | import pytest 6 | from freezefrog import FreezeTime 7 | 8 | from limitlion.running_counter import BucketCount, RunningCounter 9 | 10 | 11 | class TestRunningCounter: 12 | def test_main(self, redis): 13 | name = 'test' 14 | num_buckets = 5 15 | interval = 5 16 | 17 | # Start counter now 18 | now = start = datetime.datetime.utcnow().replace(second=0, minute=0) 19 | with FreezeTime(now): 20 | counter = RunningCounter( 21 | redis, 22 | interval, 23 | num_buckets, 24 | name, 25 | ) 26 | # Add two values to current bucket 27 | counter.inc(1) 28 | counter.inc(1.2) 29 | 30 | buckets_counts = counter.buckets_counts() 31 | bucket = int(time.time()) // interval 32 | assert buckets_counts == [ 33 | BucketCount(bucket, 2.2), 34 | BucketCount(bucket - 1, 0), 35 | BucketCount(bucket - 2, 0), 36 | BucketCount(bucket - 3, 0), 37 | BucketCount(bucket - 4, 0), 38 | ] 39 | assert counter.count() == 2.2 40 | 41 | # Move half way into window and add value to bucket 42 | now = start + datetime.timedelta( 43 | seconds=int(num_buckets * interval / 2) 44 | ) 45 | with FreezeTime(now): 46 | counter.inc(2.3) 47 | buckets_counts = counter.buckets_counts() 48 | new_bucket = int(time.time()) // interval 49 | assert buckets_counts == [ 50 | BucketCount(new_bucket, 2.3), 51 | BucketCount(new_bucket - 1, 0), 52 | BucketCount(new_bucket - 2, 2.2), 53 | BucketCount(new_bucket - 3, 0), 54 | BucketCount(new_bucket - 4, 0), 55 | ] 56 | assert counter.count() == 4.5 57 | 58 | # Move forward enough to drop first bucket 59 | now = start + datetime.timedelta(seconds=num_buckets * interval + 1) 60 | with FreezeTime(now): 61 | buckets_counts = counter.buckets_counts() 62 | assert buckets_counts == [ 63 | BucketCount(new_bucket + 3, 0), 64 | BucketCount(new_bucket + 2, 0), 65 | BucketCount(new_bucket + 1, 0), 66 | BucketCount(new_bucket, 2.3), 67 | BucketCount(new_bucket - 1, 0), 68 | ] 69 | assert counter.count() == 2.3 70 | 71 | # Move forward enough to drop all buckets 72 | now 
= start + datetime.timedelta( 73 | seconds=num_buckets * interval + int(num_buckets * interval / 2) 74 | ) 75 | with FreezeTime(now): 76 | buckets_counts = counter.buckets_counts() 77 | current_bucket = int(time.time()) // interval 78 | assert buckets_counts == [ 79 | BucketCount(current_bucket, 0), 80 | BucketCount(current_bucket - 1, 0), 81 | BucketCount(current_bucket - 2, 0), 82 | BucketCount(current_bucket - 3, 0), 83 | BucketCount(current_bucket - 4, 0), 84 | ] 85 | assert counter.count() == 0 86 | 87 | def test_multi_counters_not_allowed(self, redis): 88 | counter = RunningCounter(redis, 10, 10, name='test1') 89 | 90 | with pytest.raises(ValueError): 91 | counter.inc(1, name='test2') 92 | 93 | with pytest.raises(ValueError): 94 | counter.buckets_counts(name='test2') 95 | 96 | def test_window(self, redis): 97 | counter = RunningCounter(redis, 9, 8, 'test') 98 | assert counter.window == 72 # Seconds 99 | 100 | def test_redis_expirations(self, redis): 101 | # Test TTL when specifying name in constructor 102 | name = 'test' 103 | counter = RunningCounter(redis, 9, 8, name) 104 | counter.inc(2.3) 105 | buckets_counts = counter.buckets_counts() 106 | ttl = redis.ttl(counter._key(name, buckets_counts[0].bucket)) 107 | assert ttl > counter.window 108 | 109 | # Test TTL when specifying name in inc 110 | name = 'test2' 111 | counter = RunningCounter(redis, 9, 8, name) 112 | counter.inc(2.3) 113 | buckets_counts = counter.buckets_counts(name=name) 114 | ttl = redis.ttl(counter._key(name, buckets_counts[0].bucket)) 115 | assert ttl > counter.window 116 | 117 | def test_groups(self, redis): 118 | counter = RunningCounter(redis, 10, 10, group_name='group') 119 | counter.inc(1.2, 'test') 120 | counter.inc(2.2, 'test2') 121 | 122 | assert counter.group() == ['test', 'test2'] 123 | assert counter.group_counts() == {'test': 1.2, 'test2': 2.2} 124 | 125 | # Make sure there aren't collisions between two groups 126 | # using the same names 127 | counter = RunningCounter(redis, 10, 10, group_name='group2') 128 | counter.inc(1.2, 'test') 129 | counter.inc(2.2, 'test2') 130 | 131 | assert counter.group() == ['test', 'test2'] 132 | assert counter.group_counts() == {'test': 1.2, 'test2': 2.2} 133 | 134 | def test_group_counter_purging(self, redis): 135 | start = datetime.datetime.now() 136 | counter = RunningCounter(redis, 10, 10, group_name='group') 137 | with FreezeTime(start): 138 | counter.inc(1.2, 'test') 139 | 140 | assert counter.group() == ['test'] 141 | with FreezeTime(start + datetime.timedelta(seconds=counter.window)): 142 | counter.inc(2.2, 'test2') 143 | assert counter.group() == ['test', 'test2'] 144 | 145 | # One second past window should result in first counter being 146 | # removed from the zset 147 | with FreezeTime( 148 | start + datetime.timedelta(seconds=counter.window + 1) 149 | ): 150 | counter.inc(2.2, 'test2') 151 | assert counter.group() == ['test2'] 152 | 153 | def test_group_bad_init(self, redis): 154 | with pytest.raises(ValueError): 155 | RunningCounter(redis, 1, 1, name='test', group_name='group') 156 | 157 | def test_empty_counter(self, redis): 158 | counter = RunningCounter(redis, 1, 1, name='test_empty') 159 | count = counter.count() 160 | assert count == 0 161 | 162 | def test_delete_counter(self, redis): 163 | counter = RunningCounter(redis, 1, 1, name='name1') 164 | counter.inc() 165 | counter.delete() 166 | assert counter.count() == 0 167 | 168 | def test_delete_group_counter(self, redis): 169 | counter = RunningCounter(redis, 1, 1, group_name='group') 170 | 
counter.inc(name="name1") 171 | counter.delete(name="name1") 172 | assert counter.group_counts() == {} 173 | counter.inc(name="name1") 174 | counter.inc(name="name2") 175 | counter.delete_group() 176 | assert counter.group_counts() == {} 177 | 178 | def test_group_buckets_counts(self, redis): 179 | start = datetime.datetime.now() 180 | counter = RunningCounter(redis, 10, 5, group_name='group') 181 | with FreezeTime(start): 182 | counter.inc(1, 'counter1') 183 | counter.inc(1, 'counter2') 184 | with FreezeTime( 185 | start + datetime.timedelta(seconds=counter.interval * 2) 186 | ): 187 | counter.inc(2, 'counter1') 188 | counter.inc(3, 'counter2') 189 | with FreezeTime( 190 | start + datetime.timedelta(seconds=counter.interval * 4) 191 | ): 192 | current_bucket = int(time.time()) // 10 193 | counter1_values = [0, 0, 2.0, 0, 1.0] 194 | counter2_values = [0, 0, 3.0, 0, 1.0] 195 | buckets = list( 196 | range(current_bucket, current_bucket - counter.num_buckets, -1) 197 | ) 198 | counter1_bucket_values = [ 199 | BucketCount(bucket=bucket, count=count) 200 | for bucket, count in zip(buckets, counter1_values) 201 | ] 202 | counter2_bucket_values = [ 203 | BucketCount(bucket=bucket, count=count) 204 | for bucket, count in zip(buckets, counter2_values) 205 | ] 206 | assert counter.group_buckets_counts() == { 207 | 'counter1': counter1_bucket_values, 208 | 'counter2': counter2_bucket_values, 209 | } 210 | assert counter.group_buckets_counts(3) == { 211 | 'counter1': counter1_bucket_values[:3], 212 | 'counter2': counter2_bucket_values[:3], 213 | } 214 | 215 | def test_group_counts_specify_recent_buckets(self, redis): 216 | start = datetime.datetime.now() 217 | counter = RunningCounter(redis, 10, 10, group_name='group') 218 | with FreezeTime(start): 219 | counter.inc(1, 'counter1') 220 | counter.inc(3, 'counter2') 221 | 222 | with FreezeTime(start + datetime.timedelta(seconds=counter.interval)): 223 | counter.inc(1, 'counter1') 224 | counter.inc(3, 'counter2') 225 | with FreezeTime( 226 | start + datetime.timedelta(seconds=counter.interval * 2) 227 | ): 228 | counter.inc(1, 'counter1') 229 | counter.inc(3, 'counter2') 230 | assert counter.group_counts(recent_buckets=1) == { 231 | 'counter1': 1.0, 232 | 'counter2': 3.0, 233 | } 234 | assert counter.group_counts(recent_buckets=2) == { 235 | 'counter1': 2.0, 236 | 'counter2': 6.0, 237 | } 238 | assert counter.group_counts() == { 239 | 'counter1': 3.0, 240 | 'counter2': 9.0, 241 | } 242 | assert counter.group_counts(recent_buckets=10) == { 243 | 'counter1': 3.0, 244 | 'counter2': 9.0, 245 | } 246 | with pytest.raises(ValueError): 247 | counter.group_counts(recent_buckets=11) 248 | -------------------------------------------------------------------------------- /tests/test_throttle.py: -------------------------------------------------------------------------------- 1 | """LimitLion tests.""" 2 | 3 | import math 4 | import time 5 | 6 | import pytest 7 | import redis as redis_lib 8 | 9 | import limitlion 10 | from limitlion.throttle import DEFAULT_KNOBS_TTL 11 | 12 | REDIS_HOST = 'localhost' 13 | REDIS_PORT = 6379 14 | REDIS_DB = 1 15 | 16 | TEST_PARAMETERS = [] 17 | for window in (1, 2, 5, 10): 18 | for burst in (1, 2, 3.3, 10): 19 | for rps in (0.0001, 0.2, 0.5, 0.6, 1, 2, 2.2, 5, 10): 20 | TEST_PARAMETERS.append((rps, burst, window)) 21 | 22 | 23 | @pytest.fixture() 24 | def redis(): 25 | if redis_lib.VERSION[0] >= 3: 26 | RedisCls = redis_lib.Redis 27 | else: 28 | RedisCls = redis_lib.StrictRedis 29 | 30 | redis_instance = RedisCls(host=REDIS_HOST, 
port=REDIS_PORT, db=REDIS_DB) 31 | limitlion.throttle_configure(redis_instance, True) 32 | 33 | redis_instance.flushdb() 34 | yield redis_instance 35 | redis_instance.connection_pool.disconnect() 36 | 37 | 38 | class TestThrottleNotConfigured: 39 | """ 40 | Tests throttle configuration check. 41 | 42 | This test is run first before it is configured for the remaining tests. 43 | """ 44 | 45 | def test_not_configured(self): 46 | with pytest.raises(RuntimeError) as excinfo: 47 | limitlion.throttle('test', 1, 1, 1, 1) 48 | assert 'Throttle is not configured' in str(excinfo.value) 49 | 50 | 51 | class TestThrottle: 52 | """ 53 | Tests throttle. 54 | """ 55 | 56 | def _get_redis_key(self, name): 57 | return limitlion.KEY_FORMAT.format(name) 58 | 59 | def _fake_work( 60 | self, 61 | key, 62 | rps=5, 63 | burst=1, 64 | window=5, 65 | requested_tokens=1, 66 | knobs_ttl=DEFAULT_KNOBS_TTL, 67 | ): 68 | return limitlion.throttle( 69 | key, rps, burst, window, requested_tokens, knobs_ttl 70 | ) 71 | 72 | @staticmethod 73 | def _get_microseconds(time): 74 | return int((time - int(time)) * 1000000) 75 | 76 | def _fake_bucket_tokens(self, key, tokens, refreshed, redis): 77 | """Create a faked token bucket in Redis.""" 78 | 79 | assert refreshed == int(refreshed) 80 | redis.hmset(key, {'tokens': tokens, 'refreshed': refreshed}) 81 | 82 | def _freeze_redis_time( 83 | self, redis, seconds=int(time.time()), microseconds=0 84 | ): 85 | """ 86 | Freeze time in Redis. 87 | 88 | Utilizes the modified Lua script loaded when the throttle is configured 89 | with testing=True. The modified script will use time values from the 90 | keys frozen_second and frozen_microsecond instead of the Redis TIME 91 | command. 92 | """ 93 | 94 | # Confirm this is being called properly with whole numbers 95 | assert seconds == int(seconds) 96 | assert microseconds == int(microseconds) 97 | assert seconds >= 0 98 | assert microseconds >= 0 99 | 100 | # Pull whole seconds out of microseconds 101 | seconds += int(microseconds / 1000000) 102 | microseconds = microseconds % 1000000 103 | 104 | redis.mset( 105 | {'frozen_second': seconds, 'frozen_microsecond': microseconds} 106 | ) 107 | 108 | @pytest.mark.parametrize('rps, burst, window', TEST_PARAMETERS) 109 | def test_bursting(self, rps, burst, window, redis): 110 | """Test bursting logic.""" 111 | 112 | capacity = math.ceil(rps * burst * window) 113 | start_time = int(time.time()) 114 | self._freeze_redis_time(redis, start_time, 0) 115 | 116 | allowed, tokens, sleep = self._fake_work('test', rps, burst, window) 117 | assert allowed is True 118 | assert tokens == capacity - 1 119 | 120 | self._freeze_redis_time(redis, start_time + window, 500000) 121 | allowed, tokens, sleep = self._fake_work( 122 | 'test', rps, burst, window, capacity 123 | ) 124 | assert allowed is True 125 | assert tokens == 0 126 | 127 | self._freeze_redis_time(redis, start_time + 2 * window, 500000) 128 | allowed, tokens, sleep = self._fake_work('test', rps, burst, window) 129 | assert allowed is True 130 | assert tokens == math.ceil(rps * window) - 1 131 | 132 | @pytest.mark.parametrize('rps, burst, window', TEST_PARAMETERS) 133 | def test_zero_rps(self, rps, burst, window, redis): 134 | """Test RPS set to 0.""" 135 | 136 | allowed, tokens, sleep = self._fake_work('test', 0, burst, window) 137 | assert allowed is False 138 | assert sleep == window 139 | assert tokens == 0 140 | 141 | @pytest.mark.parametrize('rps, burst, window', TEST_PARAMETERS) 142 | def test_request_all_tokens(self, rps, burst, window, redis): 
143 | """Test request all tokens in one request.""" 144 | 145 | allowed, tokens, sleep = self._fake_work( 146 | 'test', rps, burst, window, (rps * burst * window) 147 | ) 148 | assert allowed is True 149 | assert sleep <= window 150 | assert tokens == 0 151 | 152 | @pytest.mark.parametrize('rps, burst, window', TEST_PARAMETERS) 153 | def test_request_too_many_tokens(self, rps, burst, window, redis): 154 | """Test requesting capacity plus 1.""" 155 | 156 | allowed, tokens, sleep = self._fake_work( 157 | 'test', rps, burst, window, (rps * burst * window) + 1 158 | ) 159 | assert allowed is False 160 | assert sleep <= window 161 | assert tokens == math.ceil(rps * burst * window) 162 | 163 | @pytest.mark.parametrize('rps, burst, window', TEST_PARAMETERS) 164 | def test_multiple_throttles(self, rps, burst, window, redis): 165 | """Test multiple throttles.""" 166 | 167 | throttle_name_1 = 'test1' 168 | throttle_redis_key_1 = self._get_redis_key(throttle_name_1) 169 | throttle_name_2 = 'test2' 170 | throttle_redis_key_2 = self._get_redis_key(throttle_name_2) 171 | 172 | start_time = int(time.time()) 173 | # Fake bucket with two tokens left 174 | self._fake_bucket_tokens(throttle_redis_key_1, 1, start_time, redis) 175 | self._fake_bucket_tokens(throttle_redis_key_2, 3, start_time, redis) 176 | 177 | # Set time 4 microseconds into the first second of this window 178 | self._freeze_redis_time(redis, start_time, 4) 179 | 180 | allowed, tokens, sleep = self._fake_work( 181 | throttle_name_1, rps, burst, window 182 | ) 183 | assert allowed is True 184 | assert tokens == 0 185 | 186 | allowed, tokens, sleep = self._fake_work( 187 | throttle_name_1, rps, burst, window 188 | ) 189 | assert allowed is False 190 | assert tokens == 0 191 | 192 | # Second throttle, add +1 to RPS just to test with a different value 193 | # for the second throttle 194 | allowed, tokens, sleep = self._fake_work( 195 | throttle_name_2, rps + 1, burst, window 196 | ) 197 | assert allowed is True 198 | assert tokens == min(math.ceil((rps + 1) * window * burst), 3) - 1 199 | 200 | # Confirm first throttle is still out of tokens 201 | allowed, tokens, sleep = self._fake_work( 202 | throttle_name_1, rps, burst, window 203 | ) 204 | assert allowed is False 205 | assert tokens == 0 206 | 207 | @pytest.mark.parametrize('rps, burst, window', TEST_PARAMETERS) 208 | def test_rate_limits(self, rps, burst, window, redis): 209 | """Test requests over allowable limit.""" 210 | 211 | # Don't include burst in this capacity because we don't start with 212 | # an empty bucket which is when burst tokens are available 213 | capacity = math.ceil(rps * window) 214 | 215 | # Max capacity is needed because the Lua script will fix buckets 216 | # that we add too many tokens with a call to _fake_bucket_tokens. 217 | # For example, setting tokens to 2 with rps, burst, window = 1 is 218 | # technically too many tokens in that bucket. 
219 | max_capacity = math.ceil(rps * window * burst) 220 | 221 | throttle_name = 'test' 222 | throttle_redis_key = self._get_redis_key(throttle_name) 223 | 224 | start_time = int(time.time()) 225 | # Fake bucket with two tokens left 226 | self._fake_bucket_tokens(throttle_redis_key, 2, start_time, redis) 227 | # Set time 4 microseconds into the first second of this window 228 | self._freeze_redis_time(redis, start_time, 4) 229 | 230 | # Fist call should be under limit 231 | allowed, tokens, sleep = self._fake_work( 232 | throttle_name, rps, burst, window 233 | ) 234 | assert allowed is True 235 | assert tokens == min(max_capacity, 2) - 1 236 | 237 | # Second call may be allowed depending on RPS 238 | allowed, tokens, sleep = self._fake_work( 239 | throttle_name, rps, burst, window 240 | ) 241 | assert allowed is (min(max_capacity, 2) - 1 > 0) 242 | assert tokens == 0 243 | 244 | # Third call should be over limit 245 | allowed, tokens, sleep = self._fake_work( 246 | throttle_name, rps, burst, window 247 | ) 248 | assert allowed is False 249 | assert tokens == 0 250 | # This might need to be switched to checking if it is within +/- 1 251 | # microsecond to deal with floating point rounding madness. 252 | assert sleep == window - 0.000004 253 | 254 | # Call should succeed if we come back exactly at the sleep time. 255 | # Next window starts at: 256 | # start_time + whole seconds of sleep + microseconds of sleep + initial 257 | # 4 microsecond of start time 258 | # Floating point comparison madness with + 4 versus + 5 259 | self._freeze_redis_time( 260 | redis, 261 | int(start_time) + int(sleep), 262 | TestThrottle._get_microseconds(sleep) + 5, 263 | ) 264 | allowed, tokens, sleep = self._fake_work( 265 | throttle_name, rps, burst, window 266 | ) 267 | assert allowed is True 268 | assert tokens == capacity - 1 269 | 270 | def test_changing_settings(self, redis): 271 | """Test changing throttle settings.""" 272 | 273 | throttle_name = 'test' 274 | 275 | start_time = int(time.time()) 276 | self._freeze_redis_time(redis, start_time, 0) 277 | 278 | # Fist call should be under limit 279 | allowed, tokens, sleep = self._fake_work(throttle_name, 5, 1, 5) 280 | assert allowed is True 281 | assert tokens == 24 282 | 283 | limitlion.throttle_set(throttle_name, 10, 1, 5) 284 | 285 | # Second call should still be under limit and changing default should 286 | # not change tokens remaining 287 | allowed, tokens, sleep = self._fake_work(throttle_name, 100, 100, 100) 288 | assert allowed is True 289 | assert tokens == 23 290 | 291 | self._freeze_redis_time(redis, start_time + 6, 0) 292 | # This would actually temporarily starve throttles because they would 293 | # not add tokens for an extra 5 seconds longer than planned. 294 | limitlion.throttle_set(throttle_name, 10, 1, 10) 295 | 296 | allowed, tokens, sleep = self._fake_work(throttle_name, 5, 1, 5) 297 | assert allowed is True 298 | assert tokens == 22 299 | 300 | # Move into next window 301 | self._freeze_redis_time(redis, start_time + 11, 0) 302 | 303 | allowed, tokens, sleep = self._fake_work(throttle_name, 5, 1, 5) 304 | assert allowed is True 305 | assert tokens == 99 306 | 307 | @pytest.mark.parametrize('value', ['a', -100, '-100']) 308 | def test_setting_invalid_throttle_values(self, value, redis): 309 | """Tests setting throttle values that are not 310 | positive floats. 
311 | """ 312 | throttle_name = 'test' 313 | 314 | start_time = int(time.time()) 315 | 316 | self._freeze_redis_time(redis, start_time, 0) 317 | 318 | with pytest.raises(ValueError) as excinfo: 319 | limitlion.throttle_set(throttle_name, value, 2, 6) 320 | assert ( 321 | '"{}" is not a valid throttle value. Throttle values must ' 322 | 'be positive floats.'.format(value) 323 | ) in str(excinfo.value) 324 | 325 | def test_get_throttle(self, redis): 326 | """Test getting throttle settings.""" 327 | 328 | throttle_name = 'test' 329 | 330 | start_time = int(time.time()) 331 | 332 | self._freeze_redis_time(redis, start_time, 0) 333 | 334 | limitlion.throttle_set(throttle_name, 5, 2, 6) 335 | self._fake_work(throttle_name) 336 | tokens, refreshed, rps, burst, window = limitlion.throttle_get( 337 | throttle_name 338 | ) 339 | assert int(tokens) == 59 340 | assert int(refreshed) == start_time 341 | assert int(rps) == 5 342 | assert int(burst) == 2 343 | assert int(window) == 6 344 | 345 | def test_set_throttle(self, redis): 346 | """Test setting throttle settings.""" 347 | 348 | throttle_name = 'test' 349 | key = self._get_redis_key(throttle_name) 350 | 351 | start_time = int(time.time()) 352 | 353 | self._freeze_redis_time(redis, start_time, 0) 354 | 355 | limitlion.throttle_set(throttle_name, 5, 2, 6) 356 | self._fake_work(throttle_name) 357 | tokens, refreshed, rps, burst, window = limitlion.throttle_get( 358 | throttle_name 359 | ) 360 | 361 | # TTL should be ~7 days 362 | assert redis.ttl('{}:knobs'.format(key)) > DEFAULT_KNOBS_TTL - 2 363 | assert int(tokens) == 59 364 | assert int(refreshed) == start_time 365 | assert int(rps) == 5 366 | assert int(burst) == 2 367 | assert int(window) == 6 368 | 369 | def test_set_throttle_with_ttl(self, redis): 370 | """Test setting throttle settings with a ttl.""" 371 | 372 | throttle_name = 'test' 373 | key = self._get_redis_key(throttle_name) 374 | start_time = int(time.time()) 375 | self._freeze_redis_time(redis, start_time, 0) 376 | 377 | # Test having knobs never expire 378 | limitlion.throttle_set(throttle_name, 5, 2, 6) 379 | self._fake_work(throttle_name, knobs_ttl=0) 380 | tokens, refreshed, rps, burst, window = limitlion.throttle_get( 381 | throttle_name 382 | ) 383 | 384 | # TTL should not be set 385 | assert redis.ttl('{}:knobs'.format(key)) == -1 386 | assert int(tokens) == 59 387 | assert int(refreshed) == start_time 388 | assert int(rps) == 5 389 | assert int(burst) == 2 390 | assert int(window) == 6 391 | 392 | # Test setting 10 second expiration 393 | limitlion.throttle_set(throttle_name, 5, 2, 6, knobs_ttl=10) 394 | self._fake_work(throttle_name, knobs_ttl=0) 395 | tokens, refreshed, rps, burst, window = limitlion.throttle_get( 396 | throttle_name 397 | ) 398 | 399 | # TTL should not be 9 or 10 400 | ttl = redis.ttl('{}:knobs'.format(key)) 401 | assert (ttl >= 9) and (ttl <= 10) 402 | assert int(tokens) == 58 403 | assert int(refreshed) == start_time 404 | assert int(rps) == 5 405 | assert int(burst) == 2 406 | assert int(window) == 6 407 | 408 | def test_delete_reset(self, redis): 409 | """Test throttle delete.""" 410 | 411 | throttle_name = 'test' 412 | self._fake_work(throttle_name, 5, 1, 5) 413 | 414 | limitlion.throttle_reset(throttle_name) 415 | key = self._get_redis_key(throttle_name) 416 | assert redis.exists(key + ':knobs') == 0 417 | 418 | def test_delete_throttle(self, redis): 419 | """Test throttle delete.""" 420 | 421 | throttle_name = 'test' 422 | self._fake_work(throttle_name, 5, 1, 5) 423 | 424 | 
limitlion.throttle_delete(throttle_name) 425 | key = self._get_redis_key(throttle_name) 426 | assert redis.exists(key) == 0 427 | assert redis.exists(key + ':knobs') == 0 428 | 429 | def test_throttle_wait(self, redis): 430 | """Test wait helper method.""" 431 | 432 | throttle_name = 'test' 433 | throttle_func = limitlion.throttle_wait(throttle_name, rps=123) 434 | allowed, tokens, sleep = throttle_func(requested_tokens=2) 435 | 436 | assert allowed is True 437 | assert int(tokens) == 613 438 | 439 | def test_throttle_wait_with_max_wait(self, redis): 440 | """Test wait helper method.""" 441 | 442 | start_time = int(time.time()) 443 | self._freeze_redis_time(redis, start_time, 0) 444 | throttle_name = 'test' 445 | max_wait = 0.1 446 | 447 | limitlion.throttle_set(throttle_name, 1, 1, 1) 448 | self._fake_work(throttle_name) 449 | throttle_func = limitlion.throttle_wait( 450 | throttle_name, rps=1, max_wait=max_wait 451 | ) 452 | allowed, tokens, sleep = throttle_func() 453 | assert time.time() - start_time >= max_wait 454 | assert allowed is False 455 | --------------------------------------------------------------------------------