├── .credo.exs ├── .formatter.exs ├── .github ├── ISSUE_TEMPLATE │ └── bug_report.md ├── dependabot.yml └── workflows │ ├── ci.yml │ └── publish.yml ├── .gitignore ├── .tool-versions ├── BENCHMARKS.md ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── CREDITS.md ├── LICENSE.md ├── README.md ├── RELEASE.md ├── bench └── base.exs ├── compose.yml ├── lib └── hammer │ ├── redis.ex │ └── redis │ ├── fix_window.ex │ ├── leaky_bucket.ex │ └── token_bucket.ex ├── mix.exs ├── mix.lock ├── publish.yml └── test ├── hammer ├── redis │ ├── leaky_bucket_test.exs │ └── token_bucket_test.exs └── redis_test.exs └── test_helper.exs /.credo.exs: -------------------------------------------------------------------------------- 1 | # Last updated for credo 1.6.1 2 | %{ 3 | # 4 | # You can have as many configs as you like in the `configs:` field. 5 | configs: [ 6 | %{ 7 | # 8 | # Run any config using `mix credo -C `. If no config name is given 9 | # "default" is used. 10 | # 11 | name: "default", 12 | # 13 | # These are the files included in the analysis: 14 | files: %{ 15 | # 16 | # You can give explicit globs or simply directories. 17 | # In the latter case `**/*.{ex,exs}` will be used. 18 | # 19 | included: [ 20 | "lib/", 21 | "src/", 22 | "test/", 23 | "web/", 24 | "apps/*/lib/", 25 | "apps/*/src/", 26 | "apps/*/test/", 27 | "apps/*/web/" 28 | ], 29 | excluded: [~r"/_build/", ~r"/deps/", ~r"/node_modules/"] 30 | }, 31 | # 32 | # Load and configure plugins here: 33 | # 34 | plugins: [], 35 | # 36 | # If you create your own checks, you must specify the source files for 37 | # them here, so they can be loaded by Credo before running the analysis. 38 | # 39 | requires: [], 40 | # 41 | # If you want to enforce a style guide and need a more traditional linting 42 | # experience, you can change `strict` to `true` below: 43 | # 44 | strict: true, 45 | # 46 | # To modify the timeout for parsing files, change this value: 47 | # 48 | parse_timeout: 5000, 49 | # 50 | # If you want to use uncolored output by default, you can change `color` 51 | # to `false` below: 52 | # 53 | color: true, 54 | # 55 | # You can customize the parameters of any check by adding a second element 56 | # to the tuple. 57 | # 58 | # To disable a check put `false` as second element: 59 | # 60 | # {Credo.Check.Design.DuplicatedCode, false} 61 | # 62 | checks: %{ 63 | enabled: [ 64 | # 65 | ## Consistency Checks 66 | # 67 | {Credo.Check.Consistency.ExceptionNames, []}, 68 | {Credo.Check.Consistency.LineEndings, []}, 69 | {Credo.Check.Consistency.ParameterPatternMatching, []}, 70 | {Credo.Check.Consistency.SpaceAroundOperators, []}, 71 | {Credo.Check.Consistency.SpaceInParentheses, []}, 72 | {Credo.Check.Consistency.TabsOrSpaces, []}, 73 | 74 | # 75 | ## Design Checks 76 | # 77 | # You can customize the priority of any check 78 | # Priority values are: `low, normal, high, higher` 79 | # 80 | {Credo.Check.Design.AliasUsage, 81 | priority: :low, if_nested_deeper_than: 2, if_called_more_often_than: 0}, 82 | {Credo.Check.Design.SkipTestWithoutComment, []}, 83 | # You can also customize the exit_status of each check. 84 | # If you don't want TODO comments to cause `mix credo` to fail, just 85 | # set this value to 0 (zero). 
86 | # 87 | {Credo.Check.Design.TagTODO, [exit_status: 2]}, 88 | {Credo.Check.Design.TagFIXME, []}, 89 | 90 | # 91 | ## Readability Checks 92 | # 93 | {Credo.Check.Readability.AliasAs, 94 | files: %{excluded: ["lib/*_web.ex", "test/support/conn_case.ex"]}}, 95 | {Credo.Check.Readability.AliasOrder, []}, 96 | {Credo.Check.Readability.BlockPipe, []}, 97 | {Credo.Check.Readability.FunctionNames, []}, 98 | {Credo.Check.Readability.ImplTrue, []}, 99 | {Credo.Check.Readability.LargeNumbers, []}, 100 | {Credo.Check.Readability.MaxLineLength, priority: :low, max_length: 120}, 101 | {Credo.Check.Readability.ModuleAttributeNames, []}, 102 | {Credo.Check.Readability.ModuleDoc, []}, 103 | {Credo.Check.Readability.ModuleNames, []}, 104 | {Credo.Check.Readability.MultiAlias, []}, 105 | {Credo.Check.Readability.ParenthesesInCondition, []}, 106 | {Credo.Check.Readability.ParenthesesOnZeroArityDefs, []}, 107 | {Credo.Check.Readability.PipeIntoAnonymousFunctions, []}, 108 | {Credo.Check.Readability.PredicateFunctionNames, []}, 109 | {Credo.Check.Readability.PreferImplicitTry, []}, 110 | {Credo.Check.Readability.RedundantBlankLines, []}, 111 | {Credo.Check.Readability.Semicolons, []}, 112 | {Credo.Check.Readability.SinglePipe, []}, 113 | {Credo.Check.Readability.SpaceAfterCommas, []}, 114 | {Credo.Check.Readability.Specs, 115 | files: %{ 116 | excluded: [ 117 | "lib/*_web.ex", 118 | "lib/*_web/controllers/*_controller.ex", 119 | "lib/*_web/graphql/*/resolvers.ex" 120 | ] 121 | }}, 122 | {Credo.Check.Readability.StrictModuleLayout, []}, 123 | {Credo.Check.Readability.StringSigils, []}, 124 | {Credo.Check.Readability.TrailingBlankLine, []}, 125 | {Credo.Check.Readability.TrailingWhiteSpace, []}, 126 | {Credo.Check.Readability.UnnecessaryAliasExpansion, []}, 127 | {Credo.Check.Readability.VariableNames, []}, 128 | {Credo.Check.Readability.WithCustomTaggedTuple, []}, 129 | {Credo.Check.Readability.WithSingleClause, []}, 130 | 131 | # 132 | ## Refactoring Opportunities 133 | # 134 | {Credo.Check.Refactor.Apply, []}, 135 | {Credo.Check.Refactor.CondStatements, []}, 136 | {Credo.Check.Refactor.CyclomaticComplexity, []}, 137 | {Credo.Check.Refactor.FilterFilter, []}, 138 | {Credo.Check.Refactor.FilterReject, []}, 139 | {Credo.Check.Refactor.FunctionArity, []}, 140 | {Credo.Check.Refactor.IoPuts, []}, 141 | {Credo.Check.Refactor.LongQuoteBlocks, []}, 142 | {Credo.Check.Refactor.MapJoin, []}, 143 | {Credo.Check.Refactor.MapMap, []}, 144 | {Credo.Check.Refactor.MatchInCondition, []}, 145 | {Credo.Check.Refactor.NegatedConditionsInUnless, []}, 146 | {Credo.Check.Refactor.NegatedConditionsWithElse, []}, 147 | {Credo.Check.Refactor.Nesting, []}, 148 | {Credo.Check.Refactor.PipeChainStart, []}, 149 | {Credo.Check.Refactor.RedundantWithClauseResult, []}, 150 | {Credo.Check.Refactor.RejectFilter, []}, 151 | {Credo.Check.Refactor.RejectReject, []}, 152 | {Credo.Check.Refactor.UnlessWithElse, []}, 153 | {Credo.Check.Refactor.WithClauses, []}, 154 | 155 | # 156 | ## Warnings 157 | # 158 | {Credo.Check.Warning.ApplicationConfigInModuleAttribute, []}, 159 | {Credo.Check.Warning.BoolOperationOnSameValues, []}, 160 | {Credo.Check.Warning.ExpensiveEmptyEnumCheck, []}, 161 | {Credo.Check.Warning.IExPry, []}, 162 | {Credo.Check.Warning.IoInspect, []}, 163 | {Credo.Check.Warning.MapGetUnsafePass, []}, 164 | {Credo.Check.Warning.MixEnv, []}, 165 | {Credo.Check.Warning.OperationOnSameValues, []}, 166 | {Credo.Check.Warning.OperationWithConstantResult, []}, 167 | {Credo.Check.Warning.RaiseInsideRescue, []}, 168 | 
{Credo.Check.Warning.SpecWithStruct, []}, 169 | {Credo.Check.Warning.UnsafeExec, []}, 170 | {Credo.Check.Warning.UnsafeToAtom, []}, 171 | {Credo.Check.Warning.UnusedEnumOperation, []}, 172 | {Credo.Check.Warning.UnusedFileOperation, []}, 173 | {Credo.Check.Warning.UnusedKeywordOperation, []}, 174 | {Credo.Check.Warning.UnusedListOperation, []}, 175 | {Credo.Check.Warning.UnusedPathOperation, []}, 176 | {Credo.Check.Warning.UnusedRegexOperation, []}, 177 | {Credo.Check.Warning.UnusedStringOperation, []}, 178 | {Credo.Check.Warning.UnusedTupleOperation, []}, 179 | {Credo.Check.Warning.WrongTestFileExtension, []} 180 | ], 181 | disabled: [ 182 | # 183 | # Controversial and experimental checks (opt-in, just move the check to `:enabled` 184 | # and be sure to use `mix credo --strict` to see low priority checks if you set 185 | # `strict: false` above) 186 | # 187 | {Credo.Check.Consistency.MultiAliasImportRequireUse, []}, 188 | {Credo.Check.Consistency.UnusedVariableNames, []}, 189 | {Credo.Check.Design.DuplicatedCode, []}, 190 | {Credo.Check.Readability.SeparateAliasRequire, []}, 191 | {Credo.Check.Readability.SingleFunctionToBlockPipe, []}, 192 | {Credo.Check.Refactor.ABCSize, []}, 193 | {Credo.Check.Refactor.AppendSingleItem, []}, 194 | {Credo.Check.Refactor.DoubleBooleanNegation, []}, 195 | {Credo.Check.Refactor.ModuleDependencies, []}, 196 | {Credo.Check.Refactor.NegatedIsNil, []}, 197 | {Credo.Check.Refactor.VariableRebinding, []}, 198 | {Credo.Check.Warning.LeakyEnvironment, []} 199 | 200 | # 201 | # Custom checks can be created using `mix credo.gen.check`. 202 | # 203 | ] 204 | } 205 | } 206 | ] 207 | } 208 | -------------------------------------------------------------------------------- /.formatter.exs: -------------------------------------------------------------------------------- 1 | # Used by "mix format" 2 | [ 3 | inputs: ["{mix,.formatter}.exs", "{lib,test}/**/*.{ex,exs}"] 4 | ] 5 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: bug 6 | assignees: epinault 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | ** Provide the following details 14 | 15 | - Elixir version (elixir -v): 16 | - Erlang version (erl -v): 17 | - Redis version (redis-server -v): 18 | - Operating system: 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Actual behavior** 24 | A clear and concise description of what actually happens. 
25 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | # Maintain dependencies for GitHub Actions 4 | - package-ecosystem: "github-actions" 5 | directory: "/" 6 | schedule: 7 | interval: "weekly" 8 | 9 | - package-ecosystem: "mix" 10 | directory: "/" 11 | schedule: 12 | interval: "weekly" -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - master 7 | push: 8 | branches: 9 | - master 10 | 11 | jobs: 12 | test: 13 | runs-on: ubuntu-latest 14 | env: 15 | MIX_ENV: test 16 | 17 | services: 18 | redis: 19 | # Docker Hub image 20 | image: redis 21 | ports: 22 | - 6379:6379 23 | # Set health checks to wait until redis has started 24 | options: >- 25 | --health-cmd "redis-cli ping" 26 | --health-interval 10s 27 | --health-timeout 5s 28 | --health-retries 5 29 | 30 | strategy: 31 | # https://hexdocs.pm/elixir/compatibility-and-deprecations.html#between-elixir-and-erlang-otp 32 | matrix: 33 | elixir: [1.15, 1.16, 1.17] 34 | otp: [25, 26] 35 | include: 36 | - elixir: 1.14 37 | otp: 25 38 | - elixir: 1.17 39 | otp: 27 40 | - elixir: 1.18 41 | otp: 27 42 | steps: 43 | - uses: actions/checkout@v4 44 | 45 | - uses: erlef/setup-beam@v1 46 | with: 47 | otp-version: ${{matrix.otp}} 48 | elixir-version: ${{matrix.elixir}} 49 | 50 | - uses: actions/cache@v4 51 | with: 52 | path: | 53 | deps 54 | _build 55 | key: test-otp-${{ matrix.otp }}-elixir-${{ matrix.elixir }}-ref-${{ github.head_ref || github.ref }}-mix-${{ hashFiles('**/mix.lock') }} 56 | restore-keys: | 57 | test-otp-${{ matrix.otp }}-elixir-${{ matrix.elixir }}-ref-${{ github.head_ref || github.ref }}-mix- 58 | test-otp-${{ matrix.otp }}-elixir-${{ matrix.elixir }}-ref-refs/heads/master-mix- 59 | 60 | - run: mix deps.get 61 | - run: mix deps.unlock --check-unused 62 | - run: mix deps.compile 63 | - run: mix compile --warnings-as-errors 64 | - run: mix credo --strict --format=oneline 65 | - run: mix test --warnings-as-errors --cover --include redis --include slow 66 | 67 | format: 68 | runs-on: ubuntu-latest 69 | steps: 70 | - uses: actions/checkout@v4 71 | - uses: erlef/setup-beam@v1 72 | with: 73 | elixir-version: 1 74 | otp-version: 27 75 | - run: mix format --check-formatted 76 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish 2 | 3 | on: 4 | push: 5 | tags: 6 | - "*" 7 | 8 | jobs: 9 | publish: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Check out 13 | uses: actions/checkout@v4 14 | 15 | - name: Publish package to hex.pm 16 | uses: hipcall/github_action_publish_hex@v1 17 | env: 18 | HEX_API_KEY: ${{ secrets.HEX_API_KEY }} 19 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # The directory Mix will write compiled artifacts to. 2 | /_build 3 | 4 | # If you run "mix test --cover", coverage assets end up here. 5 | /cover 6 | 7 | # The directory Mix downloads your dependencies sources to. 8 | /deps 9 | 10 | # Where 3rd-party dependencies like ExDoc output generated docs. 
11 | /doc 12 | 13 | # Ignore .fetch files in case you like to edit your project deps locally. 14 | /.fetch 15 | 16 | # If the VM crashes, it generates a dump, let's ignore it too. 17 | erl_crash.dump 18 | 19 | # Also ignore archive artifacts (built via "mix archive.build"). 20 | *.ez 21 | -------------------------------------------------------------------------------- /.tool-versions: -------------------------------------------------------------------------------- 1 | elixir 1.18.2-otp-27 2 | erlang 27.1.2 3 | -------------------------------------------------------------------------------- /BENCHMARKS.md: -------------------------------------------------------------------------------- 1 | ❯ MIX_ENV=bench LIMIT=1 SCALE=5000 RANGE=10000 PARALLEL=500 mix run bench/base.exs 2 | parallel: 500 3 | limit: 1 4 | scale: 5000 5 | range: 10000 6 | 7 | Operating System: macOS 8 | CPU Information: Apple M1 Max 9 | Number of Available Cores: 10 10 | Available memory: 32 GB 11 | Elixir 1.17.3 12 | Erlang 27.1.2 13 | JIT enabled: true 14 | 15 | Benchmark suite executing with the following configuration: 16 | warmup: 14 s 17 | time: 6 s 18 | memory time: 0 ns 19 | reduction time: 0 ns 20 | parallel: 500 21 | inputs: none specified 22 | Estimated total run time: 1 min 23 | 24 | Benchmarking hammer_redis_fix_window ... 25 | Benchmarking hammer_redis_leaky_bucket ... 26 | Benchmarking hammer_redis_token_bucket ... 27 | Calculating statistics... 28 | Formatting results... 29 | 30 | Name ips average deviation median 99th % 31 | hammer_redis_fix_window 232.75 4.30 ms ±21.83% 4.31 ms 6.51 ms 32 | hammer_redis_token_bucket 67.46 14.82 ms ±13.57% 14.25 ms 19.66 ms 33 | hammer_redis_leaky_bucket 61.71 16.20 ms ±54.15% 15.67 ms 31.44 ms 34 | 35 | Comparison: 36 | hammer_redis_fix_window 232.75 37 | hammer_redis_token_bucket 67.46 - 3.45x slower +10.53 ms 38 | hammer_redis_leaky_bucket 61.71 - 3.77x slower +11.91 ms 39 | 40 | Extended statistics: 41 | 42 | Name minimum maximum sample size mode 43 | hammer_redis_fix_window 1.09 ms 8.88 ms 698.34 K 4.33 ms 44 | hammer_redis_token_bucket 1.37 ms 37.16 ms 202.54 K 13.60 ms, 13.19 ms 45 | hammer_redis_leaky_bucket 3.52 ms 197.32 ms 185.33 K15.38 ms, 15.62 ms, 15.30 -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## 7.0.2 - 2025-02-10 4 | 5 | ### Changed 6 | 7 | - Fix incorrect timeout typespec 8 | 9 | ## 7.0.1 - 2025-02-07 10 | 11 | - Fix leaky bucket algorithm to use the correct formula on deny 12 | 13 | ## 7.0.0 - 2025-02-06 14 | 15 | - Release candidate for 7.0.0. See [upgrade to v7](https://hexdocs.pm/hammer/upgrade-v7.html) for upgrade instructions. 16 | 17 | ## 7.0.0-rc.1 (2024-12-18) 18 | 19 | ### Changed 20 | 21 | - Added `:algorithm` option to the backend with support for: 22 | - `:fix_window` (default) - Fixed time window rate limiting 23 | - `:leaky_bucket` - Constant rate limiting with burst capacity 24 | - `:token_bucket` - Token-based rate limiting with burst capacity 25 | - Add benchmarks file and run them with `bench` 26 | 27 | ## 7.0.0-rc.0 (2024-12-06) 28 | 29 | ### Changed 30 | 31 | - Conform to new Hammer API 32 | - Remove Poolboy as it introduces unnecessary blocking. 
33 | 34 | ## 6.2.0 (2024-12-04) 35 | 36 | ### Changed 37 | 38 | - Package updates 39 | - Add config to customize the redis prefix 40 | - Deprecate Elixir 1.12 as this are no longer supported 41 | 42 | ## 6.1.2 (2022-11-11) 43 | 44 | ### Changed 45 | 46 | - Applied credo suggestions 47 | - Update dependencies 48 | 49 | ## 6.1.1 (2022-11-11) 50 | 51 | ### Changed 52 | 53 | - package update and ownership transferred 54 | 55 | ## 6.1.0 (2019-09-03) 56 | 57 | ### Changed 58 | 59 | - Return actual count upon bucket creation (thanks to @davelively14, https://github.com/ExHammer/hammer-backend-redis/pull/16) 60 | 61 | 62 | ## 6.0.1 (2019-07-13) 63 | 64 | ### Added 65 | 66 | - Accept an optional `redis_url` option 67 | 68 | ### Changed 69 | 70 | - Updated dependencies in test environment (thanks to @ono, https://github.com/ExHammer/hammer-backend-redis/pull/14) 71 | 72 | ### Fixed 73 | 74 | - Fixed a crash in `delete_buckets` (thanks to @ono, https://github.com/ExHammer/hammer-backend-redis/pull/15) 75 | 76 | 77 | ## 6.0.0 (2018-10-13) 78 | 79 | ### Changed 80 | 81 | - Raise an error if `expiry_ms` is not configured explicitly 82 | - Update the `redix` dependency to `~> 0.8` 83 | 84 | 85 | ### Fixed 86 | 87 | - Actually honor `:redis_config`, as is claimed in the documentation 88 | 89 | ## 5.0.0 (2018-10-13) 90 | 91 | ### Changed 92 | 93 | - Update to the new custom-increment api 94 | 95 | ## 4.0.3 (2018-05-08) 96 | 97 | ### Fixed 98 | 99 | - Fix a rare crash, again related to race-conditions 100 | (https://github.com/ExHammer/hammer-backend-redis/issues/11#issuecomment-387202359) 101 | 102 | ## 4.0.2 (2018-04-27) 103 | 104 | ### Fixed 105 | 106 | - Fixed race-condition, really this time 107 | (https://github.com/ExHammer/hammer-backend-redis/issues/11) 108 | 109 | 110 | ## 4.0.1 (2018-04-24) 111 | 112 | ### Fixed 113 | 114 | - Fixed a race-condition that could cause crashes 115 | (https://github.com/ExHammer/hammer-backend-redis/issues/11) 116 | 117 | 118 | ## 4.0.0 (2018-04-23) 119 | 120 | ### Changed 121 | 122 | - Update to `Hammer 4.0` 123 | 124 | 125 | ## 3.0.0 (2018-02-20) 126 | 127 | ### Changed 128 | 129 | - Require elixir >= 1.6 130 | 131 | 132 | ## 2.0.0 (2017-09-24) 133 | 134 | ### Changed 135 | 136 | - Updated to new Hammer API 137 | 138 | 139 | ## 1.0.0 (2017-08-27) 140 | 141 | ### Changed 142 | 143 | - `hammer_backend_redis` now explicitly depends on `hammer` 144 | - Implement the `Hammer.Backend` behaviour 145 | - Alias `redix_config` to `redis_config` in the config list, for convenience 146 | 147 | 148 | ## 0.1.0 (2017-07-31) 149 | 150 | Initial release. 151 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Code of Conduct 2 | 3 | As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities. 4 | 5 | We are committed to making participation in this project a harassment-free experience for everyone, regardless of level of experience, gender, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, or nationality. 
6 | 7 | Examples of unacceptable behavior by participants include: 8 | 9 | * The use of sexualized language or imagery 10 | * Personal attacks 11 | * Trolling or insulting/derogatory comments 12 | * Public or private harassment 13 | * Publishing other's private information, such as physical or electronic addresses, without explicit permission 14 | * Other unethical or unprofessional conduct. 15 | 16 | Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers commit themselves to fairly and consistently applying these principles to every aspect of managing this project. Project maintainers who do not follow or enforce the Code of Conduct may be permanently removed from the project team. 17 | 18 | This code of conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. 19 | 20 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by opening an issue or contacting one or more of the project maintainers. 21 | 22 | This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org), version 1.2.0, available at [https://www.contributor-covenant.org/version/1/2/0/code-of-conduct/](https://www.contributor-covenant.org/version/1/2/0/code-of-conduct/) 23 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to Hammer Backend Redis 2 | 3 | Please take a moment to review this document in order to make the contribution 4 | process easy and effective for everyone involved! 5 | Also make sure you read our [Code of Conduct](CODE_OF_CONDUCT.md) that outlines our commitment towards an open and welcoming environment. 6 | 7 | ## Using the issue tracker 8 | 9 | Use the issues tracker for: 10 | 11 | * [Bug reports](#bug-reports) 12 | * [Submitting pull requests](#pull-requests) 13 | 14 | We do our best to keep the issue tracker tidy and organized, making it useful 15 | for everyone. For example, we classify open issues per perceived difficulty, 16 | making it easier for developers to [contribute to Hammer Backend Redis](#pull-requests). 17 | 18 | ## Bug reports 19 | 20 | A bug is either a _demonstrable problem_ that is caused by the code in the repository, 21 | or indicate missing, unclear, or misleading documentation. Good bug reports are extremely 22 | helpful - thank you! 23 | 24 | Guidelines for bug reports: 25 | 26 | 1. **Use the GitHub issue search** — check if the issue has already been 27 | reported. 28 | 29 | 2. **Check if the issue has been fixed** — try to reproduce it using the 30 | `master` branch in the repository. 31 | 32 | 3. **Isolate and report the problem** — ideally create a reduced test 33 | case. 34 | 35 | Please try to be as detailed as possible in your report. Include information about 36 | your Operating System, as well as your Erlang, Elixir and Hammer Backend Redis versions. Please provide steps to 37 | reproduce the issue as well as the outcome you were expecting! All these details 38 | will help developers to fix any potential bugs. 39 | 40 | Example: 41 | 42 | > Short and descriptive example bug report title 43 | > 44 | > A summary of the issue and the environment in which it occurs. 
If suitable, 45 | > include the steps required to reproduce the bug. 46 | > 47 | > 1. This is the first step 48 | > 2. This is the second step 49 | > 3. Further steps, etc. 50 | > 51 | > `` - a link to the reduced test case (e.g. a GitHub Gist) 52 | > 53 | > Any other information you want to share that is relevant to the issue being 54 | > reported. This might include the lines of code that you have identified as 55 | > causing the bug, and potential solutions (and your opinions on their 56 | > merits). 57 | 58 | ## Contributing Documentation 59 | 60 | Code documentation (`@doc`, `@moduledoc`, `@typedoc`) has a special convention: 61 | the first paragraph is considered to be a short summary. 62 | 63 | For functions, macros and callbacks say what it will do. For example write 64 | something like: 65 | 66 | ```elixir 67 | @doc """ 68 | Marks the given value as HTML safe. 69 | """ 70 | def safe({:safe, value}), do: {:safe, value} 71 | ``` 72 | 73 | For modules, protocols and types say what it is. For example write 74 | something like: 75 | 76 | ```elixir 77 | defmodule MyModule do 78 | @moduledoc """ 79 | Conveniences for working HTML strings and templates. 80 | ... 81 | """ 82 | ``` 83 | 84 | Keep in mind that the first paragraph might show up in a summary somewhere, long 85 | texts in the first paragraph create very ugly summaries. As a rule of thumb 86 | anything longer than 80 characters is too long. 87 | 88 | Try to keep unnecessary details out of the first paragraph, it's only there to 89 | give a user a quick idea of what the documented "thing" does/is. The rest of the 90 | documentation string can contain the details, for example when a value and when 91 | `nil` is returned. 92 | 93 | If possible include examples, preferably in a form that works with doctests. 94 | This makes it easy to test the examples so that they don't go stale and examples 95 | are often a great help in explaining what a function does. 96 | 97 | ## Pull requests 98 | 99 | Good pull requests - patches, improvements, new features - are a fantastic 100 | help. They should remain focused in scope and avoid containing unrelated 101 | commits. 102 | 103 | **IMPORTANT**: By submitting a patch, you agree that your work will be 104 | licensed under the license used by the project. 105 | 106 | If you have any large pull request in mind (e.g. implementing features, 107 | refactoring code, etc), **please ask first** otherwise you risk spending 108 | a lot of time working on something that the project's developers might 109 | not want to merge into the project. 110 | 111 | Please adhere to the coding conventions in the project (indentation, 112 | accurate comments, etc.) and don't forget to add your own tests and 113 | documentation. When working with git, we recommend the following process 114 | in order to craft an excellent pull request: 115 | 116 | 1. [Fork](https://help.github.com/articles/fork-a-repo/) the project, clone your fork, 117 | and configure the remotes: 118 | 119 | ```bash 120 | # Clone your fork of the repo into the current directory 121 | git clone https://github.com//hammer-backend-redis 122 | 123 | # Navigate to the newly cloned directory 124 | cd hammer-backend-redis 125 | 126 | # Assign the original repo to a remote called "upstream" 127 | git remote add upstream https://github.com/ExHammer/hammer-backend-redis 128 | ``` 129 | 130 | 2. 
If you cloned a while ago, get the latest changes from upstream, and update your fork: 131 | 132 | ```bash 133 | git checkout master 134 | git pull upstream master 135 | git push 136 | ``` 137 | 138 | 3. Create a new topic branch (off of `master`) to contain your feature, change, 139 | or fix. 140 | 141 | **IMPORTANT**: Making changes in `master` is discouraged. You should always 142 | keep your local `master` in sync with upstream `master` and make your 143 | changes in topic branches. 144 | 145 | ```bash 146 | git checkout -b 147 | ``` 148 | 149 | 4. Commit your changes in logical chunks. Keep your commit messages organized, 150 | with a short description in the first line and more detailed information on 151 | the following lines. Feel free to use Git's 152 | [interactive rebase](https://help.github.com/articles/about-git-rebase/) 153 | feature to tidy up your commits before making them public. 154 | 155 | 5. Make sure all the tests are still passing. 156 | 157 | ```bash 158 | mix test 159 | ``` 160 | 161 | 6. Push your topic branch up to your fork: 162 | 163 | ```bash 164 | git push origin 165 | ``` 166 | 167 | 7. [Open a Pull Request](https://help.github.com/articles/about-pull-requests/) 168 | with a clear title and description. 169 | 170 | 8. If you haven't updated your pull request for a while, you should consider 171 | rebasing on master and resolving any conflicts. 172 | 173 | **IMPORTANT**: _Never ever_ merge upstream `master` into your branches. You 174 | should always `git rebase` on `master` to bring your changes up to date when 175 | necessary. 176 | 177 | ```bash 178 | git checkout master 179 | git pull upstream master 180 | git checkout 181 | git rebase master 182 | ``` 183 | 184 | Thank you for your contributions! 185 | 186 | ## Guides 187 | 188 | These Guides aim to be inclusive. We use "we" and "our" instead of "you" and 189 | "your" to foster this sense of inclusion. 190 | 191 | Ideally there is something for everybody in each guide, from beginner to expert. 192 | This is hard, maybe impossible. When we need to compromise, we do so on behalf 193 | of beginning users because expert users have more tools at their disposal to 194 | help themselves. 195 | 196 | The general pattern we use for presenting information is to first introduce a 197 | small, discrete topic, then write a small amount of code to demonstrate the 198 | concept, then verify that the code worked. 199 | 200 | In this way, we build from small, easily digestible concepts into more complex 201 | ones. The shorter this cycle is, as long as the information is still clear and 202 | complete, the better. 203 | 204 | For formatting the guides: 205 | 206 | - We use the `elixir` code fence for all module code. 207 | - We use the `iex` for IEx sessions. 208 | - We use the `console` code fence for shell commands. 209 | - We use the `html` code fence for html templates, even if there is elixir code 210 | in the template. 211 | - We use backticks for filenames and directory paths. 212 | - We use backticks for module names, function names, and variable names. 213 | - Documentation line length should hard wrapped at around 100 characters if possible. 
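Putting these conventions together, a documentation block for a hypothetical module could look like the following (the `MyApp.Greeter` module and its `hello/1` function are made up purely for illustration; note the short one-line summary and the doctest-friendly example):

```elixir
defmodule MyApp.Greeter do
  @moduledoc """
  Conveniences for greeting users by name.
  """

  @doc """
  Returns a greeting for the given `name`.

  ## Examples

      iex> MyApp.Greeter.hello("world")
      "Hello, world!"

  """
  def hello(name), do: "Hello, #{name}!"
end
```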
214 | -------------------------------------------------------------------------------- /CREDITS.md: -------------------------------------------------------------------------------- 1 | Credits in no special order: 2 | 3 | - [June Kelly](https://github.com/JuneKelly) 4 | - [Kian-Meng Ang](https://github.com/kianmeng) 5 | - [Matt Pinkston](https://github.com/mpinkston) 6 | - [Mariano Vallés](https://github.com/mrnovalles) 7 | - [Chaouki Dhib](https://github.com/chaodhib) 8 | - [Tatsuya Ono](https://github.com/ono) 9 | - [Dave Lively](https://github.com/davelively14) 10 | - [Emmanuel Pinault](https://github.com/epinault) 11 | - [ruslandoga](https://github.com/ruslandoga) 12 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | =============== 2 | The MIT License 3 | =============== 4 | 5 | Copyright 2023 June Kelly 6 | 7 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 8 | 9 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 10 | 11 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 12 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Hammer.Redis 2 | 3 | [![Build Status](https://github.com/ExHammer/hammer-backend-redis/actions/workflows/ci.yml/badge.svg)](https://github.com/ExHammer/hammer-backend-redis/actions/workflows/ci.yml) 4 | [![Hex.pm](https://img.shields.io/hexpm/v/hammer_backend_redis.svg)](https://hex.pm/packages/hammer_backend_redis) 5 | [![Documentation](https://img.shields.io/badge/documentation-gray)](https://hexdocs.pm/hammer_backend_redis) 6 | [![Total Download](https://img.shields.io/hexpm/dt/hammer_backend_redis.svg)](https://hex.pm/packages/hammer_backend_redis) 7 | [![License](https://img.shields.io/hexpm/l/hammer_backend_redis.svg)](https://github.com/ExHammer/hammer-backend-redis/blob/master/LICENSE.md) 8 | 9 | A Redis backend for the [Hammer](https://github.com/ExHammer/hammer) rate-limiter. 10 | 11 | This backend is a thin [Redix](https://hex.pm/packages/redix) wrapper. A single connection is used per rate-limiter. It should be enough for most use-cases since packets for rate limiting requests are short (i.e. no head of line blocking) and Redis is OK with [pipelining](https://redis.io/learn/operate/redis-at-scale/talking-to-redis/client-performance-improvements#pipelining) (i.e. we don't block awaiting replies). Consider benchmarking before introducing more connections since TCP performance might be unintuitive. 
For possible pooling approaches, see Redix docs on [pooling](https://hexdocs.pm/redix/real-world-usage.html#name-based-pool) and also [PartitionSupervisor.](https://hexdocs.pm/elixir/1.17.3/PartitionSupervisor.html) Do not use poolboy or db_connection-like pools since they practically disable pipelining which leads to worse connection utilisation and worse performance. 12 | 13 | The algorithm we are using is the first method described (called "bucketing") in [Rate Limiting with Redis](https://youtu.be/CRGPbCbRTHA?t=753). 14 | In other sources it's sometimes called a "fixed window counter". 15 | 16 | **TODO:** document ttl issues if servers are misconfigured 17 | 18 | ## Installation 19 | 20 | Hammer-backend-redis 21 | is [available in Hex](https://hex.pm/packages/hammer_backend_redis), the package 22 | can be installed by adding `hammer_backend_redis` to your list of dependencies in `mix.exs`: 23 | 24 | ```elixir 25 | def deps do 26 | [ 27 | {:hammer_backend_redis, "~> 7.0"} 28 | ] 29 | end 30 | ``` 31 | 32 | ## Usage 33 | 34 | Define the rate limiter: 35 | 36 | ```elixir 37 | defmodule MyApp.RateLimit do 38 | use Hammer, backend: Hammer.Redis 39 | end 40 | ``` 41 | 42 | And add it to your app's supervision tree: 43 | 44 | ```elixir 45 | children = [ 46 | {MyApp.RateLimit, url: "redis://localhost:6379"} 47 | ] 48 | ``` 49 | 50 | And that's it, calls to `MyApp.RateLimit.hit/3` and so on will use Redis to store 51 | the rate-limit counters. See the [documentation](https://hexdocs.pm/hammer_backend_redis/Hammer.Redis.html) for more details. 52 | 53 | ## Run tests locally 54 | 55 | You need a running Redis instance. One can be started locally using `docker compose up -d redis`. 56 | See the [compose.yml](./compose.yml) for more details. 57 | 58 | ## Getting Help 59 | 60 | If you're having trouble, open an issue on this repo. 61 | -------------------------------------------------------------------------------- /RELEASE.md: -------------------------------------------------------------------------------- 1 | # Release Instructions 2 | 3 | 1. Check related deps for required version bumps and compatibility 4 | 2. Bump version in related files below 5 | 3. Bump external dependency version in related external files below 6 | 4. Run tests: 7 | - `mix test` in the root folder 8 | - `mix credo` in the root folder 9 | 5. Commit, push code 10 | 6. Publish `hammer-backend-redis` packages and docs 11 | 12 | -------------------------------------------------------------------------------- /bench/base.exs: -------------------------------------------------------------------------------- 1 | # MIX_ENV=bench LIMIT=1 SCALE=5000 RANGE=10000 PARALLEL=500 mix run bench/base.exs 2 | # inspired by https://github.com/PragTob/rate_limit/blob/master/bench/basic.exs 3 | profile?
= !!System.get_env("PROFILE") 4 | parallel = String.to_integer(System.get_env("PARALLEL", "1")) 5 | limit = String.to_integer(System.get_env("LIMIT", "1000000")) 6 | scale = String.to_integer(System.get_env("SCALE", "60000")) 7 | range = String.to_integer(System.get_env("RANGE", "1000")) 8 | 9 | IO.puts(""" 10 | parallel: #{parallel} 11 | limit: #{limit} 12 | scale: #{scale} 13 | range: #{range} 14 | """) 15 | 16 | # TODO: clean up Redis keys before/after each scenario 17 | defmodule RedisFixWindowRateLimiter do 18 | use Hammer, backend: Hammer.Redis, algorithm: :fix_window 19 | end 20 | 21 | defmodule RedisLeakyBucketRateLimiter do 22 | use Hammer, backend: Hammer.Redis, algorithm: :leaky_bucket 23 | end 24 | 25 | defmodule RedisTokenBucketRateLimiter do 26 | use Hammer, backend: Hammer.Redis, algorithm: :token_bucket 27 | end 28 | 29 | RedisFixWindowRateLimiter.start_link([]) 30 | RedisTokenBucketRateLimiter.start_link([]) 31 | RedisLeakyBucketRateLimiter.start_link([]) 32 | 33 | Benchee.run( 34 | %{ 35 | "hammer_redis_fix_window" => fn key -> RedisFixWindowRateLimiter.hit("sites:#{key}", scale, limit) end, 36 | "hammer_redis_leaky_bucket" => fn key -> RedisLeakyBucketRateLimiter.hit("sites:#{key}", scale, limit) end, 37 | "hammer_redis_token_bucket" => fn key -> RedisTokenBucketRateLimiter.hit("sites:#{key}", scale, limit) end 38 | }, 39 | formatters: [{Benchee.Formatters.Console, extended_statistics: true}], 40 | before_each: fn _ -> :rand.uniform(range) end, 41 | print: [fast_warning: false], 42 | time: 6, 43 | # fill the table with some data 44 | warmup: 14, 45 | profile_after: profile?, 46 | parallel: parallel 47 | ) 48 | -------------------------------------------------------------------------------- /compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | redis: 3 | image: redis:latest 4 | ports: 5 | - 6379:6379 6 | -------------------------------------------------------------------------------- /lib/hammer/redis.ex: -------------------------------------------------------------------------------- 1 | defmodule Hammer.Redis do 2 | @moduledoc """ 3 | This backend uses the [Redix](https://hex.pm/packages/redix) library to connect to Redis. 4 | 5 | defmodule MyApp.RateLimit do 6 | # the default prefix is "MyApp.RateLimit:" 7 | # the default timeout is :infinity 8 | use Hammer, backend: Hammer.Redis, prefix: "MyApp.RateLimit:", timeout: :infinity 9 | end 10 | 11 | MyApp.RateLimit.start_link(url: "redis://localhost:6379") 12 | 13 | # increment and timeout arguments are optional 14 | # by default increment is 1 and timeout is as defined in the module 15 | {:allow, _count} = MyApp.RateLimit.hit(key, scale, limit) 16 | {:allow, _count} = MyApp.RateLimit.hit(key, scale, limit, _increment = 1, _timeout = :infinity) 17 | 18 | The Redis backend supports the following algorithms: 19 | - `:fix_window` - Fixed window rate limiting (default) 20 | Simple counting within fixed time windows. See [Hammer.Redis.FixWindow](Hammer.Redis.FixWindow.html) for more details. 21 | 22 | - `:leaky_bucket` - Leaky bucket rate limiting 23 | Smooth rate limiting with a fixed rate of tokens. See [Hammer.Redis.LeakyBucket](Hammer.Redis.LeakyBucket.html) for more details. 24 | 25 | - `:token_bucket` - Token bucket rate limiting 26 | Flexible rate limiting with bursting capability. See [Hammer.Redis.TokenBucket](Hammer.Redis.TokenBucket.html) for more details.
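The algorithm is chosen when the rate limiter module is defined. As an illustration, mirroring the example in the `Hammer.Redis.LeakyBucket` docs (with a leaky bucket, the `scale` argument of `hit/4` is the leak rate, `limit` is the bucket capacity, and the increment is the cost):

    defmodule MyApp.RateLimit do
      use Hammer, backend: Hammer.Redis, algorithm: :leaky_bucket
    end

    # allow a leak rate of 100 requests per second with a burst capacity of 500
    {:allow, _level} = MyApp.RateLimit.hit("user_123", 100, 500, 1)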
27 | 28 | """ 29 | # Redix does not define a type for its start options, so we define our 30 | # own so hopefully redix will be updated to provide a type 31 | @type redis_option :: {:url, String.t()} | {:name, String.t()} 32 | @type redis_options :: [redis_option()] 33 | 34 | # credo:disable-for-next-line Credo.Check.Refactor.CyclomaticComplexity 35 | defmacro __before_compile__(%{module: module}) do 36 | hammer_opts = Module.get_attribute(module, :hammer_opts) 37 | 38 | prefix = String.trim_leading(Atom.to_string(module), "Elixir.") 39 | prefix = Keyword.get(hammer_opts, :prefix, prefix) 40 | timeout = Keyword.get(hammer_opts, :timeout, :infinity) 41 | 42 | name = module 43 | 44 | algorithm = 45 | case Keyword.get(hammer_opts, :algorithm) do 46 | nil -> 47 | Hammer.Redis.FixWindow 48 | 49 | :fix_window -> 50 | Hammer.Redis.FixWindow 51 | 52 | :sliding_window -> 53 | Hammer.Redis.SlidingWindow 54 | 55 | :leaky_bucket -> 56 | Hammer.Redis.LeakyBucket 57 | 58 | :token_bucket -> 59 | Hammer.Redis.TokenBucket 60 | 61 | _module -> 62 | raise ArgumentError, """ 63 | Hammer requires a valid backend to be specified. Must be one of: :fix_window, :sliding_window, :leaky_bucket, :token_bucket. 64 | If none is specified, :fix_window is used. 65 | 66 | Example: 67 | 68 | use Hammer, backend: Hammer.Redis, algorithm: Hammer.Redis.FixWindow 69 | """ 70 | end 71 | 72 | Code.ensure_loaded!(algorithm) 73 | 74 | unless is_binary(prefix) do 75 | raise ArgumentError, """ 76 | Expected `:prefix` value to be a string, got: #{inspect(prefix)} 77 | """ 78 | end 79 | 80 | case timeout do 81 | :infinity -> 82 | :ok 83 | 84 | _ when is_integer(timeout) and timeout > 0 -> 85 | :ok 86 | 87 | _ -> 88 | raise ArgumentError, """ 89 | Expected `:timeout` value to be a positive integer or `:infinity`, got: #{inspect(timeout)} 90 | """ 91 | end 92 | 93 | quote do 94 | @name unquote(name) 95 | @prefix unquote(prefix) 96 | @timeout unquote(timeout) 97 | @algorithm unquote(algorithm) 98 | 99 | @spec child_spec(Keyword.t()) :: Supervisor.child_spec() 100 | def child_spec(opts) do 101 | %{ 102 | id: __MODULE__, 103 | start: {__MODULE__, :start_link, [opts]}, 104 | type: :worker 105 | } 106 | end 107 | 108 | @spec start_link(Hammer.Redis.redis_options()) :: 109 | {:ok, pid()} | :ignore | {:error, term()} 110 | def start_link(opts) do 111 | opts = Keyword.put(opts, :name, @name) 112 | 113 | Hammer.Redis.start_link(opts) 114 | end 115 | 116 | def hit(key, scale, limit, increment \\ 1) do 117 | @algorithm.hit(@name, @prefix, key, scale, limit, increment, @timeout) 118 | end 119 | 120 | if function_exported?(@algorithm, :inc, 6) do 121 | def inc(key, scale, increment \\ 1) do 122 | @algorithm.inc(@name, @prefix, key, scale, increment, @timeout) 123 | end 124 | end 125 | 126 | if function_exported?(@algorithm, :set, 6) do 127 | def set(key, scale, count) do 128 | @algorithm.set(@name, @prefix, key, scale, count, @timeout) 129 | end 130 | end 131 | 132 | if function_exported?(@algorithm, :get, 4) do 133 | def get(key, scale) do 134 | @algorithm.get(@name, @prefix, key, @timeout) 135 | end 136 | end 137 | 138 | if function_exported?(@algorithm, :get, 5) do 139 | def get(key, scale) do 140 | @algorithm.get(@name, @prefix, key, scale, @timeout) 141 | end 142 | end 143 | end 144 | end 145 | 146 | @doc false 147 | @spec start_link(Hammer.Redis.redis_options()) :: 148 | {:ok, pid()} | :ignore | {:error, term()} 149 | def start_link(opts) do 150 | {url, opts} = Keyword.pop(opts, :url) 151 | 152 | opts = 153 | if url do 154 | url_opts = 
Redix.URI.to_start_options(url) 155 | Keyword.merge(url_opts, opts) 156 | else 157 | opts 158 | end 159 | 160 | Redix.start_link(opts) 161 | end 162 | end 163 | -------------------------------------------------------------------------------- /lib/hammer/redis/fix_window.ex: -------------------------------------------------------------------------------- 1 | defmodule Hammer.Redis.FixWindow do 2 | @moduledoc """ 3 | This module implements the Fix Window algorithm. 4 | 5 | The fixed window algorithm works by dividing time into fixed intervals or "windows" 6 | of a specified duration (scale). Each window tracks request counts independently. 7 | 8 | For example, with a 60 second window: 9 | - Window 1: 0-60 seconds 10 | - Window 2: 60-120 seconds 11 | - And so on... 12 | 13 | ## The algorithm: 14 | 15 | 1. When a request comes in, we: 16 | - Calculate which window it belongs to based on current time 17 | - Increment the counter for that window 18 | - Store expiration time as end of window 19 | 2. To check if rate limit is exceeded: 20 | - If count <= limit: allow request 21 | - If count > limit: deny and return time until window expires 22 | 3. Old windows are automatically cleaned up after expiration 23 | 24 | This provides simple rate limiting but has edge cases where a burst of requests 25 | spanning a window boundary could allow up to 2x the limit in a short period. 26 | For more precise limiting, consider using the sliding window algorithm instead. 27 | 28 | The fixed window algorithm is a good choice when: 29 | 30 | - You need simple, predictable rate limiting with clear time boundaries 31 | - The exact precision of the rate limit is not critical 32 | - You want efficient implementation with minimal storage overhead 33 | - Your use case can tolerate potential bursts at window boundaries 34 | 35 | ## Common use cases include: 36 | 37 | - Basic API rate limiting where occasional bursts are acceptable 38 | - Protecting backend services from excessive load 39 | - Implementing fair usage policies 40 | - Scenarios where clear time-based quotas are desired (e.g. "100 requests per minute") 41 | 42 | The main tradeoff is that requests near window boundaries can allow up to 2x the 43 | intended limit in a short period. For example with a limit of 100 per minute: 44 | - 100 requests at 11:59:59 45 | - Another 100 requests at 12:00:01 46 | 47 | This results in 200 requests in 2 seconds, while still being within limits. 48 | If this behavior is problematic, consider using the sliding window algorithm instead. 
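To make the bookkeeping concrete, the arithmetic performed by `hit/7` below reduces to a few integer operations (the values are purely illustrative):

    scale = 60_000                        # a one-minute window, in milliseconds
    now = System.system_time(:millisecond)
    window = div(now, scale)              # index of the window this hit falls into
    expires_at = (window + 1) * scale     # end of the window, in milliseconds

    # The counter is INCRBY'd at the Redis key "prefix:key:window", EXPIREAT is
    # set to div(expires_at, 1000) (Redis expects seconds), and a denied hit
    # returns {:deny, expires_at - now}, the time left in the current window.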
49 | 50 | ## Example usage: 51 | 52 | defmodule MyApp.RateLimit do 53 | use Hammer, backend: Hammer.Redis, algorithm: :fix_window 54 | end 55 | 56 | MyApp.RateLimit.start_link(clean_period: :timer.minutes(1)) 57 | 58 | # Allow 10 requests per second 59 | MyApp.RateLimit.hit("user_123", 1000, 10) 60 | """ 61 | @doc false 62 | @spec hit( 63 | Redix.connection(), 64 | String.t(), 65 | String.t(), 66 | non_neg_integer(), 67 | non_neg_integer(), 68 | non_neg_integer(), 69 | timeout() 70 | ) :: 71 | {:allow, non_neg_integer()} | {:deny, non_neg_integer()} 72 | def hit(name, prefix, key, scale, limit, increment, timeout) do 73 | now = now() 74 | window = div(now, scale) 75 | full_key = redis_key(prefix, key, window) 76 | expires_at = (window + 1) * scale 77 | 78 | commands = [ 79 | ["INCRBY", full_key, increment], 80 | ["EXPIREAT", full_key, div(expires_at, 1000), "NX"] 81 | ] 82 | 83 | [count, _] = 84 | Redix.pipeline!(name, commands, timeout: timeout) 85 | 86 | if count <= limit do 87 | {:allow, count} 88 | else 89 | {:deny, expires_at - now} 90 | end 91 | end 92 | 93 | @doc false 94 | @spec inc( 95 | Redix.connection(), 96 | String.t(), 97 | String.t(), 98 | non_neg_integer(), 99 | non_neg_integer(), 100 | timeout() 101 | ) :: non_neg_integer() 102 | def inc(name, prefix, key, scale, increment, timeout) do 103 | now = now() 104 | window = div(now, scale) 105 | full_key = redis_key(prefix, key, window) 106 | expires_at = (window + 1) * scale 107 | 108 | commands = [ 109 | ["INCRBY", full_key, increment], 110 | ["EXPIREAT", full_key, div(expires_at, 1000), "NX"] 111 | ] 112 | 113 | [count, _] = 114 | Redix.pipeline!(name, commands, timeout: timeout) 115 | 116 | count 117 | end 118 | 119 | @doc false 120 | @spec set( 121 | Redix.connection(), 122 | String.t(), 123 | String.t(), 124 | non_neg_integer(), 125 | non_neg_integer(), 126 | timeout() 127 | ) :: non_neg_integer() 128 | def set(name, prefix, key, scale, count, timeout) do 129 | now = now() 130 | window = div(now, scale) 131 | full_key = redis_key(prefix, key, window) 132 | expires_at = (window + 1) * scale 133 | 134 | commands = [ 135 | ["SET", full_key, count], 136 | ["EXPIREAT", full_key, div(expires_at, 1000), "NX"] 137 | ] 138 | 139 | Redix.pipeline!(name, commands, timeout: timeout) 140 | 141 | count 142 | end 143 | 144 | @doc false 145 | @spec get( 146 | Redix.connection(), 147 | String.t(), 148 | String.t(), 149 | non_neg_integer(), 150 | timeout() 151 | ) :: non_neg_integer() 152 | def get(name, prefix, key, scale, timeout) do 153 | now = now() 154 | window = div(now, scale) 155 | full_key = redis_key(prefix, key, window) 156 | count = Redix.command!(name, ["GET", full_key], timeout: timeout) 157 | 158 | case count do 159 | nil -> 0 160 | count -> String.to_integer(count) 161 | end 162 | end 163 | 164 | @compile inline: [redis_key: 3] 165 | defp redis_key(prefix, key, window) do 166 | "#{prefix}:#{key}:#{window}" 167 | end 168 | 169 | @compile inline: [now: 0] 170 | defp now do 171 | System.system_time(:millisecond) 172 | end 173 | end 174 | -------------------------------------------------------------------------------- /lib/hammer/redis/leaky_bucket.ex: -------------------------------------------------------------------------------- 1 | defmodule Hammer.Redis.LeakyBucket do 2 | @moduledoc """ 3 | This module implements the Leaky Bucket algorithm. 
4 | 5 | The leaky bucket algorithm works by modeling a bucket that: 6 | - Fills up with requests at the input rate 7 | - "Leaks" requests at a constant rate 8 | - Has a maximum capacity (the bucket size) 9 | 10 | For example, with a leak rate of 10 requests/second and bucket size of 100: 11 | - Requests add to the bucket's current level 12 | - The bucket leaks 10 requests per second steadily 13 | - If bucket reaches capacity (100), new requests are denied 14 | - Once bucket level drops, new requests are allowed again 15 | 16 | ## The algorithm: 17 | 18 | 1. When a request comes in, we: 19 | - Calculate how much has leaked since last request 20 | - Subtract leaked amount from current bucket level 21 | - Try to add new request to bucket 22 | - Store new bucket level and timestamp 23 | 2. To check if rate limit is exceeded: 24 | - If new bucket level <= capacity: allow request 25 | - If new bucket level > capacity: deny and return time until enough leaks 26 | 3. Old entries are automatically cleaned up after expiration 27 | 28 | This provides smooth rate limiting with ability to handle bursts up to bucket size. 29 | The leaky bucket is a good choice when: 30 | 31 | - You need to enforce a constant processing rate 32 | - Want to allow temporary bursts within bucket capacity 33 | - Need to smooth out traffic spikes 34 | - Want to prevent resource exhaustion 35 | 36 | ## Common use cases include: 37 | 38 | - API rate limiting needing consistent throughput 39 | - Network traffic shaping 40 | - Service protection from sudden load spikes 41 | - Queue processing rate control 42 | - Scenarios needing both burst tolerance and steady-state limits 43 | 44 | The main advantages are: 45 | - Smooth, predictable output rate 46 | - Configurable burst tolerance 47 | - Natural queueing behavior 48 | 49 | The tradeoffs are: 50 | - More complex implementation than fixed windows 51 | - Need to track last request time and current bucket level 52 | - May need tuning of bucket size and leak rate parameters 53 | 54 | For example, with 100 requests/sec limit and 500 bucket size: 55 | - Can handle bursts of up to 500 requests 56 | - But long-term average rate won't exceed 100/sec 57 | - Provides smoother traffic than fixed windows 58 | 59 | ## Example usage: 60 | 61 | defmodule MyApp.RateLimit do 62 | use Hammer, backend: Hammer.Redis, algorithm: :leaky_bucket 63 | end 64 | 65 | MyApp.RateLimit.start_link(clean_period: :timer.minutes(1)) 66 | 67 | # Allow 100 requests/sec leak rate with max capacity of 500 68 | MyApp.RateLimit.hit("user_123", 100, 500, 1) 69 | """ 70 | 71 | @doc """ 72 | Checks if a key is allowed to perform an action, and increment the counter by the given amount. 73 | """ 74 | @spec hit( 75 | connection_name :: atom(), 76 | prefix :: String.t(), 77 | key :: String.t(), 78 | leak_rate :: pos_integer(), 79 | capacity :: pos_integer(), 80 | cost :: pos_integer(), 81 | timeout :: non_neg_integer() 82 | ) :: {:allow, non_neg_integer()} | {:deny, non_neg_integer()} 83 | def hit(connection_name, prefix, key, leak_rate, capacity, cost, timeout) do 84 | {:ok, [allowed, value]} = 85 | Redix.command( 86 | connection_name, 87 | [ 88 | "EVAL", 89 | redis_script(), 90 | "1", 91 | redis_key(prefix, key), 92 | capacity, 93 | leak_rate, 94 | cost 95 | ], 96 | timeout: timeout 97 | ) 98 | 99 | if allowed == 1 do 100 | {:allow, value} 101 | else 102 | {:deny, value} 103 | end 104 | end 105 | 106 | @doc """ 107 | Returns the current level of the bucket for a given key. 
108 | """ 109 | @spec get( 110 | connection_name :: atom(), 111 | prefix :: String.t(), 112 | key :: String.t(), 113 | timeout :: non_neg_integer() 114 | ) :: 115 | non_neg_integer() 116 | def get(connection_name, prefix, key, timeout) do 117 | res = 118 | Redix.command( 119 | connection_name, 120 | [ 121 | "HGET", 122 | redis_key(prefix, key), 123 | "level" 124 | ], 125 | timeout: timeout 126 | ) 127 | 128 | case res do 129 | {:ok, nil} -> 130 | 0 131 | 132 | {:ok, level} -> 133 | String.to_integer(level) 134 | 135 | _ -> 136 | 0 137 | end 138 | end 139 | 140 | @compile inline: [redis_key: 2] 141 | defp redis_key(prefix, key) do 142 | "#{prefix}:#{key}" 143 | end 144 | 145 | defp redis_script do 146 | """ 147 | -- Get current time in seconds 148 | local now = redis.call("TIME")[1] 149 | 150 | -- Get current bucket state 151 | local bucket = redis.call("HMGET", KEYS[1], "level", "last_update") 152 | local current_level = tonumber(bucket[1]) or 0 -- Default to capacity if new 153 | local last_update = tonumber(bucket[2]) or now 154 | local capacity = tonumber(ARGV[1]) 155 | 156 | -- Calculate leak amount since last update 157 | local elapsed = now - last_update 158 | local leak_amount = elapsed * ARGV[2] -- leak_rate per second 159 | 160 | -- Update bucket level 161 | local new_level = math.max(0, current_level - leak_amount) 162 | 163 | -- Try to consume tokens 164 | local cost = tonumber(ARGV[3]) 165 | if new_level < capacity then 166 | new_level = new_level + cost 167 | redis.call("HMSET", KEYS[1], "level", new_level, "last_update", now) 168 | -- Set TTL to time needed to leak current level plus a small buffer 169 | local time_to_empty = math.ceil(new_level / ARGV[2]) 170 | local ttl = time_to_empty + 60 -- Add 60 second buffer 171 | redis.call("EXPIRE", KEYS[1], ttl) 172 | return {1, new_level} 173 | else 174 | -- Calculate time until enough tokens available 175 | local time_needed = (new_level - cost) / ARGV[2] 176 | return {0, math.ceil(time_needed * 1000)} -- Deny with ms wait time 177 | end 178 | """ 179 | end 180 | end 181 | -------------------------------------------------------------------------------- /lib/hammer/redis/token_bucket.ex: -------------------------------------------------------------------------------- 1 | defmodule Hammer.Redis.TokenBucket do 2 | @moduledoc """ 3 | This module implements the Token Bucket algorithm. 4 | The token bucket algorithm works by modeling a bucket that: 5 | - Fills with tokens at a constant rate (the refill rate) 6 | - Has a maximum capacity of tokens (the bucket size) 7 | - Each request consumes one or more tokens 8 | - If there are enough tokens, the request is allowed 9 | - If not enough tokens, the request is denied 10 | 11 | For example, with a refill rate of 10 tokens/second and bucket size of 100: 12 | - Tokens are added at 10 per second up to max of 100 13 | - Each request needs tokens to proceed 14 | - If bucket has enough tokens, request allowed and tokens consumed 15 | - If not enough tokens, request denied until bucket refills 16 | 17 | ## The algorithm: 18 | 19 | 1. When a request comes in, we: 20 | - Calculate tokens added since last request based on time elapsed 21 | - Add new tokens to bucket (up to max capacity) 22 | - Try to consume tokens for the request 23 | - Store new token count and timestamp 24 | 2. To check if rate limit is exceeded: 25 | - If enough tokens: allow request and consume tokens 26 | - If not enough: deny and return time until enough tokens refill 27 | 3. 
Old entries are automatically cleaned up after expiration 28 | 29 | This provides smooth rate limiting with ability to handle bursts up to bucket size. 30 | The token bucket is a good choice when: 31 | 32 | - You need to allow temporary bursts of traffic 33 | - Want to enforce an average rate limit 34 | - Need to support different costs for different operations 35 | - Want to avoid the sharp edges of fixed windows 36 | 37 | ## Common use cases include: 38 | 39 | - API rate limiting with burst tolerance 40 | - Network traffic shaping 41 | - Resource allocation control 42 | - Gaming systems with "energy" mechanics 43 | - Scenarios needing flexible rate limits 44 | 45 | The main advantages are: 46 | - Natural handling of bursts 47 | - Flexible token costs for different operations 48 | - Smooth rate limiting behavior 49 | - Simple to reason about 50 | 51 | The tradeoffs are: 52 | - Need to track token count and last update time 53 | - May need tuning of bucket size and refill rate 54 | - More complex than fixed windows 55 | 56 | For example with 100 tokens/minute limit and 500 bucket size: 57 | - Can handle bursts using saved up tokens 58 | - Automatically smooths out over time 59 | - Different operations can cost different amounts 60 | - More flexible than fixed request counts 61 | 62 | ## Example usage: 63 | 64 | defmodule MyApp.RateLimit do 65 | use Hammer, backend: Hammer.Redis, algorithm: :token_bucket 66 | end 67 | 68 | MyApp.RateLimit.start_link(clean_period: :timer.minutes(1)) 69 | 70 | # Allow 10 tokens per second with max capacity of 100 71 | MyApp.RateLimit.hit("user_123", 10, 100, 1) 72 | """ 73 | 74 | @doc false 75 | @spec hit( 76 | connection_name :: atom(), 77 | prefix :: String.t(), 78 | key :: String.t(), 79 | refill_rate :: pos_integer(), 80 | capacity :: pos_integer(), 81 | cost :: pos_integer(), 82 | timeout :: non_neg_integer() 83 | ) :: {:allow, non_neg_integer()} | {:deny, non_neg_integer()} 84 | def hit(connection_name, prefix, key, refill_rate, capacity, cost, timeout) do 85 | {:ok, [allowed, value]} = 86 | Redix.command( 87 | connection_name, 88 | [ 89 | "EVAL", 90 | redis_script(), 91 | "1", 92 | redis_key(prefix, key), 93 | capacity, 94 | refill_rate, 95 | cost 96 | ], 97 | timeout: timeout 98 | ) 99 | 100 | if allowed == 1 do 101 | {:allow, value} 102 | else 103 | {:deny, 1000} 104 | end 105 | end 106 | 107 | @compile inline: [redis_key: 2] 108 | defp redis_key(prefix, key) do 109 | "#{prefix}:#{key}" 110 | end 111 | 112 | @doc """ 113 | Returns the current level of the bucket for a given key. 
114 | """ 115 | @spec get( 116 | connection_name :: atom(), 117 | prefix :: String.t(), 118 | key :: String.t(), 119 | timeout :: non_neg_integer() 120 | ) :: 121 | non_neg_integer() 122 | def get(connection_name, prefix, key, timeout) do 123 | case Redix.command( 124 | connection_name, 125 | [ 126 | "HGET", 127 | redis_key(prefix, key), 128 | "level" 129 | ], 130 | timeout: timeout 131 | ) do 132 | {:ok, nil} -> 133 | 0 134 | 135 | {:ok, level} -> 136 | String.to_integer(level) 137 | 138 | _ -> 139 | 0 140 | end 141 | end 142 | 143 | defp redis_script do 144 | """ 145 | -- Get current time in seconds 146 | local now = redis.call("TIME")[1] 147 | 148 | -- Get current bucket state 149 | local bucket = redis.call("HMGET", KEYS[1], "level", "last_update") 150 | local current_level = tonumber(bucket[1]) or ARGV[1] -- Default to capacity if new 151 | local last_update = tonumber(bucket[2]) or now 152 | 153 | -- Calculate tokens to add since last update 154 | local elapsed = now - last_update 155 | local new_tokens = math.floor(elapsed * ARGV[2]) -- refill_rate per second 156 | local capacity = tonumber(ARGV[1]) 157 | local current_tokens = math.min(capacity, current_level + new_tokens) 158 | 159 | -- Try to consume tokens 160 | local cost = tonumber(ARGV[3]) 161 | if current_tokens >= cost then 162 | local final_level = current_tokens - cost 163 | redis.call("HMSET", KEYS[1], "level", final_level, "last_update", now) 164 | -- Set TTL to time needed to refill to capacity plus a small buffer 165 | local time_to_full = math.ceil((capacity - final_level) / ARGV[2]) 166 | local ttl = time_to_full + 60 -- Add 60 second buffer 167 | redis.call("EXPIRE", KEYS[1], ttl) 168 | return {1, final_level} -- Allow with new level 169 | else 170 | -- Calculate time until enough tokens available 171 | local tokens_needed = cost - current_tokens 172 | local time_needed = tokens_needed / ARGV[2] 173 | return {0, math.ceil(time_needed * 1000)} -- Deny with ms wait time 174 | end 175 | """ 176 | end 177 | end 178 | -------------------------------------------------------------------------------- /mix.exs: -------------------------------------------------------------------------------- 1 | defmodule Hammer.Redis.MixProject do 2 | use Mix.Project 3 | 4 | @version "7.0.2" 5 | 6 | def project do 7 | [ 8 | app: :hammer_backend_redis, 9 | description: "Redis backend for Hammer rate-limiter", 10 | source_url: "https://github.com/ExHammer/hammer-backend-redis", 11 | homepage_url: "https://github.com/ExHammer/hammer-backend-redis", 12 | version: @version, 13 | elixir: "~> 1.14", 14 | deps: deps(), 15 | docs: docs(), 16 | package: package(), 17 | test_coverage: [summary: [threshold: 85]] 18 | ] 19 | end 20 | 21 | def application do 22 | [extra_applications: [:logger]] 23 | end 24 | 25 | def docs do 26 | [ 27 | main: "readme", 28 | extras: ["README.md", "CHANGELOG.md"], 29 | skip_undefined_reference_warnings_on: ["README.md", "CHANGELOG.md"], 30 | source_ref: "v#{@version}" 31 | ] 32 | end 33 | 34 | defp deps do 35 | [ 36 | {:benchee, "~> 1.2", only: :bench}, 37 | {:credo, "~> 1.7", only: [:dev, :test]}, 38 | {:dialyxir, "~> 1.4", only: [:dev], runtime: false}, 39 | {:ex_doc, "~> 0.34", only: :dev}, 40 | {:hammer, "~> 7.0"}, 41 | {:redix, "~> 1.5"} 42 | ] 43 | end 44 | 45 | defp package do 46 | [ 47 | name: :hammer_backend_redis, 48 | maintainers: ["Emmanuel Pinault", "June Kelly"], 49 | licenses: ["MIT"], 50 | links: %{ 51 | "GitHub" => "https://github.com/ExHammer/hammer-backend-redis", 52 | "Changelog" => 
"https://github.com/ExHammer/hammer-backend-redis/blob/master/CHANGELOG.md" 53 | } 54 | ] 55 | end 56 | end 57 | -------------------------------------------------------------------------------- /mix.lock: -------------------------------------------------------------------------------- 1 | %{ 2 | "benchee": {:hex, :benchee, "1.3.1", "c786e6a76321121a44229dde3988fc772bca73ea75170a73fd5f4ddf1af95ccf", [:mix], [{:deep_merge, "~> 1.0", [hex: :deep_merge, repo: "hexpm", optional: false]}, {:statistex, "~> 1.0", [hex: :statistex, repo: "hexpm", optional: false]}, {:table, "~> 0.1.0", [hex: :table, repo: "hexpm", optional: true]}], "hexpm", "76224c58ea1d0391c8309a8ecbfe27d71062878f59bd41a390266bf4ac1cc56d"}, 3 | "bunt": {:hex, :bunt, "1.0.0", "081c2c665f086849e6d57900292b3a161727ab40431219529f13c4ddcf3e7a44", [:mix], [], "hexpm", "dc5f86aa08a5f6fa6b8096f0735c4e76d54ae5c9fa2c143e5a1fc7c1cd9bb6b5"}, 4 | "credo": {:hex, :credo, "1.7.11", "d3e805f7ddf6c9c854fd36f089649d7cf6ba74c42bc3795d587814e3c9847102", [:mix], [{:bunt, "~> 0.2.1 or ~> 1.0", [hex: :bunt, repo: "hexpm", optional: false]}, {:file_system, "~> 0.2 or ~> 1.0", [hex: :file_system, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "56826b4306843253a66e47ae45e98e7d284ee1f95d53d1612bb483f88a8cf219"}, 5 | "deep_merge": {:hex, :deep_merge, "1.0.0", "b4aa1a0d1acac393bdf38b2291af38cb1d4a52806cf7a4906f718e1feb5ee961", [:mix], [], "hexpm", "ce708e5f094b9cd4e8f2be4f00d2f4250c4095be93f8cd6d018c753894885430"}, 6 | "dialyxir": {:hex, :dialyxir, "1.4.5", "ca1571ac18e0f88d4ab245f0b60fa31ff1b12cbae2b11bd25d207f865e8ae78a", [:mix], [{:erlex, ">= 0.2.7", [hex: :erlex, repo: "hexpm", optional: false]}], "hexpm", "b0fb08bb8107c750db5c0b324fa2df5ceaa0f9307690ee3c1f6ba5b9eb5d35c3"}, 7 | "earmark_parser": {:hex, :earmark_parser, "1.4.43", "34b2f401fe473080e39ff2b90feb8ddfeef7639f8ee0bbf71bb41911831d77c5", [:mix], [], "hexpm", "970a3cd19503f5e8e527a190662be2cee5d98eed1ff72ed9b3d1a3d466692de8"}, 8 | "erlex": {:hex, :erlex, "0.2.7", "810e8725f96ab74d17aac676e748627a07bc87eb950d2b83acd29dc047a30595", [:mix], [], "hexpm", "3ed95f79d1a844c3f6bf0cea61e0d5612a42ce56da9c03f01df538685365efb0"}, 9 | "ex_doc": {:hex, :ex_doc, "0.37.3", "f7816881a443cd77872b7d6118e8a55f547f49903aef8747dbcb345a75b462f9", [:mix], [{:earmark_parser, "~> 1.4.42", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_c, ">= 0.1.0", [hex: :makeup_c, repo: "hexpm", optional: true]}, {:makeup_elixir, "~> 0.14 or ~> 1.0", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1 or ~> 1.0", [hex: :makeup_erlang, repo: "hexpm", optional: false]}, {:makeup_html, ">= 0.1.0", [hex: :makeup_html, repo: "hexpm", optional: true]}], "hexpm", "e6aebca7156e7c29b5da4daa17f6361205b2ae5f26e5c7d8ca0d3f7e18972233"}, 10 | "file_system": {:hex, :file_system, "1.1.0", "08d232062284546c6c34426997dd7ef6ec9f8bbd090eb91780283c9016840e8f", [:mix], [], "hexpm", "bfcf81244f416871f2a2e15c1b515287faa5db9c6bcf290222206d120b3d43f6"}, 11 | "hammer": {:hex, :hammer, "7.0.1", "136edcd81af44becbe6b73a958c109e2364ab0dc026d7b19892037dc2632078c", [:mix], [], "hexpm", "796edf14ab2aa80df72080210fcf944ee5e8868d8ece7a7511264d802f58cc2d"}, 12 | "jason": {:hex, :jason, "1.4.4", "b9226785a9aa77b6857ca22832cffa5d5011a667207eb2a0ad56adb5db443b8a", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "c5eb0cab91f094599f94d55bc63409236a8ec69a21a67814529e8d5f6cc90b3b"}, 13 | "makeup": {:hex, :makeup, 
"1.2.1", "e90ac1c65589ef354378def3ba19d401e739ee7ee06fb47f94c687016e3713d1", [:mix], [{:nimble_parsec, "~> 1.4", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "d36484867b0bae0fea568d10131197a4c2e47056a6fbe84922bf6ba71c8d17ce"}, 14 | "makeup_elixir": {:hex, :makeup_elixir, "1.0.1", "e928a4f984e795e41e3abd27bfc09f51db16ab8ba1aebdba2b3a575437efafc2", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.2.3 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "7284900d412a3e5cfd97fdaed4f5ed389b8f2b4cb49efc0eb3bd10e2febf9507"}, 15 | "makeup_erlang": {:hex, :makeup_erlang, "1.0.2", "03e1804074b3aa64d5fad7aa64601ed0fb395337b982d9bcf04029d68d51b6a7", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}], "hexpm", "af33ff7ef368d5893e4a267933e7744e46ce3cf1f61e2dccf53a111ed3aa3727"}, 16 | "nimble_options": {:hex, :nimble_options, "1.1.1", "e3a492d54d85fc3fd7c5baf411d9d2852922f66e69476317787a7b2bb000a61b", [:mix], [], "hexpm", "821b2470ca9442c4b6984882fe9bb0389371b8ddec4d45a9504f00a66f650b44"}, 17 | "nimble_parsec": {:hex, :nimble_parsec, "1.4.2", "8efba0122db06df95bfaa78f791344a89352ba04baedd3849593bfce4d0dc1c6", [:mix], [], "hexpm", "4b21398942dda052b403bbe1da991ccd03a053668d147d53fb8c4e0efe09c973"}, 18 | "redix": {:hex, :redix, "1.5.2", "ab854435a663f01ce7b7847f42f5da067eea7a3a10c0a9d560fa52038fd7ab48", [:mix], [{:castore, "~> 0.1.0 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:nimble_options, "~> 0.5.0 or ~> 1.0", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "78538d184231a5d6912f20567d76a49d1be7d3fca0e1aaaa20f4df8e1142dcb8"}, 19 | "statistex": {:hex, :statistex, "1.0.0", "f3dc93f3c0c6c92e5f291704cf62b99b553253d7969e9a5fa713e5481cd858a5", [:mix], [], "hexpm", "ff9d8bee7035028ab4742ff52fc80a2aa35cece833cf5319009b52f1b5a86c27"}, 20 | "telemetry": {:hex, :telemetry, "1.3.0", "fedebbae410d715cf8e7062c96a1ef32ec22e764197f70cda73d82778d61e7a2", [:rebar3], [], "hexpm", "7015fc8919dbe63764f4b4b87a95b7c0996bd539e0d499be6ec9d7f3875b79e6"}, 21 | } 22 | -------------------------------------------------------------------------------- /publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish 2 | 3 | on: 4 | push: 5 | tags: 6 | - "*" 7 | 8 | jobs: 9 | publish: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Check out 13 | uses: actions/checkout@v4 14 | 15 | - name: Publish package to hex.pm 16 | uses: hipcall/github_action_publish_hex@v1 17 | env: 18 | HEX_API_KEY: ${{ secrets.HEX_API_KEY }} 19 | -------------------------------------------------------------------------------- /test/hammer/redis/leaky_bucket_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Hammer.Redis.LeakyBucketTest do 2 | use ExUnit.Case, async: true 3 | 4 | @moduletag :redis 5 | 6 | defmodule RateLimitLeakyBucket do 7 | use Hammer, backend: Hammer.Redis, algorithm: :leaky_bucket 8 | end 9 | 10 | setup do 11 | start_supervised!({RateLimitLeakyBucket, url: "redis://localhost:6379"}) 12 | key = "key#{:rand.uniform(1_000_000)}" 13 | 14 | {:ok, %{key: key}} 15 | end 16 | 17 | test "key prefix is set to the module name by default", %{key: key} do 18 | scale = :timer.seconds(10) 19 | limit = 5 20 | 21 | RateLimitLeakyBucket.hit(key, scale, limit) 22 | 23 | assert 
Redix.command!(RateLimitLeakyBucket, [ 24 | "HGET", 25 | "Hammer.Redis.LeakyBucketTest.RateLimitLeakyBucket:#{key}", 26 | "level" 27 | ]) == "1" 28 | end 29 | 30 | describe "hit" do 31 | test "returns {:allow, 1} tuple on first access", %{key: key} do 32 | leak_rate = :timer.seconds(10) 33 | capacity = 10 34 | 35 | assert {:allow, 1} = RateLimitLeakyBucket.hit(key, leak_rate, capacity) 36 | end 37 | 38 | test "returns {:allow, 4} tuple on in-limit checks", %{key: key} do 39 | leak_rate = 2 40 | capacity = 10 41 | 42 | assert {:allow, 1} = RateLimitLeakyBucket.hit(key, leak_rate, capacity, 1) 43 | assert {:allow, 2} = RateLimitLeakyBucket.hit(key, leak_rate, capacity, 1) 44 | assert {:allow, 3} = RateLimitLeakyBucket.hit(key, leak_rate, capacity, 1) 45 | assert {:allow, 4} = RateLimitLeakyBucket.hit(key, leak_rate, capacity, 1) 46 | end 47 | 48 | test "returns expected tuples on mix of in-limit and out-of-limit checks", %{key: key} do 49 | leak_rate = 1 50 | capacity = 2 51 | 52 | assert {:allow, 1} = RateLimitLeakyBucket.hit(key, leak_rate, capacity, 1) 53 | assert {:allow, 2} = RateLimitLeakyBucket.hit(key, leak_rate, capacity, 1) 54 | 55 | assert {:deny, 1000} = 56 | RateLimitLeakyBucket.hit(key, leak_rate, capacity, 1) 57 | 58 | assert {:deny, _retry_after} = 59 | RateLimitLeakyBucket.hit(key, leak_rate, capacity, 1) 60 | end 61 | 62 | test "returns expected tuples after waiting for the next window", %{key: key} do 63 | leak_rate = 1 64 | capacity = 2 65 | 66 | assert {:allow, 1} = RateLimitLeakyBucket.hit(key, leak_rate, capacity, 1) 67 | assert {:allow, 2} = RateLimitLeakyBucket.hit(key, leak_rate, capacity, 1) 68 | 69 | assert {:deny, retry_after} = 70 | RateLimitLeakyBucket.hit(key, leak_rate, capacity, 1) 71 | 72 | :timer.sleep(retry_after) 73 | 74 | assert {:allow, 2} = RateLimitLeakyBucket.hit(key, leak_rate, capacity, 1) 75 | 76 | assert {:deny, _retry_after} = 77 | RateLimitLeakyBucket.hit(key, leak_rate, capacity, 1) 78 | end 79 | end 80 | 81 | describe "get" do 82 | test "get returns the count set for the given key and scale", %{key: key} do 83 | leak_rate = :timer.seconds(10) 84 | capacity = 10 85 | 86 | assert RateLimitLeakyBucket.get(key, leak_rate) == 0 87 | assert {:allow, 3} = RateLimitLeakyBucket.hit(key, leak_rate, capacity, 3) 88 | assert RateLimitLeakyBucket.get(key, leak_rate) == 3 89 | end 90 | end 91 | end 92 | -------------------------------------------------------------------------------- /test/hammer/redis/token_bucket_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Hammer.Redis.TokenBucketTest do 2 | use ExUnit.Case, async: true 3 | 4 | @moduletag :redis 5 | 6 | defmodule RateLimitTokenBucket do 7 | use Hammer, backend: Hammer.Redis, algorithm: :token_bucket 8 | end 9 | 10 | setup do 11 | start_supervised!({RateLimitTokenBucket, url: "redis://localhost:6379"}) 12 | key = "key#{:rand.uniform(1_000_000)}" 13 | 14 | {:ok, %{key: key}} 15 | end 16 | 17 | test "key prefix is set to the module name by default", %{key: key} do 18 | scale = :timer.seconds(10) 19 | limit = 5 20 | 21 | RateLimitTokenBucket.hit(key, scale, limit) 22 | 23 | assert Redix.command!(RateLimitTokenBucket, [ 24 | "HGET", 25 | "Hammer.Redis.TokenBucketTest.RateLimitTokenBucket:#{key}", 26 | "level" 27 | ]) == "4" 28 | end 29 | 30 | describe "hit" do 31 | test "returns {:allow, 9} tuple on first access", %{key: key} do 32 | refill_rate = 10 33 | capacity = 10 34 | 35 | assert {:allow, 9} = RateLimitTokenBucket.hit(key, refill_rate, 
capacity, 1) 36 | end 37 | 38 | test "returns {:allow, 6} tuple on in-limit checks", %{key: key} do 39 | refill_rate = 2 40 | capacity = 10 41 | 42 | assert {:allow, 9} = RateLimitTokenBucket.hit(key, refill_rate, capacity, 1) 43 | assert {:allow, 8} = RateLimitTokenBucket.hit(key, refill_rate, capacity, 1) 44 | assert {:allow, 7} = RateLimitTokenBucket.hit(key, refill_rate, capacity, 1) 45 | assert {:allow, 6} = RateLimitTokenBucket.hit(key, refill_rate, capacity, 1) 46 | end 47 | 48 | test "returns expected tuples on mix of in-limit and out-of-limit checks", %{key: key} do 49 | refill_rate = 1 50 | capacity = 2 51 | 52 | assert {:allow, 1} = RateLimitTokenBucket.hit(key, refill_rate, capacity, 1) 53 | assert {:allow, 0} = RateLimitTokenBucket.hit(key, refill_rate, capacity, 1) 54 | 55 | assert {:deny, 1000} = RateLimitTokenBucket.hit(key, refill_rate, capacity, 1) 56 | 57 | assert {:deny, _retry_after} = 58 | RateLimitTokenBucket.hit(key, refill_rate, capacity, 1) 59 | end 60 | 61 | test "returns expected tuples after waiting for the next window", %{key: key} do 62 | refill_rate = 1 63 | capacity = 2 64 | 65 | assert {:allow, 1} = RateLimitTokenBucket.hit(key, refill_rate, capacity, 1) 66 | assert {:allow, 0} = RateLimitTokenBucket.hit(key, refill_rate, capacity, 1) 67 | 68 | assert {:deny, retry_after} = 69 | RateLimitTokenBucket.hit(key, refill_rate, capacity, 1) 70 | 71 | :timer.sleep(retry_after) 72 | 73 | assert {:allow, 0} = RateLimitTokenBucket.hit(key, refill_rate, capacity, 1) 74 | 75 | assert {:deny, _retry_after} = 76 | RateLimitTokenBucket.hit(key, refill_rate, capacity, 1) 77 | end 78 | end 79 | 80 | describe "get" do 81 | test "get returns the count set for the given key and scale", %{key: key} do 82 | refill_rate = :timer.seconds(10) 83 | capacity = 10 84 | 85 | assert RateLimitTokenBucket.get(key, refill_rate) == 0 86 | 87 | assert {:allow, _} = RateLimitTokenBucket.hit(key, refill_rate, capacity, 4) 88 | assert RateLimitTokenBucket.get(key, refill_rate) == 6 89 | 90 | assert {:allow, _} = RateLimitTokenBucket.hit(key, refill_rate, capacity, 3) 91 | assert RateLimitTokenBucket.get(key, refill_rate) == 3 92 | end 93 | end 94 | end 95 | -------------------------------------------------------------------------------- /test/hammer/redis_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Hammer.RedisTest do 2 | use ExUnit.Case, async: true 3 | 4 | @moduletag :redis 5 | 6 | defmodule RateLimit do 7 | use Hammer, backend: Hammer.Redis 8 | end 9 | 10 | setup do 11 | start_supervised!({RateLimit, url: "redis://localhost:6379"}) 12 | key = "key#{:rand.uniform(1_000_000)}" 13 | 14 | {:ok, %{key: key}} 15 | end 16 | 17 | defp redis_all(key, conn \\ RateLimit) do 18 | keys = Redix.command!(conn, ["KEYS", "Hammer.RedisTest.RateLimit:#{key}*"]) 19 | 20 | Enum.map(keys, fn key -> 21 | {key, Redix.command!(conn, ["GET", key])} 22 | end) 23 | end 24 | 25 | defp clean_keys(conn \\ RateLimit) do 26 | keys = Redix.command!(conn, ["KEYS", "Hammer.RedisTest.RateLimit*"]) 27 | 28 | to_delete = 29 | Enum.map(keys, fn key -> 30 | ["DEL", key] 31 | end) 32 | 33 | Redix.pipeline!(RateLimit, to_delete) 34 | end 35 | 36 | test "key prefix is set to the module name by default", %{key: key} do 37 | scale = :timer.seconds(10) 38 | limit = 5 39 | 40 | RateLimit.hit(key, scale, limit) 41 | 42 | assert [{"Hammer.RedisTest.RateLimit:" <> _, "1"}] = redis_all(key) 43 | clean_keys() 44 | end 45 | 46 | test "key has expirytime set", %{key: key} do 47 | scale 
= :timer.seconds(10) 48 | limit = 5 49 | 50 | RateLimit.hit(key, scale, limit) 51 | [{redis_key, "1"}] = redis_all(key) 52 | 53 | expected_expiretime = div(System.system_time(:second), 10) * 10 + 10 54 | 55 | assert Redix.command!(RateLimit, ["EXPIRETIME", redis_key]) == expected_expiretime 56 | 57 | clean_keys() 58 | end 59 | 60 | describe "hit" do 61 | test "returns {:allow, 1} tuple on first access", %{key: key} do 62 | scale = :timer.seconds(10) 63 | limit = 10 64 | 65 | assert {:allow, 1} = RateLimit.hit(key, scale, limit) 66 | end 67 | 68 | test "returns {:allow, 4} tuple on in-limit checks", %{key: key} do 69 | scale = :timer.minutes(10) 70 | limit = 10 71 | 72 | assert {:allow, 1} = RateLimit.hit(key, scale, limit) 73 | assert {:allow, 2} = RateLimit.hit(key, scale, limit) 74 | assert {:allow, 3} = RateLimit.hit(key, scale, limit) 75 | assert {:allow, 4} = RateLimit.hit(key, scale, limit) 76 | 77 | clean_keys() 78 | end 79 | 80 | test "returns expected tuples on mix of in-limit and out-of-limit checks", %{key: key} do 81 | scale = :timer.minutes(10) 82 | limit = 2 83 | 84 | assert {:allow, 1} = RateLimit.hit(key, scale, limit) 85 | assert {:allow, 2} = RateLimit.hit(key, scale, limit) 86 | assert {:deny, _wait} = RateLimit.hit(key, scale, limit) 87 | assert {:deny, _wait} = RateLimit.hit(key, scale, limit) 88 | clean_keys() 89 | end 90 | 91 | @tag :slow 92 | test "returns expected tuples after waiting for the next window", %{key: key} do 93 | scale = :timer.seconds(1) 94 | limit = 2 95 | 96 | assert {:allow, 1} = RateLimit.hit(key, scale, limit) 97 | assert {:allow, 2} = RateLimit.hit(key, scale, limit) 98 | assert {:deny, wait} = RateLimit.hit(key, scale, limit) 99 | 100 | :timer.sleep(wait) 101 | 102 | assert {:allow, 1} = RateLimit.hit(key, scale, limit) 103 | assert {:allow, 2} = RateLimit.hit(key, scale, limit) 104 | assert {:deny, _wait} = RateLimit.hit(key, scale, limit) 105 | clean_keys() 106 | end 107 | 108 | test "with custom increment", %{key: key} do 109 | scale = :timer.seconds(1) 110 | limit = 10 111 | 112 | assert {:allow, 4} = RateLimit.hit(key, scale, limit, 4) 113 | assert {:allow, 9} = RateLimit.hit(key, scale, limit, 5) 114 | assert {:deny, _wait} = RateLimit.hit(key, scale, limit, 3) 115 | clean_keys() 116 | end 117 | 118 | test "mixing default and custom increment", %{key: key} do 119 | scale = :timer.seconds(1) 120 | limit = 10 121 | 122 | assert {:allow, 3} = RateLimit.hit(key, scale, limit, 3) 123 | assert {:allow, 4} = RateLimit.hit(key, scale, limit) 124 | assert {:allow, 5} = RateLimit.hit(key, scale, limit) 125 | assert {:allow, 9} = RateLimit.hit(key, scale, limit, 4) 126 | assert {:allow, 10} = RateLimit.hit(key, scale, limit) 127 | assert {:deny, _wait} = RateLimit.hit(key, scale, limit, 2) 128 | clean_keys() 129 | end 130 | end 131 | 132 | describe "inc" do 133 | test "increments the count for the given key and scale", %{key: key} do 134 | scale = :timer.seconds(10) 135 | 136 | assert RateLimit.get(key, scale) == 0 137 | 138 | assert RateLimit.inc(key, scale) == 1 139 | assert RateLimit.get(key, scale) == 1 140 | 141 | assert RateLimit.inc(key, scale) == 2 142 | assert RateLimit.get(key, scale) == 2 143 | 144 | assert RateLimit.inc(key, scale) == 3 145 | assert RateLimit.get(key, scale) == 3 146 | 147 | assert RateLimit.inc(key, scale) == 4 148 | assert RateLimit.get(key, scale) == 4 149 | clean_keys() 150 | end 151 | end 152 | 153 | describe "get/set" do 154 | test "get returns the count set for the given key and scale", %{key: key} do 155 | scale 
= :timer.seconds(10) 156 | count = 10 157 | 158 | assert RateLimit.get(key, scale) == 0 159 | assert RateLimit.set(key, scale, count) == count 160 | assert RateLimit.get(key, scale) == count 161 | clean_keys() 162 | end 163 | end 164 | end 165 | -------------------------------------------------------------------------------- /test/test_helper.exs: -------------------------------------------------------------------------------- 1 | task = 2 | Task.async(fn -> 3 | {:ok, redix} = Redix.start_link("redis://localhost:6379") 4 | {:ok, "PONG"} == Redix.command(redix, ["PING"]) 5 | end) 6 | 7 | redis_available? = Task.await(task) 8 | 9 | exclude = 10 | if redis_available? do 11 | # force flush all keys before running tests 12 | {:ok, redix} = Redix.start_link("redis://localhost:6379") 13 | Redix.command!(redix, ["FLUSHALL"]) 14 | [] 15 | else 16 | Mix.shell().error(""" 17 | To enable Redis tests, start the local container with the following command: 18 | 19 | docker compose up -d redis 20 | """) 21 | 22 | [:redis] 23 | end 24 | 25 | ExUnit.start(exclude: exclude) 26 | --------------------------------------------------------------------------------