├── .credo.exs ├── .editorconfig ├── .formatter.exs ├── .github ├── ISSUE_TEMPLATE │ └── bug_report.md ├── dependabot.yml └── workflows │ ├── ci.yml │ └── publish.yml ├── .gitignore ├── .tool-versions ├── BENCHMARKS.md ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── CREDITS.md ├── LICENSE.md ├── README.md ├── RELEASE.md ├── assets ├── horizontal.png ├── horizontal.svg ├── logo.png └── logo.svg ├── bench └── base.exs ├── guides ├── Tutorial.md ├── distributed-ets.md └── upgrade-v7.md ├── lib ├── hammer.ex └── hammer │ ├── atomic.ex │ ├── atomic │ ├── fix_window.ex │ ├── leaky_bucket.ex │ └── token_bucket.ex │ ├── ets.ex │ └── ets │ ├── fix_window.ex │ ├── leaky_bucket.ex │ ├── sliding_window.ex │ └── token_bucket.ex ├── mix.exs ├── mix.lock └── test ├── hammer ├── atomic │ ├── clean_test.exs │ ├── fix_window_test.exs │ ├── leaky_bucket_test.exs │ └── token_bucket_test.exs ├── atomic_test.exs ├── ets │ ├── clean_test.exs │ ├── fix_window_test.exs │ ├── leaky_bucket_test.exs │ ├── sliding_window_test.exs │ └── token_bucket_test.exs └── ets_test.exs └── test_helper.exs /.credo.exs: -------------------------------------------------------------------------------- 1 | # Last updated for credo 1.6.1 2 | %{ 3 | # 4 | # You can have as many configs as you like in the `configs:` field. 5 | configs: [ 6 | %{ 7 | # 8 | # Run any config using `mix credo -C `. If no config name is given 9 | # "default" is used. 10 | # 11 | name: "default", 12 | # 13 | # These are the files included in the analysis: 14 | files: %{ 15 | # 16 | # You can give explicit globs or simply directories. 17 | # In the latter case `**/*.{ex,exs}` will be used. 
18 | # 19 | included: [ 20 | "lib/", 21 | "src/", 22 | "test/", 23 | "web/", 24 | "apps/*/lib/", 25 | "apps/*/src/", 26 | "apps/*/test/", 27 | "apps/*/web/" 28 | ], 29 | excluded: [~r"/_build/", ~r"/deps/", ~r"/node_modules/"] 30 | }, 31 | # 32 | # Load and configure plugins here: 33 | # 34 | plugins: [], 35 | # 36 | # If you create your own checks, you must specify the source files for 37 | # them here, so they can be loaded by Credo before running the analysis. 38 | # 39 | requires: [], 40 | # 41 | # If you want to enforce a style guide and need a more traditional linting 42 | # experience, you can change `strict` to `true` below: 43 | # 44 | strict: true, 45 | # 46 | # To modify the timeout for parsing files, change this value: 47 | # 48 | parse_timeout: 5000, 49 | # 50 | # If you want to use uncolored output by default, you can change `color` 51 | # to `false` below: 52 | # 53 | color: true, 54 | # 55 | # You can customize the parameters of any check by adding a second element 56 | # to the tuple. 57 | # 58 | # To disable a check put `false` as second element: 59 | # 60 | # {Credo.Check.Design.DuplicatedCode, false} 61 | # 62 | checks: %{ 63 | enabled: [ 64 | # 65 | ## Consistency Checks 66 | # 67 | {Credo.Check.Consistency.ExceptionNames, []}, 68 | {Credo.Check.Consistency.LineEndings, []}, 69 | {Credo.Check.Consistency.ParameterPatternMatching, []}, 70 | {Credo.Check.Consistency.SpaceAroundOperators, []}, 71 | {Credo.Check.Consistency.SpaceInParentheses, []}, 72 | {Credo.Check.Consistency.TabsOrSpaces, []}, 73 | 74 | # 75 | ## Design Checks 76 | # 77 | # You can customize the priority of any check 78 | # Priority values are: `low, normal, high, higher` 79 | # 80 | {Credo.Check.Design.AliasUsage, 81 | priority: :low, if_nested_deeper_than: 2, if_called_more_often_than: 0}, 82 | {Credo.Check.Design.SkipTestWithoutComment, []}, 83 | # You can also customize the exit_status of each check. 
84 | # If you don't want TODO comments to cause `mix credo` to fail, just 85 | # set this value to 0 (zero). 86 | # 87 | {Credo.Check.Design.TagTODO, [exit_status: 2]}, 88 | {Credo.Check.Design.TagFIXME, []}, 89 | 90 | # 91 | ## Readability Checks 92 | # 93 | {Credo.Check.Readability.AliasAs, 94 | files: %{excluded: ["lib/*_web.ex", "test/support/conn_case.ex"]}}, 95 | {Credo.Check.Readability.AliasOrder, []}, 96 | {Credo.Check.Readability.BlockPipe, []}, 97 | {Credo.Check.Readability.FunctionNames, []}, 98 | {Credo.Check.Readability.ImplTrue, []}, 99 | {Credo.Check.Readability.LargeNumbers, []}, 100 | {Credo.Check.Readability.MaxLineLength, priority: :low, max_length: 120}, 101 | {Credo.Check.Readability.ModuleAttributeNames, []}, 102 | {Credo.Check.Readability.ModuleDoc, []}, 103 | {Credo.Check.Readability.ModuleNames, []}, 104 | {Credo.Check.Readability.MultiAlias, []}, 105 | {Credo.Check.Readability.ParenthesesInCondition, []}, 106 | {Credo.Check.Readability.ParenthesesOnZeroArityDefs, []}, 107 | {Credo.Check.Readability.PipeIntoAnonymousFunctions, []}, 108 | {Credo.Check.Readability.PredicateFunctionNames, []}, 109 | {Credo.Check.Readability.PreferImplicitTry, []}, 110 | {Credo.Check.Readability.RedundantBlankLines, []}, 111 | {Credo.Check.Readability.Semicolons, []}, 112 | {Credo.Check.Readability.SinglePipe, []}, 113 | {Credo.Check.Readability.SpaceAfterCommas, []}, 114 | {Credo.Check.Readability.StrictModuleLayout, []}, 115 | {Credo.Check.Readability.StringSigils, []}, 116 | {Credo.Check.Readability.TrailingBlankLine, []}, 117 | {Credo.Check.Readability.TrailingWhiteSpace, []}, 118 | {Credo.Check.Readability.UnnecessaryAliasExpansion, []}, 119 | {Credo.Check.Readability.VariableNames, []}, 120 | {Credo.Check.Readability.WithCustomTaggedTuple, []}, 121 | {Credo.Check.Readability.WithSingleClause, []}, 122 | 123 | # 124 | ## Refactoring Opportunities 125 | # 126 | {Credo.Check.Refactor.Apply, []}, 127 | {Credo.Check.Refactor.CondStatements, []}, 128 | 
{Credo.Check.Refactor.CyclomaticComplexity, []}, 129 | {Credo.Check.Refactor.FilterFilter, []}, 130 | {Credo.Check.Refactor.FilterReject, []}, 131 | {Credo.Check.Refactor.FunctionArity, []}, 132 | {Credo.Check.Refactor.IoPuts, []}, 133 | {Credo.Check.Refactor.LongQuoteBlocks, []}, 134 | {Credo.Check.Refactor.MapJoin, []}, 135 | {Credo.Check.Refactor.MapMap, []}, 136 | {Credo.Check.Refactor.MatchInCondition, []}, 137 | {Credo.Check.Refactor.NegatedConditionsInUnless, []}, 138 | {Credo.Check.Refactor.NegatedConditionsWithElse, []}, 139 | {Credo.Check.Refactor.Nesting, []}, 140 | {Credo.Check.Refactor.PipeChainStart, []}, 141 | {Credo.Check.Refactor.RedundantWithClauseResult, []}, 142 | {Credo.Check.Refactor.RejectFilter, []}, 143 | {Credo.Check.Refactor.RejectReject, []}, 144 | {Credo.Check.Refactor.UnlessWithElse, []}, 145 | {Credo.Check.Refactor.WithClauses, []}, 146 | 147 | # 148 | ## Warnings 149 | # 150 | {Credo.Check.Warning.ApplicationConfigInModuleAttribute, []}, 151 | {Credo.Check.Warning.BoolOperationOnSameValues, []}, 152 | {Credo.Check.Warning.ExpensiveEmptyEnumCheck, []}, 153 | {Credo.Check.Warning.IExPry, []}, 154 | {Credo.Check.Warning.IoInspect, []}, 155 | {Credo.Check.Warning.MapGetUnsafePass, []}, 156 | {Credo.Check.Warning.MixEnv, []}, 157 | {Credo.Check.Warning.OperationOnSameValues, []}, 158 | {Credo.Check.Warning.OperationWithConstantResult, []}, 159 | {Credo.Check.Warning.RaiseInsideRescue, []}, 160 | {Credo.Check.Warning.SpecWithStruct, []}, 161 | {Credo.Check.Warning.UnsafeExec, []}, 162 | {Credo.Check.Warning.UnusedEnumOperation, []}, 163 | {Credo.Check.Warning.UnusedFileOperation, []}, 164 | {Credo.Check.Warning.UnusedKeywordOperation, []}, 165 | {Credo.Check.Warning.UnusedListOperation, []}, 166 | {Credo.Check.Warning.UnusedPathOperation, []}, 167 | {Credo.Check.Warning.UnusedRegexOperation, []}, 168 | {Credo.Check.Warning.UnusedStringOperation, []}, 169 | {Credo.Check.Warning.UnusedTupleOperation, []}, 170 | 
{Credo.Check.Warning.WrongTestFileExtension, []} 171 | ], 172 | disabled: [ 173 | # 174 | # Controversial and experimental checks (opt-in, just move the check to `:enabled` 175 | # and be sure to use `mix credo --strict` to see low priority checks if you set 176 | # `strict: false` above) 177 | # 178 | {Credo.Check.Consistency.MultiAliasImportRequireUse, []}, 179 | {Credo.Check.Consistency.UnusedVariableNames, []}, 180 | {Credo.Check.Design.DuplicatedCode, []}, 181 | {Credo.Check.Readability.SeparateAliasRequire, []}, 182 | {Credo.Check.Readability.SingleFunctionToBlockPipe, []}, 183 | {Credo.Check.Refactor.ABCSize, []}, 184 | {Credo.Check.Refactor.AppendSingleItem, []}, 185 | {Credo.Check.Refactor.DoubleBooleanNegation, []}, 186 | {Credo.Check.Refactor.ModuleDependencies, []}, 187 | {Credo.Check.Refactor.NegatedIsNil, []}, 188 | {Credo.Check.Refactor.VariableRebinding, []}, 189 | {Credo.Check.Warning.LeakyEnvironment, []}, 190 | {Credo.Check.Warning.UnsafeToAtom, []}, 191 | {Credo.Check.Readability.Specs, 192 | files: %{ 193 | excluded: [ 194 | "lib/*_web.ex", 195 | "lib/*_web/controllers/*_controller.ex", 196 | "lib/*_web/graphql/*/resolvers.ex" 197 | ] 198 | }} 199 | 200 | # 201 | # Custom checks can be created using `mix credo.gen.check`. 
202 |    # 203 |  ] 204 | } 205 | } 206 | ] 207 | } 208 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | 2 | # Check http://editorconfig.org for more information 3 | # This is the main config file for this project: 4 | root = true 5 | 6 | [*] 7 | charset = utf-8 8 | indent_style = space 9 | indent_size = 2 10 | trim_trailing_whitespace = true 11 | end_of_line = lf 12 | insert_final_newline = true 13 | -------------------------------------------------------------------------------- /.formatter.exs: -------------------------------------------------------------------------------- 1 | # Used by "mix format" 2 | [ 3 | inputs: ["{mix,.formatter}.exs", "{lib,test}/**/*.{ex,exs}"] 4 | ] 5 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: bug 6 | assignees: epinault 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **Provide the following details** 14 | 15 | - Version of the library: 16 | - Backend used: 17 | - Algorithm used: 18 | - Elixir version (elixir -v): 19 | - Erlang version (erl -v): 20 | - Operating system: 21 | 22 | **Expected behavior** 23 | A clear and concise description of what you expected to happen. 24 | 25 | **Actual behavior** 26 | A clear and concise description of what actually happens. 
27 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | # Maintain dependencies for GitHub Actions 4 | - package-ecosystem: "github-actions" 5 | directory: "/" 6 | schedule: 7 | interval: "weekly" 8 | 9 | - package-ecosystem: "mix" 10 | directory: "/" 11 | schedule: 12 | interval: "weekly" -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: [master] 7 | 8 | jobs: 9 | test: 10 | runs-on: ubuntu-24.04 11 | 12 | env: 13 | MIX_ENV: test 14 | 15 | strategy: 16 | # https://hexdocs.pm/elixir/compatibility-and-deprecations.html#between-elixir-and-erlang-otp 17 | matrix: 18 | elixir: [1.17, 1.16, 1.15] 19 | otp: [26, 25] 20 | include: 21 | - elixir: 1.18 22 | otp: 27 23 | - elixir: 1.17 24 | otp: 27 25 | - elixir: 1.14 26 | otp: 25 27 | 28 | steps: 29 | - uses: actions/checkout@v4 30 | 31 | - uses: erlef/setup-beam@v1 32 | with: 33 | otp-version: ${{matrix.otp}} 34 | elixir-version: ${{matrix.elixir}} 35 | 36 | - uses: actions/cache@v4 37 | with: 38 | path: | 39 | deps 40 | _build 41 | key: test-otp-${{ matrix.otp }}-elixir-${{ matrix.elixir }}-ref-${{ github.head_ref || github.ref }}-mix-${{ hashFiles('**/mix.lock') }} 42 | restore-keys: | 43 | test-otp-${{ matrix.otp }}-elixir-${{ matrix.elixir }}-ref-${{ github.head_ref || github.ref }}-mix- 44 | test-otp-${{ matrix.otp }}-elixir-${{ matrix.elixir }}-ref-refs/heads/master-mix- 45 | 46 | - run: mix deps.get 47 | - run: mix deps.unlock --check-unused 48 | - run: mix deps.compile 49 | - run: mix compile --warnings-as-errors 50 | - run: mix credo --strict --format=oneline 51 | - run: mix test --warnings-as-errors --cover 52 | 53 | format: 54 | runs-on: 
ubuntu-latest 55 | steps: 56 | - uses: actions/checkout@v4 57 | - uses: erlef/setup-beam@v1 58 | with: 59 | elixir-version: 1 60 | otp-version: 27 61 | - run: mix format --check-formatted 62 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish 2 | 3 | on: 4 | push: 5 | tags: 6 | - "*" 7 | 8 | jobs: 9 | publish: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Check out 13 | uses: actions/checkout@v4 14 | 15 | - name: Publish package to hex.pm 16 | uses: hipcall/github_action_publish_hex@v1 17 | env: 18 | HEX_API_KEY: ${{ secrets.HEX_API_KEY }} 19 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # The directory Mix will write compiled artifacts to. 2 | /_build/ 3 | 4 | # If you run "mix test --cover", coverage assets end up here. 5 | /cover/ 6 | 7 | # The directory Mix downloads your dependencies sources to. 8 | /deps/ 9 | 10 | # Where third-party dependencies like ExDoc output generated docs. 11 | /doc/ 12 | 13 | # Ignore .fetch files in case you like to edit your project deps locally. 14 | /.fetch 15 | 16 | # If the VM crashes, it generates a dump, let's ignore it too. 17 | erl_crash.dump 18 | 19 | # Also ignore archive artifacts (built via "mix archive.build"). 20 | *.ez 21 | 22 | # Ignore package tarball (built via "mix hex.build"). 23 | hammer-*.tar 24 | 25 | # Temporary files for e.g. 
tests 26 | /tmp 27 | -------------------------------------------------------------------------------- /.tool-versions: -------------------------------------------------------------------------------- 1 | elixir 1.18.2-otp-27 2 | erlang 27.1.2 3 | -------------------------------------------------------------------------------- /BENCHMARKS.md: -------------------------------------------------------------------------------- 1 | Following are different benchmarks for different algorithms. it also comparing with other libraries to give you an idea of how Hammer performs. Clearly 7.x series is much faster than 6.x series. Prior to 7.x series, Hammer had some performance issues. 2 | 3 | ## Running benchmarks 4 | 5 | Install the dependencies 6 | 7 | ```shell 8 | mix deps.get 9 | ``` 10 | 11 | Run the benchmarks 12 | 13 | ```shell 14 | MIX_ENV=bench LIMIT=1 SCALE=5000 RANGE=200000 PARALLEL=600 mix run bench/base.exs 15 | ``` 16 | 17 | ## 7.x series 18 | 19 | Results are from my local machine 20 | 21 | ```shell 22 | ❯ MIX_ENV=bench LIMIT=1 SCALE=5000 RANGE=200000 PARALLEL=600 mix run bench/base.exs 23 | Compiling 8 files (.ex) 24 | parallel: 600 25 | limit: 1 26 | scale: 5000 27 | range: 200000 28 | 29 | Operating System: macOS 30 | CPU Information: Apple M1 Max 31 | Number of Available Cores: 10 32 | Available memory: 32 GB 33 | Elixir 1.17.3 34 | Erlang 27.1.2 35 | JIT enabled: true 36 | 37 | Benchmark suite executing with the following configuration: 38 | warmup: 14 s 39 | time: 6 s 40 | memory time: 0 ns 41 | reduction time: 0 ns 42 | parallel: 600 43 | inputs: none specified 44 | Estimated total run time: 3 min 20 s 45 | 46 | Benchmarking ex_rated ... 47 | Benchmarking hammer_atomic_fix_window ... 48 | Benchmarking hammer_atomic_leaky_bucket ... 49 | Benchmarking hammer_atomic_token_bucket ... 50 | Benchmarking hammer_fix_window ... 51 | Benchmarking hammer_leaky_bucket ... 52 | Benchmarking hammer_sliding_window ... 53 | Benchmarking hammer_token_bucket ... 
54 | Benchmarking plug_attack ... 55 | Benchmarking rate_limiter ... 56 | Calculating statistics... 57 | Formatting results... 58 | 59 | Name ips average deviation median 99th % 60 | hammer_atomic_token_bucket 28.60 K 34.97 μs ±1400.75% 0.63 μs 2.17 μs 61 | hammer_atomic_leaky_bucket 28.16 K 35.51 μs ±1438.27% 0.63 μs 2.13 μs 62 | hammer_atomic_fix_window 21.56 K 46.37 μs ±1348.09% 0.88 μs 8.50 μs 63 | plug_attack 15.94 K 62.75 μs ±1391.65% 0.71 μs 57.88 μs 64 | hammer_leaky_bucket 15.48 K 64.60 μs ±1329.87% 0.79 μs 68.33 μs 65 | hammer_token_bucket 14.68 K 68.11 μs ±1326.20% 0.75 μs 76.42 μs 66 | rate_limiter 14.17 K 70.58 μs ±1461.48% 2.08 μs 18.63 μs 67 | hammer_fix_window 12.91 K 77.48 μs ±1287.59% 0.79 μs 68.29 μs 68 | ex_rated 6.06 K 164.91 μs ±1647.42% 2.29 μs 117.79 μs 69 | hammer_sliding_window 0.00255 K 391671.28 μs ±22.88% 394739.80 μs 627207.29 μs 70 | 71 | Comparison: 72 | hammer_atomic_token_bucket 28.60 K 73 | hammer_atomic_leaky_bucket 28.16 K - 1.02x slower +0.54 μs 74 | hammer_atomic_fix_window 21.56 K - 1.33x slower +11.40 μs 75 | plug_attack 15.94 K - 1.79x slower +27.78 μs 76 | hammer_leaky_bucket 15.48 K - 1.85x slower +29.63 μs 77 | hammer_token_bucket 14.68 K - 1.95x slower +33.14 μs 78 | rate_limiter 14.17 K - 2.02x slower +35.61 μs 79 | hammer_fix_window 12.91 K - 2.22x slower +42.51 μs 80 | ex_rated 6.06 K - 4.72x slower +129.94 μs 81 | hammer_sliding_window 0.00255 K - 11200.21x slower +391636.31 μs 82 | 83 | Extended statistics: 84 | 85 | Name minimum maximum sample size mode 86 | hammer_atomic_token_bucket 0.125 μs 60127.88 μs 41.39 M 0.50 μs 87 | hammer_atomic_leaky_bucket 0.0830 μs 253198.17 μs 41.47 M 0.50 μs 88 | hammer_atomic_fix_window 0.166 μs 51585.75 μs 33.51 M 0.75 μs 89 | plug_attack 0.125 μs 159736.35 μs 23.60 M 0.63 μs 90 | hammer_leaky_bucket 0.167 μs 152246.55 μs 22.19 M 0.63 μs 91 | hammer_token_bucket 0.21 μs 37019.11 μs 21.08 M 0.63 μs 92 | rate_limiter 0.33 μs 95669.81 μs 19.97 M 2 μs 93 | hammer_fix_window 0.166 μs 
80411.22 μs 21.27 M 0.63 μs 94 | ex_rated 0.75 μs 161347.46 μs 10.68 M 2.13 μs 95 | hammer_sliding_window 3376.38 μs 848362.89 μs 9.36 K419281.00 μs, 382658.61 μ 96 | ``` 97 | 98 | ## 6.x series 99 | 100 | Results are from my local machine 101 | 102 | ```shell 103 | Generated rate_limit app 104 | prallel: 600 105 | limit: 1 106 | scale: 5000 107 | range: 200000 108 | 109 | Operating System: macOS 110 | CPU Information: Apple M1 Max 111 | Number of Available Cores: 10 112 | Available memory: 32 GB 113 | Elixir 1.17.2 114 | Erlang 26.2.5.2 115 | JIT enabled: true 116 | 117 | Benchmark suite executing with the following configuration: 118 | warmup: 14 s 119 | time: 6 s 120 | memory time: 0 ns 121 | reduction time: 0 ns 122 | parallel: 600 123 | inputs: none specified 124 | Estimated total run time: 1 min 20 s 125 | 126 | Benchmarking ex_rated ... 127 | Benchmarking hammer ... 128 | Benchmarking plug_attack .. 129 | Benchmarking rate_limiter ... 130 | Calculating statistics... 131 | Formatting results... 
132 | 133 | Name ips average deviation median 99th % 134 | plug_attack 15.49 K 64.57 μs ±1164.32% 0.63 μs 75.75 μs 135 | rate_limiter 14.43 K 69.32 μs ±1481.80% 2.08 μs 7.63 μs 136 | ex_rated 5.10 K 196.02 μs ±1723.71% 2.21 μs 103.38 μs 137 | hammer 0.60 K 1673.82 μs ±20.82% 1587.92 μs 3502.98 μs 138 | 139 | Comparison: 140 | plug_attack 15.49 K 141 | rate_limiter 14.43 K - 1.07x slower +4.75 μs 142 | ex_rated 5.10 K - 3.04x slower +131.44 μs 143 | hammer 0.60 K - 25.92x slower +1609.25 μs 144 | 145 | Extended statistics: 146 | 147 | Name minimum maximum sample size mode 148 | plug_attack 0.125 μs 33191.21 μs 25.27 M 0.50 μs 149 | rate_limiter 0.38 μs 117571.92 μs 20.21 M 2 μs 150 | ex_rated 0.71 μs 169859.21 μs 8.82 M 1.88 μs 151 | hammer 1405.46 μs 9620.92 μs 2.15 M 1559.75 μs 152 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## 7.0.1 - 2025-03-04 4 | 5 | - Fix race condition in Atomic backends during creation of key. 6 | 7 | ## 7.0.0 - 2025-02-06 8 | 9 | - Release candidate for 7.0.0. See [./guides/upgrade-v7.md] for upgrade instructions. 10 | 11 | ## 7.0.0-rc.4 - 2025-01-06 12 | 13 | - Fix Token bucket to respect custom cost 14 | 15 | ## 7.0.0-rc.3 - 2024-12-18 16 | 17 | - Fix regression to support other backends 18 | 19 | ## 7.0.0-rc.2 - 2024-12-17 20 | 21 | - Fix type specs for ETS backends 22 | - Adds Atomic backends and possible algorithms 23 | - Added `:algorithm` option to the Atomic backend with support for: 24 | - `:fix_window` (default) - Fixed time window rate limiting 25 | - `:leaky_bucket` - Constant rate limiting with burst capacity 26 | - `:token_bucket` - Token-based rate limiting with burst capacity 27 | - Add benchmarks file and run them with `bench` 28 | 29 | ## 7.0.0-rc.1 - 2024-12-13 30 | 31 | - Improved API a little more. 
Should be compatible with previous RC 32 | - Made ETS backend more flexible with `:algorithm` option 33 | - Added `:key_older_than` option to the ETS backend 34 | - Added `:algorithm` option to the ETS backend with support for: 35 | - `:fix_window` (default) - Fixed time window rate limiting 36 | - `:sliding_window` - Sliding time window for smoother rate limiting 37 | - `:leaky_bucket` - Constant rate limiting with burst capacity 38 | - `:token_bucket` - Token-based rate limiting with burst capacity 39 | 40 | ## 7.0.0-rc.0 - 2024-12-13 41 | 42 | - Breaking change. Completely new API. Consider upgrading if you are experiencing performance or usability problems with Hammer v6. See [./guides/upgrade-v7.md] for upgrade instructions. https://github.com/ExHammer/hammer/pull/104 43 | - Hammer.Plug has been removed. See documentation for using Hammer as a plug in Phoenix. 44 | 45 | ## 6.2.1 - 2024-02-23 46 | 47 | - Fix issue in OTP 26 and Elixir 1.15 by not using to_existing_atom in configuration 48 | 49 | ### Changed 50 | 51 | ## 6.2.0 - 2024-01-31 52 | 53 | - Ensure Elixir version is ~> 1.13 https://github.com/ExHammer/hammer/pull/79. 54 | 55 | ## 6.1.0 - 2022-06-13 56 | 57 | ### Changed 58 | 59 | - Upgrade dependency packages 60 | - Merged https://github.com/ExHammer/hammer/pull/41 resulting in ETS without GenServer (and therefore better performance) 61 | - Merged https://github.com/ExHammer/hammer/pull/46 remove additional whitespace 62 | - Updated Docs based on https://github.com/ExHammer/hammer/pull/45 63 | - Adds CREDITS.md 64 | 65 | ## 6.0.0 - 2018-10-12 66 | 67 | ### Changed 68 | 69 | - Change the `ETS` backend to throw an error if either `expiry_ms` or 70 | `cleanup_interval_ms` config values are missing. This should have been fixed 71 | ages ago. 72 | - Default `:pool_max_overflow` changed to `0`. It's a better default, given 73 | that some users have seen weird errors when using a higher overflow. 
74 | In general, capacity should be increased by using a higher `:pool_size` instead 75 | - Changed how the ETS backend does cleanups of data, should be more performant. 76 | 77 | 78 | ## 5.0.0 - 2018-05-18 79 | 80 | ### Added 81 | 82 | - A new `check_rate_inc` function, which allows the caller to specify the 83 | integer with which to increment the bucket by. This is useful for limiting 84 | APIs which have some notion of "cost" per call. 85 | 86 | 87 | ## 4.0.0 - 2018-04-23 88 | 89 | ### Changed 90 | 91 | - Use a worker-pool for the backend (via poolboy), 92 | this avoids bottle-necking all traffic through a single hammer 93 | process, thus improving throughput for the system overall 94 | 95 | ### Added 96 | 97 | - New configuration options for backends: 98 | - `:pool_size`, determines the number of workers in the pool (default 4) 99 | - `:pool_max_overflow`, maximum extra workers to be spawned when the 100 | system is under pressure (default 4) 101 | - Multiple instances of the same backend! 
You can now have two ETS backends, 102 | fifteen Redis's, whatever you want 103 | 104 | 105 | ## 3.0.0 - 2018-02-20 106 | 107 | ### Changed 108 | 109 | - Require elixir >= 1.6 110 | - Use a more sane supervision tree structure 111 | 112 | 113 | ## 2.1.0 2017-11-25 114 | 115 | ### Changed 116 | 117 | - Add option to use more than one backend 118 | - Add option to suppress all logging 119 | 120 | 121 | ## 2.0.0 - 2017-09-24 122 | 123 | ### Changed 124 | 125 | - New, simpler API 126 | - No longer need to start backend processes manually 127 | - Call `Hammer.check_rate` directly, rather than `use`ing a macro 128 | - Hammer is now an OTP application, configured via `Mix.Config` 129 | 130 | 131 | ## 1.0.0 - 2017-08-22 132 | 133 | ### Added 134 | - Formalise backend API in `Hammer.Backend` behaviour 135 | 136 | 137 | ## 0.2.1 - 2017-08-10 138 | 139 | ### Changed 140 | 141 | - Minor fixes 142 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Code of Conduct 2 | 3 | As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities. 4 | 5 | We are committed to making participation in this project a harassment-free experience for everyone, regardless of level of experience, gender, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, or nationality. 
6 | 7 | Examples of unacceptable behavior by participants include: 8 | 9 | * The use of sexualized language or imagery 10 | * Personal attacks 11 | * Trolling or insulting/derogatory comments 12 | * Public or private harassment 13 | * Publishing other's private information, such as physical or electronic addresses, without explicit permission 14 | * Other unethical or unprofessional conduct. 15 | 16 | Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers commit themselves to fairly and consistently applying these principles to every aspect of managing this project. Project maintainers who do not follow or enforce the Code of Conduct may be permanently removed from the project team. 17 | 18 | This code of conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. 19 | 20 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by opening an issue or contacting one or more of the project maintainers. 21 | 22 | This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org), version 1.2.0, available at [https://www.contributor-covenant.org/version/1/2/0/code-of-conduct/](https://www.contributor-covenant.org/version/1/2/0/code-of-conduct/) 23 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to Hammer 2 | 3 | Please take a moment to review this document in order to make the contribution 4 | process easy and effective for everyone involved! 5 | Also make sure you read our [Code of Conduct](CODE_OF_CONDUCT.md) that outlines our commitment towards an open and welcoming environment. 
6 | 7 | ## Using the issue tracker 8 | 9 | Use the issues tracker for: 10 | 11 | * [Bug reports](#bug-reports) 12 | * [Submitting pull requests](#pull-requests) 13 | 14 | We do our best to keep the issue tracker tidy and organized, making it useful 15 | for everyone. For example, we classify open issues per perceived difficulty, 16 | making it easier for developers to [contribute to Hammer](#pull-requests). 17 | 18 | ## Bug reports 19 | 20 | A bug is either a _demonstrable problem_ that is caused by the code in the repository, 21 | or indicate missing, unclear, or misleading documentation. Good bug reports are extremely 22 | helpful - thank you! 23 | 24 | Guidelines for bug reports: 25 | 26 | 1. **Use the GitHub issue search** — check if the issue has already been 27 | reported. 28 | 29 | 2. **Check if the issue has been fixed** — try to reproduce it using the 30 | `master` branch in the repository. 31 | 32 | 3. **Isolate and report the problem** — ideally create a reduced test 33 | case. 34 | 35 | Please try to be as detailed as possible in your report. Include information about 36 | your Operating System, as well as your Erlang, Elixir and Hammer versions. Please provide steps to 37 | reproduce the issue as well as the outcome you were expecting! All these details 38 | will help developers to fix any potential bugs. 39 | 40 | Example: 41 | 42 | > Short and descriptive example bug report title 43 | > 44 | > A summary of the issue and the environment in which it occurs. If suitable, 45 | > include the steps required to reproduce the bug. 46 | > 47 | > 1. This is the first step 48 | > 2. This is the second step 49 | > 3. Further steps, etc. 50 | > 51 | > `` - a link to the reduced test case (e.g. a GitHub Gist) 52 | > 53 | > Any other information you want to share that is relevant to the issue being 54 | > reported. 
This might include the lines of code that you have identified as 55 | > causing the bug, and potential solutions (and your opinions on their 56 | > merits). 57 | 58 | ## Contributing Documentation 59 | 60 | Code documentation (`@doc`, `@moduledoc`, `@typedoc`) has a special convention: 61 | the first paragraph is considered to be a short summary. 62 | 63 | For functions, macros and callbacks say what it will do. For example write 64 | something like: 65 | 66 | ```elixir 67 | @doc """ 68 | Marks the given value as HTML safe. 69 | """ 70 | def safe({:safe, value}), do: {:safe, value} 71 | ``` 72 | 73 | For modules, protocols and types say what it is. For example write 74 | something like: 75 | 76 | ```elixir 77 | defmodule MyModule do 78 | @moduledoc """ 79 | Conveniences for working HTML strings and templates. 80 | ... 81 | """ 82 | ``` 83 | 84 | Keep in mind that the first paragraph might show up in a summary somewhere, long 85 | texts in the first paragraph create very ugly summaries. As a rule of thumb 86 | anything longer than 80 characters is too long. 87 | 88 | Try to keep unnecessary details out of the first paragraph, it's only there to 89 | give a user a quick idea of what the documented "thing" does/is. The rest of the 90 | documentation string can contain the details, for example when a value and when 91 | `nil` is returned. 92 | 93 | If possible include examples, preferably in a form that works with doctests. 94 | This makes it easy to test the examples so that they don't go stale and examples 95 | are often a great help in explaining what a function does. 96 | 97 | ## Pull requests 98 | 99 | Good pull requests - patches, improvements, new features - are a fantastic 100 | help. They should remain focused in scope and avoid containing unrelated 101 | commits. 102 | 103 | **IMPORTANT**: By submitting a patch, you agree that your work will be 104 | licensed under the license used by the project. 105 | 106 | If you have any large pull request in mind (e.g. 
implementing features, 107 | refactoring code, etc), **please ask first** otherwise you risk spending 108 | a lot of time working on something that the project's developers might 109 | not want to merge into the project. 110 | 111 | Please adhere to the coding conventions in the project (indentation, 112 | accurate comments, etc.) and don't forget to add your own tests and 113 | documentation. When working with git, we recommend the following process 114 | in order to craft an excellent pull request: 115 | 116 | 1. [Fork](https://help.github.com/articles/fork-a-repo/) the project, clone your fork, 117 | and configure the remotes: 118 | 119 | ```bash 120 | # Clone your fork of the repo into the current directory 121 | git clone https://github.com//hammer 122 | 123 | # Navigate to the newly cloned directory 124 | cd hammer 125 | 126 | # Assign the original repo to a remote called "upstream" 127 | git remote add upstream https://github.com/ExHammer/hammer 128 | ``` 129 | 130 | 2. If you cloned a while ago, get the latest changes from upstream, and update your fork: 131 | 132 | ```bash 133 | git checkout master 134 | git pull upstream master 135 | git push 136 | ``` 137 | 138 | 3. Create a new topic branch (off of `master`) to contain your feature, change, 139 | or fix. 140 | 141 | **IMPORTANT**: Making changes in `master` is discouraged. You should always 142 | keep your local `master` in sync with upstream `master` and make your 143 | changes in topic branches. 144 | 145 | ```bash 146 | git checkout -b 147 | ``` 148 | 149 | 4. Commit your changes in logical chunks. Keep your commit messages organized, 150 | with a short description in the first line and more detailed information on 151 | the following lines. Feel free to use Git's 152 | [interactive rebase](https://help.github.com/articles/about-git-rebase/) 153 | feature to tidy up your commits before making them public. 154 | 155 | 5. Make sure all the tests are still passing. 
156 | 157 | ```bash 158 | mix test 159 | ``` 160 | 161 | 6. Push your topic branch up to your fork: 162 | 163 | ```bash 164 | git push origin 165 | ``` 166 | 167 | 7. [Open a Pull Request](https://help.github.com/articles/about-pull-requests/) 168 | with a clear title and description. 169 | 170 | 8. If you haven't updated your pull request for a while, you should consider 171 | rebasing on master and resolving any conflicts. 172 | 173 | **IMPORTANT**: _Never ever_ merge upstream `master` into your branches. You 174 | should always `git rebase` on `master` to bring your changes up to date when 175 | necessary. 176 | 177 | ```bash 178 | git checkout master 179 | git pull upstream master 180 | git checkout 181 | git rebase master 182 | ``` 183 | 184 | Thank you for your contributions! 185 | 186 | ## Guides 187 | 188 | These Guides aim to be inclusive. We use "we" and "our" instead of "you" and 189 | "your" to foster this sense of inclusion. 190 | 191 | Ideally there is something for everybody in each guide, from beginner to expert. 192 | This is hard, maybe impossible. When we need to compromise, we do so on behalf 193 | of beginning users because expert users have more tools at their disposal to 194 | help themselves. 195 | 196 | The general pattern we use for presenting information is to first introduce a 197 | small, discrete topic, then write a small amount of code to demonstrate the 198 | concept, then verify that the code worked. 199 | 200 | In this way, we build from small, easily digestible concepts into more complex 201 | ones. The shorter this cycle is, as long as the information is still clear and 202 | complete, the better. 203 | 204 | For formatting the guides: 205 | 206 | - We use the `elixir` code fence for all module code. 207 | - We use the `iex` for IEx sessions. 208 | - We use the `console` code fence for shell commands. 209 | - We use the `html` code fence for html templates, even if there is elixir code 210 | in the template. 
211 | - We use backticks for filenames and directory paths. 212 | - We use backticks for module names, function names, and variable names. 213 | - Documentation line length should hard wrapped at around 100 characters if possible. 214 | -------------------------------------------------------------------------------- /CREDITS.md: -------------------------------------------------------------------------------- 1 | Special Credits to [June Kelly](https://github.com/JuneKelly) Original creator of the Hammer library 2 | 3 | Thanks to our Contributors as well: 4 | 5 | - [Nikita Sobolev](https://github.com/sobolevn) 6 | - [reallinfo](https://github.com/reallinfo) 7 | - [Ben Smith](https://github.com/slashdotdash) 8 | - [Rob Madole](https://github.com/robmadole) 9 | - [Kian-Meng Ang](https://github.com/kianmeng) 10 | - [Gary Rennie](https://github.com/Gazler) 11 | - [Ross Wilson](https://github.com/rosswilson) 12 | - [Emmanuel Pinault](https://github.com/epinault) 13 | - [ruslandoga](https://github.com/ruslandoga) 14 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | =============== 2 | The MIT License 3 | =============== 4 | 5 | Copyright (c) 2023 June Kelly 6 | 7 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 8 | 9 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
10 | 11 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 12 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Hammer 2 | 3 | [![Build Status](https://github.com/ExHammer/hammer/actions/workflows/ci.yml/badge.svg)](https://github.com/ExHammer/hammer/actions/workflows/ci.yml) 4 | [![Hex.pm](https://img.shields.io/hexpm/v/hammer.svg)](https://hex.pm/packages/hammer) 5 | [![Documentation](https://img.shields.io/badge/documentation-gray)](https://hexdocs.pm/hammer) 6 | [![Total Download](https://img.shields.io/hexpm/dt/hammer.svg)](https://hex.pm/packages/hammer) 7 | [![License](https://img.shields.io/hexpm/l/hammer.svg)](https://github.com/ExHammer/hammer/blob/master/LICENSE.md) 8 | 9 | **Hammer** is a rate-limiter for Elixir with pluggable storage backends. Hammer enables users to set limits on actions performed within specified time intervals, applying per-user or global limits on API requests, file uploads, and more. 10 | 11 | --- 12 | 13 | > [!NOTE] 14 | > 15 | > This README is for the unreleased master branch, please reference the [official documentation on hexdocs](https://hexdocs.pm/hammer) for the latest stable release. 16 | 17 | --- 18 | 19 | ## Installation 20 | 21 | Hammer is [available in Hex](https://hex.pm/packages/hammer). 
Install by adding `:hammer` to your list of dependencies in `mix.exs`: 22 | 23 | ```elixir 24 | def deps do 25 | [ 26 | {:hammer, "~> 7.0"} 27 | ] 28 | end 29 | ``` 30 | 31 | ## Available Backends 32 | 33 | Atomic backends are single-node rate limiting but will be the fastest option. 34 | 35 | - [Hammer.ETS](https://hexdocs.pm/hammer/Hammer.ETS.html) (default, can be [distributed](./guides/distributed-ets.md)) 36 | - [Hammer.Atomic](https://hexdocs.pm/hammer/Hammer.Atomic.html) 37 | - [Hammer.Redis](https://github.com/ExHammer/hammer-backend-redis) 38 | - [Hammer.Mnesia](https://github.com/ExHammer/hammer-backend-mnesia) 39 | 40 | ## Available Algorithms: 41 | 42 | Each backend supports multiple algorithms. Not all of them are available for all backends. The following table shows which algorithms are available for which backends. 43 | 44 | | Algorithm | Backend | 45 | | --------- | ------- | 46 | | [Hammer.Atomic.FixWindow](https://hexdocs.pm/hammer/Hammer.Atomic.FixWindow.html) | [Hammer.Atomic](https://hexdocs.pm/hammer/Hammer.Atomic.html) | 47 | | [Hammer.Atomic.LeakyBucket](https://hexdocs.pm/hammer/Hammer.Atomic.LeakyBucket.html) | [Hammer.Atomic](https://hexdocs.pm/hammer/Hammer.Atomic.html) | 48 | | [Hammer.Atomic.TokenBucket](https://hexdocs.pm/hammer/Hammer.Atomic.TokenBucket.html) | [Hammer.Atomic](https://hexdocs.pm/hammer/Hammer.Atomic.html) | 49 | | [Hammer.ETS.FixWindow](https://hexdocs.pm/hammer/Hammer.ETS.FixWindow.html) | [Hammer.ETS](https://hexdocs.pm/hammer/Hammer.ETS.html) | 50 | | [Hammer.ETS.LeakyBucket](https://hexdocs.pm/hammer/Hammer.ETS.LeakyBucket.html) | [Hammer.ETS](https://hexdocs.pm/hammer/Hammer.ETS.html) | 51 | | [Hammer.ETS.TokenBucket](https://hexdocs.pm/hammer/Hammer.ETS.TokenBucket.html) | [Hammer.ETS](https://hexdocs.pm/hammer/Hammer.ETS.html) | 52 | | [Hammer.ETS.SlidingWindow](https://hexdocs.pm/hammer/Hammer.ETS.SlidingWindow.html) | [Hammer.ETS](https://hexdocs.pm/hammer/Hammer.ETS.html) | 53 | | 
[Hammer.Redis.FixedWindow](https://hexdocs.pm/hammer_backend_redis/Hammer.Redis.FixedWindow.html) | [Hammer.Redis](https://hexdocs.pm/hammer_backend_redis/Hammer.Redis.html) | 54 | | [Hammer.Redis.LeakyBucket](https://hexdocs.pm/hammer_backend_redis/Hammer.Redis.LeakyBucket.html) | [Hammer.Redis](https://hexdocs.pm/hammer_backend_redis/Hammer.Redis.html) | 55 | | [Hammer.Redis.TokenBucket](https://hexdocs.pm/hammer_backend_redis/Hammer.Redis.TokenBucket.html) | [Hammer.Redis](https://hexdocs.pm/hammer_backend_redis/Hammer.Redis.html) | 56 | | [Hammer.Mnesia.FixedWindow](https://hexdocs.pm/hammer_backend_mnesia/Hammer.Mnesia.FixedWindow.html) | [Hammer.Mnesia](https://hexdocs.pm/hammer_backend_mnesia/Hammer.Mnesia.html) | 57 | | [Hammer.Mnesia.LeakyBucket](https://hexdocs.pm/hammer_backend_mnesia/Hammer.Mnesia.LeakyBucket.html) | [Hammer.Mnesia](https://hexdocs.pm/hammer_backend_mnesia/Hammer.Mnesia.html) | 58 | | [Hammer.Mnesia.TokenBucket](https://hexdocs.pm/hammer_backend_mnesia/Hammer.Mnesia.TokenBucket.html) | [Hammer.Mnesia](https://hexdocs.pm/hammer_backend_mnesia/Hammer.Mnesia.html) | 59 | 60 | ## Default Algorithm 61 | 62 | By default, Hammer backends use the **fixed window counter** to track actions within set time windows, resetting the count at the start of each new window. For example, with a limit of 10 uploads per minute, a user could upload up to 10 files between 12:00:00 and 12:00:59, and up to 10 more between 12:01:00 and 12:01:59. Notice that the user can upload 20 videos in a second if the uploads are timed at the window edges. If this is an issue, it can be worked around with a "bursty" counter which can be implemented with the current API by making two checks, one for the original interval with the total limit, and one for a shorter interval with a fraction of the limit. That would smooth out the number of requests allowed. 
63 | 64 | ## Algorithm Comparison 65 | 66 | Here's a comparison of the different rate limiting algorithms to help you choose: 67 | 68 | ### [Fixed Window](https://hexdocs.pm/hammer/Hammer.Atomic.FixWindow.html) 69 | - Simplest implementation with lowest overhead 70 | - Good for basic rate limiting with clear time boundaries 71 | - Potential edge case: Up to 2x requests possible at window boundaries 72 | - Best for: Basic API limits where occasional bursts are acceptable 73 | 74 | ### [Leaky Bucket](https://hexdocs.pm/hammer/Hammer.Atomic.LeakyBucket.html) 75 | - Provides smooth, consistent request rate 76 | - Requests "leak" out at constant rate 77 | - Good for traffic shaping and steady throughput 78 | - Best for: Network traffic control, queue processing 79 | 80 | ### [Token Bucket](https://hexdocs.pm/hammer/Hammer.Atomic.TokenBucket.html) 81 | - Allows controlled bursts while maintaining average rate 82 | - Tokens regenerate at fixed rate 83 | - More flexible than fixed windows 84 | - Best for: APIs needing burst tolerance, gaming mechanics 85 | 86 | ### [Sliding Window](https://hexdocs.pm/hammer/Hammer.ETS.SlidingWindow.html) 87 | - Most precise rate limiting 88 | - No boundary conditions like fixed windows 89 | - Higher overhead than other algorithms 90 | - Best for: Strict rate enforcement, critical systems 91 | 92 | Selection Guide: 93 | - Need simple implementation? → Fixed Window 94 | - Need smooth output rate? → Leaky Bucket 95 | - Need burst tolerance? → Token Bucket 96 | - Need precise limits? → Sliding Window 97 | 98 | ## How to use Hammer 99 | 100 | - Basic usage is covered in the [Tutorial](https://hexdocs.pm/hammer/tutorial.html). 101 | - Distributed usage is covered in the [Distributed ETS](https://hexdocs.pm/hammer/distributed-ets.html) guide. 102 | 103 | ## The quick start 104 | 105 | - **Limit:** Maximum number of actions allowed in a window. 106 | - **Scale:** Duration of the time window (in milliseconds). 
107 | - **Key:** Unique identifier (e.g., user ID) to scope the rate limiting. 108 | 109 | ## Example Usage 110 | 111 | ```elixir 112 | defmodule MyApp.RateLimit do 113 | use Hammer, backend: :ets 114 | end 115 | 116 | MyApp.RateLimit.start_link() 117 | 118 | user_id = 42 119 | key = "upload_video:#{user_id}" 120 | scale = :timer.minutes(1) 121 | limit = 3 122 | 123 | case MyApp.RateLimit.hit(key, scale, limit) do 124 | {:allow, _count} -> 125 | # upload the video 126 | :ok 127 | 128 | {:deny, retry_after} -> 129 | # deny the request 130 | {:error, :rate_limit, _message = "try again in #{retry_after}ms"} 131 | end 132 | ``` 133 | 134 | ## Benchmarks 135 | 136 | See the [BENCHMARKS.md](https://github.com/ExHammer/hammer/blob/master/BENCHMARKS.md) for more details. 137 | 138 | ## Acknowledgements 139 | 140 | Hammer was originally inspired by the [ExRated](https://github.com/grempe/ex_rated) library, by [grempe](https://github.com/grempe). 141 | 142 | ## License 143 | 144 | Copyright (c) 2023 June Kelly 145 | Copyright (c) 2023-2024 See [CONTRIBUTORS.md](https://github.com/ExHammer/hammer/blob/master/CONTRIBUTORS.md) 146 | 147 | This library is MIT licensed. See the [LICENSE](https://github.com/ExHammer/hammer/blob/master/LICENSE.md) for details. 148 | -------------------------------------------------------------------------------- /RELEASE.md: -------------------------------------------------------------------------------- 1 | # Release Instructions 2 | 3 | 1. Check related deps for required version bumps and compatibility 4 | 2. Bump version in related files below 5 | 3. Bump external dependency version in related external files below 6 | 4. Run tests: 7 | - `mix test` in the root folder 8 | - `mix credo` in the root folder 9 | 5. Commit, push code 10 | 6. 
Publish `hammer` packages and docs 11 | 12 | -------------------------------------------------------------------------------- /assets/horizontal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ExHammer/hammer/d06a17b6be0166267bf2dc635025c256102f9f7b/assets/horizontal.png -------------------------------------------------------------------------------- /assets/horizontal.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 7 | 8 | 9 | 13 | 16 | 19 | 23 | 25 | 26 | 27 | 30 | 31 | 32 | -------------------------------------------------------------------------------- /assets/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ExHammer/hammer/d06a17b6be0166267bf2dc635025c256102f9f7b/assets/logo.png -------------------------------------------------------------------------------- /assets/logo.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 7 | 8 | 9 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /bench/base.exs: -------------------------------------------------------------------------------- 1 | # MIX_ENV=bench LIMIT=1 SCALE=5000 RANGE=10000 PARALLEL=500 mix run bench/basic.exs 2 | # inspired from https://github.com/PragTob/rate_limit/blob/master/bench/basic.exs 3 | profile? 
= !!System.get_env("PROFILE") 4 | parallel = String.to_integer(System.get_env("PARALLEL", "1")) 5 | limit = String.to_integer(System.get_env("LIMIT", "1000000")) 6 | scale = String.to_integer(System.get_env("SCALE", "60000")) 7 | range = String.to_integer(System.get_env("RANGE", "1_000")) 8 | 9 | IO.puts(""" 10 | parallel: #{parallel} 11 | limit: #{limit} 12 | scale: #{scale} 13 | range: #{range} 14 | """) 15 | 16 | # TODO: clean up ETS table before/after each scenario 17 | defmodule ETSFixWindowRateLimiter do 18 | use Hammer, backend: :ets, algorithm: :fix_window 19 | end 20 | 21 | defmodule ETSSlidingWindowRateLimiter do 22 | use Hammer, backend: :ets, algorithm: :sliding_window 23 | end 24 | 25 | defmodule ETSLeakyBucketRateLimiter do 26 | use Hammer, backend: :ets, algorithm: :leaky_bucket 27 | end 28 | 29 | defmodule ETSTokenBucketRateLimiter do 30 | use Hammer, backend: :ets, algorithm: :token_bucket 31 | end 32 | 33 | defmodule AtomicFixWindowRateLimiter do 34 | use Hammer, backend: :atomic, algorithm: :fix_window 35 | end 36 | 37 | defmodule AtomicTokenBucketRateLimiter do 38 | use Hammer, backend: :atomic, algorithm: :token_bucket 39 | end 40 | 41 | defmodule AtomicLeakyBucketRateLimiter do 42 | use Hammer, backend: :atomic, algorithm: :leaky_bucket 43 | end 44 | 45 | PlugAttack.Storage.Ets.start_link(:plug_attack_sites, clean_period: :timer.minutes(10)) 46 | 47 | ETSFixWindowRateLimiter.start_link(clean_period: :timer.minutes(10)) 48 | ETSSlidingWindowRateLimiter.start_link(clean_period: :timer.minutes(10)) 49 | ETSTokenBucketRateLimiter.start_link(clean_period: :timer.minutes(10)) 50 | ETSLeakyBucketRateLimiter.start_link(clean_period: :timer.minutes(10)) 51 | AtomicFixWindowRateLimiter.start_link(clean_period: :timer.minutes(10)) 52 | AtomicTokenBucketRateLimiter.start_link(clean_period: :timer.minutes(10)) 53 | AtomicLeakyBucketRateLimiter.start_link(clean_period: :timer.minutes(10)) 54 | 55 | Benchee.run( 56 | %{ 57 | "hammer_sliding_window" => fn key 
-> ETSSlidingWindowRateLimiter.hit("sites:#{key}", scale, limit) end, 58 | "hammer_fix_window" => fn key -> ETSFixWindowRateLimiter.hit("sites:#{key}", scale, limit) end, 59 | "hammer_leaky_bucket" => fn key -> ETSLeakyBucketRateLimiter.hit("sites:#{key}", scale, limit) end, 60 | "hammer_token_bucket" => fn key -> ETSTokenBucketRateLimiter.hit("sites:#{key}", scale, limit) end, 61 | "hammer_atomic_fix_window" => fn key -> AtomicFixWindowRateLimiter.hit("sites:#{key}", scale, limit) end, 62 | "hammer_atomic_token_bucket" => fn key -> AtomicTokenBucketRateLimiter.hit("sites:#{key}", scale, limit) end, 63 | "hammer_atomic_leaky_bucket" => fn key -> AtomicLeakyBucketRateLimiter.hit("sites:#{key}", scale, limit) end, 64 | "plug_attack" => fn key -> 65 | PlugAttack.Rule.throttle(_key = key, 66 | storage: {PlugAttack.Storage.Ets, :plug_attack_sites}, 67 | limit: limit, 68 | period: scale 69 | ) 70 | end, 71 | "ex_rated" => fn key -> ExRated.check_rate("sites:#{key}", scale, limit) end, 72 | "rate_limiter" => fn key -> 73 | key = "sites:#{key}" 74 | rate_limiter = RateLimiter.get(key) || RateLimiter.new(key, scale, limit) 75 | RateLimiter.hit(rate_limiter) 76 | end 77 | }, 78 | formatters: [{Benchee.Formatters.Console, extended_statistics: true}], 79 | before_each: fn _ -> :rand.uniform(range) end, 80 | print: [fast_warning: false], 81 | time: 6, 82 | # fill the table with some data 83 | warmup: 14, 84 | profile_after: profile?, 85 | parallel: parallel 86 | ) 87 | -------------------------------------------------------------------------------- /guides/Tutorial.md: -------------------------------------------------------------------------------- 1 | # Tutorial 2 | 3 | Hammer is a rate limiting library for Elixir that can help you control the frequency of specific actions in your application, such as limiting API requests, login attempts, or file uploads. This tutorial will guide you through setting up Hammer, defining a rate limiter, and applying rate limiting in your app. 
4 | 5 | ## Installation 6 | 7 | Add Hammer as a dependency in `mix.exs`: 8 | 9 | ```elixir 10 | def deps do 11 | [{:hammer, "~> 7.0.0"}] 12 | end 13 | ``` 14 | 15 | Then, run: 16 | 17 | ```console 18 | $ mix deps.get 19 | ``` 20 | 21 | ## Core Concepts 22 | 23 | When rate-limiting an action, you specify a maximum number of allowed occurrences (the limit) within a certain time frame (the scale). For example, you might allow only 5 login attempts per minute for each user. The limit is typically enforced based on a unique identifier (like a user ID or IP address) but can also be applied globally. 24 | 25 | In Hammer: 26 | - `limit` is the maximum number of actions permitted. 27 | - `scale` is the time period (in milliseconds) for that limit. 28 | - `key` is a unique identifier for the rate limit, combining the action name with a user identifier (like "login_attempt:42" for user 42) is a common approach. 29 | 30 | Hammer uses a fixed window counter approach. It divides time into fixed-size windows of `scale` size and counts the number of requests in each window, blocking any requests that exceed the `limit`. 31 | 32 | ## Usage 33 | 34 | To use Hammer, you need to: 35 | 36 | - Define a rate limiter module. 37 | - Add the Hammer backend to your application's supervision tree. 38 | 39 | In this example, we'll use the Hammer.ETS backend, which stores data in an in-memory ETS table. 40 | 41 | ### Step 1: Define a Rate Limiter 42 | 43 | First, define a rate limiter module in your application. Use the `Hammer` module with your chosen backend and configure options as needed: 44 | 45 | ```elixir 46 | defmodule MyApp.RateLimit do 47 | use Hammer, backend: :ets 48 | end 49 | ``` 50 | 51 | Here: 52 | - `:backend` specifies the storage backend (`:ets` for in-memory storage, `Hammer.Redis` for Redis, etc.). 
53 | 54 | ### Step 2: Start the Rate Limiter 55 | 56 | Add the rate limiter to your application's supervision tree or start it manually by calling `MyApp.RateLimit.start_link/1` with any runtime options: 57 | 58 | ```elixir 59 | MyApp.RateLimit.start_link(clean_period: :timer.minutes(1)) 60 | ``` 61 | 62 | - `:clean_period` is an optional parameter for `:ets` backend that specifies how often to clean expired buckets in the ETS table. 63 | 64 | ## Using the Rate Limiter 65 | 66 | With the rate limiter running, you can use `hit/3` or `hit/4` to enforce rate limits. 67 | 68 | ### Example: Basic Rate Limit Check 69 | 70 | Suppose you want to limit file uploads to 10 per minute per user. 71 | 72 | ```elixir 73 | user_id = 42 74 | key = "upload_file:#{user_id}" 75 | scale = :timer.minutes(1) 76 | limit = 10 77 | 78 | case MyApp.RateLimit.hit(key, scale, limit) do 79 | {:allow, _current_count} -> # proceed with file upload 80 | {:deny, _ms_until_next_window} -> # deny the request 81 | end 82 | ``` 83 | 84 | ### Customizing Rate Increments 85 | 86 | If you want to specify a custom increment—useful when each action has a "cost"—you can use `hit/4`. Here's an example for a bulk upload scenario: 87 | 88 | ```elixir 89 | user_id = 42 90 | key = "upload_file:#{user_id}" 91 | scale = :timer.minutes(1) 92 | limit = 10 93 | number_of_files = 3 94 | 95 | case MyApp.RateLimit.hit(key, scale, limit, number_of_files) do 96 | {:allow, _current_count} -> # upload all files 97 | {:deny, _ms_until_next_window} -> # deny the request 98 | end 99 | ``` 100 | ## Using Hammer as a Plug in Phoenix 101 | 102 | you can easily use Hammer as a plug by using the controller plug in Phoenix: 103 | 104 | ```elixir 105 | plug :rate_limit_videos when action in ... 
106 | 107 | defp rate_limit_videos(conn, _opts) do 108 | user_id = conn.assigns.current_user.id 109 | key = "videos:#{user_id}" 110 | scale = :timer.minutes(1) 111 | limit = 10 112 | 113 | case MyApp.RateLimit.hit(key, scale, limit) do 114 | {:allow, _count} -> 115 | conn 116 | 117 | {:deny, retry_after} -> 118 | conn 119 | |> put_resp_header("retry-after", Integer.to_string(div(retry_after, 1000))) 120 | |> send_resp(429, []) 121 | |> halt() 122 | end 123 | end 124 | ``` 125 | 126 | Or you could add it to your endpoint: 127 | 128 | ```elixir 129 | defmodule MyAppWeb.Endpoint do 130 | use Phoenix.Endpoint 131 | 132 | plug RemoteIP 133 | plug :rate_limit 134 | 135 | # ... 136 | 137 | defp rate_limit(conn, _opts) do 138 | key = "web_requests:#{:inet.ntoa(conn.remote_ip)}" 139 | scale = :timer.minutes(1) 140 | limit = 1000 141 | 142 | case MyApp.RateLimit.hit(key, scale, limit) do 143 | {:allow, _count} -> 144 | conn 145 | 146 | {:deny, retry_after} -> 147 | retry_after_seconds = div(retry_after, 1000) 148 | 149 | conn 150 | |> put_resp_header("retry-after", Integer.to_string(retry_after_seconds)) 151 | |> send_resp(429, []) 152 | |> halt() 153 | end 154 | end 155 | end 156 | ``` 157 | 158 | ## Using Hammer with Redis 159 | 160 | To persist rate-limiting data across multiple nodes, you can use the Redis backend. Install the `Hammer.Redis` backend and update your rate limiter configuration: 161 | 162 | ```elixir 163 | defmodule MyApp.RateLimit do 164 | use Hammer, backend: Hammer.Redis 165 | end 166 | ``` 167 | 168 | Then, start the rate limiter pool with Redis configuration: 169 | 170 | ```elixir 171 | MyApp.RateLimit.start_link(host: "redix.myapp.com") 172 | ``` 173 | 174 | Configuration options are the same as [Redix](https://hexdocs.pm/redix/Redix.html#start_link/1), except for `:name`, which comes from the module definition. 
175 | -------------------------------------------------------------------------------- /guides/distributed-ets.md: -------------------------------------------------------------------------------- 1 | # Distributed Rate Limiter with ETS Backend 2 | 3 | This example implements a distributed, eventually consistent rate limiter using Phoenix.PubSub for broadcasting each hit across nodes and a local ETS backend to manage rate-limiting counters. This setup is useful when you need to limit the number of actions (e.g., requests) across multiple nodes in a cluster. 4 | 5 | Based on [HexpmWeb.RateLimitPubSub.](https://github.com/hexpm/hexpm/blob/main/lib/hexpm_web/rate_limit_pub_sub.ex) 6 | 7 | ```elixir 8 | defmodule MyApp.RateLimit do 9 | @moduledoc """ 10 | Distributed, eventually consistent rate limiter using `Phoenix.PubSub` and `Hammer`. 11 | 12 | This module provides a rate-limiting mechanism for requests using a distributed, 13 | eventually consistent approach. It combines local in-memory counting with a 14 | broadcasting mechanism to keep counters in sync across nodes in a cluster. 15 | """ 16 | 17 | # Checks rate locally and broadcasts the hit to other nodes to synchronize. 18 | def hit(key, scale, limit, increment \\ 1) do 19 | :ok = broadcast({:inc, key, scale, increment}) 20 | Local.hit(key, scale, limit, increment) 21 | end 22 | 23 | defmodule Local do 24 | @moduledoc false 25 | use Hammer, backend: :ets 26 | # This inner module handles local hit counting via Hammer with ETS as a backend. 27 | end 28 | 29 | defmodule Listener do 30 | @moduledoc false 31 | use GenServer 32 | 33 | # Starts the listener process, subscribing to the specified PubSub topic. 34 | # This process will listen for `:inc` messages to keep local counters in sync. 
35 | 36 | @doc false 37 | def start_link(opts) do 38 | pubsub = Keyword.fetch!(opts, :pubsub) 39 | topic = Keyword.fetch!(opts, :topic) 40 | GenServer.start_link(__MODULE__, {pubsub, topic}) 41 | end 42 | 43 | @impl true 44 | def init({pubsub, topic}) do 45 | :ok = Phoenix.PubSub.subscribe(pubsub, topic) 46 | {:ok, []} 47 | end 48 | 49 | # Handles remote `:inc` messages by updating the local counter. 50 | 51 | @impl true 52 | def handle_info({:inc, key, scale, increment}, state) do 53 | _count = Local.inc(key, scale, increment) 54 | {:noreply, state} 55 | end 56 | end 57 | 58 | @pubsub MyApp.PubSub 59 | @topic "__ratelimit" 60 | 61 | # Sends a message to other nodes in the cluster to synchronize rate-limiting information. 62 | defp broadcast(message) do 63 | {:ok, {Phoenix.PubSub.PG2, adapter_name}} = Registry.meta(@pubsub, :pubsub) 64 | adapter_name.broadcast(adapter_name, @topic, message) 65 | end 66 | 67 | def child_spec(opts) do 68 | %{ 69 | id: __MODULE__, 70 | start: {__MODULE__, :start_link, [opts]}, 71 | type: :supervisor 72 | } 73 | end 74 | 75 | # Wraps the local Hammer counter and the listener processes under a single supervisor. 76 | def start_link(opts) do 77 | children = [{Local, opts}, {Listener, pubsub: @pubsub, topic: @topic}] 78 | Supervisor.start_link(children, strategy: :one_for_one) 79 | end 80 | end 81 | ``` 82 | 83 | To be covered: 84 | - new nodes joining have empty counters 85 | - net split recovery 86 | - dns_cluster 87 | -------------------------------------------------------------------------------- /guides/upgrade-v7.md: -------------------------------------------------------------------------------- 1 | # Upgrading to Hammer V7 2 | 3 | ## Elixir and Erlang/OTP Compatibility 4 | 5 | * Hammer v7 requires Elixir 1.14 and Erlang/OTP 25 at a minimum. 6 | * We recommend using the latest Elixir and Erlang/OTP versions. 7 | 8 | ## Changes to your Project 9 | 10 | * Update your `mix.exs` to depend on version `7.0.0` of Hammer. 
11 | 12 | ```elixir 13 | def deps do 14 | [ 15 | ... 16 | {:hammer, "~> 7.0.0"} 17 | ... 18 | ] 19 | end 20 | ``` 21 | 22 | ## Define a Rate Limiter 23 | 24 | First, define a rate limiter module in your application. Use the `Hammer` module with your chosen backend and configure options as needed: 25 | 26 | ```elixir 27 | defmodule MyApp.RateLimit do 28 | use Hammer, backend: :ets 29 | end 30 | ``` 31 | 32 | This would setup the rate limiter using the `Hammer.ETS` backend. See the [Tutorial](./Tutorial.md) guide for more information on other backends. 33 | 34 | ## Update your Application Supervisor 35 | 36 | * Pick up the value in your config file for `cleanup_interval_ms`. 37 | * remove the `config` lines for `Hammer` as they are no longer needed in all of the `config/*.exs` files. 38 | * In your `application.ex` file, add the following line to start the rate limiter: 39 | 40 | ```elixir 41 | def start(_type, _args) do 42 | 43 | children = [ 44 | ... 45 | {MyApp.RateLimit, [clean_period: 60_000]} 46 | ... 47 | ] 48 | 49 | Supervisor.start_link(children, strategy: :one_for_one) 50 | end 51 | ``` 52 | 53 | ## Change to Backend Configuration 54 | 55 | * Remapped all the `Hammer.check_rate/3` and `Hammer.check_rate/4` to `Hammer.hit/3`. 56 | * Remapped all the `Hammer.check_rate_inc/4` and `Hammer.check_rate_inc/5` to `Hammer.hit/4`. 57 | * for the `Hammer.delete_buckets`, you need to remove them as there no true replacement. You could potentially use `Hammer.ETS.set/1` to reset specific key 58 | * for the `Hammer.make_rate_checker`, you need to remove them as there no replacement. 59 | 60 | ## Changes to the Hammer.Plug 61 | 62 | * The `Hammer.Plug` has been removed. Remove any references to it in your code. 63 | * Migrate it by using regular Phoenix plugs in either a controller plug or an endpoint plug. See the [Tutorial](./Tutorial.md) guide for some examples. 
64 | -------------------------------------------------------------------------------- /lib/hammer.ex: -------------------------------------------------------------------------------- 1 | defmodule Hammer do 2 | @moduledoc """ 3 | Hammer is a rate-limiting library for Elixir. 4 | 5 | It provides a simple way for creating rate limiters, and comes with a built-in ETS backend. 6 | 7 | defmodule MyApp.RateLimit do 8 | use Hammer, backend: :ets 9 | end 10 | 11 | # Start the rate limiter, in case of ETS it will create the ETS table and schedule cleanups 12 | MyApp.RateLimit.start_link(clean_period: :timer.minutes(10)) 13 | 14 | # Check the rate limit allowing 10 requests per second 15 | MyApp.RateLimit.hit("some-key", _scale = :timer.seconds(1), _limit = 10) 16 | """ 17 | 18 | @type key :: term 19 | @type scale :: pos_integer 20 | @type limit :: pos_integer 21 | @type count :: pos_integer 22 | @type increment :: non_neg_integer 23 | 24 | @doc """ 25 | Checks if a key is allowed to perform an action, and increment the counter. 26 | 27 | Same as `hit/4` with `increment` set to 1. 28 | """ 29 | @callback hit(key, scale, limit) :: {:allow, count} | {:deny, timeout} 30 | 31 | @doc """ 32 | Optional callback to check if a key is allowed to perform an action, and increment the counter. 33 | 34 | Returns `{:allow, count}` if the action is allowed, or `{:deny, timeout}` if the action is denied. 35 | 36 | This is the only required callback. 37 | """ 38 | @callback hit(key, scale, limit, increment) :: {:allow, count} | {:deny, timeout} 39 | 40 | @doc """ 41 | Same as `inc/3` with `increment` set to 1. 42 | """ 43 | @callback inc(key, scale) :: count 44 | 45 | @doc """ 46 | Optional callback for incrementing a counter value for a kit without performing limit check. 47 | 48 | Returns the new counter value. 49 | """ 50 | @callback inc(key, scale, increment) :: count 51 | 52 | @doc """ 53 | Optional callback for setting the counter value for a key. 
54 | 55 | Returns the new counter value. 56 | """ 57 | @callback set(key, scale, count) :: count 58 | 59 | @doc """ 60 | Optional callback for getting the counter value for a key. 61 | 62 | Returns the current counter value. 63 | """ 64 | @callback get(key, scale) :: count 65 | 66 | @optional_callbacks hit: 4, inc: 2, inc: 3, set: 3, get: 2 67 | 68 | @doc """ 69 | Use the Hammer library in a module to create a rate limiter. 70 | 71 | defmodule MyApp.RateLimit do 72 | use Hammer, backend: :ets 73 | end 74 | 75 | """ 76 | defmacro __using__(opts) do 77 | quote bind_quoted: [opts: opts] do 78 | @behaviour Hammer 79 | @hammer_opts opts 80 | 81 | backend = 82 | Keyword.get(opts, :backend) || 83 | raise ArgumentError, """ 84 | Hammer requires a backend to be specified. Example: 85 | 86 | use Hammer, backend: :ets 87 | """ 88 | 89 | # this allows :ets to be aliased to Hammer.ETS 90 | backend = 91 | case backend do 92 | :ets -> Hammer.ETS 93 | :atomic -> Hammer.Atomic 94 | backend -> backend 95 | end 96 | 97 | @before_compile backend 98 | end 99 | end 100 | end 101 | -------------------------------------------------------------------------------- /lib/hammer/atomic.ex: -------------------------------------------------------------------------------- 1 | defmodule Hammer.Atomic do 2 | @moduledoc """ 3 | A rate limiter implementation using Erlang's :atomics module for atomic counters. 4 | 5 | This provides fast, atomic counter operations without the overhead of ETS or process messaging. 6 | Requires Erlang/OTP 21.2 or later. 7 | 8 | The atomic backend supports the following algorithms: 9 | 10 | - `:fix_window` - Fixed window rate limiting (default) 11 | Simple counting within fixed time windows. See [Hammer.Atomic.FixWindow](Hammer.Atomic.FixWindow.html) for more details. 12 | 13 | - `:leaky_bucket` - Leaky bucket rate limiting 14 | Smooth rate limiting with a fixed rate of tokens. See [Hammer.Atomic.LeakyBucket](Hammer.Atomic.LeakyBucket.html) for more details. 
15 | 
16 |   - `:token_bucket` - Token bucket rate limiting
17 |     Flexible rate limiting with bursting capability. See [Hammer.Atomic.TokenBucket](Hammer.Atomic.TokenBucket.html) for more details.
18 |   """
19 | 
20 |   use GenServer
21 |   require Logger
22 | 
23 |   @type start_option ::
24 |           {:clean_period, pos_integer()}
25 |           | {:key_older_than, pos_integer()}
26 |           | GenServer.option()
27 | 
28 |   @type config :: %{
29 |           table: atom(),
30 |           table_opts: list(),
31 |           clean_period: pos_integer(),
32 |           key_older_than: pos_integer(),
33 |           algorithm: module()
34 |         }
35 | 
36 |   # credo:disable-for-next-line Credo.Check.Refactor.CyclomaticComplexity
37 |   defmacro __before_compile__(%{module: module}) do
38 |     hammer_opts = Module.get_attribute(module, :hammer_opts)
39 |     # Resolve the :algorithm option given to `use Hammer` into a concrete implementation module.
40 |     algorithm =
41 |       case Keyword.get(hammer_opts, :algorithm) do
42 |         nil ->
43 |           Hammer.Atomic.FixWindow
44 | 
45 |         :atomic ->
46 |           Hammer.Atomic.FixWindow
47 | 
48 |         :fix_window ->
49 |           Hammer.Atomic.FixWindow
50 | 
51 |         :sliding_window ->
52 |           Hammer.Atomic.SlidingWindow # NOTE(review): lib/hammer/atomic/ has no sliding_window.ex in this tree — Code.ensure_loaded!/1 below would raise for :sliding_window; confirm the module exists or remove this clause
53 | 
54 |         :leaky_bucket ->
55 |           Hammer.Atomic.LeakyBucket
56 | 
57 |         :token_bucket ->
58 |           Hammer.Atomic.TokenBucket
59 |         # NOTE(review): the message below says "backend" but this clause validates the :algorithm option.
60 |         _module -> # any other value, including arbitrary modules, is rejected
61 |           raise ArgumentError, """
62 |           Hammer requires a valid backend to be specified. Must be one of: :atomic, :fix_window, :sliding_window, :leaky_bucket, :token_bucket.
63 |           If none is specified, :fix_window is used.
64 | 65 | Example: 66 | 67 | use Hammer, backend: :atomic 68 | """ 69 | end 70 | 71 | Code.ensure_loaded!(algorithm) 72 | 73 | quote do 74 | @table __MODULE__ 75 | @algorithm unquote(algorithm) 76 | 77 | def child_spec(opts) do 78 | %{id: __MODULE__, start: {__MODULE__, :start_link, [opts]}, type: :worker} 79 | end 80 | 81 | def start_link(opts) do 82 | opts = Keyword.put(opts, :table, @table) 83 | opts = Keyword.put_new(opts, :clean_period, :timer.minutes(1)) 84 | opts = Keyword.put_new(opts, :algorithm, @algorithm) 85 | Hammer.Atomic.start_link(opts) 86 | end 87 | 88 | if function_exported?(@algorithm, :hit, 4) do 89 | def hit(key, scale, limit) do 90 | @algorithm.hit(@table, key, scale, limit) 91 | end 92 | end 93 | 94 | if function_exported?(@algorithm, :hit, 5) do 95 | def hit(key, scale, limit, increment \\ 1) do 96 | @algorithm.hit(@table, key, scale, limit, increment) 97 | end 98 | end 99 | 100 | if function_exported?(@algorithm, :inc, 4) do 101 | def inc(key, scale, increment \\ 1) do 102 | @algorithm.inc(@table, key, scale, increment) 103 | end 104 | end 105 | 106 | if function_exported?(@algorithm, :set, 4) do 107 | def set(key, scale, count) do 108 | @algorithm.set(@table, key, scale, count) 109 | end 110 | end 111 | 112 | if function_exported?(@algorithm, :get, 3) do 113 | def get(key, scale) do 114 | @algorithm.get(@table, key, scale) 115 | end 116 | end 117 | 118 | if function_exported?(@algorithm, :get, 2) do 119 | def get(key, scale) do 120 | @algorithm.get(@table, key) 121 | end 122 | end 123 | end 124 | end 125 | 126 | @doc """ 127 | Starts the atomic rate limiter process. 128 | 129 | Options: 130 | - `:clean_period` - How often to run cleanup (ms). Default 1 minute. 131 | - `:key_older_than` - Max age for entries (ms). Default 24 hours. 
132 | """ 133 | @spec start_link([start_option]) :: GenServer.on_start() 134 | def start_link(opts) do 135 | {gen_opts, opts} = Keyword.split(opts, [:debug, :spawn_opt, :hibernate_after]) 136 | 137 | {clean_period, opts} = Keyword.pop!(opts, :clean_period) 138 | {table, opts} = Keyword.pop!(opts, :table) 139 | {algorithm, opts} = Keyword.pop!(opts, :algorithm) 140 | {key_older_than, opts} = Keyword.pop(opts, :key_older_than, :timer.hours(24)) 141 | 142 | case opts do 143 | [] -> 144 | :ok 145 | 146 | _ -> 147 | Logger.warning( 148 | "Unrecognized options passed to Hammer.Atomic.start_link/1: #{inspect(opts)}" 149 | ) 150 | end 151 | 152 | config = %{ 153 | table: table, 154 | table_opts: algorithm.ets_opts(), 155 | clean_period: clean_period, 156 | key_older_than: key_older_than, 157 | algorithm: algorithm 158 | } 159 | 160 | GenServer.start_link(__MODULE__, config, gen_opts) 161 | end 162 | 163 | @impl GenServer 164 | def init(config) do 165 | :ets.new(config.table, config.table_opts) 166 | 167 | schedule(config.clean_period) 168 | {:ok, config} 169 | end 170 | 171 | @doc """ 172 | Returns the current time in milliseconds. 
173 | """ 174 | @spec now() :: pos_integer() 175 | @compile inline: [now: 0] 176 | def now do 177 | System.system_time(:millisecond) 178 | end 179 | 180 | @impl GenServer 181 | def handle_info(:clean, config) do 182 | clean(config) 183 | 184 | schedule(config.clean_period) 185 | {:noreply, config} 186 | end 187 | 188 | defp clean(config) do 189 | table = config.table 190 | 191 | now = now() 192 | 193 | :ets.foldl( 194 | fn {_key, atomic} = term, deleted -> 195 | expires_at = :atomics.get(atomic, 2) 196 | 197 | if now - expires_at > config.key_older_than do 198 | :ets.delete_object(table, term) 199 | deleted + 1 200 | else 201 | deleted 202 | end 203 | end, 204 | 0, 205 | table 206 | ) 207 | end 208 | 209 | defp schedule(clean_period) do 210 | Process.send_after(self(), :clean, clean_period) 211 | end 212 | end 213 | -------------------------------------------------------------------------------- /lib/hammer/atomic/fix_window.ex: -------------------------------------------------------------------------------- 1 | defmodule Hammer.Atomic.FixWindow do 2 | @moduledoc """ 3 | This module implements the Fix Window algorithm. 4 | 5 | The fixed window algorithm works by dividing time into fixed intervals or "windows" 6 | of a specified duration (scale). Each window tracks request counts independently. 7 | 8 | For example, with a 60 second window: 9 | - Window 1: 0-60 seconds 10 | - Window 2: 60-120 seconds 11 | - And so on... 12 | 13 | ## The algorithm: 14 | 1. When a request comes in, we: 15 | - Calculate which window it belongs to based on current time 16 | - Increment the counter for that window 17 | - Store expiration time as end of window 18 | 2. To check if rate limit is exceeded: 19 | - If count <= limit: allow request 20 | - If count > limit: deny and return time until window expires 21 | 3. 
Old windows are automatically cleaned up after expiration 22 | 23 | This provides simple rate limiting but has edge cases where a burst of requests 24 | spanning a window boundary could allow up to 2x the limit in a short period. 25 | For more precise limiting, consider using the sliding window algorithm instead. 26 | 27 | The fixed window algorithm is a good choice when: 28 | 29 | - You need simple, predictable rate limiting with clear time boundaries 30 | - The exact precision of the rate limit is not critical 31 | - You want efficient implementation with minimal storage overhead 32 | - Your use case can tolerate potential bursts at window boundaries 33 | 34 | ## Common use cases include: 35 | 36 | - Basic API rate limiting where occasional bursts are acceptable 37 | - Protecting backend services from excessive load 38 | - Implementing fair usage policies 39 | - Scenarios where clear time-based quotas are desired (e.g. "100 requests per minute") 40 | 41 | The main tradeoff is that requests near window boundaries can allow up to 2x the 42 | intended limit in a short period. For example with a limit of 100 per minute: 43 | - 100 requests at 11:59:59 44 | - Another 100 requests at 12:00:01 45 | 46 | This results in 200 requests in 2 seconds, while still being within limits. 47 | If this behavior is problematic, consider using the sliding window algorithm instead. 48 | 49 | The fixed window algorithm supports the following options: 50 | 51 | - `:clean_period` - How often to run the cleanup process (in milliseconds) 52 | Defaults to 1 minute. The cleanup process removes expired window entries. 53 | 54 | ## Example 55 | 56 | ### Example configuration: 57 | 58 | MyApp.RateLimit.start_link( 59 | clean_period: :timer.minutes(5), 60 | ) 61 | 62 | This would run cleanup every 5 minutes and clean up old windows. 
63 | 64 | ### Example usage: 65 | 66 | defmodule MyApp.RateLimit do 67 | use Hammer, backend: :atomic, algorithm: :fix_window 68 | end 69 | 70 | MyApp.RateLimit.start_link(clean_period: :timer.minutes(1)) 71 | 72 | # Allow 10 requests per second 73 | MyApp.RateLimit.hit("user_123", 1000, 10) 74 | """ 75 | alias Hammer.Atomic 76 | @doc false 77 | @spec ets_opts() :: list() 78 | def ets_opts do 79 | [ 80 | :named_table, 81 | :set, 82 | :public, 83 | {:read_concurrency, true}, 84 | {:write_concurrency, true}, 85 | {:decentralized_counters, true} 86 | ] 87 | end 88 | 89 | @doc """ 90 | Checks if a key is allowed to perform an action based on the fixed window algorithm. 91 | """ 92 | @spec hit( 93 | table :: atom(), 94 | key :: String.t(), 95 | scale :: pos_integer(), 96 | limit :: pos_integer(), 97 | increment :: pos_integer() 98 | ) :: {:allow, non_neg_integer()} | {:deny, non_neg_integer()} 99 | def hit(table, key, scale, limit, increment) do 100 | now = Atomic.now() 101 | window = div(now, scale) 102 | full_key = {key, window} 103 | expires_at = (window + 1) * scale 104 | 105 | case :ets.lookup(table, full_key) do 106 | [{_, atomic}] -> 107 | count = :atomics.add_get(atomic, 1, increment) 108 | :atomics.exchange(atomic, 2, expires_at) 109 | 110 | if count <= limit do 111 | {:allow, count} 112 | else 113 | {:deny, expires_at - now} 114 | end 115 | 116 | [] -> 117 | :ets.insert_new(table, {full_key, :atomics.new(2, signed: false)}) 118 | hit(table, key, scale, limit, increment) 119 | end 120 | end 121 | 122 | @doc """ 123 | Increments the counter for a given key in the fixed window algorithm. 
124 |   """
125 |   @spec inc(
126 |           table :: atom(),
127 |           key :: String.t(),
128 |           scale :: pos_integer(),
129 |           increment :: pos_integer()
130 |         ) ::
131 |           non_neg_integer()
132 |   def inc(table, key, scale, increment) do
133 |     window = div(Atomic.now(), scale)
134 |     full_key = {key, window}
135 |     expires_at = (window + 1) * scale
136 | 
137 |     case :ets.lookup(table, full_key) do
138 |       [{_, atomic}] ->
139 |         :atomics.exchange(atomic, 2, expires_at) # slot 2 is the expiry timestamp read by the cleanup sweep
140 |         :atomics.add_get(atomic, 1, increment) # slot 1 is the counter; returns the new value
141 | 
142 |       [] ->
143 |         :ets.insert_new(table, {full_key, :atomics.new(2, signed: false)}) # insert_new (not insert): don't clobber an atomic a concurrent writer just created, which would discard its counts — mirrors hit/5
144 |         inc(table, key, scale, increment)
145 |     end
146 |   end
147 | 
148 |   @doc """
149 |   Sets the counter for a given key in the fixed window algorithm.
150 |   """
151 |   @spec set(
152 |           table :: atom(),
153 |           key :: String.t(),
154 |           scale :: pos_integer(),
155 |           count :: non_neg_integer()
156 |         ) ::
157 |           non_neg_integer()
158 |   def set(table, key, scale, count) do
159 |     window = div(Atomic.now(), scale)
160 |     full_key = {key, window}
161 |     expires_at = (window + 1) * scale
162 | 
163 |     case :ets.lookup(table, full_key) do
164 |       [{_, atomic}] ->
165 |         :atomics.exchange(atomic, 2, expires_at)
166 |         :atomics.exchange(atomic, 1, count)
167 |         count
168 | 
169 |       [] ->
170 |         :ets.insert_new(table, {full_key, :atomics.new(2, signed: false)}) # insert_new: on a lost race the recursive call finds and updates the existing atomic instead of detaching it
171 |         set(table, key, scale, count)
172 |     end
173 |   end
174 | 
175 |   @doc """
176 |   Returns the counter value for a given key in the current window (0 if no entry exists).
177 | """ 178 | @spec get(table :: atom(), key :: String.t(), scale :: pos_integer()) :: non_neg_integer() 179 | def get(table, key, scale) do 180 | window = div(Atomic.now(), scale) 181 | full_key = {key, window} 182 | 183 | case :ets.lookup(table, full_key) do 184 | [{_, atomic}] -> :atomics.get(atomic, 1) 185 | [] -> 0 186 | end 187 | end 188 | end 189 | -------------------------------------------------------------------------------- /lib/hammer/atomic/leaky_bucket.ex: -------------------------------------------------------------------------------- 1 | defmodule Hammer.Atomic.LeakyBucket do 2 | @moduledoc """ 3 | This module implements the Leaky Bucket algorithm. 4 | 5 | The leaky bucket algorithm works by modeling a bucket that: 6 | - Fills up with requests at the input rate 7 | - "Leaks" requests at a constant rate 8 | - Has a maximum capacity (the bucket size) 9 | 10 | For example, with a leak rate of 10 requests/second and bucket size of 100: 11 | - Requests add to the bucket's current level 12 | - The bucket leaks 10 requests per second steadily 13 | - If bucket reaches capacity (100), new requests are denied 14 | - Once bucket level drops, new requests are allowed again 15 | 16 | ## The algorithm: 17 | 1. When a request comes in, we: 18 | - Calculate how much has leaked since last request 19 | - Subtract leaked amount from current bucket level 20 | - Try to add new request to bucket 21 | - Store new bucket level and timestamp 22 | 2. To check if rate limit is exceeded: 23 | - If new bucket level <= capacity: allow request 24 | - If new bucket level > capacity: deny and return time until enough leaks 25 | 3. Old entries are automatically cleaned up after expiration 26 | 27 | This provides smooth rate limiting with ability to handle bursts up to bucket size. 
28 | The leaky bucket is a good choice when: 29 | 30 | - You need to enforce a constant processing rate 31 | - Want to allow temporary bursts within bucket capacity 32 | - Need to smooth out traffic spikes 33 | - Want to prevent resource exhaustion 34 | 35 | ## Common use cases include: 36 | 37 | - API rate limiting needing consistent throughput 38 | - Network traffic shaping 39 | - Service protection from sudden load spikes 40 | - Queue processing rate control 41 | - Scenarios needing both burst tolerance and steady-state limits 42 | 43 | The main advantages are: 44 | - Smooth, predictable output rate 45 | - Configurable burst tolerance 46 | - Natural queueing behavior 47 | 48 | The tradeoffs are: 49 | - More complex implementation than fixed windows 50 | - Need to track last request time and current bucket level 51 | - May need tuning of bucket size and leak rate parameters 52 | 53 | For example, with 100 requests/sec limit and 500 bucket size: 54 | - Can handle bursts of up to 500 requests 55 | - But long-term average rate won't exceed 100/sec 56 | - Provides smoother traffic than fixed windows 57 | 58 | The leaky bucket algorithm supports the following options: 59 | 60 | - `:clean_period` - How often to run the cleanup process (in milliseconds) 61 | Defaults to 1 minute. The cleanup process removes expired bucket entries. 62 | 63 | - `:key_older_than` - Optional maximum age for bucket entries (in milliseconds) 64 | If set, entries older than this will be removed during cleanup. 65 | This helps prevent memory growth from abandoned buckets. 66 | 67 | ## Example 68 | ### Example configuration: 69 | 70 | MyApp.RateLimit.start_link( 71 | clean_period: :timer.minutes(5), 72 | key_older_than: :timer.hours(24) 73 | ) 74 | 75 | This would run cleanup every 5 minutes and remove buckets not used in 24 hours. 
76 | 
77 |   ### Example usage:
78 | 
79 |       defmodule MyApp.RateLimit do
80 |         use Hammer, backend: :atomic, algorithm: :leaky_bucket
81 |       end
82 | 
83 |       MyApp.RateLimit.start_link(clean_period: :timer.minutes(1))
84 | 
85 |       # Allow 100 requests/sec leak rate with max capacity of 500
86 |       MyApp.RateLimit.hit("user_123", 100, 500, 1)
87 |   """
88 | 
89 |   @doc false
90 |   @spec ets_opts() :: list()
91 |   def ets_opts do
92 |     [
93 |       :named_table,
94 |       :set,
95 |       :public,
96 |       {:read_concurrency, true},
97 |       {:write_concurrency, true},
98 |       {:decentralized_counters, true}
99 |     ]
100 |   end
101 | 
102 |   @doc """
103 |   Checks if a key is allowed to perform an action, and increment the counter by the given amount.
104 |   """
105 |   @spec hit(
106 |           table :: atom(),
107 |           key :: String.t(),
108 |           leak_rate :: pos_integer(),
109 |           capacity :: pos_integer(),
110 |           cost :: pos_integer()
111 |         ) :: {:allow, non_neg_integer()} | {:deny, non_neg_integer()}
112 |   def hit(table, key, leak_rate, capacity, cost) do
113 |     # bucket window (note: second resolution, unlike the millisecond clock used elsewhere)
114 |     now = System.system_time(:second)
115 | 
116 |     case :ets.lookup(table, key) do
117 |       [{_, atomic}] ->
118 |         # Get current bucket state: slot 1 = fill level, slot 2 = last update time
119 |         current_fill = :atomics.get(atomic, 1)
120 |         last_update = :atomics.get(atomic, 2)
121 | 
122 |         leaked = trunc((now - last_update) * leak_rate)
123 | 
124 |         # Subtract leakage from current level (don't go below 0)
125 |         current_fill = max(0, current_fill - leaked)
126 | 
127 |         # NOTE(review): with cost > 1 this admits final_level > capacity, since only
128 |         # the pre-cost level is compared against capacity — confirm intended.
129 |         if current_fill < capacity do
130 |           final_level = current_fill + cost
131 | 
132 |           :atomics.exchange(atomic, 1, final_level)
133 |           :atomics.exchange(atomic, 2, now)
134 | 
135 |           {:allow, final_level}
136 |         else
137 |           # NOTE(review): fixed 1000 ms placeholder, not the actual time until enough leaks
138 |           {:deny, 1000}
139 |         end
140 | 
141 |       [] ->
142 |         # First hit for this key: create a zeroed bucket, then retry via the lookup branch.
143 |         :ets.insert_new(table, {key, :atomics.new(2, signed: false)})
144 |         hit(table, key, leak_rate, capacity, cost)
145 |     end
146 |   end
147 | 
148 |   @doc """
149 |   Returns the current level of the bucket for a given key.
146 | """ 147 | @spec get(table :: atom(), key :: String.t()) :: non_neg_integer() 148 | def get(table, key) do 149 | case :ets.lookup(table, key) do 150 | [] -> 151 | 0 152 | 153 | [{_, atomic}] -> 154 | :atomics.get(atomic, 1) 155 | 156 | _ -> 157 | 0 158 | end 159 | end 160 | end 161 | -------------------------------------------------------------------------------- /lib/hammer/atomic/token_bucket.ex: -------------------------------------------------------------------------------- 1 | defmodule Hammer.Atomic.TokenBucket do 2 | @moduledoc """ 3 | This module implements the Token Bucket algorithm. 4 | The token bucket algorithm works by modeling a bucket that: 5 | - Fills with tokens at a constant rate (the refill rate) 6 | - Has a maximum capacity of tokens (the bucket size) 7 | - Each request consumes one or more tokens 8 | - If there are enough tokens, the request is allowed 9 | - If not enough tokens, the request is denied 10 | 11 | For example, with a refill rate of 10 tokens/second and bucket size of 100: 12 | - Tokens are added at 10 per second up to max of 100 13 | - Each request needs tokens to proceed 14 | - If bucket has enough tokens, request allowed and tokens consumed 15 | - If not enough tokens, request denied until bucket refills 16 | 17 | ## The algorithm: 18 | 1. When a request comes in, we: 19 | - Calculate tokens added since last request based on time elapsed 20 | - Add new tokens to bucket (up to max capacity) 21 | - Try to consume tokens for the request 22 | - Store new token count and timestamp 23 | 2. To check if rate limit is exceeded: 24 | - If enough tokens: allow request and consume tokens 25 | - If not enough: deny and return time until enough tokens refill 26 | 3. Old entries are automatically cleaned up after expiration 27 | 28 | This provides smooth rate limiting with ability to handle bursts up to bucket size. 
29 | The token bucket is a good choice when: 30 | 31 | - You need to allow temporary bursts of traffic 32 | - Want to enforce an average rate limit 33 | - Need to support different costs for different operations 34 | - Want to avoid the sharp edges of fixed windows 35 | 36 | ## Common use cases include: 37 | 38 | - API rate limiting with burst tolerance 39 | - Network traffic shaping 40 | - Resource allocation control 41 | - Gaming systems with "energy" mechanics 42 | - Scenarios needing flexible rate limits 43 | 44 | The main advantages are: 45 | - Natural handling of bursts 46 | - Flexible token costs for different operations 47 | - Smooth rate limiting behavior 48 | - Simple to reason about 49 | 50 | The tradeoffs are: 51 | - Need to track token count and last update time 52 | - May need tuning of bucket size and refill rate 53 | - More complex than fixed windows 54 | 55 | For example with 100 tokens/minute limit and 500 bucket size: 56 | - Can handle bursts using saved up tokens 57 | - Automatically smooths out over time 58 | - Different operations can cost different amounts 59 | - More flexible than fixed request counts 60 | 61 | The token bucket algorithm supports the following options: 62 | 63 | - `:clean_period` - How often to run the cleanup process (in milliseconds) 64 | Defaults to 1 minute. The cleanup process removes expired bucket entries. 65 | 66 | - `:key_older_than` - Optional maximum age for bucket entries (in milliseconds) 67 | If set, entries older than this will be removed during cleanup. 68 | This helps prevent memory growth from abandoned buckets. 69 | 70 | ## Example 71 | 72 | ### Example configuration: 73 | 74 | MyApp.RateLimit.start_link( 75 | clean_period: :timer.minutes(5), 76 | key_older_than: :timer.hours(24) 77 | ) 78 | 79 | This would run cleanup every 5 minutes and remove buckets not used in 24 hours. 
80 | 81 | ### Example usage: 82 | 83 | defmodule MyApp.RateLimit do 84 | use Hammer, backend: :atomic, algorithm: :token_bucket 85 | end 86 | 87 | MyApp.RateLimit.start_link(clean_period: :timer.minutes(1)) 88 | 89 | # Allow 10 tokens per second with max capacity of 100 90 | MyApp.RateLimit.hit("user_123", 10, 100, 1) 91 | """ 92 | 93 | @doc false 94 | @spec ets_opts() :: list() 95 | def ets_opts do 96 | [ 97 | :named_table, 98 | :set, 99 | :public, 100 | {:read_concurrency, true}, 101 | {:write_concurrency, true}, 102 | {:decentralized_counters, true} 103 | ] 104 | end 105 | 106 | @doc """ 107 | Checks if a key is allowed to perform an action, and consume the bucket by the given amount. 108 | """ 109 | @spec hit( 110 | table :: atom(), 111 | key :: String.t(), 112 | refill_rate :: pos_integer(), 113 | capacity :: pos_integer(), 114 | cost :: pos_integer() 115 | ) :: {:allow, non_neg_integer()} | {:deny, non_neg_integer()} 116 | def hit(table, key, refill_rate, capacity, cost \\ 1) do 117 | # bucket window 118 | now = System.system_time(:second) 119 | 120 | case :ets.lookup(table, key) do 121 | [{_, atomic}] -> 122 | # Get current bucket state 123 | current_fill = :atomics.get(atomic, 1) 124 | last_update = :atomics.get(atomic, 2) 125 | 126 | new_tokens = trunc((now - last_update) * refill_rate) 127 | 128 | current_tokens = min(capacity, current_fill + new_tokens) 129 | 130 | if current_tokens >= cost do 131 | final_level = current_tokens - cost 132 | 133 | :atomics.exchange(atomic, 1, final_level) 134 | :atomics.exchange(atomic, 2, now) 135 | 136 | {:allow, final_level} 137 | else 138 | {:deny, 1000} 139 | end 140 | 141 | [] -> 142 | atomic = :atomics.new(2, signed: false) 143 | 144 | if :ets.insert_new(table, {key, atomic}) do 145 | :atomics.exchange(atomic, 1, capacity) 146 | end 147 | 148 | hit(table, key, refill_rate, capacity, cost) 149 | end 150 | end 151 | 152 | @doc """ 153 | Returns the current level of the bucket for a given key. 
154 | """ 155 | @spec get(table :: atom(), key :: String.t()) :: non_neg_integer() 156 | def get(table, key) do 157 | case :ets.lookup(table, key) do 158 | [] -> 159 | 0 160 | 161 | [{_, atomic}] -> 162 | :atomics.get(atomic, 1) 163 | 164 | _ -> 165 | 0 166 | end 167 | end 168 | end 169 | -------------------------------------------------------------------------------- /lib/hammer/ets.ex: -------------------------------------------------------------------------------- 1 | defmodule Hammer.ETS do 2 | @moduledoc """ 3 | An ETS backend for Hammer. 4 | 5 | To use the ETS backend, you need to start the process that creates and cleans the ETS table. The table is named after the module. 6 | 7 | defmodule MyApp.RateLimit do 8 | use Hammer, backend: :ets 9 | end 10 | 11 | MyApp.RateLimit.start_link(clean_period: :timer.minutes(1)) 12 | 13 | # Allow 10 requests per second 14 | MyApp.RateLimit.hit("user_123", 1000, 10) 15 | 16 | Runtime configuration: 17 | - `:clean_period` - (in milliseconds) period to clean up expired entries, defaults to 1 minute 18 | - `:key_older_than` - (in milliseconds) maximum age for entries before they are cleaned up, defaults to 1 hour 19 | - `:algorithm` - the rate limiting algorithm to use, one of: `:fix_window`, `:sliding_window`, `:leaky_bucket`, `:token_bucket`. Defaults to `:fix_window` 20 | 21 | The ETS backend supports the following algorithms: 22 | - `:fix_window` - Fixed window rate limiting (default) 23 | Simple counting within fixed time windows. See [Hammer.ETS.FixWindow](Hammer.ETS.FixWindow.html) for more details. 24 | 25 | - `:leaky_bucket` - Leaky bucket rate limiting 26 | Smooth rate limiting with a fixed rate of tokens. See [Hammer.ETS.LeakyBucket](Hammer.ETS.LeakyBucket.html) for more details. 27 | 28 | - `:token_bucket` - Token bucket rate limiting 29 | Flexible rate limiting with bursting capability. See [Hammer.ETS.TokenBucket](Hammer.ETS.TokenBucket.html) for more details. 
30 | """ 31 | 32 | use GenServer 33 | require Logger 34 | 35 | @type start_option :: 36 | {:clean_period, pos_integer()} 37 | | {:table, atom()} 38 | | {:algorithm, module()} 39 | | {:key_older_than, pos_integer()} 40 | | GenServer.option() 41 | 42 | @type config :: %{ 43 | table: atom(), 44 | table_opts: list(), 45 | clean_period: pos_integer(), 46 | key_older_than: pos_integer(), 47 | algorithm: module() 48 | } 49 | 50 | # credo:disable-for-next-line Credo.Check.Refactor.CyclomaticComplexity 51 | defmacro __before_compile__(%{module: module}) do 52 | hammer_opts = Module.get_attribute(module, :hammer_opts) 53 | 54 | algorithm = 55 | case Keyword.get(hammer_opts, :algorithm) do 56 | nil -> 57 | Hammer.ETS.FixWindow 58 | 59 | :ets -> 60 | Hammer.ETS.FixWindow 61 | 62 | :fix_window -> 63 | Hammer.ETS.FixWindow 64 | 65 | :sliding_window -> 66 | Hammer.ETS.SlidingWindow 67 | 68 | :leaky_bucket -> 69 | Hammer.ETS.LeakyBucket 70 | 71 | :token_bucket -> 72 | Hammer.ETS.TokenBucket 73 | 74 | _module -> 75 | raise ArgumentError, """ 76 | Hammer requires a valid backend to be specified. Must be one of: :ets,:fix_window, :sliding_window, :leaky_bucket, :token_bucket. 77 | If none is specified, :fix_window is used. 
78 | 79 | Example: 80 | 81 | use Hammer, backend: :ets 82 | """ 83 | end 84 | 85 | Code.ensure_loaded!(algorithm) 86 | 87 | quote do 88 | @table __MODULE__ 89 | @algorithm unquote(algorithm) 90 | 91 | def child_spec(opts) do 92 | %{id: __MODULE__, start: {__MODULE__, :start_link, [opts]}, type: :worker} 93 | end 94 | 95 | def start_link(opts) do 96 | opts = Keyword.put(opts, :table, @table) 97 | opts = Keyword.put_new(opts, :clean_period, :timer.minutes(1)) 98 | opts = Keyword.put_new(opts, :algorithm, @algorithm) 99 | Hammer.ETS.start_link(opts) 100 | end 101 | 102 | if function_exported?(@algorithm, :hit, 4) do 103 | def hit(key, scale, limit) do 104 | @algorithm.hit(@table, key, scale, limit) 105 | end 106 | end 107 | 108 | if function_exported?(@algorithm, :hit, 5) do 109 | def hit(key, scale, limit, increment \\ 1) do 110 | @algorithm.hit(@table, key, scale, limit, increment) 111 | end 112 | end 113 | 114 | if function_exported?(@algorithm, :inc, 4) do 115 | def inc(key, scale, increment \\ 1) do 116 | @algorithm.inc(@table, key, scale, increment) 117 | end 118 | end 119 | 120 | if function_exported?(@algorithm, :set, 4) do 121 | def set(key, scale, count) do 122 | @algorithm.set(@table, key, scale, count) 123 | end 124 | end 125 | 126 | if function_exported?(@algorithm, :get, 3) do 127 | def get(key, scale) do 128 | @algorithm.get(@table, key, scale) 129 | end 130 | end 131 | 132 | if function_exported?(@algorithm, :get, 2) do 133 | def get(key, scale) do 134 | @algorithm.get(@table, key) 135 | end 136 | end 137 | end 138 | end 139 | 140 | @doc """ 141 | Starts the process that creates and cleans the ETS table. 142 | 143 | Accepts the following options: 144 | - `:clean_period` - How often to run the cleanup process (in milliseconds). Defaults to 1 minute. 145 | - `:key_older_than` - Optional maximum age for bucket entries (in milliseconds). Defaults to 24 hours. 146 | Entries older than this will be removed during cleanup. 
147 |   - `:algorithm` - The rate limiting algorithm to use. Can be `:fix_window`, `:sliding_window`,
148 |     `:token_bucket`, or `:leaky_bucket`. Defaults to `:fix_window`.
149 |   - optional `:debug`, `:spawn_opt`, and `:hibernate_after` GenServer options
150 |   """
151 |   @spec start_link([start_option]) :: GenServer.on_start()
152 |   def start_link(opts) do
153 |     {gen_opts, opts} = Keyword.split(opts, [:debug, :spawn_opt, :hibernate_after])
154 | 
155 |     {clean_period, opts} = Keyword.pop!(opts, :clean_period)
156 |     {table, opts} = Keyword.pop!(opts, :table)
157 |     {algorithm, opts} = Keyword.pop!(opts, :algorithm)
158 |     {key_older_than, opts} = Keyword.pop(opts, :key_older_than, :timer.hours(24))
159 |     # Anything left over is an unrecognized option: warn rather than crash.
160 |     case opts do
161 |       [] ->
162 |         :ok
163 | 
164 |       _ ->
165 |         Logger.warning(
166 |           "Unrecognized options passed to #{inspect(table)}.start_link/1: #{inspect(opts)}"
167 |         )
168 |     end
169 | 
170 |     config = %{
171 |       table: table,
172 |       table_opts: algorithm.ets_opts(),
173 |       clean_period: clean_period,
174 |       key_older_than: key_older_than,
175 |       algorithm: algorithm
176 |     }
177 | 
178 |     GenServer.start_link(__MODULE__, config, gen_opts)
179 |   end
180 | 
181 |   @compile inline: [update_counter: 4]
182 |   def update_counter(table, key, op, expires_at) do
183 |     :ets.update_counter(table, key, op, {key, 0, expires_at}) # 4th arg is the default object inserted when key is absent
184 |   end
185 | 
186 |   @compile inline: [now: 0]
187 |   @spec now() :: pos_integer()
188 |   def now do
189 |     System.system_time(:millisecond)
190 |   end
191 | 
192 |   @impl GenServer
193 |   def init(config) do
194 |     :ets.new(config.table, config.table_opts) # named table, so owned by this process but written by callers
195 | 
196 |     schedule(config.clean_period)
197 |     {:ok, config}
198 |   end
199 | 
200 |   @impl GenServer
201 |   def handle_info(:clean, config) do
202 |     algorithm = config.algorithm
203 |     algorithm.clean(config)
204 |     schedule(config.clean_period)
205 |     {:noreply, config}
206 |   end
207 | 
208 |   defp schedule(clean_period) do
209 |     Process.send_after(self(), :clean, clean_period)
210 |   end
211 | end
212 | 
-------------------------------------------------------------------------------- /lib/hammer/ets/fix_window.ex: -------------------------------------------------------------------------------- 1 | defmodule Hammer.ETS.FixWindow do 2 | @moduledoc """ 3 | This module implements the Fix Window algorithm. 4 | 5 | The fixed window algorithm works by dividing time into fixed intervals or "windows" 6 | of a specified duration (scale). Each window tracks request counts independently. 7 | 8 | For example, with a 60 second window: 9 | - Window 1: 0-60 seconds 10 | - Window 2: 60-120 seconds 11 | - And so on... 12 | 13 | ## The algorithm: 14 | 1. When a request comes in, we: 15 | - Calculate which window it belongs to based on current time 16 | - Increment the counter for that window 17 | - Store expiration time as end of window 18 | 2. To check if rate limit is exceeded: 19 | - If count <= limit: allow request 20 | - If count > limit: deny and return time until window expires 21 | 3. Old windows are automatically cleaned up after expiration 22 | 23 | This provides simple rate limiting but has edge cases where a burst of requests 24 | spanning a window boundary could allow up to 2x the limit in a short period. 25 | For more precise limiting, consider using the sliding window algorithm instead. 26 | 27 | The fixed window algorithm is a good choice when: 28 | 29 | - You need simple, predictable rate limiting with clear time boundaries 30 | - The exact precision of the rate limit is not critical 31 | - You want efficient implementation with minimal storage overhead 32 | - Your use case can tolerate potential bursts at window boundaries 33 | 34 | ## Common use cases include: 35 | 36 | - Basic API rate limiting where occasional bursts are acceptable 37 | - Protecting backend services from excessive load 38 | - Implementing fair usage policies 39 | - Scenarios where clear time-based quotas are desired (e.g. 
"100 requests per minute") 40 | 41 | The main tradeoff is that requests near window boundaries can allow up to 2x the 42 | intended limit in a short period. For example with a limit of 100 per minute: 43 | - 100 requests at 11:59:59 44 | - Another 100 requests at 12:00:01 45 | 46 | This results in 200 requests in 2 seconds, while still being within limits. 47 | If this behavior is problematic, consider using the sliding window algorithm instead. 48 | 49 | The fixed window algorithm supports the following options: 50 | 51 | - `:clean_period` - How often to run the cleanup process (in milliseconds) 52 | Defaults to 1 minute. The cleanup process removes expired window entries. 53 | 54 | 55 | ## Example 56 | ### Example configuration: 57 | 58 | MyApp.RateLimit.start_link( 59 | clean_period: :timer.minutes(5), 60 | ) 61 | 62 | This would run cleanup every 5 minutes and clean up old windows. 63 | 64 | ### Example usage: 65 | 66 | defmodule MyApp.RateLimit do 67 | use Hammer, backend: :ets, algorithm: :fix_window 68 | end 69 | 70 | MyApp.RateLimit.start_link(clean_period: :timer.minutes(1)) 71 | 72 | # Allow 10 requests per second 73 | MyApp.RateLimit.hit("user_123", 1000, 10) 74 | """ 75 | alias Hammer.ETS 76 | @doc false 77 | @spec ets_opts() :: list() 78 | def ets_opts do 79 | [ 80 | :named_table, 81 | :set, 82 | :public, 83 | {:read_concurrency, true}, 84 | {:write_concurrency, true}, 85 | {:decentralized_counters, true} 86 | ] 87 | end 88 | 89 | @doc """ 90 | Checks if a key is allowed to perform an action based on the fixed window algorithm. 
91 | """ 92 | @spec hit( 93 | table :: atom(), 94 | key :: String.t(), 95 | scale :: pos_integer(), 96 | limit :: pos_integer(), 97 | increment :: pos_integer() 98 | ) :: {:allow, non_neg_integer()} | {:deny, non_neg_integer()} 99 | def hit(table, key, scale, limit, increment) do 100 | now = ETS.now() 101 | window = div(now, scale) 102 | full_key = {key, window} 103 | expires_at = (window + 1) * scale 104 | count = ETS.update_counter(table, full_key, increment, expires_at) 105 | 106 | if count <= limit do 107 | {:allow, count} 108 | else 109 | {:deny, expires_at - now} 110 | end 111 | end 112 | 113 | @doc """ 114 | Increments the counter for a given key in the fixed window algorithm. 115 | """ 116 | @spec inc( 117 | table :: atom(), 118 | key :: String.t(), 119 | scale :: pos_integer(), 120 | increment :: pos_integer() 121 | ) :: 122 | non_neg_integer() 123 | def inc(table, key, scale, increment) do 124 | window = div(ETS.now(), scale) 125 | full_key = {key, window} 126 | expires_at = (window + 1) * scale 127 | ETS.update_counter(table, full_key, increment, expires_at) 128 | end 129 | 130 | @doc """ 131 | Sets the counter for a given key in the fixed window algorithm. 132 | """ 133 | @spec set(table :: atom(), key :: String.t(), scale :: pos_integer(), count :: pos_integer()) :: 134 | integer() 135 | def set(table, key, scale, count) do 136 | window = div(ETS.now(), scale) 137 | full_key = {key, window} 138 | expires_at = (window + 1) * scale 139 | ETS.update_counter(table, full_key, {2, 1, 0, count}, expires_at) 140 | end 141 | 142 | @doc """ 143 | Returns the count of requests for a given key within the last seconds. 
144 | """ 145 | @spec get(table :: atom(), key :: String.t(), scale :: pos_integer()) :: non_neg_integer() 146 | def get(table, key, scale) do 147 | window = div(ETS.now(), scale) 148 | full_key = {key, window} 149 | 150 | case :ets.lookup(table, full_key) do 151 | [{_full_key, count, _expires_at}] -> count 152 | [] -> 0 153 | end 154 | end 155 | 156 | @doc """ 157 | Cleans up all of the old entries from the table. 158 | """ 159 | @spec clean(config :: ETS.config()) :: non_neg_integer() 160 | def clean(config) do 161 | table = config.table 162 | 163 | match_spec = [{{{:_, :_}, :_, :"$1"}, [], [{:<, :"$1", {:const, ETS.now()}}]}] 164 | :ets.select_delete(table, match_spec) 165 | end 166 | end 167 | -------------------------------------------------------------------------------- /lib/hammer/ets/leaky_bucket.ex: -------------------------------------------------------------------------------- 1 | defmodule Hammer.ETS.LeakyBucket do 2 | @moduledoc """ 3 | This module implements the Leaky Bucket algorithm. 4 | 5 | The leaky bucket algorithm works by modeling a bucket that: 6 | - Fills up with requests at the input rate 7 | - "Leaks" requests at a constant rate 8 | - Has a maximum capacity (the bucket size) 9 | 10 | For example, with a leak rate of 10 requests/second and bucket size of 100: 11 | - Requests add to the bucket's current level 12 | - The bucket leaks 10 requests per second steadily 13 | - If bucket reaches capacity (100), new requests are denied 14 | - Once bucket level drops, new requests are allowed again 15 | 16 | ## The algorithm: 17 | 1. When a request comes in, we: 18 | - Calculate how much has leaked since last request 19 | - Subtract leaked amount from current bucket level 20 | - Try to add new request to bucket 21 | - Store new bucket level and timestamp 22 | 2. To check if rate limit is exceeded: 23 | - If new bucket level <= capacity: allow request 24 | - If new bucket level > capacity: deny and return time until enough leaks 25 | 3. 
Old entries are automatically cleaned up after expiration 26 | 27 | This provides smooth rate limiting with ability to handle bursts up to bucket size. 28 | The leaky bucket is a good choice when: 29 | 30 | - You need to enforce a constant processing rate 31 | - Want to allow temporary bursts within bucket capacity 32 | - Need to smooth out traffic spikes 33 | - Want to prevent resource exhaustion 34 | 35 | ## Common use cases include: 36 | 37 | - API rate limiting needing consistent throughput 38 | - Network traffic shaping 39 | - Service protection from sudden load spikes 40 | - Queue processing rate control 41 | - Scenarios needing both burst tolerance and steady-state limits 42 | 43 | The main advantages are: 44 | - Smooth, predictable output rate 45 | - Configurable burst tolerance 46 | - Natural queueing behavior 47 | 48 | The tradeoffs are: 49 | - More complex implementation than fixed windows 50 | - Need to track last request time and current bucket level 51 | - May need tuning of bucket size and leak rate parameters 52 | 53 | For example, with 100 requests/sec limit and 500 bucket size: 54 | - Can handle bursts of up to 500 requests 55 | - But long-term average rate won't exceed 100/sec 56 | - Provides smoother traffic than fixed windows 57 | 58 | The leaky bucket algorithm supports the following options: 59 | 60 | - `:clean_period` - How often to run the cleanup process (in milliseconds) 61 | Defaults to 1 minute. The cleanup process removes expired bucket entries. 62 | 63 | - `:key_older_than` - Optional maximum age for bucket entries (in milliseconds) 64 | If set, entries older than this will be removed during cleanup. 65 | This helps prevent memory growth from abandoned buckets. 66 | 67 | ## Example 68 | ### Example configuration: 69 | 70 | MyApp.RateLimit.start_link( 71 | clean_period: :timer.minutes(5), 72 | key_older_than: :timer.hours(24) 73 | ) 74 | 75 | This would run cleanup every 5 minutes and remove buckets not used in 24 hours. 
@doc """
Checks if a key is allowed to perform an action, and increment the counter by the given amount.

Returns `{:allow, new_level}` when the bucket has room for `cost` more
units, or `{:deny, 1000}` when adding `cost` would overflow `capacity`.
The bucket drains at `leak_rate` units per second.
"""
@spec hit(
        table :: atom(),
        key :: String.t(),
        leak_rate :: pos_integer(),
        capacity :: pos_integer(),
        cost :: pos_integer()
      ) :: {:allow, non_neg_integer()} | {:deny, non_neg_integer()}
def hit(table, key, leak_rate, capacity, cost) do
  # NOTE(review): timestamps here are in seconds, while clean/1 compares
  # stored timestamps against ETS.now() — confirm both use the same unit.
  now = System.system_time(:second)

  # Lazily create an empty bucket for a previously unseen key.
  :ets.insert_new(table, {key, 0, now})

  # Read the current bucket state.
  [{^key, level, last_update}] = :ets.lookup(table, key)

  # Drain what has leaked since the last update; never go below empty.
  leaked = trunc((now - last_update) * leak_rate)
  level = max(0, level - leaked)

  # Fix: include `cost` in the capacity check. The previous condition
  # (`level < capacity`) let a request with cost > 1 push the bucket past
  # its capacity. Behavior for cost == 1 is unchanged.
  if level + cost <= capacity do
    new_level = level + cost
    :ets.insert(table, {key, new_level, now})
    {:allow, new_level}
  else
    {:deny, 1000}
  end
end
139 | """ 140 | @spec get(table :: atom(), key :: String.t()) :: non_neg_integer() 141 | def get(table, key) do 142 | case :ets.lookup(table, key) do 143 | [] -> 144 | 0 145 | 146 | [{^key, level, _last_update}] -> 147 | level 148 | 149 | _ -> 150 | 0 151 | end 152 | end 153 | 154 | @doc """ 155 | Cleans up all of the old entries from the table based on the `key_older_than` option. 156 | """ 157 | @spec clean(config :: ETS.config()) :: non_neg_integer() 158 | def clean(config) do 159 | now = ETS.now() 160 | older_than = now - config.key_older_than 161 | 162 | match_spec = [{{:_, :_, :"$1"}, [], [{:<, :"$1", {:const, older_than}}]}] 163 | :ets.select_delete(config.table, match_spec) 164 | end 165 | end 166 | -------------------------------------------------------------------------------- /lib/hammer/ets/sliding_window.ex: -------------------------------------------------------------------------------- 1 | defmodule Hammer.ETS.SlidingWindow do 2 | @moduledoc """ 3 | This module implements the Rate Limiting Sliding Window algorithm. 4 | 5 | The sliding window algorithm works by tracking requests within a moving time window. 6 | Unlike a fixed window that resets at specific intervals, the sliding window 7 | provides a smoother rate limiting experience by considering the most recent 8 | window of time. 9 | 10 | For example, with a 60 second window: 11 | - At time t, we look back 60 seconds and count all requests in that period 12 | - At time t+1, we look back 60 seconds from t+1, dropping any requests older than that 13 | - This creates a "sliding" effect where the window gradually moves forward in time 14 | 15 | ## The algorithm: 16 | 1. When a request comes in, we store it with the current timestamp 17 | 2. To check if rate limit is exceeded, we: 18 | - Count all requests within the last seconds 19 | - If count <= limit: allow the request 20 | - If count > limit: deny and return time until oldest request expires 21 | 3. 
Old entries outside the window are automatically cleaned up 22 | 23 | This provides more precise rate limiting compared to fixed windows, avoiding 24 | the edge case where a burst of requests spans a fixed window boundary. 25 | 26 | The sliding window algorithm is a good choice when: 27 | 28 | - You need precise rate limiting without allowing bursts at window boundaries 29 | - Accuracy of the rate limit is critical for your application 30 | - You can accept slightly higher storage overhead compared to fixed windows 31 | - You want to avoid sudden changes in allowed request rates 32 | 33 | ## Common use cases include: 34 | 35 | - API rate limiting where consistent request rates are important 36 | - Financial transaction rate limiting 37 | - User action throttling requiring precise control 38 | - Gaming or real-time applications needing smooth rate control 39 | - Security-sensitive rate limiting scenarios 40 | 41 | The main advantages over fixed windows are: 42 | 43 | - No possibility of 2x burst at window boundaries 44 | - Smoother rate limiting behavior 45 | - More predictable request patterns 46 | 47 | The tradeoffs are: 48 | - Slightly more complex implementation 49 | - Higher storage requirements (need to store individual request timestamps) 50 | - More computation required to check limits (need to count requests in window) 51 | 52 | For example, with a limit of 100 requests per minute: 53 | - Fixed window might allow 200 requests across a boundary (100 at 11:59, 100 at 12:00) 54 | - Sliding window ensures no more than 100 requests in ANY 60 second period 55 | 56 | The sliding window algorithm supports the following options: 57 | 58 | - `:clean_period` - How often to run the cleanup process (in milliseconds) 59 | Defaults to 1 minute. The cleanup process removes expired window entries. 
@doc """
Checks if a key is allowed to perform an action based on the sliding window algorithm.
"""
@spec hit(
        table :: atom(),
        key :: String.t(),
        scale :: pos_integer(),
        limit :: pos_integer()
      ) :: {:allow, non_neg_integer()} | {:deny, non_neg_integer()}
def hit(table, key, scale, limit) do
  now = now()

  # now/0 is in microseconds, so `scale` (milliseconds) must be converted
  # to microseconds before computing this entry's expiration timestamp.
  # (The old comment and the `scale_ms` name wrongly said "milliseconds".)
  scale_us = scale * 1000
  expires_at = now + scale_us

  # Drop this key's already-expired entries before counting.
  remove_old_entries_for_key(table, key, now)

  :ets.insert_new(table, {{key, now}, expires_at})
  count = SlidingWindow.get(table, key, scale)

  if count <= limit do
    {:allow, count}
  else
    # Deny: report (in ms) how long until the earliest live entry expires.
    earliest_expiry = get_earliest_expiry(table, key, now)
    {:deny, round((earliest_expiry - now) / 1000)}
  end
end
@doc """
Removes every entry from the table whose expiration timestamp has already
passed.

Note: unlike the bucket-based backends, the sliding window does not use a
`key_older_than` option — each entry expires `scale` after it was recorded.
(The previous doc incorrectly referenced `key_older_than`.)
"""
@spec clean(config :: Hammer.ETS.config()) :: non_neg_integer()
def clean(config) do
  # Rows are {{key, inserted_at}, expires_at}; delete the expired ones.
  expired = [{{:_, :"$1"}, [], [{:<, :"$1", {:const, now()}}]}]
  :ets.select_delete(config.table, expired)
end

# Smallest expiration timestamp among this key's still-live entries.
# Only called on the deny path, right after hit/4 inserted an entry, so
# the selection is never empty.
defp get_earliest_expiry(table, key, now) do
  live_entries = [
    {
      {{:"$1", :_}, :"$2"},
      [{:"=:=", {:const, key}, :"$1"}, {:>, :"$2", {:const, now}}],
      [:"$2"]
    }
  ]

  table
  |> :ets.select(live_entries)
  |> Enum.min()
end

# Drops this key's entries whose expiration is already before `now`.
defp remove_old_entries_for_key(table, key, now) do
  stale_entries = [
    {{{:"$1", :_}, :"$2"}, [{:"=:=", {:const, key}, :"$1"}], [{:<, :"$2", {:const, now}}]}
  ]

  :ets.select_delete(table, stale_entries)
end

# Current time in microseconds; the timestamp also serves as part of each
# entry's unique key.
@compile inline: [now: 0]
defp now do
  System.system_time(:microsecond)
end
4 | The token bucket algorithm works by modeling a bucket that: 5 | - Fills with tokens at a constant rate (the refill rate) 6 | - Has a maximum capacity of tokens (the bucket size) 7 | - Each request consumes one or more tokens 8 | - If there are enough tokens, the request is allowed 9 | - If not enough tokens, the request is denied 10 | 11 | For example, with a refill rate of 10 tokens/second and bucket size of 100: 12 | - Tokens are added at 10 per second up to max of 100 13 | - Each request needs tokens to proceed 14 | - If bucket has enough tokens, request allowed and tokens consumed 15 | - If not enough tokens, request denied until bucket refills 16 | 17 | ## The algorithm: 18 | 1. When a request comes in, we: 19 | - Calculate tokens added since last request based on time elapsed 20 | - Add new tokens to bucket (up to max capacity) 21 | - Try to consume tokens for the request 22 | - Store new token count and timestamp 23 | 2. To check if rate limit is exceeded: 24 | - If enough tokens: allow request and consume tokens 25 | - If not enough: deny and return time until enough tokens refill 26 | 3. Old entries are automatically cleaned up after expiration 27 | 28 | This provides smooth rate limiting with ability to handle bursts up to bucket size. 
29 | The token bucket is a good choice when: 30 | 31 | - You need to allow temporary bursts of traffic 32 | - Want to enforce an average rate limit 33 | - Need to support different costs for different operations 34 | - Want to avoid the sharp edges of fixed windows 35 | 36 | Common use cases include: 37 | 38 | - API rate limiting with burst tolerance 39 | - Network traffic shaping 40 | - Resource allocation control 41 | - Gaming systems with "energy" mechanics 42 | - Scenarios needing flexible rate limits 43 | 44 | The main advantages are: 45 | - Natural handling of bursts 46 | - Flexible token costs for different operations 47 | - Smooth rate limiting behavior 48 | - Simple to reason about 49 | 50 | The tradeoffs are: 51 | - Need to track token count and last update time 52 | - May need tuning of bucket size and refill rate 53 | - More complex than fixed windows 54 | 55 | For example with 100 tokens/minute limit and 500 bucket size: 56 | - Can handle bursts using saved up tokens 57 | - Automatically smooths out over time 58 | - Different operations can cost different amounts 59 | - More flexible than fixed request counts 60 | 61 | The token bucket algorithm supports the following options: 62 | 63 | - `:clean_period` - How often to run the cleanup process (in milliseconds) 64 | Defaults to 1 minute. The cleanup process removes expired bucket entries. 65 | 66 | - `:key_older_than` - Optional maximum age for bucket entries (in milliseconds) 67 | If set, entries older than this will be removed during cleanup. 68 | This helps prevent memory growth from abandoned buckets. 69 | 70 | ## Example 71 | ### Example configuration: 72 | 73 | MyApp.RateLimit.start_link( 74 | clean_period: :timer.minutes(5), 75 | key_older_than: :timer.hours(24) 76 | ) 77 | 78 | This would run cleanup every 5 minutes and remove buckets not used in 24 hours. 
@doc """
Checks if a key is allowed to perform an action, and consume the bucket by the given amount.

Returns `{:allow, remaining_tokens}` when the bucket holds at least `cost`
tokens, otherwise `{:deny, 1000}`. Tokens refill at `refill_rate` per
second, capped at `capacity`.
"""
@spec hit(
        table :: atom(),
        key :: String.t(),
        refill_rate :: pos_integer(),
        capacity :: pos_integer(),
        cost :: pos_integer()
      ) :: {:allow, non_neg_integer()} | {:deny, non_neg_integer()}
def hit(table, key, refill_rate, capacity, cost \\ 1) do
  now = System.system_time(:second)

  # Lazily create a full bucket for a previously unseen key.
  :ets.insert_new(table, {key, capacity, now})

  [{^key, stored_tokens, last_update}] = :ets.lookup(table, key)

  # Refill proportionally to elapsed time, capped at capacity.
  refilled = trunc((now - last_update) * refill_rate)
  available = min(capacity, stored_tokens + refilled)

  if available >= cost do
    remaining = available - cost
    :ets.insert(table, {key, remaining, now})
    {:allow, remaining}
  else
    {:deny, 1000}
  end
end
138 | """ 139 | @spec get(table :: atom(), key :: String.t()) :: non_neg_integer() 140 | def get(table, key) do 141 | case :ets.lookup(table, key) do 142 | [] -> 143 | 0 144 | 145 | [{^key, level, _last_update}] -> 146 | level 147 | 148 | _ -> 149 | 0 150 | end 151 | end 152 | 153 | @doc """ 154 | Cleans up all of the old entries from the table based on the `key_older_than` option. 155 | """ 156 | @spec clean(config :: ETS.config()) :: non_neg_integer() 157 | def clean(config) do 158 | now = ETS.now() 159 | older_than = now - config.key_older_than 160 | 161 | match_spec = [{{:_, :_, :"$1"}, [], [{:<, :"$1", {:const, older_than}}]}] 162 | :ets.select_delete(config.table, match_spec) 163 | end 164 | end 165 | -------------------------------------------------------------------------------- /mix.exs: -------------------------------------------------------------------------------- 1 | defmodule Hammer.MixProject do 2 | use Mix.Project 3 | 4 | @source_url "https://github.com/ExHammer/hammer" 5 | @version "7.0.1" 6 | 7 | def project do 8 | [ 9 | app: :hammer, 10 | description: "A rate-limiter with plugable backends.", 11 | version: @version, 12 | elixir: "~> 1.14", 13 | start_permanent: Mix.env() == :prod, 14 | deps: deps(), 15 | docs: docs(), 16 | package: package(), 17 | test_coverage: [summary: [threshold: 90]] 18 | ] 19 | end 20 | 21 | def application do 22 | [extra_applications: [:logger]] 23 | end 24 | 25 | defp deps do 26 | [ 27 | {:benchee, "~> 1.2", only: :bench}, 28 | {:credo, "~> 1.7", only: [:dev, :test]}, 29 | {:ex_doc, "~> 0.34", only: :dev}, 30 | {:dialyxir, "~> 1.4", only: [:dev, :test], runtime: false} 31 | 32 | # Keeping to perform benchmark test as needed at times 33 | # {:ex_rated, "~> 2.1", only: :bench}, 34 | # {:plug_attack, "~> 0.4.3", only: :bench}, 35 | # {:rate_limiter, "~> 0.4.0", only: :bench} 36 | ] 37 | end 38 | 39 | defp docs do 40 | [ 41 | main: "readme", 42 | extra_section: "GUIDES", 43 | extras: ["CHANGELOG.md", "README.md"] ++ 
Path.wildcard("guides/*.md"), 44 | skip_undefined_reference_warnings_on: ["CHANGELOG.md"], 45 | source_url: @source_url, 46 | source_ref: "v#{@version}", 47 | homepage_url: @source_url, 48 | assets: %{"assets" => "assets"} 49 | ] 50 | end 51 | 52 | defp package do 53 | [ 54 | name: :hammer, 55 | maintainers: ["Emmanuel Pinault", "June Kelly (june@junek.xyz)"], 56 | licenses: ["MIT"], 57 | links: %{ 58 | "GitHub" => @source_url, 59 | "Changelog" => "#{@source_url}/blob/master/CHANGELOG.md" 60 | } 61 | ] 62 | end 63 | end 64 | -------------------------------------------------------------------------------- /mix.lock: -------------------------------------------------------------------------------- 1 | %{ 2 | "benchee": {:hex, :benchee, "1.3.1", "c786e6a76321121a44229dde3988fc772bca73ea75170a73fd5f4ddf1af95ccf", [:mix], [{:deep_merge, "~> 1.0", [hex: :deep_merge, repo: "hexpm", optional: false]}, {:statistex, "~> 1.0", [hex: :statistex, repo: "hexpm", optional: false]}, {:table, "~> 0.1.0", [hex: :table, repo: "hexpm", optional: true]}], "hexpm", "76224c58ea1d0391c8309a8ecbfe27d71062878f59bd41a390266bf4ac1cc56d"}, 3 | "bunt": {:hex, :bunt, "1.0.0", "081c2c665f086849e6d57900292b3a161727ab40431219529f13c4ddcf3e7a44", [:mix], [], "hexpm", "dc5f86aa08a5f6fa6b8096f0735c4e76d54ae5c9fa2c143e5a1fc7c1cd9bb6b5"}, 4 | "credo": {:hex, :credo, "1.7.12", "9e3c20463de4b5f3f23721527fcaf16722ec815e70ff6c60b86412c695d426c1", [:mix], [{:bunt, "~> 0.2.1 or ~> 1.0", [hex: :bunt, repo: "hexpm", optional: false]}, {:file_system, "~> 0.2 or ~> 1.0", [hex: :file_system, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "8493d45c656c5427d9c729235b99d498bd133421f3e0a683e5c1b561471291e5"}, 5 | "deep_merge": {:hex, :deep_merge, "1.0.0", "b4aa1a0d1acac393bdf38b2291af38cb1d4a52806cf7a4906f718e1feb5ee961", [:mix], [], "hexpm", "ce708e5f094b9cd4e8f2be4f00d2f4250c4095be93f8cd6d018c753894885430"}, 6 | "dialyxir": {:hex, :dialyxir, "1.4.5", 
"ca1571ac18e0f88d4ab245f0b60fa31ff1b12cbae2b11bd25d207f865e8ae78a", [:mix], [{:erlex, ">= 0.2.7", [hex: :erlex, repo: "hexpm", optional: false]}], "hexpm", "b0fb08bb8107c750db5c0b324fa2df5ceaa0f9307690ee3c1f6ba5b9eb5d35c3"}, 7 | "earmark_parser": {:hex, :earmark_parser, "1.4.43", "34b2f401fe473080e39ff2b90feb8ddfeef7639f8ee0bbf71bb41911831d77c5", [:mix], [], "hexpm", "970a3cd19503f5e8e527a190662be2cee5d98eed1ff72ed9b3d1a3d466692de8"}, 8 | "erlex": {:hex, :erlex, "0.2.7", "810e8725f96ab74d17aac676e748627a07bc87eb950d2b83acd29dc047a30595", [:mix], [], "hexpm", "3ed95f79d1a844c3f6bf0cea61e0d5612a42ce56da9c03f01df538685365efb0"}, 9 | "ex_doc": {:hex, :ex_doc, "0.37.3", "f7816881a443cd77872b7d6118e8a55f547f49903aef8747dbcb345a75b462f9", [:mix], [{:earmark_parser, "~> 1.4.42", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_c, ">= 0.1.0", [hex: :makeup_c, repo: "hexpm", optional: true]}, {:makeup_elixir, "~> 0.14 or ~> 1.0", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1 or ~> 1.0", [hex: :makeup_erlang, repo: "hexpm", optional: false]}, {:makeup_html, ">= 0.1.0", [hex: :makeup_html, repo: "hexpm", optional: true]}], "hexpm", "e6aebca7156e7c29b5da4daa17f6361205b2ae5f26e5c7d8ca0d3f7e18972233"}, 10 | "file_system": {:hex, :file_system, "1.1.0", "08d232062284546c6c34426997dd7ef6ec9f8bbd090eb91780283c9016840e8f", [:mix], [], "hexpm", "bfcf81244f416871f2a2e15c1b515287faa5db9c6bcf290222206d120b3d43f6"}, 11 | "jason": {:hex, :jason, "1.4.4", "b9226785a9aa77b6857ca22832cffa5d5011a667207eb2a0ad56adb5db443b8a", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "c5eb0cab91f094599f94d55bc63409236a8ec69a21a67814529e8d5f6cc90b3b"}, 12 | "makeup": {:hex, :makeup, "1.2.1", "e90ac1c65589ef354378def3ba19d401e739ee7ee06fb47f94c687016e3713d1", [:mix], [{:nimble_parsec, "~> 1.4", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", 
"d36484867b0bae0fea568d10131197a4c2e47056a6fbe84922bf6ba71c8d17ce"}, 13 | "makeup_elixir": {:hex, :makeup_elixir, "1.0.1", "e928a4f984e795e41e3abd27bfc09f51db16ab8ba1aebdba2b3a575437efafc2", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.2.3 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "7284900d412a3e5cfd97fdaed4f5ed389b8f2b4cb49efc0eb3bd10e2febf9507"}, 14 | "makeup_erlang": {:hex, :makeup_erlang, "1.0.2", "03e1804074b3aa64d5fad7aa64601ed0fb395337b982d9bcf04029d68d51b6a7", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}], "hexpm", "af33ff7ef368d5893e4a267933e7744e46ce3cf1f61e2dccf53a111ed3aa3727"}, 15 | "nimble_parsec": {:hex, :nimble_parsec, "1.4.2", "8efba0122db06df95bfaa78f791344a89352ba04baedd3849593bfce4d0dc1c6", [:mix], [], "hexpm", "4b21398942dda052b403bbe1da991ccd03a053668d147d53fb8c4e0efe09c973"}, 16 | "statistex": {:hex, :statistex, "1.0.0", "f3dc93f3c0c6c92e5f291704cf62b99b553253d7969e9a5fa713e5481cd858a5", [:mix], [], "hexpm", "ff9d8bee7035028ab4742ff52fc80a2aa35cece833cf5319009b52f1b5a86c27"}, 17 | } 18 | -------------------------------------------------------------------------------- /test/hammer/atomic/clean_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Hammer.Atomic.CleanTest do 2 | use ExUnit.Case, async: true 3 | 4 | defmodule RateAtomicLimit do 5 | use Hammer, backend: :atomic 6 | end 7 | 8 | defmodule RateAtomicLimitLeakyBucket do 9 | use Hammer, backend: :atomic, algorithm: :leaky_bucket 10 | end 11 | 12 | defmodule RateAtomicLimitTokenBucket do 13 | use Hammer, backend: :atomic, algorithm: :token_bucket 14 | end 15 | 16 | test "cleaning works for fix window/default ets backend" do 17 | start_supervised!({RateAtomicLimit, clean_period: 50, key_older_than: 10}) 18 | 19 | key = "key" 20 | scale = 100 21 | count = 10 22 | 23 | assert {:allow, 1} = RateAtomicLimit.hit(key, 
scale, count) 24 | 25 | assert [_] = :ets.tab2list(RateAtomicLimit) 26 | 27 | :timer.sleep(150) 28 | 29 | assert :ets.tab2list(RateAtomicLimit) == [] 30 | end 31 | 32 | test "cleaning works for token bucket" do 33 | start_supervised!({RateAtomicLimitTokenBucket, clean_period: 50, key_older_than: 10}) 34 | 35 | key = "key" 36 | refill_rate = 1 37 | capacity = 10 38 | 39 | assert {:allow, 9} = RateAtomicLimitTokenBucket.hit(key, refill_rate, capacity, 1) 40 | 41 | assert [_] = :ets.tab2list(RateAtomicLimitTokenBucket) 42 | 43 | :timer.sleep(150) 44 | 45 | assert :ets.tab2list(RateAtomicLimitTokenBucket) == [] 46 | end 47 | 48 | test "cleaning works for leaky bucket" do 49 | start_supervised!({RateAtomicLimitLeakyBucket, clean_period: 50, key_older_than: 10}) 50 | 51 | key = "key" 52 | leak_rate = 1 53 | capacity = 10 54 | 55 | assert {:allow, 1} = RateAtomicLimitLeakyBucket.hit(key, leak_rate, capacity, 1) 56 | 57 | assert [_] = :ets.tab2list(RateAtomicLimitLeakyBucket) 58 | 59 | :timer.sleep(150) 60 | 61 | assert :ets.tab2list(RateAtomicLimitLeakyBucket) == [] 62 | end 63 | end 64 | -------------------------------------------------------------------------------- /test/hammer/atomic/fix_window_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Hammer.Atomic.FixWindowTest do 2 | use ExUnit.Case, async: true 3 | 4 | alias Hammer.Atomic.FixWindow 5 | 6 | setup do 7 | table = :ets.new(:hammer_atomic_fix_window_test, FixWindow.ets_opts()) 8 | {:ok, table: table} 9 | end 10 | 11 | describe "hit" do 12 | test "returns {:allow, 1} tuple on first access", %{table: table} do 13 | key = "key" 14 | scale = :timer.seconds(10) 15 | limit = 10 16 | 17 | assert {:allow, 1} = FixWindow.hit(table, key, scale, limit, 1) 18 | end 19 | 20 | test "returns {:allow, 4} tuple on in-limit checks", %{table: table} do 21 | key = "key" 22 | scale = :timer.minutes(10) 23 | limit = 10 24 | 25 | assert {:allow, 1} = FixWindow.hit(table, key, 
scale, limit, 1) 26 | assert {:allow, 2} = FixWindow.hit(table, key, scale, limit, 1) 27 | assert {:allow, 3} = FixWindow.hit(table, key, scale, limit, 1) 28 | assert {:allow, 4} = FixWindow.hit(table, key, scale, limit, 1) 29 | end 30 | 31 | test "returns expected tuples on mix of in-limit and out-of-limit checks", %{table: table} do 32 | key = "key" 33 | scale = :timer.minutes(10) 34 | limit = 2 35 | 36 | assert {:allow, 1} = FixWindow.hit(table, key, scale, limit, 1) 37 | assert {:allow, 2} = FixWindow.hit(table, key, scale, limit, 1) 38 | assert {:deny, _retry_after} = FixWindow.hit(table, key, scale, limit, 1) 39 | assert {:deny, _retry_after} = FixWindow.hit(table, key, scale, limit, 1) 40 | end 41 | 42 | test "returns expected tuples after waiting for the next window", %{table: table} do 43 | key = "key" 44 | scale = 100 45 | limit = 2 46 | 47 | assert {:allow, 1} = FixWindow.hit(table, key, scale, limit, 1) 48 | assert {:allow, 2} = FixWindow.hit(table, key, scale, limit, 1) 49 | assert {:deny, retry_after} = FixWindow.hit(table, key, scale, limit, 1) 50 | 51 | :timer.sleep(retry_after) 52 | 53 | assert {:allow, 1} = FixWindow.hit(table, key, scale, limit, 1) 54 | assert {:allow, 2} = FixWindow.hit(table, key, scale, limit, 1) 55 | assert {:deny, _retry_after} = FixWindow.hit(table, key, scale, limit, 1) 56 | end 57 | 58 | test "with custom increment", %{table: table} do 59 | key = "cost-key" 60 | scale = :timer.seconds(1) 61 | limit = 10 62 | 63 | assert {:allow, 4} = FixWindow.hit(table, key, scale, limit, 4) 64 | assert {:allow, 9} = FixWindow.hit(table, key, scale, limit, 5) 65 | assert {:deny, _retry_after} = FixWindow.hit(table, key, scale, limit, 3) 66 | end 67 | 68 | test "mixing default and custom increment", %{table: table} do 69 | key = "cost-key" 70 | scale = :timer.seconds(1) 71 | limit = 10 72 | 73 | assert {:allow, 3} = FixWindow.hit(table, key, scale, limit, 3) 74 | assert {:allow, 4} = FixWindow.hit(table, key, scale, limit, 1) 75 | assert 
{:allow, 5} = FixWindow.hit(table, key, scale, limit, 1) 76 | assert {:allow, 9} = FixWindow.hit(table, key, scale, limit, 4) 77 | assert {:allow, 10} = FixWindow.hit(table, key, scale, limit, 1) 78 | assert {:deny, _retry_after} = FixWindow.hit(table, key, scale, limit, 2) 79 | end 80 | 81 | test "race condition", %{table: table} do 82 | key = "key" 83 | scale = :timer.seconds(1) 84 | limit = 10 85 | 86 | # Start two processes 87 | 88 | spawn_link(fn -> 89 | for _ <- 1..2 do 90 | FixWindow.hit(table, key, scale, limit, 1) 91 | end 92 | end) 93 | 94 | spawn_link(fn -> 95 | for _ <- 1..2 do 96 | FixWindow.hit(table, key, scale, limit, 1) 97 | end 98 | end) 99 | 100 | # Wait for both processes to finish 101 | Process.sleep(100) 102 | 103 | # Check the final count 104 | assert FixWindow.get(table, key, scale) == 4 105 | end 106 | end 107 | 108 | describe "inc" do 109 | test "increments the count for the given key and scale", %{table: table} do 110 | key = "key" 111 | scale = :timer.seconds(10) 112 | 113 | assert FixWindow.get(table, key, scale) == 0 114 | 115 | assert FixWindow.inc(table, key, scale, 1) == 1 116 | assert FixWindow.get(table, key, scale) == 1 117 | 118 | assert FixWindow.inc(table, key, scale, 1) == 2 119 | assert FixWindow.get(table, key, scale) == 2 120 | 121 | assert FixWindow.inc(table, key, scale, 1) == 3 122 | assert FixWindow.get(table, key, scale) == 3 123 | 124 | assert FixWindow.inc(table, key, scale, 1) == 4 125 | assert FixWindow.get(table, key, scale) == 4 126 | end 127 | end 128 | 129 | describe "get/set" do 130 | test "get returns the count set for the given key and scale", %{table: table} do 131 | key = "key" 132 | scale = :timer.seconds(10) 133 | count = 10 134 | 135 | assert FixWindow.get(table, key, scale) == 0 136 | assert FixWindow.set(table, key, scale, count) == count 137 | assert FixWindow.get(table, key, scale) == count 138 | end 139 | end 140 | end 141 | 
-------------------------------------------------------------------------------- /test/hammer/atomic/leaky_bucket_test.exs: --------------------------------------------------------------------------------
defmodule Hammer.Atomic.LeakyBucketTest do
  use ExUnit.Case, async: true
  alias Hammer.Atomic.LeakyBucket

  # NOTE(review): this fixture module is not referenced by any test below; it
  # appears to exist to exercise `use Hammer` macro expansion at compile time
  # for the atomic leaky-bucket configuration — confirm before removing.
  defmodule RateLimitLeakyBucket do
    use Hammer, backend: :atomic, algorithm: :leaky_bucket
  end

  setup do
    # A fresh ETS table per test so bucket levels never leak between tests.
    table = :ets.new(:hammer_atomic_leaky_bucket_test, LeakyBucket.ets_opts())
    {:ok, table: table}
  end

  describe "hit/get" do
    test "returns {:allow, 1} tuple on first access", %{table: table} do
      key = "key"
      leak_rate = 10
      capacity = 10

      assert {:allow, 1} = LeakyBucket.hit(table, key, leak_rate, capacity, 1)
    end

    test "returns {:allow, 4} tuple on in-limit checks", %{table: table} do
      key = "key"
      leak_rate = 2
      capacity = 10

      assert {:allow, 1} = LeakyBucket.hit(table, key, leak_rate, capacity, 1)
      assert {:allow, 2} = LeakyBucket.hit(table, key, leak_rate, capacity, 1)
      assert {:allow, 3} = LeakyBucket.hit(table, key, leak_rate, capacity, 1)
      assert {:allow, 4} = LeakyBucket.hit(table, key, leak_rate, capacity, 1)
    end

    test "returns expected tuples on mix of in-limit and out-of-limit checks", %{table: table} do
      key = "key"
      leak_rate = 1
      capacity = 2

      assert {:allow, 1} = LeakyBucket.hit(table, key, leak_rate, capacity, 1)
      assert {:allow, 2} = LeakyBucket.hit(table, key, leak_rate, capacity, 1)

      # Full bucket at 1 unit/s: the next free slot is exactly 1000ms away.
      assert {:deny, 1000} = LeakyBucket.hit(table, key, leak_rate, capacity, 1)
      assert {:deny, _retry_after} = LeakyBucket.hit(table, key, leak_rate, capacity, 1)
    end

    test "returns expected tuples after waiting for the next window", %{table: table} do
      key = "key"
      leak_rate = 1
      capacity = 2

      assert {:allow, 1} = LeakyBucket.hit(table, key, leak_rate, capacity, 1)
      assert {:allow, 2} = LeakyBucket.hit(table, key, leak_rate, capacity, 1)

      assert {:deny, retry_after} = LeakyBucket.hit(table, key, leak_rate, capacity, 1)

      :timer.sleep(retry_after)

      # One unit has leaked out, so this hit fills the bucket back to level 2.
      assert {:allow, 2} = LeakyBucket.hit(table, key, leak_rate, capacity, 1)
      assert {:deny, _retry_after} = LeakyBucket.hit(table, key, leak_rate, capacity, 1)
    end

    test "race condition", %{table: table} do
      key = "key"
      leak_rate = 1
      capacity = 4

      # Two concurrent writers, two hits each. Awaiting the tasks (instead of
      # the previous spawn_link + fixed Process.sleep(100)) makes the test
      # deterministic: it can no longer pass or fail on scheduler timing.
      tasks =
        for _ <- 1..2 do
          Task.async(fn ->
            for _ <- 1..2 do
              LeakyBucket.hit(table, key, leak_rate, capacity, 1)
            end
          end)
        end

      Task.await_many(tasks)

      # All four concurrent hits must be accounted for exactly once.
      assert LeakyBucket.get(table, key) == 4
    end
  end

  describe "get" do
    test "get returns current bucket level", %{table: table} do
      key = "key"
      leak_rate = 1
      capacity = 10

      assert LeakyBucket.get(table, key) == 0

      assert {:allow, _} = LeakyBucket.hit(table, key, leak_rate, capacity, 4)
      assert LeakyBucket.get(table, key) == 4

      assert {:allow, _} = LeakyBucket.hit(table, key, leak_rate, capacity, 3)
      assert LeakyBucket.get(table, key) == 7
    end
  end
end
-------------------------------------------------------------------------------- /test/hammer/atomic/token_bucket_test.exs: --------------------------------------------------------------------------------
defmodule Hammer.Atomic.TokenBucketTest do
  use ExUnit.Case, async: true
  alias Hammer.Atomic.TokenBucket

  # NOTE(review): fixture module, not referenced below; exercises the
  # `use Hammer` macro for the atomic token-bucket configuration.
  defmodule RateLimitTokenBucket do
    use Hammer, backend: :atomic, algorithm: :token_bucket
  end

  setup do
    table = :ets.new(:hammer_atomic_token_bucket_test, TokenBucket.ets_opts())
    {:ok, table: table}
  end

  describe "hit/get" do
    test "returns {:allow, 9} tuple on first access", %{table: table} do
      key = "key"
      refill_rate = 10
      capacity = 10

      # The bucket starts full; the reply carries the tokens left after the hit.
      assert {:allow, 9} = TokenBucket.hit(table, key, refill_rate, capacity, 1)
    end

    test "returns {:allow, 6} tuple on in-limit checks", %{table: table} do
      key = "key"
      refill_rate = 2
      capacity = 10

      assert {:allow, 9} = TokenBucket.hit(table, key, refill_rate, capacity, 1)
      assert {:allow, 8} = TokenBucket.hit(table, key, refill_rate, capacity, 1)
      assert {:allow, 7} = TokenBucket.hit(table, key, refill_rate, capacity, 1)
      assert {:allow, 6} = TokenBucket.hit(table, key, refill_rate, capacity, 1)
    end

    test "returns expected tuples on mix of in-limit and out-of-limit checks", %{table: table} do
      key = "key"
      refill_rate = 1
      capacity = 2

      assert {:allow, 1} = TokenBucket.hit(table, key, refill_rate, capacity, 1)
      assert {:allow, 0} = TokenBucket.hit(table, key, refill_rate, capacity, 1)

      # Empty bucket at 1 token/s: the next token arrives in exactly 1000ms.
      assert {:deny, 1000} = TokenBucket.hit(table, key, refill_rate, capacity, 1)
      assert {:deny, _retry_after} = TokenBucket.hit(table, key, refill_rate, capacity, 1)
    end

    test "returns expected tuples after waiting for the next window", %{table: table} do
      key = "key"
      refill_rate = 1
      capacity = 2

      assert {:allow, 1} = TokenBucket.hit(table, key, refill_rate, capacity, 1)
      assert {:allow, 0} = TokenBucket.hit(table, key, refill_rate, capacity, 1)

      assert {:deny, retry_after} = TokenBucket.hit(table, key, refill_rate, capacity, 1)

      :timer.sleep(retry_after)

      # Exactly one token was refilled and this hit consumes it again.
      assert {:allow, 0} = TokenBucket.hit(table, key, refill_rate, capacity, 1)
      assert {:deny, _retry_after} = TokenBucket.hit(table, key, refill_rate, capacity, 1)
    end

    test "handles costs greater than 1 correctly", %{table: table} do
      key = "key"
      refill_rate = 2
      capacity = 10

      # First hit with cost of 3 should succeed and leave 7 tokens
      assert {:allow, 7} = TokenBucket.hit(table, key, refill_rate, capacity, 3)

      # Second hit with cost of 4 should succeed and leave 3 tokens
      assert {:allow, 3} = TokenBucket.hit(table, key, refill_rate, capacity, 4)

      # Third hit with cost of 4 should be denied (only 3 tokens left)
      assert {:deny, _retry_after} = TokenBucket.hit(table, key, refill_rate, capacity, 4)

      # Small cost of 2 should still succeed since we have 3 tokens
      assert {:allow, 1} = TokenBucket.hit(table, key, refill_rate, capacity, 2)
    end

    test "race condition", %{table: table} do
      key = "key"
      refill_rate = 1
      capacity = 4

      # Await both writers instead of sleeping a fixed 100ms — the previous
      # spawn_link + Process.sleep(100) synchronization was timing-dependent.
      tasks =
        for _ <- 1..2 do
          Task.async(fn ->
            for _ <- 1..2 do
              TokenBucket.hit(table, key, refill_rate, capacity, 1)
            end
          end)
        end

      Task.await_many(tasks)

      # Four tokens consumed by four hits: the bucket must be empty.
      assert TokenBucket.get(table, key) == 0
    end
  end

  describe "get" do
    test "get returns current bucket level", %{table: table} do
      key = "key"
      refill_rate = 1
      capacity = 10

      assert TokenBucket.get(table, key) == 0

      assert {:allow, _} = TokenBucket.hit(table, key, refill_rate, capacity, 4)
      assert TokenBucket.get(table, key) == 6

      assert {:allow, _} = TokenBucket.hit(table, key, refill_rate, capacity, 3)
      assert TokenBucket.get(table, key) == 3
    end
  end
end
-------------------------------------------------------------------------------- /test/hammer/atomic_test.exs: -------------------------------------------------------------------------------- 1 | defmodule
Hammer.AtomicTest do
  use ExUnit.Case, async: true

  defmodule RateAtomicLimit do
    use Hammer, backend: :atomic
  end

  setup do
    # Boot the limiter under the test supervisor; ExUnit tears it down after
    # each test, so state never carries over.
    start_supervised!(RateAtomicLimit)
    :ok
  end

  describe "hit through actual RateAtomicLimit implementation" do
    test "returns {:allow, 4} tuple on in-limit checks" do
      key = "key"
      scale = :timer.minutes(10)
      limit = 10

      # Four consecutive hits under the limit are all allowed, counting up.
      for expected <- 1..4 do
        assert {:allow, ^expected} = RateAtomicLimit.hit(key, scale, limit)
      end
    end

    test "returns expected tuples on mix of in-limit and out-of-limit checks" do
      key = "key"
      scale = :timer.minutes(10)
      limit = 2

      assert {:allow, 1} = RateAtomicLimit.hit(key, scale, limit)
      assert {:allow, 2} = RateAtomicLimit.hit(key, scale, limit)

      # Once the limit is reached, every further hit in the window is denied.
      assert {:deny, _retry_after} = RateAtomicLimit.hit(key, scale, limit)
      assert {:deny, _retry_after} = RateAtomicLimit.hit(key, scale, limit)
    end

    test "with custom increment" do
      key = "cost-key"
      scale = :timer.seconds(1)
      limit = 10

      assert {:allow, 4} = RateAtomicLimit.hit(key, scale, limit, 4)
      assert {:allow, 9} = RateAtomicLimit.hit(key, scale, limit, 5)
      assert {:deny, _retry_after} = RateAtomicLimit.hit(key, scale, limit, 3)
    end
  end

  describe "inc through actual RateAtomicLimit implementation" do
    test "increments the count for the given key and scale" do
      key = "key"
      scale = :timer.seconds(10)

      assert RateAtomicLimit.get(key, scale) == 0

      # Each inc/2 bumps the counter by one and get/2 observes the new value.
      for expected <- 1..4 do
        assert RateAtomicLimit.inc(key, scale) == expected
        assert RateAtomicLimit.get(key, scale) == expected
      end
    end
  end

  describe "get/set through actual RateAtomicLimit implementation" do
    test "get returns the count set for the given key and scale" do
      key = "key"
      scale = :timer.seconds(10)
      count = 10

      assert RateAtomicLimit.get(key, scale) == 0
      assert RateAtomicLimit.set(key, scale, count) == count
      assert RateAtomicLimit.get(key, scale) == count
    end
  end
end
-------------------------------------------------------------------------------- /test/hammer/ets/clean_test.exs: --------------------------------------------------------------------------------
defmodule Hammer.ETS.CleanTest do
  use ExUnit.Case, async: true

  defmodule RateLimit do
    use Hammer, backend: :ets
  end

  defmodule RateLimitSlidingWindow do
    use Hammer, backend: :ets, algorithm: :sliding_window
  end

  defmodule RateLimitLeakyBucket do
    use Hammer, backend: :ets, algorithm: :leaky_bucket
  end

  defmodule RateLimitTokenBucket do
    use Hammer, backend: :ets, algorithm: :token_bucket
  end

  test "cleaning works for fix window/default ets backend" do
    start_supervised!({RateLimit, clean_period: 100})

    key = "key"
    scale = 100
    count = 10

    assert {:allow, 1} = RateLimit.hit(key, scale, count)

    # The entry exists right after the hit…
    assert [_] = :ets.tab2list(RateLimit)

    # …and the periodic sweep removes it once it has expired.
    :timer.sleep(150)

    assert :ets.tab2list(RateLimit) == []
  end

  test "cleaning works for sliding window" do
    start_supervised!({RateLimitSlidingWindow, clean_period: 100})

    key = "key"
    scale = 100
    count = 10

    assert {:allow, 1} = RateLimitSlidingWindow.hit(key, scale, count)

    assert [_] = :ets.tab2list(RateLimitSlidingWindow)

    :timer.sleep(150)

    assert :ets.tab2list(RateLimitSlidingWindow) == []
  end

  test "cleaning works for token bucket" do
    start_supervised!({RateLimitTokenBucket, clean_period: 100})

    key = "key"
    refill_rate = 1
    capacity = 10

    assert {:allow, 9} = RateLimitTokenBucket.hit(key, refill_rate, capacity, 1)

    assert [_] = :ets.tab2list(RateLimitTokenBucket)

    :timer.sleep(150)

    assert :ets.tab2list(RateLimitTokenBucket) == []
  end

  test "cleaning works for leaky bucket" do
    start_supervised!({RateLimitLeakyBucket, clean_period: 100})

    key = "key"
    leak_rate = 1
    capacity = 10

    assert {:allow, 1} = RateLimitLeakyBucket.hit(key, leak_rate, capacity, 1)

    assert [_] = :ets.tab2list(RateLimitLeakyBucket)

    :timer.sleep(150)

    assert :ets.tab2list(RateLimitLeakyBucket) == []
  end
end
-------------------------------------------------------------------------------- /test/hammer/ets/fix_window_test.exs: --------------------------------------------------------------------------------
defmodule Hammer.ETS.FixWindowTest do
  use ExUnit.Case, async: true

  alias Hammer.ETS.FixWindow

  setup do
    # A private table per test keeps windows from bleeding across tests.
    table = :ets.new(:hammer_fix_window_test, FixWindow.ets_opts())
    {:ok, table: table}
  end

  describe "hit" do
    test "returns {:allow, 1} tuple on first access", %{table: table} do
      key = "key"
      scale = :timer.seconds(10)
      limit = 10

      assert {:allow, 1} = FixWindow.hit(table, key, scale, limit, 1)
    end

    test "returns {:allow, 4} tuple on in-limit checks", %{table: table} do
      key = "key"
      scale = :timer.minutes(10)
      limit = 10

      for expected <- 1..4 do
        assert {:allow, ^expected} = FixWindow.hit(table, key, scale, limit, 1)
      end
    end

    test "returns expected tuples on mix of in-limit and out-of-limit checks", %{table: table} do
      key = "key"
      scale = :timer.minutes(10)
      limit = 2

      assert {:allow, 1} = FixWindow.hit(table, key, scale, limit, 1)
      assert {:allow, 2} = FixWindow.hit(table, key, scale, limit, 1)
      assert {:deny, _retry_after} = FixWindow.hit(table, key, scale, limit, 1)
      assert {:deny, _retry_after} = FixWindow.hit(table, key, scale, limit, 1)
    end

    test "returns expected tuples after waiting for the next window", %{table: table} do
      key = "key"
      scale = 100
      limit = 2

      assert {:allow, 1} = FixWindow.hit(table, key, scale, limit, 1)
      assert {:allow, 2} = FixWindow.hit(table, key, scale, limit, 1)
      assert {:deny, retry_after} = FixWindow.hit(table, key, scale, limit, 1)

      # After the window rolls over, the counter starts from scratch.
      :timer.sleep(retry_after)

      assert {:allow, 1} = FixWindow.hit(table, key, scale, limit, 1)
      assert {:allow, 2} = FixWindow.hit(table, key, scale, limit, 1)
      assert {:deny, _retry_after} = FixWindow.hit(table, key, scale, limit, 1)
    end

    test "with custom increment", %{table: table} do
      key = "cost-key"
      scale = :timer.seconds(1)
      limit = 10

      assert {:allow, 4} = FixWindow.hit(table, key, scale, limit, 4)
      assert {:allow, 9} = FixWindow.hit(table, key, scale, limit, 5)
      assert {:deny, _retry_after} = FixWindow.hit(table, key, scale, limit, 3)
    end

    test "mixing default and custom increment", %{table: table} do
      key = "cost-key"
      scale = :timer.seconds(1)
      limit = 10

      # Costs 3+1+1+4+1 add up to exactly the limit of 10…
      for {cost, expected} <- [{3, 3}, {1, 4}, {1, 5}, {4, 9}, {1, 10}] do
        assert {:allow, ^expected} = FixWindow.hit(table, key, scale, limit, cost)
      end

      # …so any further cost in the same window is rejected.
      assert {:deny, _retry_after} = FixWindow.hit(table, key, scale, limit, 2)
    end
  end

  describe "inc" do
    test "increments the count for the given key and scale", %{table: table} do
      key = "key"
      scale = :timer.seconds(10)

      assert FixWindow.get(table, key, scale) == 0

      for expected <- 1..4 do
        assert FixWindow.inc(table, key, scale, 1) == expected
        assert FixWindow.get(table, key, scale) == expected
      end
    end
  end

  describe "get/set" do
    test "get returns the count set for the given key and scale", %{table: table} do
      key = "key"
      scale = :timer.seconds(10)
      count = 10

      assert FixWindow.get(table, key, scale) == 0
      assert FixWindow.set(table, key, scale, count) == count
      assert FixWindow.get(table, key, scale) == count
    end
  end
end
-------------------------------------------------------------------------------- /test/hammer/ets/leaky_bucket_test.exs: --------------------------------------------------------------------------------
defmodule Hammer.ETS.LeakyBucketTest do
  use ExUnit.Case, async: true
  alias Hammer.ETS.LeakyBucket

  defmodule RateLimitLeakyBucket do
    use Hammer, backend: :ets, algorithm: :leaky_bucket
  end

  setup do
    table = :ets.new(:hammer_leaky_bucket_test, LeakyBucket.ets_opts())
    {:ok, table: table}
  end

  describe "hit/get" do
    test "returns {:allow, 1} tuple on first access", %{table: table} do
      key = "key"
      leak_rate = 10
      capacity = 10

      assert {:allow, 1} = LeakyBucket.hit(table, key, leak_rate, capacity, 1)
    end

    test "returns {:allow, 4} tuple on in-limit
checks", %{table: table} do
      key = "key"
      leak_rate = 2
      capacity = 10

      for expected <- 1..4 do
        assert {:allow, ^expected} = LeakyBucket.hit(table, key, leak_rate, capacity, 1)
      end
    end

    test "returns expected tuples on mix of in-limit and out-of-limit checks", %{table: table} do
      key = "key"
      leak_rate = 1
      capacity = 2

      assert {:allow, 1} = LeakyBucket.hit(table, key, leak_rate, capacity, 1)
      assert {:allow, 2} = LeakyBucket.hit(table, key, leak_rate, capacity, 1)

      # Full bucket at 1 unit/s: the next free slot is exactly 1000ms away.
      assert {:deny, 1000} = LeakyBucket.hit(table, key, leak_rate, capacity, 1)
      assert {:deny, _retry_after} = LeakyBucket.hit(table, key, leak_rate, capacity, 1)
    end

    test "returns expected tuples after waiting for the next window", %{table: table} do
      key = "key"
      leak_rate = 1
      capacity = 2

      assert {:allow, 1} = LeakyBucket.hit(table, key, leak_rate, capacity, 1)
      assert {:allow, 2} = LeakyBucket.hit(table, key, leak_rate, capacity, 1)

      assert {:deny, retry_after} = LeakyBucket.hit(table, key, leak_rate, capacity, 1)

      :timer.sleep(retry_after)

      # One unit leaked away, so this hit fills the bucket back to level 2.
      assert {:allow, 2} = LeakyBucket.hit(table, key, leak_rate, capacity, 1)
      assert {:deny, _retry_after} = LeakyBucket.hit(table, key, leak_rate, capacity, 1)
    end
  end

  describe "get" do
    test "get returns current bucket level", %{table: table} do
      key = "key"
      leak_rate = 1
      capacity = 10

      assert LeakyBucket.get(table, key) == 0

      assert {:allow, _} = LeakyBucket.hit(table, key, leak_rate, capacity, 4)
      assert LeakyBucket.get(table, key) == 4

      assert {:allow, _} = LeakyBucket.hit(table, key, leak_rate, capacity, 3)
      assert LeakyBucket.get(table, key) == 7
    end
  end
end
-------------------------------------------------------------------------------- /test/hammer/ets/sliding_window_test.exs: --------------------------------------------------------------------------------
defmodule Hammer.ETS.SlidingWindowTest do
  use ExUnit.Case, async: true

  alias Hammer.ETS.SlidingWindow

  setup do
    table = :ets.new(:hammer_sliding_window_test, SlidingWindow.ets_opts())
    {:ok, table: table}
  end

  describe "hit/get" do
    test "returns {:allow, 1} tuple on first access", %{table: table} do
      key = "key"
      scale = :timer.seconds(10)
      limit = 10

      assert {:allow, 1} = SlidingWindow.hit(table, key, scale, limit)
    end

    test "returns {:allow, 4} tuple on in-limit checks", %{table: table} do
      key = "key"
      scale = :timer.minutes(10)
      limit = 10

      for expected <- 1..4 do
        assert {:allow, ^expected} = SlidingWindow.hit(table, key, scale, limit)
      end
    end

    test "returns expected tuples on mix of in-limit and out-of-limit checks", %{table: table} do
      key = "key"
      scale = :timer.minutes(10)
      limit = 2

      assert {:allow, 1} = SlidingWindow.hit(table, key, scale, limit)
      assert {:allow, 2} = SlidingWindow.hit(table, key, scale, limit)
      assert {:deny, _retry_after} = SlidingWindow.hit(table, key, scale, limit)
      assert {:deny, _retry_after} = SlidingWindow.hit(table, key, scale, limit)
    end

    test "returns expected tuples after waiting for the next window", %{table: table} do
      key = "key"
      scale = 100
      limit = 2

      assert {:allow, 1} = SlidingWindow.hit(table, key, scale, limit)
      assert {:allow, 2} = SlidingWindow.hit(table, key, scale, limit)
      assert {:deny, retry_after} = SlidingWindow.hit(table, key, scale, limit)

      # Once the deny interval has elapsed, the window admits hits again.
      :timer.sleep(retry_after)

      assert {:allow, 1} = SlidingWindow.hit(table, key, scale, limit)
      assert {:allow, 2} = SlidingWindow.hit(table, key, scale, limit)
      assert {:deny, _retry_after} = SlidingWindow.hit(table, key, scale, limit)
    end
  end
end
-------------------------------------------------------------------------------- /test/hammer/ets/token_bucket_test.exs: --------------------------------------------------------------------------------
defmodule Hammer.ETS.TokenBucketTest do
  use ExUnit.Case, async: true
  alias Hammer.ETS.TokenBucket

  defmodule RateLimitTokenBucket do
    use Hammer, backend: :ets, algorithm: :token_bucket
  end

  setup do
    table = :ets.new(:hammer_token_bucket_test, TokenBucket.ets_opts())
    {:ok, table: table}
  end

  describe "hit/get" do
    test "returns {:allow, 9} tuple on first access", %{table: table} do
      key = "key"
      refill_rate = 10
      capacity = 10

      # The bucket starts full; the reply carries the tokens remaining.
      assert {:allow, 9} = TokenBucket.hit(table, key, refill_rate, capacity, 1)
    end

    test "returns {:allow, 6} tuple on in-limit checks", %{table: table} do
      key = "key"
      refill_rate = 2
      capacity = 10

      for remaining <- [9, 8, 7, 6] do
        assert {:allow, ^remaining} = TokenBucket.hit(table, key, refill_rate, capacity, 1)
      end
    end

    test "returns expected tuples on mix of in-limit and out-of-limit checks", %{table: table} do
      key = "key"
      refill_rate = 1
      capacity = 2

      assert {:allow, 1} = TokenBucket.hit(table, key, refill_rate, capacity, 1)
      assert {:allow, 0} = TokenBucket.hit(table, key, refill_rate, capacity, 1)

      # Empty bucket at 1 token/s: the next token arrives in exactly 1000ms.
      assert {:deny, 1000} = TokenBucket.hit(table, key, refill_rate, capacity, 1)
      assert {:deny, _retry_after} = TokenBucket.hit(table, key, refill_rate, capacity, 1)
    end

    test "returns expected tuples after waiting for the next window", %{table: table} do
      key = "key"
      refill_rate = 1
      capacity = 2

      assert {:allow, 1} = TokenBucket.hit(table, key, refill_rate, capacity, 1)
      assert {:allow, 0} = TokenBucket.hit(table, key, refill_rate, capacity, 1)

      assert {:deny, retry_after} = TokenBucket.hit(table, key, refill_rate, capacity, 1)

      :timer.sleep(retry_after)

      # One refilled token, immediately consumed again.
      assert {:allow, 0} = TokenBucket.hit(table, key, refill_rate, capacity, 1)
      assert {:deny, _retry_after} = TokenBucket.hit(table, key, refill_rate, capacity, 1)
    end

    test "handles costs greater than 1 correctly", %{table: table} do
      key = "key"
      refill_rate = 2
      capacity = 10

      # First hit with cost of 3 should succeed and leave 7 tokens
      assert {:allow, 7} = TokenBucket.hit(table, key, refill_rate, capacity, 3)

      # Second hit with cost of 4 should succeed and leave 3 tokens
      assert {:allow, 3} = TokenBucket.hit(table, key, refill_rate, capacity, 4)

      # Third hit with cost of 4 should be denied (only 3 tokens left)
      assert {:deny, _retry_after} = TokenBucket.hit(table, key, refill_rate, capacity, 4)

      # Small cost of 2 should still succeed since we have 3 tokens
      assert {:allow, 1} = TokenBucket.hit(table, key, refill_rate, capacity, 2)
    end
  end

  describe "get" do
    test "get returns current bucket level", %{table: table} do
      key = "key"
      refill_rate = 1
      capacity = 10

      assert TokenBucket.get(table, key) == 0

      assert {:allow, _} = TokenBucket.hit(table, key, refill_rate, capacity, 4)
      assert TokenBucket.get(table, key) == 6

      assert {:allow, _} = TokenBucket.hit(table, key, refill_rate, capacity, 3)
      assert TokenBucket.get(table, key) == 3
    end
  end
end
-------------------------------------------------------------------------------- /test/hammer/ets_test.exs: --------------------------------------------------------------------------------
defmodule Hammer.ETSTest do
  use ExUnit.Case, async: true

  defmodule RateLimit do
    use Hammer, backend: :ets
  end

  setup do
    # Supervised limiter; ExUnit stops it after each test.
    start_supervised!(RateLimit)
    :ok
  end

  describe "hit through actual RateLimit implementation" do
    test "returns {:allow, 4} tuple on in-limit checks" do
      key = "key"
      scale = :timer.minutes(10)
      limit = 10

      for expected <- 1..4 do
        assert {:allow, ^expected} = RateLimit.hit(key, scale, limit)
      end
    end

    test "returns expected tuples on mix of in-limit and out-of-limit checks" do
      key = "key"
      scale = :timer.minutes(10)
      limit = 2

      assert {:allow, 1} = RateLimit.hit(key, scale, limit)
      assert {:allow, 2} = RateLimit.hit(key, scale, limit)
      assert {:deny, _retry_after} = RateLimit.hit(key, scale, limit)
      assert {:deny, _retry_after} = RateLimit.hit(key, scale, limit)
    end

    test "with custom increment" do
      key = "cost-key"
      scale = :timer.seconds(1)
      limit = 10

      assert {:allow, 4} = RateLimit.hit(key, scale, limit, 4)
      assert {:allow, 9} = RateLimit.hit(key, scale, limit, 5)
      assert {:deny, _retry_after} = RateLimit.hit(key, scale, limit, 3)
    end
  end

  describe "inc through actual RateLimit implementation" do
    test "increments the count for the given key and scale" do
      key = "key"
      scale = :timer.seconds(10)

      assert RateLimit.get(key, scale) == 0

      for expected <- 1..4 do
        assert RateLimit.inc(key, scale) == expected
        assert RateLimit.get(key, scale) == expected
      end
    end
  end

  describe "get/set through actual RateLimit implementation" do
    test "get returns the count set for the given key and scale" do
      key = "key"
      scale = :timer.seconds(10)
      count = 10

      assert RateLimit.get(key, scale) == 0
      assert RateLimit.set(key, scale, count) == count
      assert RateLimit.get(key, scale) == count
    end
  end
end
-------------------------------------------------------------------------------- /test/test_helper.exs: --------------------------------------------------------------------------------
ExUnit.start()
--------------------------------------------------------------------------------