├── .credo.exs ├── .formatter.exs ├── .gitattributes ├── .github └── workflows │ └── ci.yml ├── .gitignore ├── LICENSE ├── README.md ├── benchmarks └── main.exs ├── coveralls.json ├── docs ├── extensions │ ├── custom-commands.md │ └── execution-lifecycle.md ├── general │ ├── batching-actions.md │ ├── local-persistence.md │ └── streaming-records.md ├── management │ ├── expiring-records.md │ ├── limiting-caches.md │ └── stats-gathering.md ├── migrations │ ├── migrating-to-v2.md │ ├── migrating-to-v3.md │ └── migrating-to-v4.md ├── overview.md ├── routing │ ├── cache-routers.md │ └── distributed-caches.md └── warming │ ├── proactive-warming.md │ └── reactive-warming.md ├── lib ├── cachex.ex └── cachex │ ├── actions.ex │ ├── actions │ ├── clear.ex │ ├── del.ex │ ├── empty.ex │ ├── exists.ex │ ├── expire.ex │ ├── export.ex │ ├── fetch.ex │ ├── get.ex │ ├── get_and_update.ex │ ├── import.ex │ ├── incr.ex │ ├── inspect.ex │ ├── invoke.ex │ ├── keys.ex │ ├── prune.ex │ ├── purge.ex │ ├── put.ex │ ├── put_many.ex │ ├── refresh.ex │ ├── reset.ex │ ├── restore.ex │ ├── save.ex │ ├── size.ex │ ├── stats.ex │ ├── stream.ex │ ├── take.ex │ ├── touch.ex │ ├── transaction.ex │ ├── ttl.ex │ ├── update.ex │ └── warm.ex │ ├── application.ex │ ├── error.ex │ ├── hook.ex │ ├── limit │ ├── accessed.ex │ ├── evented.ex │ └── scheduled.ex │ ├── options.ex │ ├── provision.ex │ ├── query.ex │ ├── router.ex │ ├── router │ ├── jump.ex │ ├── local.ex │ ├── mod.ex │ ├── ring.ex │ └── ring │ │ └── monitor.ex │ ├── services.ex │ ├── services │ ├── courier.ex │ ├── incubator.ex │ ├── informant.ex │ ├── janitor.ex │ ├── locksmith.ex │ ├── locksmith │ │ └── queue.ex │ ├── overseer.ex │ └── steward.ex │ ├── spec.ex │ ├── spec │ └── validator.ex │ ├── stats.ex │ └── warmer.ex ├── mix.exs ├── scripts └── overview.exs └── test ├── cachex ├── actions │ ├── clear_test.exs │ ├── decr_test.exs │ ├── del_test.exs │ ├── empty_test.exs │ ├── execute_test.exs │ ├── exists_test.exs │ ├── expire_at_test.exs │ ├── expire_test.exs │ ├── export_test.exs │ ├── fetch_test.exs │ ├── get_and_update_test.exs │ ├── get_test.exs │ ├── import_test.exs │ ├── incr_test.exs │ ├── inspect_test.exs │ ├── invoke_test.exs │ ├── keys_test.exs │ ├── persist_test.exs │ ├── prune_test.exs │ ├── purge_test.exs │ ├── put_many_test.exs │ ├── put_test.exs │ ├── refresh_test.exs │ ├── reset_test.exs │ ├── restore_test.exs │ ├── save_test.exs │ ├── size_test.exs │ ├── stats_test.exs │ ├── stream_test.exs │ ├── take_test.exs │ ├── touch_test.exs │ ├── transaction_test.exs │ ├── ttl_test.exs │ ├── update_test.exs │ └── warm_test.exs ├── actions_test.exs ├── error_test.exs ├── hook_test.exs ├── limit │ ├── accessed_test.exs │ ├── evented_test.exs │ └── scheduled_test.exs ├── options_test.exs ├── query_test.exs ├── router │ ├── jump_test.exs │ ├── local_test.exs │ ├── mod_test.exs │ └── ring_test.exs ├── router_test.exs ├── services │ ├── courier_test.exs │ ├── informant_test.exs │ ├── janitor_test.exs │ ├── locksmith_test.exs │ ├── overseer_test.exs │ └── steward_test.exs ├── services_test.exs ├── spec │ └── validator_test.exs ├── spec_test.exs ├── stats_test.exs ├── test │ ├── case.ex │ ├── hook │ │ ├── execute.ex │ │ └── forward.ex │ └── utils.ex └── warmer_test.exs ├── cachex_test.exs └── test_helper.exs /.formatter.exs: -------------------------------------------------------------------------------- 1 | [ 2 | # set of input files and directories to apply formatting to 3 | inputs: [ 4 | "{mix,.credo,.formatter}.exs", 5 | 
"{benchmarks,lib,scripts,test}/**/*.{ex,exs}" 6 | ], 7 | 8 | # match the Credo linter 9 | line_length: 100 10 | ] 11 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | * text=auto 2 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | branches: [main] 8 | 9 | jobs: 10 | build: 11 | name: Elixir ${{ matrix.elixir }} 12 | runs-on: ubuntu-latest 13 | container: 14 | image: elixir:${{ matrix.elixir }} 15 | strategy: 16 | fail-fast: false 17 | matrix: 18 | elixir: 19 | - '1.18' 20 | - '1.17' 21 | - '1.16' 22 | - '1.15' 23 | - '1.14' 24 | - '1.13' 25 | - '1.12' 26 | - '1.11' 27 | - '1.10' 28 | - '1.9' 29 | - '1.8' 30 | - '1.7' 31 | 32 | steps: 33 | - uses: actions/checkout@v4 34 | 35 | - name: Setup Environment 36 | run: | 37 | epmd -daemon 38 | mix local.hex --force 39 | mix local.rebar --force 40 | mix deps.get 41 | 42 | - name: Run Tests 43 | run: mix test --trace 44 | 45 | bench: 46 | if: github.ref == 'refs/heads/main' 47 | name: Benchmark 48 | runs-on: ubuntu-latest 49 | container: 50 | image: elixir:1.18 51 | steps: 52 | - uses: actions/checkout@v4 53 | 54 | - name: Setup Environment 55 | run: | 56 | epmd -daemon 57 | mix local.hex --force 58 | mix local.rebar --force 59 | mix deps.get 60 | 61 | - name: Run Benchmarks 62 | run: mix bench 63 | 64 | coverage: 65 | name: Coverage 66 | runs-on: ubuntu-latest 67 | container: 68 | image: elixir:1.18 69 | env: 70 | MIX_ENV: cover 71 | steps: 72 | - uses: actions/checkout@v4 73 | 74 | - name: Setup Environment 75 | run: | 76 | epmd -daemon 77 | mix local.hex --force 78 | mix local.rebar --force 79 | mix deps.get 80 | 81 | - name: Generate Coverage 82 | run: mix coveralls.github 83 | env: 84 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 85 | 86 | docs: 87 | name: Documentation 88 | runs-on: ubuntu-latest 89 | container: 90 | image: elixir:1.18 91 | steps: 92 | - uses: actions/checkout@v4 93 | 94 | - name: Setup Environment 95 | run: | 96 | epmd -daemon 97 | mix local.hex --force 98 | mix local.rebar --force 99 | mix deps.get 100 | 101 | - name: Build Documentation 102 | run: | 103 | mix docs 2>&1 | tee output.txt 104 | ! 
grep warning output.txt 105 | 106 | mv docs/overview.md docs/overview.md.1 107 | mix run scripts/overview.exs 108 | diff docs/overview.md docs/overview.md.1 > /dev/null 2>&1 109 | 110 | lint: 111 | name: Linting 112 | runs-on: ubuntu-latest 113 | container: 114 | image: elixir:1.18 115 | steps: 116 | - uses: actions/checkout@v4 117 | 118 | - name: Setup Environment 119 | run: | 120 | epmd -daemon 121 | mix local.hex --force 122 | mix local.rebar --force 123 | mix deps.get 124 | 125 | - name: Validate Formatting 126 | run: mix format --check-formatted 127 | 128 | - name: Validate Linting 129 | run: mix credo --all --format=oneline 130 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /_build 2 | /_native 3 | /benchmarks 4 | !/benchmarks/main.exs 5 | /cover 6 | /deps 7 | /diff 8 | /doc 9 | /patch 10 | .elixir_ls 11 | .tool-versions 12 | erl_crash.dump 13 | mix.lock 14 | *.ez 15 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Isaac Whitfield 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /benchmarks/main.exs: -------------------------------------------------------------------------------- 1 | import Cachex.Spec 2 | 3 | one_hour = :timer.hours(1) 4 | tomorrow = now() + 1000 * 60 * 60 * 24 5 | 6 | Application.ensure_all_started(:cachex) 7 | 8 | with_env = fn var, caches -> 9 | if System.get_env(var) in ["true", "1"] do 10 | caches 11 | else 12 | [] 13 | end 14 | end 15 | 16 | caches = 17 | List.flatten([ 18 | [ 19 | {:name, []}, 20 | {:state, [inspect: true]} 21 | ], 22 | with_env.("CACHEX_BENCH_COMPRESS", [ 23 | {:name_compressed, [compressed: true]}, 24 | {:state_compressed, [inspect: true, compressed: true]} 25 | ]), 26 | with_env.("CACHEX_BENCH_TRANSACTIONS", [ 27 | {:name_transactional, [transactional: true]}, 28 | {:state_transactional, [inspect: true, transactional: true]} 29 | ]) 30 | ]) 31 | 32 | inputs = 33 | for {name, options} <- caches do 34 | Cachex.start(name, options) 35 | 36 | Cachex.put(name, "decr_test", 0) 37 | Cachex.put(name, "expire_at_test", "expire_at_value") 38 | Cachex.put(name, "expire_test", "expire_value") 39 | Cachex.put(name, "fetch_test", "fetch_value") 40 | Cachex.put(name, "get_test", "get_value") 41 | Cachex.put(name, "gad_test", "gad_value") 42 | Cachex.put(name, "incr_test", 0) 43 | Cachex.put(name, "persist_test", 0) 44 | Cachex.put(name, "refresh_test", "refresh_value", expire: one_hour) 45 | Cachex.put(name, "touch_test", "touch_value", expire: one_hour) 46 | Cachex.put(name, "ttl_test", "ttl_value", expire: one_hour) 47 | Cachex.put(name, "update_test", "update_value") 48 | 49 | if Keyword.get(options, :inspect) do 50 | {"#{name}", Cachex.inspect!(name, :cache)} 51 | else 52 | {"#{name}", name} 53 | end 54 | end 55 | 56 | Benchee.run( 57 | %{ 58 | "count" => fn cache -> 59 | Cachex.size(cache, expired: false) 60 | end, 61 | "decr" => fn cache -> 62 | Cachex.decr(cache, "decr_test") 63 | end, 64 | "del" => fn cache -> 65 | Cachex.del(cache, "del_test") 66 | end, 67 | "empty?" => fn cache -> 68 | Cachex.empty?(cache) 69 | end, 70 | "exists?"
=> fn cache -> 71 | Cachex.exists?(cache, "exists_test") 72 | end, 73 | "expire" => fn cache -> 74 | Cachex.expire(cache, "expire_test", one_hour) 75 | end, 76 | "expire_at" => fn cache -> 77 | Cachex.expire_at(cache, "expire_at_test", tomorrow) 78 | end, 79 | "fetch" => fn cache -> 80 | Cachex.fetch(cache, "fetch_test", & &1) 81 | end, 82 | "get" => fn cache -> 83 | Cachex.get(cache, "get_test") 84 | end, 85 | "get_and_update" => fn cache -> 86 | Cachex.get_and_update(cache, "gad_test", & &1) 87 | end, 88 | "incr" => fn cache -> 89 | Cachex.incr(cache, "incr_test") 90 | end, 91 | "keys" => fn cache -> 92 | Cachex.keys(cache) 93 | end, 94 | "persist" => fn cache -> 95 | Cachex.persist(cache, "persist_test") 96 | end, 97 | "purge" => fn cache -> 98 | Cachex.purge(cache) 99 | end, 100 | "put" => fn cache -> 101 | Cachex.put(cache, "put_test", "put_value") 102 | end, 103 | "put_many" => fn cache -> 104 | Cachex.put_many(cache, [{"put_test", "put_value"}]) 105 | end, 106 | "refresh" => fn cache -> 107 | Cachex.refresh(cache, "refresh_test") 108 | end, 109 | "size" => fn cache -> 110 | Cachex.size(cache) 111 | end, 112 | "stats" => fn cache -> 113 | Cachex.stats(cache) 114 | end, 115 | "stream" => fn cache -> 116 | Cachex.stream(cache) 117 | end, 118 | "take" => fn cache -> 119 | Cachex.take(cache, "take_test") 120 | end, 121 | "touch" => fn cache -> 122 | Cachex.touch(cache, "touch_test") 123 | end, 124 | "ttl" => fn cache -> 125 | Cachex.ttl(cache, "ttl_test") 126 | end, 127 | "update" => fn cache -> 128 | Cachex.update(cache, "update_test", "update_value") 129 | end 130 | }, 131 | formatters: [ 132 | { 133 | Benchee.Formatters.Console, 134 | [ 135 | comparison: false, 136 | extended_statistics: false 137 | ] 138 | } 139 | ], 140 | inputs: Map.new(inputs), 141 | print: [ 142 | fast_warning: false 143 | ] 144 | ) 145 | -------------------------------------------------------------------------------- /coveralls.json: -------------------------------------------------------------------------------- 1 | { 2 | "coverage_options": { 3 | "treat_no_relevant_lines_as_covered": true 4 | }, 5 | "skip_files": [ 6 | "lib/cachex/spec.ex", 7 | "mix.exs" 8 | ] 9 | } 10 | -------------------------------------------------------------------------------- /docs/general/local-persistence.md: -------------------------------------------------------------------------------- 1 | # Local Persistence 2 | 3 | Cachex ships with basic support for saving a cache to a local file using the [External Term Format](https://www.erlang.org/doc/apps/erts/erl_ext_dist). These files can then be used to seed data into a new instance of a cache to persist values between cache instances. As it stands all persistence must be handled manually via the Cachex API, although additional features may be added in future to add convenience around this. 4 | 5 | ## Writing to Disk 6 | 7 | To save a cache to a file on disk, you can use the `Cachex.save/3` function. This function will handle compression automatically and populate the path on disk with a file you can import later. It should be noted that the internal format of this file should not be relied upon. 8 | 9 | ```elixir 10 | { :ok, true } = Cachex.save(:my_cache, "/tmp/my_cache.dat") 11 | ``` 12 | 13 | The above demonstrates how simple it is to save your cache to a location on disk (in this case `/tmp/my_cache.dat`). Any options can be provided as a `Keyword` list as an optional third parameter. 
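As one concrete example, the save implementation currently recognizes a `:buffer` option controlling how many entries are serialized per written batch. A minimal sketch (note that this is an internal tuning knob, so treat the exact default as an implementation detail):

```elixir
# save the cache, serializing 100 entries per written batch
{ :ok, true } = Cachex.save(:my_cache, "/tmp/my_cache.dat", buffer: 100)
```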
14 | 15 | ## Loading from Disk 16 | 17 | To seed a cache from an existing file, you can use `Cachex.restore/3`. This will *merge* the file into your cache, overwriting any clashing keys and maintaining any keys which existed in the cache beforehand. If you want a direct match of the file inside your cache, you should use `Cachex.clear/2` before loading your data. 18 | 19 | ```elixir 20 | # optionally clean your cache first 21 | { :ok, _amt } = Cachex.clear(:my_cache) 22 | 23 | # then you can load the existing save into your cache 24 | { :ok, true } = Cachex.restore(:my_cache, "/tmp/my_cache.dat") 25 | ``` 26 | 27 | Please note that loading from an existing file will maintain all existing expirations, and records which have already expired will *not* be added to the cache table. This should not be surprising, but it is worth calling out. 28 | -------------------------------------------------------------------------------- /docs/general/streaming-records.md: -------------------------------------------------------------------------------- 1 | # Streaming Records 2 | 3 | Cachex provides the ability to create an Elixir `Stream` seeded by the contents of a cache, using an ETS table continuation and `Stream.resource/3`. This then allows the developer to use any of the `Enum` or `Stream` module functions against the entries in a cache, which can be a very powerful and flexible tool. 4 | 5 | ## Basic Streams 6 | 7 | By default, `Cachex.stream/3` will return a `Stream` over all entries in a cache which are yet to expire (at the time of stream creation). These cache entries will be streamed as `Cachex.Spec.entry` records, so you can use pattern matching to pull any of the entry fields assuming you have `Cachex.Spec` imported: 8 | 9 | ```elixir 10 | # store some values in the cache 11 | Cachex.start(:my_cache) 12 | Cachex.put(:my_cache, "one", 1) 13 | Cachex.put(:my_cache, "two", 2) 14 | Cachex.put(:my_cache, "three", 3) 15 | 16 | # create our cache stream of all records 17 | { :ok, stream } = Cachex.stream(:my_cache) 18 | 19 | # sum up all the cache record values, which == 6 20 | Enum.reduce(stream, 0, fn entry(value: value), total -> 21 | total + value 22 | end) 23 | ``` 24 | 25 | ## Efficient Querying 26 | 27 | While the `Enum` module provides the ability to filter records easily, we can do better by pre-filtering using a match specification. Under the hood these matches are as defined by the Erlang documentation, and can be passed as the second argument to `Cachex.stream/3`. 28 | 29 | To avoid having to handle Cachex implementation details directly, the `Cachex.Query` module exposes a few functions designed to assist with creation of new queries. If we take our example above, we can use a query to sum only the odd numbers in the table without having to filter on the Elixir side: 30 | 31 | ```elixir 32 | # for matching 33 | import Cachex.Spec 34 | 35 | # store some values in the cache 36 | Cachex.start(:my_cache) 37 | Cachex.put(:my_cache, "one", 1) 38 | Cachex.put(:my_cache, "two", 2) 39 | Cachex.put(:my_cache, "three", 3) 40 | 41 | # generate our filter to find odd values 42 | filter = {:==, {:rem, :value, 2}, 1} 43 | 44 | # generate the query using the filter, only return `:value` 45 | query = Cachex.Query.build(where: filter, output: :value) 46 | 47 | # == 4 48 | :my_cache 49 | |> Cachex.stream!(query) 50 | |> Enum.sum() 51 | ``` 52 | 53 | Rather than retrieve and handle the whole cache entry, here we're using `:output` to choose only the `:value` column from each entry.
This lets us skip out on `Enum.reduce/3` and go directly to `Enum.sum/1`, much easier! 54 | 55 | It's important to note here that cache queries do *not* distinguish between expired and unexpired records in a cache; they match across all records within a cache. This is a change in Cachex v4.x to provide more flexibility in other areas of the Cachex library. If you want to filter out expired records, you can use the `Cachex.Query.unexpired/1` convenience function: 56 | 57 | ```elixir 58 | # store some values in the cache 59 | Cachex.start(:my_cache) 60 | Cachex.put(:my_cache, "one", 1) 61 | Cachex.put(:my_cache, "two", 2) 62 | Cachex.put(:my_cache, "three", 3) 63 | 64 | # generate our filter to find odd values 65 | filter = {:==, {:rem, :value, 2}, 1} 66 | 67 | # wrap our filter to only match unexpired values 68 | filter = Cachex.Query.unexpired(filter) 69 | 70 | # generate the query using the filter, only return `:value` 71 | query = Cachex.Query.build(where: filter, output: :value) 72 | 73 | # == 4 74 | :my_cache 75 | |> Cachex.stream!(query) 76 | |> Enum.sum() 77 | ``` 78 | 79 | This function accepts a query guard and wraps it with clauses to filter out expired records. The returned guard can then be passed to `Cachex.Query.build/1` to return only the unexpired records which match your query. This is all fairly simple, but it's definitely something to keep in mind when working with `Cachex.Query`! 80 | -------------------------------------------------------------------------------- /docs/management/stats-gathering.md: -------------------------------------------------------------------------------- 1 | # Gathering Statistics 2 | 3 | Cachex includes basic support for tracking statistics in a cache, so you can look at things like throughput and hit/miss rates. This is provided via the `Cachex.Stats` hook implementation. 4 | 5 | ## Configuration 6 | 7 | As of Cachex v4.x this is configured as a hook during cache initialization: 8 | 9 | ```elixir 10 | # include records 11 | import Cachex.Spec 12 | 13 | # create a cache with stats 14 | Cachex.start(:my_cache, 15 | hooks: [ 16 | hook(module: Cachex.Stats) 17 | ] 18 | ) 19 | 20 | # insert 100 keys 21 | for i <- 1..100 do 22 | Cachex.put!(:my_cache, i, i) 23 | end 24 | 25 | # generate both a cache hit and a miss 26 | { :ok, 1 } = Cachex.get(:my_cache, 1) 27 | { :ok, nil } = Cachex.get(:my_cache, 101) 28 | 29 | # print stats 30 | :my_cache 31 | |> Cachex.stats!() 32 | |> IO.inspect 33 | ``` 34 | 35 | Running this will give you a map of various statistics based on the actions and operations taken by your cache. 36 | 37 | ## Example Statistics 38 | 39 | The statistics map returned by `Cachex.stats/2` should look something like the example below (at the time of writing): 40 | 41 | ```elixir 42 | %{ 43 | meta: %{creation_date: 1726777631670}, 44 | hits: 1, 45 | misses: 1, 46 | hit_rate: 50.0, 47 | miss_rate: 50.0, 48 | calls: %{get: 2, put: 100}, 49 | operations: 102, 50 | writes: 100 51 | } 52 | ``` 53 | 54 | As you can see, we get the breakdown of calls to the cache, the hit/miss rate, the total writes to a cache, etc. This is useful when gauging how much time your cache is actually saving and allows you to determine that everything is working as intended. 55 | 56 | It should be noted that the output format of `Cachex.stats/2` is *not* considered part of the Public API for backwards compatibility; the shape of this may change as and when it's necessary to do so.
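Finally, note that calling `Cachex.stats/2` against a cache which does not have the statistics hook attached returns an error rather than an empty map, so it's worth handling that case explicitly. A minimal sketch, assuming a cache started without the hook:

```elixir
case Cachex.stats(:my_cache) do
  { :ok, stats } ->
    IO.inspect(stats)

  { :error, :stats_disabled } ->
    IO.puts("statistics are not enabled for this cache")
end
```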
57 | -------------------------------------------------------------------------------- /lib/cachex/actions.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions do 2 | @moduledoc false 3 | # Parent actions module for cache interactions. 4 | # 5 | # This module contains foundation actions required to implement cache actions, 6 | # such as typical CRUD style operations on cache entries. 7 | import Cachex.Spec 8 | 9 | # add some aliases 10 | alias Cachex.Services.Janitor 11 | 12 | ############## 13 | # Public API # 14 | ############## 15 | 16 | @doc """ 17 | Formats a fetched value into a Courier compatible tuple. 18 | 19 | If the value is tagged with `:commit`, `:ignore` or `:error`, 20 | it will be left alone; otherwise it will be wrapped and treated 21 | as a `:commit` Tuple. 22 | """ 23 | def format_fetch_value(value) do 24 | case value do 25 | {:error, _value} -> 26 | value 27 | 28 | {:commit, _value} -> 29 | value 30 | 31 | {:commit, _value, _options} -> 32 | value 33 | 34 | {:ignore, _value} -> 35 | value 36 | 37 | raw_value -> 38 | {:commit, raw_value} 39 | end 40 | end 41 | 42 | @doc """ 43 | Normalizes a commit formatted fetch value. 44 | 45 | This is simply compatibility for the options addition in v3.5, without 46 | breaking the previous versions of this library. 47 | """ 48 | def normalize_commit({:commit, value}), 49 | do: {:commit, value, []} 50 | 51 | def normalize_commit(value), 52 | do: value 53 | 54 | @doc """ 55 | Retrieves an entry from a cache. 56 | 57 | If the entry does not exist, a `nil` value will be returned. Likewise 58 | if the entry has expired, we lazily remove it (if enabled) and return 59 | a `nil` value. 60 | 61 | This will return an instance of an entry record as defined in the main 62 | `Cachex.Spec` module, rather than just the raw value. 63 | """ 64 | @spec read(Cachex.t(), any) :: Cachex.Spec.entry() | nil 65 | def read(cache(name: name) = cache, key) do 66 | case :ets.lookup(name, key) do 67 | [] -> 68 | nil 69 | 70 | [entry] -> 71 | case Janitor.expired?(cache, entry) do 72 | false -> 73 | entry 74 | 75 | true -> 76 | Cachex.del(cache, key, const(:purge_override)) 77 | nil 78 | end 79 | end 80 | end 81 | 82 | @doc """ 83 | Updates a collection of fields inside a cache entry. 84 | 85 | This is done in a single call due to the use of `:ets.update_element/3` which 86 | allows multiple changes in a group. This will return a boolean to represent 87 | whether the update was successful or not. 88 | 89 | Note that updates are atomic; either all updates will take place, or none will. 90 | """ 91 | @spec update(Cachex.t(), any, [tuple]) :: {:ok, boolean} 92 | def update(cache(name: name), key, changes), 93 | do: {:ok, :ets.update_element(name, key, changes)} 94 | 95 | @doc """ 96 | Writes a new entry into a cache. 97 | """ 98 | @spec write(Cachex.t(), Cachex.Spec.entries()) :: {:ok, boolean} 99 | def write(cache(name: name), entries), 100 | do: {:ok, :ets.insert(name, entries)} 101 | 102 | @doc """ 103 | Returns the operation used for a write based on a prior value. 
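A previous value of `nil` maps to a `:put` of a new entry, while any other value maps to an `:update` of the existing entry.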
104 | """ 105 | @spec write_op(atom) :: atom 106 | def write_op(nil), 107 | do: :put 108 | 109 | def write_op(_tag), 110 | do: :update 111 | end 112 | -------------------------------------------------------------------------------- /lib/cachex/actions/clear.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.Clear do 2 | @moduledoc false 3 | # Command module to allow the clearing of a cache. 4 | # 5 | # Clearing a cache means removing all items from inside the cache, regardless 6 | # of whether they should have been evicted or not. 7 | alias Cachex.Actions.Size 8 | alias Cachex.Services.Locksmith 9 | 10 | # import needed macros 11 | import Cachex.Spec 12 | 13 | ############## 14 | # Public API # 15 | ############## 16 | 17 | @doc """ 18 | Clears all items in a cache. 19 | 20 | The number of items removed from the cache will be returned to the caller, to 21 | make it clear exactly how much work each call is doing. 22 | 23 | This action executes inside a transaction to ensure that there are no keys under 24 | a lock - thus ensuring consistency (any locks are executed sequentially). 25 | """ 26 | def execute(cache(name: name) = cache, _options) do 27 | Locksmith.transaction(cache, [], fn -> 28 | evicted = 29 | cache 30 | |> Size.execute([]) 31 | |> handle_evicted 32 | 33 | true = :ets.delete_all_objects(name) 34 | 35 | evicted 36 | end) 37 | end 38 | 39 | ############### 40 | # Private API # 41 | ############### 42 | 43 | # Handles the result of a size() call. 44 | # 45 | # This just verifies that we can safely return a size. Being realistic, 46 | # this will almost always hit the top case - the latter is just provided 47 | # in order to avoid crashing if something goes totally wrong. 48 | defp handle_evicted({:ok, _size} = res), 49 | do: res 50 | end 51 | -------------------------------------------------------------------------------- /lib/cachex/actions/del.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.Del do 2 | @moduledoc false 3 | # Command module to allow removal of a cache entry. 4 | alias Cachex.Services.Locksmith 5 | 6 | # import required macros 7 | import Cachex.Spec 8 | 9 | ############## 10 | # Public API # 11 | ############## 12 | 13 | @doc """ 14 | Removes an entry from a cache by key. 15 | 16 | This command will always return a true value, signalling that the key no longer 17 | exists in the cache (regardless of whether it previously existed). 18 | 19 | Removal runs in a lock aware context, to ensure that we're not removing a key 20 | being used inside a transaction in other places in the codebase. 21 | """ 22 | def execute(cache(name: name) = cache, key, _options) do 23 | Locksmith.write(cache, [key], fn -> 24 | {:ok, :ets.delete(name, key)} 25 | end) 26 | end 27 | end 28 | -------------------------------------------------------------------------------- /lib/cachex/actions/empty.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.Empty do 2 | @moduledoc false 3 | # Command module to allow checking for cache population. 4 | # 5 | # This command is basically just sugar around the `size()` command by turning 6 | # the response into a boolean. This means that expiration of records is not 7 | # taken into account (lazy expiration has no effect here).
8 | alias Cachex.Actions.Size 9 | 10 | # import record set 11 | import Cachex.Spec 12 | 13 | ############## 14 | # Public API # 15 | ############## 16 | 17 | @doc """ 18 | Checks whether any entries exist in the cache. 19 | 20 | Emptiness is determined by the overall size of the cache, regardless of the 21 | expiration times set alongside keys. This means that you may have a non-empty 22 | cache, yet be unable to retrieve any keys due to having lazy expiration enabled. 23 | 24 | Internally this action is delegated through to the `size()` command and the 25 | returned numeric value is just "cast" to a boolean value. 26 | """ 27 | def execute(cache() = cache, _options) do 28 | {:ok, size} = Size.execute(cache, []) 29 | {:ok, size == 0} 30 | end 31 | end 32 | -------------------------------------------------------------------------------- /lib/cachex/actions/exists.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.Exists do 2 | @moduledoc false 3 | # Command module to allow checking for entry existence. 4 | # 5 | # This is very straightforward, but is a little more than an `:ets.member/2` 6 | # call as we also need to validate expiration time to stay consistent. 7 | alias Cachex.Actions 8 | 9 | # add required macros 10 | import Cachex.Spec 11 | 12 | ############## 13 | # Public API # 14 | ############## 15 | 16 | @doc """ 17 | Checks whether an entry exists in a cache. 18 | 19 | This is a little more involved than a straight ETS call, as we need to take 20 | the expiration time of the entry into account. As such, we call via the main 21 | `Cachex.Actions` module and just cast the result to a boolean. 22 | """ 23 | def execute(cache() = cache, key, _options), 24 | do: {:ok, !!Actions.read(cache, key)} 25 | end 26 | -------------------------------------------------------------------------------- /lib/cachex/actions/expire.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.Expire do 2 | @moduledoc false 3 | # Command module to allow setting entry expiration. 4 | # 5 | # This module is a little more involved than it would be as it's used as a 6 | # binding for other actions (such as removing expirations). As such, we have 7 | # to handle several edge cases with nil values. 8 | alias Cachex.Actions 9 | alias Cachex.Services.Locksmith 10 | 11 | # add required imports 12 | import Cachex.Spec 13 | 14 | ############## 15 | # Public API # 16 | ############## 17 | 18 | @doc """ 19 | Sets the expiration time on a given cache entry. 20 | 21 | If a negative expiration time is provided, the entry is immediately removed 22 | from the cache (as it means we have already expired). If a positive expiration 23 | time is provided, we update the touch time on the entry and update the expiration 24 | to the one provided. 25 | 26 | If the expiration provided is nil, we need to remove the expiration; so we update 27 | in the exact same way. This is done passively due to the fact that Erlang term order 28 | determines that `nil > -1 == true`. 29 | 30 | This command executes inside a lock aware context to ensure that the key isn't currently 31 | being used/modified/removed from another process in the application. 
32 | """ 33 | def execute(cache() = cache, key, expiration, _options) do 34 | Locksmith.write(cache, [key], fn -> 35 | case expiration > -1 do 36 | true -> 37 | Actions.update(cache, key, entry_mod_now(expiration: expiration)) 38 | 39 | false -> 40 | Cachex.del(cache, key, const(:purge_override)) 41 | end 42 | end) 43 | end 44 | end 45 | -------------------------------------------------------------------------------- /lib/cachex/actions/export.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.Export do 2 | @moduledoc false 3 | # Command module to allow exporting all cache entries as a list. 4 | # 5 | # This command is extremely expensive as it turns the entire cache table into 6 | # a list, and so should be used sparingly. It's provided purely because it's 7 | # the backing implementation of the `Cachex.save/3` command. 8 | alias Cachex.Query 9 | 10 | # add required imports 11 | import Cachex.Spec 12 | 13 | ############## 14 | # Public API # 15 | ############## 16 | 17 | @doc """ 18 | Retrieves all cache entries as a list. 19 | 20 | This action should only be used in the case of exports and/or debugging, due 21 | to the memory overhead involved, as well as the large concatenations. 22 | """ 23 | def execute(cache() = cache, _options) do 24 | query = Query.build() 25 | options = const(:local) ++ const(:notify_false) 26 | 27 | with {:ok, stream} <- Cachex.stream(cache, query, options) do 28 | {:ok, Enum.to_list(stream)} 29 | end 30 | end 31 | end 32 | -------------------------------------------------------------------------------- /lib/cachex/actions/fetch.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.Fetch do 2 | @moduledoc false 3 | # Command module to enable fetching on cache misses. 4 | # 5 | # This is a replacement for the `get()` command in Cachex v2 which would accept 6 | # a `:fallback` option to fetch on cache miss. It operates in the same way, except 7 | # that the function to use when fetching is an explicit argument. 8 | # 9 | # If the fetch function is not provided, the `fetch()` command will try to look up 10 | # a default fetch function from the cache state and use that instead. If neither 11 | # exists, an error will be returned. 12 | alias Cachex.Actions.Get 13 | alias Cachex.Services.Courier 14 | 15 | # provide needed macros 16 | import Cachex.Spec 17 | 18 | ############## 19 | # Public API # 20 | ############## 21 | 22 | @doc """ 23 | Retrieves an entry from a cache, falling back to fetching on a miss. 24 | 25 | The fallback argument can be treated as optional if a default fetch function is 26 | attached to the global cache at startup, in which case it will be executed instead. 27 | 28 | The fallback function is only executed if the key being retrieved does not exist 29 | in the cache; otherwise it is immediately returned. Any fetched values will be 30 | placed in the cache in order to allow read-through caches. 31 | """ 32 | def execute(cache() = cache, key, fallback, _options) do 33 | with {:ok, nil} <- Get.execute(cache, key, []) do 34 | Courier.dispatch(cache, key, generate_task(fallback, key)) 35 | end 36 | end 37 | 38 | ############### 39 | # Private API # 40 | ############### 41 | 42 | # Generates a courier task based on the arity of the fallback function.
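# Note that only fallbacks of arity 0 or 1 are matched below; providing a
# fallback of any other arity will raise a `CaseClauseError` at fetch time.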
defp generate_task(fallback, key) do 44 | case :erlang.fun_info(fallback)[:arity] do 45 | 0 -> fn -> fallback.() end 46 | 1 -> fn -> fallback.(key) end 47 | end 48 | end 49 | end 50 | -------------------------------------------------------------------------------- /lib/cachex/actions/get.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.Get do 2 | @moduledoc false 3 | # Command module to enable basic retrieval of cache entries. 4 | # 5 | # This command provides very little over the raw read actions provided by the 6 | # `Cachex.Actions` module, as most of the heavy lifting is done in there. The 7 | # only modification made is that the value is extracted, rather than returning 8 | # the entire entry. 9 | alias Cachex.Actions 10 | 11 | # we need our imports 12 | import Cachex.Spec 13 | 14 | ############## 15 | # Public API # 16 | ############## 17 | 18 | @doc """ 19 | Retrieves a value from inside the cache. 20 | """ 21 | def execute(cache() = cache, key, _options) do 22 | case Actions.read(cache, key) do 23 | entry(value: value) -> 24 | {:ok, value} 25 | 26 | nil -> 27 | {:ok, nil} 28 | end 29 | end 30 | end 31 | -------------------------------------------------------------------------------- /lib/cachex/actions/get_and_update.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.GetAndUpdate do 2 | @moduledoc false 3 | # Command module to enable transactional get/update semantics. 4 | # 5 | # This command is simply sugar, but is common enough that it deserved an explicit 6 | # implementation inside the API. It does take care of the transactional context 7 | # of the get/update semantics though, so it's potentially non-obvious. 8 | alias Cachex.Actions 9 | alias Cachex.Services.Locksmith 10 | 11 | # add needed imports 12 | import Cachex.Spec 13 | 14 | ############## 15 | # Public API # 16 | ############## 17 | 18 | @doc """ 19 | Retrieves an entry and updates it inside the cache. 20 | 21 | This is basically all sugar for `transaction -> set(fun(get()))` but it provides 22 | an easy-to-use way to update a value directly in the cache. Naturally this 23 | means that the key needs to be locked and so we use a transaction to provide 24 | this guarantee. 25 | 26 | If the key is not returned by the call to `:get`, then we have to set the new 27 | value in the cache directly. If it does exist, then we use the update actions 28 | to update the existing record. 29 | """ 30 | def execute(cache() = cache, key, update_fun, _options) do 31 | Locksmith.transaction(cache, [key], fn -> 32 | {_label, value} = Cachex.get(cache, key, []) 33 | 34 | formatted = 35 | value 36 | |> update_fun.() 37 | |> Actions.format_fetch_value() 38 | 39 | operation = Actions.write_op(value) 40 | normalized = Actions.normalize_commit(formatted) 41 | 42 | with {:commit, new_value, options} <- normalized do 43 | apply(Cachex, operation, [cache, key, new_value, options]) 44 | {:commit, new_value} 45 | end 46 | end) 47 | end 48 | end 49 | -------------------------------------------------------------------------------- /lib/cachex/actions/import.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.Import do 2 | @moduledoc false 3 | # Command module to allow importing cache entries from a list. 4 | # 5 | # This command should be considered expensive and should be used sparingly.
Due 6 | # to the requirement of being compatible with distributed caches, this cannot 7 | # use a simple `put_many/4` call; rather it needs to walk the full list. It's 8 | # provided as it's the backing implementation of the `Cachex.restore/3` command. 9 | import Cachex.Spec 10 | 11 | ############## 12 | # Public API # 13 | ############## 14 | 15 | @doc """ 16 | Imports all cache entries from a list into a cache. 17 | 18 | This action should only be used in the case of exports and/or debugging, due 19 | to the memory overhead involved, as well as the potential slowness of walking 20 | a large import set. 21 | """ 22 | def execute(cache() = cache, entries, _options), 23 | do: {:ok, Enum.reduce(entries, 0, &import(cache, &1, &2, now()))} 24 | 25 | ############### 26 | # Private API # 27 | ############### 28 | 29 | # Imports an entry directly when no TTL is included. 30 | # 31 | # As this is a direct import, we just use `Cachex.put/4` with the provided 32 | # key and value from the existing entry record - nothing special here. 33 | defp import(cache, entry(key: k, expiration: nil, value: v), c, _t) do 34 | Cachex.put!(cache, k, v, const(:notify_false)) 35 | c + 1 36 | end 37 | 38 | # Skips over entries which have already expired. 39 | # 40 | # This occurs in the case there was an existing modification time and expiration 41 | # but the expiration time would already have passed (so there's no point in 42 | # adding the record to the cache just to throw it away in future). 43 | defp import(_cache, entry(modified: m, expiration: e), c, t) when m + e < t, 44 | do: c 45 | 46 | # Imports an entry, using the current time to offset the TTL value. 47 | # 48 | # This is required to shift the TTLs set in a backup to match the current 49 | # import time, so that the rest of the lifetime of the key is the same. If 50 | # we didn't do this, the key would live longer in the cache than intended. 51 | defp import(cache, entry(key: k, modified: m, expiration: e, value: v), c, t) do 52 | Cachex.put!(cache, k, v, const(:notify_false) ++ [expire: m + e - t]) 53 | c + 1 54 | end 55 | end 56 | -------------------------------------------------------------------------------- /lib/cachex/actions/incr.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.Incr do 2 | @moduledoc false 3 | # Command module to enable incrementing cache entries. 4 | # 5 | # This operates on an ETS level for the actual update calls, rather than using 6 | # a transactional context. The result is a faster throughput with the same 7 | # behaviour aspects (but we still lock the key temporarily). 8 | alias Cachex.Options 9 | alias Cachex.Services.Janitor 10 | alias Cachex.Services.Locksmith 11 | 12 | # we need some imports 13 | import Cachex.Spec 14 | import Cachex.Error 15 | 16 | ############## 17 | # Public API # 18 | ############## 19 | 20 | @doc """ 21 | Increments a numeric value inside the cache. 22 | 23 | Increment calls execute inside a write lock to ensure that there are no 24 | writes happening due to the existence check before the actual increment 25 | call. This is annoyingly expensive, but is required to communicate whether 26 | the key existed already. 27 | 28 | This command will return an error if called on a non-numeric value. 
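For example, calling this against a key holding a non-numeric value (such as a binary) will return a `:non_numeric_value` error rather than raising to the caller.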
29 | """ 30 | def execute(cache(name: name) = cache, key, amount, options) do 31 | modify = entry_mod({:value, amount}) 32 | 33 | default = 34 | entry_now( 35 | key: key, 36 | value: Options.get(options, :default, &is_integer/1, 0), 37 | expiration: Janitor.expiration(cache, nil) 38 | ) 39 | 40 | Locksmith.write(cache, [key], fn -> 41 | try do 42 | {:ok, :ets.update_counter(name, key, modify, default)} 43 | rescue 44 | _ -> error(:non_numeric_value) 45 | end 46 | end) 47 | end 48 | end 49 | -------------------------------------------------------------------------------- /lib/cachex/actions/invoke.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.Invoke do 2 | @moduledoc false 3 | # Command module to enable custom command invocation. 4 | # 5 | # This module relies on commands attached to a cache at startup, and 6 | # does not allow for registration afterward. 7 | # 8 | # Invocations which require writes to the table are executed inside a 9 | # transactional context to ensure consistency. 10 | alias Cachex.Actions 11 | alias Cachex.Services.Locksmith 12 | 13 | # add our imports 14 | import Cachex.Spec 15 | import Cachex.Error 16 | 17 | ############## 18 | # Public API # 19 | ############## 20 | 21 | @doc """ 22 | Invokes a custom command on a cache. 23 | 24 | Command invocations allow a developer to attach common functions directly to a 25 | cache in order to easily share logic around a codebase. Values are passed through 26 | to a custom command for a given key, and based on the type of command might be 27 | written back into the cache table. 28 | """ 29 | def execute(cache(commands: commands) = cache, cmd, key, _options) do 30 | commands 31 | |> Map.get(cmd) 32 | |> invoke(cache, key) 33 | end 34 | 35 | ############### 36 | # Private API # 37 | ############### 38 | 39 | # Executes a read command on the backing cache table. 40 | # 41 | # Values read back will be passed directly to the custom command implementation. 42 | # It should be noted that expirations are taken into account, and nil will be 43 | # passed through in expired/missing cases. 44 | defp invoke(command(type: :read, execute: exec), cache, key) do 45 | {_status_, value} = Cachex.get(cache, key, []) 46 | {:ok, exec.(value)} 47 | end 48 | 49 | # Executes a write command on the backing cache table. 50 | # 51 | # This will initialize a transactional context to ensure that modifications are 52 | # kept in sync with other actions happening at the same time. The return format 53 | # is enforced per the documentation and will crash out if something unexpected 54 | # is returned (i.e. a non-Tuple, or a Tuple with invalid size). 55 | defp invoke(command(type: :write, execute: exec), cache() = cache, key) do 56 | Locksmith.transaction(cache, [key], fn -> 57 | {_label, value} = Cachex.get(cache, key, []) 58 | {return, tempv} = exec.(value) 59 | 60 | tempv == value || 61 | apply( 62 | Cachex, 63 | Actions.write_op(value), 64 | [cache, key, tempv, []] 65 | ) 66 | 67 | {:ok, return} 68 | end) 69 | end 70 | 71 | # Returns an error due to a missing command. 72 | defp invoke(_invalid, _cache, _key), 73 | do: error(:invalid_command) 74 | end 75 | -------------------------------------------------------------------------------- /lib/cachex/actions/keys.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.Keys do 2 | @moduledoc false 3 | # Command module to allow retrieving keys from a cache. 
4 | # 5 | # The execution of this command will be quite slow. This is 6 | # to be expected and so it should be used wisely, or `stream()` should 7 | # be used instead. 8 | # 9 | # This command will take the expiration of entries into consideration. 10 | alias Cachex.Query 11 | 12 | # we need our imports 13 | import Cachex.Spec 14 | 15 | ############## 16 | # Public API # 17 | ############## 18 | 19 | @doc """ 20 | Retrieves a list of all keys in the cache. 21 | 22 | Only keys for entries which have not yet expired will be returned. This means 23 | that any entries currently inside the cache which are scheduled to be removed 24 | will not be included. 25 | """ 26 | def execute(cache(name: name), _options) do 27 | filter = Query.unexpired() 28 | clause = Query.build(where: filter, output: :key) 29 | 30 | {:ok, :ets.select(name, clause)} 31 | end 32 | end 33 | -------------------------------------------------------------------------------- /lib/cachex/actions/purge.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.Purge do 2 | @moduledoc false 3 | # Command module to allow manual purging of expired records. 4 | # 5 | # This is highly optimized using native ETS behaviour to purge as many 6 | # entries as possible at a high rate. It is used internally by the Janitor 7 | # service when purging on a schedule. 8 | alias Cachex.Query 9 | alias Cachex.Services.Locksmith 10 | 11 | # we need our imports 12 | import Cachex.Spec 13 | 14 | ############## 15 | # Public API # 16 | ############## 17 | 18 | @doc """ 19 | Purges all expired records from the cache. 20 | 21 | This is optimized to use native ETS batch deletes using match specifications, 22 | which are compiled using the utility functions found in `Cachex.Query`. 23 | 24 | This function is used by the Janitor process internally to share behaviour 25 | rather than reimplementing the same logic in two places. 26 | 27 | We naturally need a transaction context to ensure that we don't remove any 28 | records currently being used in a transaction block. 29 | """ 30 | def execute(cache(name: name) = cache, _options) do 31 | Locksmith.transaction(cache, [], fn -> 32 | filter = Query.expired() 33 | clause = Query.build(where: filter, output: true) 34 | 35 | {:ok, :ets.select_delete(name, clause)} 36 | end) 37 | end 38 | end 39 | -------------------------------------------------------------------------------- /lib/cachex/actions/put.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.Put do 2 | @moduledoc false 3 | # Command module to enable insertion of cache entries. 4 | # 5 | # This is the main entry point for adding new entries to the cache table. New 6 | # entries are inserted taking an optional expiration time into account. 7 | # 8 | # This command will use lock aware contexts to ensure that there are no key 9 | # clashes when writing values to the cache. 10 | alias Cachex.Actions 11 | alias Cachex.Options 12 | alias Cachex.Services.Janitor 13 | alias Cachex.Services.Locksmith 14 | 15 | # add our macros 16 | import Cachex.Spec 17 | 18 | ############## 19 | # Public API # 20 | ############## 21 | 22 | @doc """ 23 | Inserts a value into the cache. 24 | 25 | This takes expiration times into account before insertion and will operate 26 | inside a lock aware context to avoid clashing with other processes.
27 | """ 28 | def execute(cache() = cache, key, value, options) do 29 | expiration = Options.get(options, :expire, &is_integer/1) 30 | expiration = Janitor.expiration(cache, expiration) 31 | 32 | record = entry_now(key: key, expiration: expiration, value: value) 33 | 34 | Locksmith.write(cache, [key], fn -> 35 | Actions.write(cache, record) 36 | end) 37 | end 38 | end 39 | -------------------------------------------------------------------------------- /lib/cachex/actions/put_many.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.PutMany do 2 | @moduledoc false 3 | # Command module to enable batch insertion of cache entries. 4 | # 5 | # This is an alternative entry point for adding new entries to the cache, 6 | # specifically in the case of multiple entries at the same time. Performance 7 | # is enhanced in this use case, but lowered in the case of single entries. 8 | # 9 | # This command will use lock aware contexts to ensure that there are no key 10 | # clashes when writing values to the cache. 11 | alias Cachex.Actions 12 | alias Cachex.Options 13 | alias Cachex.Services.Janitor 14 | alias Cachex.Services.Locksmith 15 | 16 | # add our macros 17 | import Cachex.Spec 18 | import Cachex.Error 19 | 20 | ############## 21 | # Public API # 22 | ############## 23 | 24 | @doc """ 25 | Inserts a batch of values into the cache. 26 | 27 | This takes expiration times into account before insertion and will operate 28 | inside a lock aware context to avoid clashing with other processes. 29 | """ 30 | def execute(cache() = cache, pairs, options) do 31 | expiration = Options.get(options, :expire, &is_integer/1) 32 | expiration = Janitor.expiration(cache, expiration) 33 | 34 | with {:ok, keys, entries} <- map_entries(expiration, pairs, [], []) do 35 | Locksmith.write(cache, keys, fn -> 36 | Actions.write(cache, entries) 37 | end) 38 | end 39 | end 40 | 41 | ############### 42 | # Private API # 43 | ############### 44 | 45 | # Generates keys/entries from the provided list of pairs. 46 | # 47 | # Pairs must be Tuples of two, a key and a value. The keys will be 48 | # buffered into a list to be used to handle locking, whilst entries 49 | # will also be buffered into a batch of writes. 50 | # 51 | # If an unexpected pair is hit, an error will be returned and no 52 | # values will be written to the backing table. 53 | defp map_entries(exp, [{key, value} | pairs], keys, entries) do 54 | entry = entry_now(key: key, expiration: exp, value: value) 55 | map_entries(exp, pairs, [key | keys], [entry | entries]) 56 | end 57 | 58 | defp map_entries(_exp, [], [], _entries), 59 | do: {:ok, false} 60 | 61 | defp map_entries(_exp, [], keys, entries), 62 | do: {:ok, keys, entries} 63 | 64 | defp map_entries(_exp, _inv, _keys, _entries), 65 | do: error(:invalid_pairs) 66 | end 67 | -------------------------------------------------------------------------------- /lib/cachex/actions/refresh.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.Refresh do 2 | @moduledoc false 3 | # Command module to allow refreshing an expiration value. 4 | # 5 | # Refreshing an expiration is the notion of resetting an expiration time 6 | # as if it were just set. This is done by updating the modification time 7 | # (as this is used to calculate expiration offsets). 
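# For example, an entry written with a one hour expiration and refreshed
# thirty minutes later will live for a further hour from the refresh time.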
8 | # 9 | # The main advantage of this command is the ability to refresh an existing 10 | # expiration without knowing in advance what it was previously set to. 11 | alias Cachex.Actions 12 | alias Cachex.Services.Locksmith 13 | 14 | # we need our imports 15 | import Cachex.Spec 16 | 17 | ############## 18 | # Public API # 19 | ############## 20 | 21 | @doc """ 22 | Refreshes an expiration on a cache entry. 23 | 24 | If the entry currently has no expiration set, it is left unset. Otherwise the 25 | modified time of the entry is updated to the current time (as entry expiration is 26 | a function of modified time and expiration time). 27 | 28 | This operates inside a lock aware context to avoid clashing with other operations 29 | on the same key during execution. 30 | """ 31 | def execute(cache() = cache, key, _options) do 32 | Locksmith.write(cache, [key], fn -> 33 | Actions.update(cache, key, entry_mod_now()) 34 | end) 35 | end 36 | end 37 | -------------------------------------------------------------------------------- /lib/cachex/actions/reset.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.Reset do 2 | @moduledoc false 3 | # Command module to enable complete reset of a cache. 4 | # 5 | # This command allows the caller to reset a cache to an empty state, reset 6 | # the hooks associated with a cache, or both. 7 | # 8 | # This is not executed inside an action context as there is no need to 9 | # notify on reset (as otherwise a reset would always be the first message). 10 | alias Cachex.Hook 11 | alias Cachex.Actions.Clear 12 | alias Cachex.Services.Locksmith 13 | 14 | # add the specification 15 | import Cachex.Spec 16 | 17 | ############## 18 | # Public API # 19 | ############## 20 | 21 | @doc """ 22 | Resets the internal cache state. 23 | 24 | This will either reset a list of cache hooks, all attached cache hooks, the 25 | backing cache table, or all of the aforementioned. This is done by reusing 26 | the `clear()` command to empty the table as needed, and by using the reset 27 | listener exposed by the hook servers. 28 | 29 | Nothing in here will notify any hooks of resets occurring as it's basically 30 | quite redundant and it's evident that a reset has happened when you see that 31 | your hook has reinitialized. 32 | """ 33 | def execute(cache() = cache, options) do 34 | Locksmith.transaction(cache, [], fn -> 35 | only = 36 | options 37 | |> Keyword.get(:only, [:cache, :hooks]) 38 | |> List.wrap() 39 | 40 | reset_cache(cache, only, options) 41 | reset_hooks(cache, only, options) 42 | 43 | {:ok, true} 44 | end) 45 | end 46 | 47 | ############### 48 | # Private API # 49 | ############### 50 | 51 | # Handles reset of the backing cache table. 52 | # 53 | # A cache is only emptied if the `:cache` property appears in the list of 54 | # cache components to reset. If not provided, this will short circuit and 55 | # leave the cache table exactly as-is. 56 | defp reset_cache(cache, only, _options) do 57 | with true <- :cache in only do 58 | Clear.execute(cache, []) 59 | end 60 | end 61 | 62 | # Handles reset of cache hooks. 63 | # 64 | # This has the ability to clear either all hooks or a subset of hooks. We have a small 65 | # optimization here to detect when we want to reset all hooks to avoid filtering without 66 | # a real need to. We also allow filtering down to an explicit list of hook modules when one is provided.
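# As an illustrative example, a call such as `Cachex.reset(cache, only: [:hooks], hooks: [MyHook])`
# (where `MyHook` is a hook module attached to the cache) would reinitialize
# only that single hook and leave the cache table untouched.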
defp reset_hooks(cache(hooks: hooks), only, opts) do 68 | if :hooks in only do 69 | hooks = Hook.concat(hooks) 70 | 71 | hooks = 72 | case Keyword.get(opts, :hooks) do 73 | nil -> 74 | hooks 75 | 76 | val -> 77 | Enum.filter(hooks, &should_reset?(&1, val)) 78 | end 79 | 80 | Enum.each(hooks, &notify_reset/1) 81 | end 82 | end 83 | 84 | # Determines if a hook should be reset. 85 | # 86 | # This is just sugar around list membership whilst unpacking a hook record, 87 | # used in Enum iterations to avoid inlining functions for readability. 88 | defp should_reset?(hook(module: module), hooks), 89 | do: module in hooks 90 | 91 | # Notifies a hook of a reset. 92 | # 93 | # This simply sends the hook state back to the hook alongside a reset 94 | # message to signal that the hook needs to reinitialize. Hooks have a 95 | # listener built into the server implementation in order to handle this 96 | # automatically, so there's nothing more we need to do. 97 | defp notify_reset(hook(args: args, name: name)), 98 | do: send(name, {:cachex_reset, args}) 99 | end 100 | -------------------------------------------------------------------------------- /lib/cachex/actions/restore.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.Restore do 2 | @moduledoc false 3 | # Command module to allow deserialization of a cache from disk. 4 | # 5 | # Loading a cache from disk requires that it was previously saved using the 6 | # `Cachex.save/3` command (it does not support loading from DETS). 7 | alias Cachex.Options 8 | 9 | # we need our imports 10 | import Cachex.Error 11 | import Cachex.Spec 12 | 13 | ############## 14 | # Public API # 15 | ############## 16 | 17 | @doc """ 18 | Loads a previously saved cache from a file. 19 | 20 | If there are any issues reading the file, an error will be returned. Only files 21 | which were created via `Cachex.save/3` can be loaded, and the load will detect 22 | any disk compression automatically. 23 | 24 | Loading a backup will merge the file into the provided cache, overwriting any 25 | clashes. If you wish to empty the cache and then import your backup, you can 26 | use a transaction and clear the cache before loading the backup. 27 | """ 28 | def execute(cache() = cache, path, options) do 29 | option = 30 | case Options.get(options, :trust, &is_boolean/1, true) do 31 | true -> [] 32 | _any -> [:safe] 33 | end 34 | 35 | stream = 36 | Stream.resource( 37 | fn -> 38 | File.open!(path, [:read, :compressed]) 39 | end, 40 | &read_next_term(&1, option), 41 | &File.close/1 42 | ) 43 | 44 | Cachex.import(cache, stream, const(:local) ++ const(:notify_false)) 45 | rescue 46 | File.Error -> error(:unreachable_file) 47 | end 48 | 49 | ############### 50 | # Private API # 51 | ############### 52 | 53 | # Read the next term from a file handle based on the TLV flags. Each 54 | # term should be emitted back to the parent stream for processing. 55 | defp read_next_term(file, options) do 56 | case IO.binread(file, 3) do 57 | :eof -> 58 | {:halt, file} 59 | 60 | <<size::24>> -> 61 | term = 62 | file 63 | |> IO.binread(size) 64 | |> :erlang.binary_to_term(options) 65 | 66 | {[term], file} 67 | end 68 | end 69 | end 70 | -------------------------------------------------------------------------------- /lib/cachex/actions/save.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.Save do 2 | @moduledoc false 3 | # Command module to allow serialization of a cache to disk.
4 | # 5 | # Rather than using DETS to back up the internal ETS table, this module will 6 | # serialize the entire table using a `Cachex.stream/3`. 7 | # 8 | # Backups can be imported again using the `Cachex.restore/3` command, and 9 | # should be able to be transferred between processes and physical nodes. 10 | alias Cachex.Options 11 | alias Cachex.Query 12 | alias Cachex.Router.Local 13 | 14 | # import our macros 15 | import Cachex.Error 16 | import Cachex.Spec 17 | 18 | ############## 19 | # Public API # 20 | ############## 21 | 22 | @doc """ 23 | Dumps a cache to disk at the given location. 24 | 25 | This call will return an error if anything goes wrong with writing the file; 26 | it is up to the caller to ensure the file is writable using the default `File` 27 | interfaces. 28 | """ 29 | def execute(cache(router: router(module: router)) = cache, path, options) do 30 | file = File.open!(path, [:write, :compressed]) 31 | buffer = Options.get(options, :buffer, &is_positive_integer/1, 25) 32 | 33 | {:ok, stream} = 34 | options 35 | |> Keyword.get(:local) 36 | |> init_stream(router, cache, buffer) 37 | 38 | stream 39 | |> Stream.chunk_every(buffer) 40 | |> Stream.map(&handle_batch/1) 41 | |> Enum.each(&IO.binwrite(file, &1)) 42 | 43 | with :ok <- File.close(file) do 44 | {:ok, true} 45 | end 46 | rescue 47 | File.Error -> error(:unreachable_file) 48 | end 49 | 50 | ############### 51 | # Private API # 52 | ############### 53 | 54 | # Use a local stream to lazily walk through records on a local cache. 55 | defp init_stream(local, router, cache, buffer) 56 | when local or router == Local do 57 | options = 58 | :local 59 | |> const() 60 | |> Enum.concat(const(:notify_false)) 61 | |> Enum.concat(buffer: buffer) 62 | 63 | Cachex.stream(cache, Query.build(), options) 64 | end 65 | 66 | # Generate an export of all nodes in a distributed cluster via `Cachex.export/2` 67 | defp init_stream(_local, _router, cache, _buffer), 68 | do: Cachex.export(cache, const(:notify_false)) 69 | 70 | # Handle a batch of records and generate a binary of each. 71 | defp handle_batch(buffer) do 72 | Enum.reduce(buffer, <<>>, fn tuple, acc -> 73 | binary = :erlang.term_to_binary(tuple) 74 | size = byte_size(binary) 75 | acc <> <<size::24>> <> binary 76 | end) 77 | end 78 | end 79 | -------------------------------------------------------------------------------- /lib/cachex/actions/size.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.Size do 2 | @moduledoc false 3 | # Command module to allow cache size retrieval. 4 | # 5 | # This command uses the built in ETS utilities to retrieve the number of 6 | # entries currently in the backing cache table. 7 | # 8 | # A cache's size does not take expiration times into account by default, 9 | # as the true size can hold records which haven't been purged yet. This 10 | # can be controlled via options to this action. 11 | import Cachex.Spec 12 | 13 | # add some aliases 14 | alias Cachex.Options 15 | alias Cachex.Query 16 | 17 | ############## 18 | # Public API # 19 | ############## 20 | 21 | @doc """ 22 | Retrieves the size of the cache. 23 | 24 | You can use the `:expired` option to determine whether record expirations 25 | should be taken into account. The default value of this is `true` as it's 26 | a much cheaper operation.
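
As an illustrative example (the cache name and counts here are hypothetical):

    iex> Cachex.size(:my_cache)
    {:ok, 3}
    iex> Cachex.size(:my_cache, expired: false)
    {:ok, 2}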
27 | """ 28 | def execute(cache(name: name), options) do 29 | options 30 | |> Options.get(:expired, &is_boolean/1, true) 31 | |> retrieve_count(name) 32 | end 33 | 34 | ############### 35 | # Private API # 36 | ############### 37 | 38 | # Retrieve the full table count. 39 | defp retrieve_count(true, name), 40 | do: {:ok, :ets.info(name, :size)} 41 | 42 | # Retrieve only the unexpired table count. 43 | defp retrieve_count(false, name) do 44 | filter = Query.unexpired() 45 | clause = Query.build(where: filter, output: true) 46 | 47 | {:ok, :ets.select_count(name, clause)} 48 | end 49 | end 50 | -------------------------------------------------------------------------------- /lib/cachex/actions/stats.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.Stats do 2 | @moduledoc false 3 | # Command module to allow cache statistics retrieval. 4 | # 5 | # This module is only active if the statistics hook has been enabled in 6 | # the cache, either via the stats option at startup or by providing the 7 | # hook manually. 8 | alias Cachex.Stats 9 | import Cachex.Spec 10 | 11 | ############## 12 | # Public API # 13 | ############## 14 | 15 | @doc """ 16 | Retrieves statistics for a cache. 17 | 18 | This will return an error if statistics tracking has not been enabled, 19 | either via the options at cache startup, or manually by providing the hook. 20 | 21 | If the provided cache does not have statistics enabled, an error will be returned. 22 | """ 23 | @spec execute(Cachex.t(), Keyword.t()) :: 24 | {:ok, %{}} | {:error, :stats_disabled} 25 | def execute(cache() = cache, _options) do 26 | with {:ok, stats} <- Stats.for_cache(cache) do 27 | hits_count = Map.get(stats, :hits, 0) 28 | miss_count = Map.get(stats, :misses, 0) 29 | 30 | case hits_count + miss_count do 31 | 0 -> 32 | {:ok, stats} 33 | 34 | v -> 35 | v 36 | |> generate_rates(hits_count, miss_count) 37 | |> Map.merge(stats) 38 | |> wrap(:ok) 39 | end 40 | end 41 | end 42 | 43 | ############### 44 | # Private API # 45 | ############### 46 | 47 | # Generates request rates for statistics map. 48 | # 49 | # This will generate hit/miss rates as floats, even when they're integer 50 | # values to ensure consistency. This is separated out to easily handle the 51 | # potential to divide values by 0, avoiding a crash in the application. 52 | defp generate_rates(_reqs, 0, misses), 53 | do: %{ 54 | hits: 0, 55 | misses: misses, 56 | hit_rate: 0.0, 57 | miss_rate: 100.0 58 | } 59 | 60 | defp generate_rates(_reqs, hits, 0), 61 | do: %{ 62 | hits: hits, 63 | misses: 0, 64 | hit_rate: 100.0, 65 | miss_rate: 0.0 66 | } 67 | 68 | defp generate_rates(reqs, hits, misses), 69 | do: %{ 70 | hits: hits, 71 | misses: misses, 72 | hit_rate: hits / reqs * 100, 73 | miss_rate: misses / reqs * 100 74 | } 75 | end 76 | -------------------------------------------------------------------------------- /lib/cachex/actions/stream.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.Stream do 2 | @moduledoc false 3 | # Command module to allow streaming of cache entries. 4 | # 5 | # A cache `Stream` is a lazy consumer of a cache in that it allows iteration 6 | # of a cache on an as-needed basis. It acts as any other `Stream` from Elixir, 7 | # and is fully compatible with the functions found in the `Enum` module. 
8 | alias Cachex.Options 9 | 10 | # need our imports 11 | import Cachex.Error 12 | import Cachex.Spec 13 | 14 | # our test record for testing matches when a user provides a spec 15 | @test entry(key: "key", modified: now(), expiration: 1000, value: "value") 16 | 17 | ############## 18 | # Public API # 19 | ############## 20 | 21 | @doc """ 22 | Creates a new `Stream` for a given cache. 23 | 24 | Streams are a moving window of a cache, in that they will reflect the latest 25 | changes in a cache once they're consumed. For example, if you create a Stream 26 | and consume it 15 minutes later, you'll see all changes which occurred in those 27 | 15 minutes. 28 | 29 | We execute an `:ets.test_ms/2` call before doing anything to ensure the user 30 | has provided a valid return type. If they haven't, we return an error before 31 | creating a cursor or the `Stream` itself. 32 | """ 33 | def execute(cache(name: name), spec, options) do 34 | case :ets.test_ms(@test, spec) do 35 | {:ok, _result} -> 36 | options 37 | |> Options.get(:buffer, &is_positive_integer/1, 25) 38 | |> init_stream(name, spec) 39 | |> wrap(:ok) 40 | 41 | {:error, _result} -> 42 | error(:invalid_match) 43 | end 44 | end 45 | 46 | ############### 47 | # Private API # 48 | ############### 49 | 50 | # Initializes a `Stream` resource from an underlying ETS cursor. 51 | # 52 | # Each time more items are requested we pull another batch of entries until 53 | # the cursor is spent, in which case we halt the stream and kill the cursor. 54 | defp init_stream(buffer, name, spec) do 55 | Stream.resource( 56 | fn -> :"$start_of_table" end, 57 | fn 58 | # we're finished! 59 | :"$end_of_table" -> 60 | handle_continue(:"$end_of_table") 61 | 62 | # we're starting! 63 | :"$start_of_table" -> 64 | name 65 | |> :ets.select(spec, buffer) 66 | |> handle_continue() 67 | 68 | # we're continuing! 69 | continuation -> 70 | continuation 71 | |> :ets.select() 72 | |> handle_continue() 73 | end, 74 | & &1 75 | ) 76 | end 77 | 78 | # Handles a continuation from ETS to terminate a stream. 79 | defp handle_continue(:"$end_of_table"), 80 | do: {:halt, nil} 81 | 82 | defp handle_continue(continuation), 83 | do: continuation 84 | end 85 | -------------------------------------------------------------------------------- /lib/cachex/actions/take.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.Take do 2 | @moduledoc false 3 | # Command module to allow taking of cache entries. 4 | # 5 | # The notion of taking a key is the act of retrieving a key and deleting it 6 | # in a single atomic action. It's useful when used to guarantee that a given 7 | # process retrieves the final value of an entry. 8 | # 9 | # Taking a key is clearly destructive, so it operates in a lock context. 10 | alias Cachex.Services.Informant 11 | alias Cachex.Services.Janitor 12 | alias Cachex.Services.Locksmith 13 | 14 | # we need our imports 15 | import Cachex.Spec 16 | 17 | ############## 18 | # Public API # 19 | ############## 20 | 21 | @doc """ 22 | Takes an entry from a cache. 23 | 24 | This will always remove the entry from the cache, ensuring that the entry no 25 | longer exists immediately after this call finishes. 26 | 27 | Expirations are lazily checked here, ensuring that even if a value is taken 28 | from the cache it's only returned if it has not yet expired. 29 | 30 | Taking a value happens in a lock aware context to ensure that the key isn't 31 | being currently locked by another write sequence. 
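
As an illustrative example (the cache name here is hypothetical):

    iex> Cachex.put(:my_cache, "key", "value")
    iex> Cachex.take(:my_cache, "key")
    {:ok, "value"}
    iex> Cachex.get(:my_cache, "key")
    {:ok, nil}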
32 | """ 33 | def execute(cache(name: name) = cache, key, _options) do 34 | Locksmith.write(cache, [key], fn -> 35 | name 36 | |> :ets.take(key) 37 | |> handle_take(cache) 38 | end) 39 | end 40 | 41 | ############### 42 | # Private API # 43 | ############### 44 | 45 | # Handles the result of taking a key from the backing table. 46 | # 47 | # If an entry comes back from the call, we check for expiration before returning 48 | # back to the caller. If the entry has expired, we broadcast the expiry (as the 49 | # entry was already removed when we took if from the cache). 50 | defp handle_take([entry(value: value) = entry], cache) do 51 | case Janitor.expired?(cache, entry) do 52 | false -> 53 | {:ok, value} 54 | 55 | true -> 56 | Informant.broadcast( 57 | cache, 58 | const(:purge_override_call), 59 | const(:purge_override_result) 60 | ) 61 | 62 | {:ok, nil} 63 | end 64 | end 65 | 66 | defp handle_take([], _cache), 67 | do: {:ok, nil} 68 | end 69 | -------------------------------------------------------------------------------- /lib/cachex/actions/touch.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.Touch do 2 | @moduledoc false 3 | # Command module to update the touch time of cache entries. 4 | # 5 | # Touching an entry is the act of resetting the touch time to the current 6 | # time, without affecting the expiration set against the record. As such 7 | # it's incredibly useful for implementing least-recently used caching 8 | # systems without breaking expiration based contracts. 9 | alias Cachex.Actions 10 | alias Cachex.Services.Locksmith 11 | alias Actions.Ttl 12 | 13 | # we need our imports 14 | import Cachex.Spec 15 | 16 | ############## 17 | # Public API # 18 | ############## 19 | 20 | @doc """ 21 | Updates the touch time of an entry inside a cache. 22 | 23 | Touching an entry will update the write time of the entry, but without modifying any 24 | expirations set on the entry. This is done by reading back the current expiration, 25 | and then updating the record appropriately to modify the touch time and setting the 26 | expiration to the offset of the two. 27 | """ 28 | def execute(cache() = cache, key, _options) do 29 | Locksmith.transaction(cache, [key], fn -> 30 | cache 31 | |> Ttl.execute(key, []) 32 | |> handle_expiration(cache, key) 33 | end) 34 | end 35 | 36 | ############### 37 | # Private API # 38 | ############### 39 | 40 | # Handles the result of the TTL call. 41 | # 42 | # If the expiration if unset, we update just the touch time insude the entry 43 | # as we don't have to account for the offset. If an expiration is set, we 44 | # also update the expiration on the record to be the returned offset. 45 | defp handle_expiration({:ok, value}, cache, key) do 46 | Actions.update( 47 | cache, 48 | key, 49 | case value do 50 | nil -> entry_mod_now() 51 | exp -> entry_mod_now(expiration: exp) 52 | end 53 | ) 54 | end 55 | end 56 | -------------------------------------------------------------------------------- /lib/cachex/actions/transaction.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.Transaction do 2 | @moduledoc false 3 | # Command module to enable transactional execution against a cache. 4 | # 5 | # This command handles the (very) small implementation of transactions. The 6 | # reason for it being so small is that we simply pass values through to the 7 | # Locksmith service to do the heavy lifting. 
All that's provided here is a 8 | # little bit of massaging. 9 | alias Cachex.Services.Locksmith 10 | 11 | # import records 12 | import Cachex.Spec 13 | 14 | ############## 15 | # Public API # 16 | ############## 17 | 18 | @doc """ 19 | Executes a transaction against the cache. 20 | 21 | The Locksmith does most of the work here; we just provide the cache state 22 | to the user-defined function. The results are wrapped in an `:ok` tagged 23 | Tuple just to protect against internally unwrapped values from bang functions. 24 | """ 25 | def execute(cache() = cache, keys, operation, _options) do 26 | Locksmith.transaction(cache, keys, fn -> 27 | case :erlang.fun_info(operation)[:arity] do 28 | 0 -> {:ok, operation.()} 29 | 1 -> {:ok, operation.(cache)} 30 | end 31 | end) 32 | end 33 | end 34 | -------------------------------------------------------------------------------- /lib/cachex/actions/ttl.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.Ttl do 2 | @moduledoc false 3 | # Command module to retrieve the TTL for a cache entry. 4 | # 5 | # TTL retrieval for cache records is determined by calculating the offset 6 | # between the touch time and the expiration set against an entry instance. 7 | # 8 | # Lazy expiration is also taken into account in this module to avoid giving 9 | # negative TTL values back to the caller. 10 | alias Cachex.Actions 11 | 12 | # we need our imports 13 | import Cachex.Spec 14 | 15 | ############## 16 | # Public API # 17 | ############## 18 | 19 | @doc """ 20 | Retrieves the remaining TTL for a cache item. 21 | 22 | If a cache entry has no expiration set a nil value will be returned, otherwise 23 | the offset is determined from the record fields and returned to the caller. 24 | """ 25 | def execute(cache() = cache, key, _options) do 26 | case Actions.read(cache, key) do 27 | entry(modified: modified, expiration: exp) when not is_nil(exp) -> 28 | {:ok, modified + exp - now()} 29 | 30 | _anything_else -> 31 | {:ok, nil} 32 | end 33 | end 34 | end 35 | -------------------------------------------------------------------------------- /lib/cachex/actions/update.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.Update do 2 | @moduledoc false 3 | # Command module to update existing cache entries. 4 | # 5 | # The only semantic difference between an `update()` call and a `set()` 6 | # call is that the expiration time remains unchanged during an update. If 7 | # you wish to have the expiration time modified, you can simply set your 8 | # new value over the top of the existing one. 9 | alias Cachex.Actions 10 | alias Cachex.Services.Locksmith 11 | 12 | # we need our imports 13 | import Cachex.Spec 14 | 15 | ############## 16 | # Public API # 17 | ############## 18 | 19 | @doc """ 20 | Updates an entry inside the cache. 21 | 22 | Updates do not affect the touch time of a record, which is what makes an update 23 | call useful. If you need to update the touch time you can either call `touch()` 24 | immediately after an update, or you can simply set a value over the top instead 25 | of doing an update.
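
As an illustrative example (the cache name here is hypothetical):

    iex> Cachex.put(:my_cache, "key", 1)
    iex> Cachex.update(:my_cache, "key", 2)
    {:ok, true}
    iex> Cachex.get(:my_cache, "key")
    {:ok, 2}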
26 | """ 27 | def execute(cache() = cache, key, value, _options) do 28 | Locksmith.write(cache, [key], fn -> 29 | Actions.update(cache, key, entry_mod(value: value)) 30 | end) 31 | end 32 | end 33 | -------------------------------------------------------------------------------- /lib/cachex/actions/warm.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.Warm do 2 | @moduledoc false 3 | # Command module to trigger manual cache warming. 4 | # 5 | # The only reason to call this command is the case in which you already 6 | # know the backing state of your cache has been updated and you need to 7 | # immediately refresh your warmed entries. 8 | import Cachex.Spec 9 | 10 | ############## 11 | # Public API # 12 | ############## 13 | 14 | @doc """ 15 | Triggers a manual warming in a cache. 16 | 17 | The warmers are fetched back out of the supervision tree, by calling out 18 | to our services module. This allows us to avoid having to track any special 19 | state in order to support manual warming. 20 | 21 | You can provide an `:only` option to restrict the warming to a specific set 22 | of warmer modules or names. The list can contain either the name of the 23 | module, or the name of the registered server. The list of warmer names which 24 | had a warming triggered will be returned in the result of this call. 25 | """ 26 | def execute(cache(warmers: warmers), options) do 27 | only = Keyword.get(options, :only, nil) 28 | wait = Keyword.get(options, :wait, false) 29 | 30 | warmed = 31 | warmers 32 | |> Enum.filter(&filter_mod(&1, only)) 33 | |> Enum.map(&spawn_call(&1, wait)) 34 | |> Task.yield_many(:infinity) 35 | |> Enum.map(&extract_name/1) 36 | 37 | {:ok, warmed} 38 | end 39 | 40 | ############### 41 | # Private API # 42 | ############### 43 | 44 | # Filters warmers based on the :only flag for module/name. 45 | defp filter_mod(warmer(module: mod, name: name), only), 46 | do: only == nil or mod in only or name in only 47 | 48 | # Spawns a task to invoke the call to the remote warmer. 49 | # 50 | # We have to manually set $callers because we support Elixir v1.7 and this 51 | # wasn't automated via the Task module at that point in time. 52 | defp spawn_call(warmer(name: name) = warmer, wait) do 53 | callers = [self() | callers()] 54 | 55 | Task.async(fn -> 56 | Process.put(:"$callers", callers) 57 | call_warmer(warmer, wait) 58 | name 59 | end) 60 | end 61 | 62 | # Invokes a warmer with blocking enabled. 63 | defp call_warmer(warmer(name: name), true), 64 | do: GenServer.call(name, {:cachex_warmer, callers()}, :infinity) 65 | 66 | # Invokes a warmer with blocking disabled. 67 | defp call_warmer(warmer(name: name), _), 68 | do: send(name, {:cachex_warmer, callers()}) 69 | 70 | # Converts a task result to a name reference. 71 | defp extract_name({_, {:ok, name}}), 72 | do: name 73 | end 74 | -------------------------------------------------------------------------------- /lib/cachex/application.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Application do 2 | @moduledoc false 3 | # Application callback to start any global services. 4 | # 5 | # This will start all needed services for Cachex using the `Cachex.Services` 6 | # module, rather than hardcoding any logic into this binding module. 7 | use Application 8 | 9 | @doc """ 10 | Starts the global services tree for Cachex. 
11 | """ 12 | def start(_type, _args) do 13 | # Define child supervisors to be supervised 14 | services = Cachex.Services.app_spec() 15 | options = [strategy: :one_for_one, name: __MODULE__] 16 | 17 | # See http://elixir-lang.org/docs/stable/elixir/Supervisor.html 18 | # for other strategies and supported options 19 | Supervisor.start_link(services, options) 20 | end 21 | end 22 | -------------------------------------------------------------------------------- /lib/cachex/error.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Error do 2 | @moduledoc """ 3 | Module containing all error definitions used in the codebase. 4 | 5 | This module allows users to catch Cachex errors in a separate block 6 | to other errors/exceptions rather than using stdlib errors: 7 | 8 | iex> try do 9 | ...> Cachex.put!(:cache, "key", "value") 10 | ...> rescue 11 | ...> e in Cachex.Error -> e 12 | ...> end 13 | 14 | All error messages (both shorthand and long form) can be found in this module, 15 | including the ability to convert from the short form to the long form using the 16 | `long_form/1` function. 17 | """ 18 | defexception message: "Error during cache action", stack: nil 19 | 20 | # all shorthands 21 | @known_errors [ 22 | :cross_slot, 23 | :invalid_command, 24 | :invalid_expiration, 25 | :invalid_hook, 26 | :invalid_limit, 27 | :invalid_match, 28 | :invalid_name, 29 | :invalid_option, 30 | :invalid_pairs, 31 | :invalid_router, 32 | :invalid_warmer, 33 | :janitor_disabled, 34 | :no_cache, 35 | :non_distributed, 36 | :non_numeric_value, 37 | :not_started, 38 | :stats_disabled, 39 | :unreachable_file 40 | ] 41 | 42 | ########## 43 | # Macros # 44 | ########## 45 | 46 | @doc """ 47 | Generates a tagged `:error` Tuple at compile time. 48 | 49 | The provided error key must be contained in the list of known 50 | identifiers returned be `known/0`, otherwise this call will fail. 51 | """ 52 | @spec error(shortname :: atom) :: {:error, shortname :: atom} 53 | defmacro error(key) when key in @known_errors, 54 | do: quote(do: {:error, unquote(key)}) 55 | 56 | ############## 57 | # Public API # 58 | ############## 59 | 60 | @doc """ 61 | Returns the list of known error keys. 62 | """ 63 | @spec known :: [shortname :: atom] 64 | def known, 65 | do: @known_errors 66 | 67 | @doc """ 68 | Converts an error identifier to it's longer form. 69 | 70 | Error identifiers should be atoms and should be contained in the 71 | list of errors returned by `known/0`. The return type from this 72 | function will always be a binary. 
73 | """ 74 | @spec long_form(shortname :: atom) :: description :: binary 75 | def long_form(:cross_slot), 76 | do: "Target keys do not live on the same node" 77 | 78 | def long_form(:invalid_command), 79 | do: "Invalid command definition provided" 80 | 81 | def long_form(:invalid_expiration), 82 | do: "Invalid expiration definition provided" 83 | 84 | def long_form(:invalid_hook), 85 | do: "Invalid hook definition provided" 86 | 87 | def long_form(:invalid_limit), 88 | do: "Invalid limit fields provided" 89 | 90 | def long_form(:invalid_match), 91 | do: "Invalid match specification provided" 92 | 93 | def long_form(:invalid_name), 94 | do: "Invalid cache name provided" 95 | 96 | def long_form(:invalid_option), 97 | do: "Invalid option syntax provided" 98 | 99 | def long_form(:invalid_pairs), 100 | do: "Invalid insertion pairs provided" 101 | 102 | def long_form(:invalid_router), 103 | do: "Invalid router definition provided" 104 | 105 | def long_form(:invalid_warmer), 106 | do: "Invalid warmer definition provided" 107 | 108 | def long_form(:janitor_disabled), 109 | do: "Specified janitor process running" 110 | 111 | def long_form(:no_cache), 112 | do: "Specified cache not running" 113 | 114 | def long_form(:non_distributed), 115 | do: "Attempted to use a local function across nodes" 116 | 117 | def long_form(:non_numeric_value), 118 | do: "Attempted arithmetic operations on a non-numeric value" 119 | 120 | def long_form(:not_started), 121 | do: "Cache table not active, have you started the Cachex application?" 122 | 123 | def long_form(:stats_disabled), 124 | do: "Stats are not enabled for the specified cache" 125 | 126 | def long_form(:unreachable_file), 127 | do: "Unable to access provided file path" 128 | 129 | def long_form(error), 130 | do: error 131 | end 132 | -------------------------------------------------------------------------------- /lib/cachex/limit/accessed.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Limit.Accessed do 2 | @moduledoc """ 3 | Access based touch tracking for LRW pruning. 4 | 5 | This module can be used to adapt `Cachex.prune/3` for LRU purposes instead of 6 | the typical LRW. This hook will update the modification time of a cache entry 7 | upon access by a read operation. This is a very basic way to provide LRU policies, 8 | but it should suffice for most cases. 9 | 10 | At the time of writing modification times are *not* updated when executing 11 | commands on multiple keys, such as `Cachex.keys/2` and `Cachex.stream/3`, for 12 | performance reasons. Again, this may change in future if necessary. 13 | """ 14 | use Cachex.Hook 15 | 16 | # touched actions 17 | @actions [ 18 | :decr, 19 | :exists?, 20 | :fetch, 21 | :get, 22 | :incr, 23 | :invoke, 24 | :ttl, 25 | :update 26 | ] 27 | 28 | ################## 29 | # Hook Behaviour # 30 | ################## 31 | 32 | @doc """ 33 | Returns the actions this hook should listen on. 34 | """ 35 | @spec actions :: [atom] 36 | def actions, 37 | do: @actions 38 | 39 | @doc """ 40 | Returns the provisions this hook requires. 41 | """ 42 | @spec provisions :: [atom] 43 | def provisions, 44 | do: [:cache] 45 | 46 | #################### 47 | # Server Callbacks # 48 | #################### 49 | 50 | @doc false 51 | # Handles notification of a cache action. 52 | # 53 | # This will update the modification time of a key if tracked in a successful cache 54 | # action. In combination with LRW caching, this provides a simple LRU policy. 
55 | def handle_notify({_action, [key | _]}, _result, cache) do 56 | {:ok, _} = Cachex.touch(cache, key) 57 | {:ok, cache} 58 | end 59 | 60 | @doc false 61 | # Receives a provisioned cache instance. 62 | # 63 | # The provided cache is then stored in the state and used for cache calls going 64 | # forwards, in order to skip the lookups inside the cache overseer for performance. 65 | def handle_provision({:cache, cache}, _cache), 66 | do: {:ok, cache} 67 | end 68 | -------------------------------------------------------------------------------- /lib/cachex/limit/evented.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Limit.Evented do 2 | @moduledoc """ 3 | Evented least recently written eviction policy for Cachex. 4 | 5 | This module implements an evented LRW eviction policy for Cachex, using a hook 6 | to listen for new key additions to a cache and enforcing bounds in a reactive 7 | way. This policy enforces cache bounds and limits far more accurately than other 8 | scheduled implementations, but comes at a higher memory cost (due to the message 9 | passing between hooks). 10 | 11 | ## Initialization 12 | 13 | hook(module: Cachex.Limit.Evented, args: { 14 | 500, # setting cache max size 15 | [] # options for `Cachex.prune/3` 16 | }) 17 | 18 | """ 19 | use Cachex.Hook 20 | 21 | # result statuses which indicate no write occurred 22 | @ignored [:error, :ignore] 23 | 24 | ###################### 25 | # Hook Configuration # 26 | ###################### 27 | 28 | @doc """ 29 | Returns the actions this policy should listen on. 30 | """ 31 | @spec actions :: [atom] 32 | def actions, 33 | do: [ 34 | :put, 35 | :decr, 36 | :incr, 37 | :fetch, 38 | :update, 39 | :put_many, 40 | :get_and_update 41 | ] 42 | 43 | @doc """ 44 | Returns the provisions this policy requires. 45 | """ 46 | @spec provisions :: [atom] 47 | def provisions, 48 | do: [:cache] 49 | 50 | #################### 51 | # Server Callbacks # 52 | #################### 53 | 54 | @doc false 55 | # Initializes this policy using the limit being enforced. 56 | def init(args), 57 | do: {:ok, {nil, args}} 58 | 59 | @doc false 60 | # Handles notification of a cache action. 61 | # 62 | # This will check if the action can modify the size of the cache, and if so will 63 | # execute the boundary enforcement to trim the size as needed. 64 | # 65 | # Note that this will ignore error results and only operates on actions which are 66 | # able to cause a net gain in cache size (so removals are also ignored). 67 | def handle_notify(_message, {status, _value}, {cache, {size, options}} = opts) 68 | when status not in @ignored do 69 | {:ok, true} = Cachex.prune(cache, size, options) 70 | {:ok, opts} 71 | end 72 | 73 | def handle_notify(_message, _result, opts), 74 | do: {:ok, opts} 75 | 76 | @doc false 77 | # Receives a provisioned cache instance. 78 | # 79 | # The provided cache is then stored in the state and used for cache calls going 80 | # forwards, in order to skip the lookups inside the cache overseer for performance. 81 | def handle_provision({:cache, cache}, {_cache, options}), 82 | do: {:ok, {cache, options}} 83 | end 84 | -------------------------------------------------------------------------------- /lib/cachex/limit/scheduled.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Limit.Scheduled do 2 | @moduledoc """ 3 | Scheduled least recently written eviction policy for Cachex.
4 | 5 | This module implements a scheduled LRW eviction policy for Cachex, using a basic 6 | timer to trigger bound enforcement in a repeatable way. This has the same bound 7 | accuracy as `Cachex.Limit.Evented`, but has potential for some delay. The 8 | main advantage of this implementation is a far lower memory cost due to not 9 | using hook messages. 10 | 11 | ## Initialization 12 | 13 | hook(module: Cachex.Limit.Scheduled, args: { 14 | 500, # setting cache max size 15 | [], # options for `Cachex.prune/3` 16 | [] # options for `Cachex.Limit.Scheduled` 17 | }) 18 | 19 | ## Options 20 | 21 | * `:frequency` 22 | 23 | The polling frequency for this hook to use when scheduling cache pruning. 24 | This should be a non-negative number of milliseconds. Defaults to `3000`, 25 | which is once every three seconds. 26 | 27 | """ 28 | use Cachex.Hook 29 | 30 | ###################### 31 | # Hook Configuration # 32 | ###################### 33 | 34 | @doc """ 35 | Returns the provisions this policy requires. 36 | """ 37 | @spec provisions :: [atom] 38 | def provisions, 39 | do: [:cache] 40 | 41 | #################### 42 | # Server Callbacks # 43 | #################### 44 | 45 | @doc false 46 | # Initializes this policy using the limit being enforced. 47 | def init({_size, _options, scheduling} = args), 48 | do: {schedule(scheduling), {nil, args}} 49 | 50 | @doc false 51 | # Handles notification of a cache action. 52 | # 53 | # This will execute a bounds check on a cache and schedule a new check. 54 | def handle_info(:policy_check, {cache, {size, options, scheduling}} = args) do 55 | {:ok, true} = Cachex.prune(cache, size, options) 56 | schedule(scheduling) && {:noreply, args} 57 | end 58 | 59 | @doc false 60 | # Receives a provisioned cache instance. 61 | # 62 | # The provided cache is then stored in the state and used for cache calls going 63 | # forwards, in order to skip the lookups inside the cache overseer for performance. 64 | def handle_provision({:cache, cache}, {_cache, args}), 65 | do: {:ok, {cache, args}} 66 | 67 | ############### 68 | # Private API # 69 | ############### 70 | 71 | # Schedules a check to occur after the designated interval. 72 | defp schedule(options) do 73 | options 74 | |> Keyword.get(:frequency, :timer.seconds(3)) 75 | |> :erlang.send_after(self(), :policy_check) 76 | 77 | :ok 78 | end 79 | end 80 | -------------------------------------------------------------------------------- /lib/cachex/provision.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Provision do 2 | @moduledoc """ 3 | Module controlling provisioning behaviour definitions. 4 | 5 | This module defines the provisioning implementation for Cachex, allowing 6 | components such as hooks and warmers to tap into state changes in the cache 7 | table. By implementing `handle_provision/2` these components can be provided 8 | with new versions of state as they're created. 9 | """ 10 | 11 | ############# 12 | # Behaviour # 13 | ############# 14 | 15 | @doc """ 16 | Returns an enumerable of provisions this implementation requires. 17 | 18 | The current provisions available are: 19 | 20 | * `cache` - a cache instance used to make cache calls with lower overhead. 21 | 22 | This should always return an enumerable of atoms; in the case of no required 23 | provisions an empty enumerable should be returned. 24 | """ 25 | @callback provisions :: [type :: atom] 26 | 27 | @doc """ 28 | Handles a provisioning call.
29 | 30 | The provided argument will be a Tuple dictating the type of value being 31 | provisioned along with the value itself. This can be used to listen on 32 | states required for hook executions (such as cache records). 33 | """ 34 | @callback handle_provision( 35 | provision :: {type :: atom, value :: any}, 36 | state :: any 37 | ) :: {:ok, state :: any} 38 | 39 | ################## 40 | # Implementation # 41 | ################## 42 | 43 | @doc false 44 | defmacro __using__(_) do 45 | quote location: :keep, generated: true do 46 | # use the provision behaviour 47 | @behaviour Cachex.Provision 48 | 49 | ################# 50 | # Configuration # 51 | ################# 52 | 53 | @doc false 54 | def provisions, 55 | do: [] 56 | 57 | # config overrides 58 | defoverridable provisions: 0 59 | 60 | ######################### 61 | # Notification Handlers # 62 | ######################### 63 | 64 | @doc false 65 | def handle_provision(provision, state), 66 | do: {:ok, state} 67 | 68 | # listener override 69 | defoverridable handle_provision: 2 70 | 71 | ########################## 72 | # Private Implementation # 73 | ########################## 74 | 75 | @doc false 76 | def handle_info({:cachex_provision, provision}, state) do 77 | {:ok, new_state} = handle_provision(provision, state) 78 | {:noreply, new_state} 79 | end 80 | end 81 | end 82 | end 83 | -------------------------------------------------------------------------------- /lib/cachex/router/jump.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Router.Jump do 2 | @moduledoc """ 3 | Routing implementation based on Jump Consistent Hash. 4 | 5 | This implementation backed Cachex's distribution in the v3.x lineage, and is 6 | suitable for clusters of a static size. Each key is hashed and then slotted 7 | against a node in the cluster. Please note that the hash algorithm should 8 | not be relied upon and is not considered part of the public API. 9 | 10 | The initialization of this router accepts a `:nodes` option which enables 11 | the user to define the nodes to route amongst. If this is not provided the 12 | router will default to detecting a cluster via `Node.self/0` and `Node.list/1`. 13 | 14 | For more information on the algorithm backing this router, please see the 15 | appropriate [publication](https://arxiv.org/pdf/1406.2294). 16 | """ 17 | use Cachex.Router 18 | alias Cachex.Router 19 | 20 | @doc """ 21 | Initialize a jump hash routing state for a cache. 22 | 23 | ## Options 24 | 25 | * `:nodes` 26 | 27 | The `:nodes` option allows a user to provide a list of nodes to treat 28 | as a cluster. If this is not provided, the cluster will be inferred 29 | by using `Node.self/0` and `Node.list/1`. 30 | 31 | """ 32 | @spec init(cache :: Cachex.t(), options :: Keyword.t()) :: [atom] 33 | def init(_cache, options) do 34 | options 35 | |> Keyword.get_lazy(:nodes, &Router.connected/0) 36 | |> Enum.uniq() 37 | |> Enum.sort() 38 | end 39 | 40 | @doc """ 41 | Retrieve the list of nodes from a jump hash routing state. 42 | """ 43 | @spec nodes(nodes :: [atom]) :: [atom] 44 | def nodes(nodes), 45 | do: nodes 46 | 47 | @doc """ 48 | Route a key to a node in a jump hash routing state.
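
As an illustrative example (the node names are hypothetical, and the node
chosen is determined by the hash of the key):

    iex> Cachex.Router.Jump.route([:a@host, :b@host], "key")
    :b@host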
49 | """ 50 | @spec route(nodes :: [atom], key :: any) :: atom 51 | def route(nodes, key) do 52 | slot = 53 | key 54 | |> :erlang.phash2() 55 | |> Jumper.slot(length(nodes)) 56 | 57 | Enum.at(nodes, slot) 58 | end 59 | end 60 | -------------------------------------------------------------------------------- /lib/cachex/router/local.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Router.Local do 2 | @moduledoc """ 3 | Routing implementation for the local node. 4 | 5 | This module acts as the base implementation for routing when *not* being 6 | used in a distributed cache. All actions are routed to the current node. 7 | """ 8 | use Cachex.Router 9 | 10 | @doc """ 11 | Retrieve the list of nodes from a local routing state. 12 | """ 13 | @spec nodes(state :: nil) :: [atom] 14 | def nodes(_state), 15 | do: [node()] 16 | 17 | @doc """ 18 | Route a key to a node in a local routing state. 19 | """ 20 | @spec route(state :: nil, key :: any) :: atom 21 | def route(_state, _key), 22 | do: node() 23 | end 24 | -------------------------------------------------------------------------------- /lib/cachex/router/mod.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Router.Mod do 2 | @moduledoc """ 3 | Routing implementation based on basic hashing. 4 | 5 | This router provides the simplest (and quickest!) implementation for 6 | clusters of a static size. Provided keys are hashed and routed to a node 7 | via the modulo operation. Please note that the hash algorithm should 8 | not be relied upon and is not considered part of the public API. 9 | 10 | The initialization of this router accepts a `:nodes` option which enables 11 | the user to define the nodes to route amongst. If this is not provided the 12 | router will default to detecting a cluster via `Node.self/0` and `Node.list/1`. 13 | """ 14 | use Cachex.Router 15 | alias Cachex.Router 16 | 17 | @doc """ 18 | Initialize a modulo routing state for a cache. 19 | 20 | ## Options 21 | 22 | * `:nodes` 23 | 24 | The `:nodes` option allows a user to provide a list of nodes to treat 25 | as a cluster. If this is not provided, the cluster will be inferred 26 | by using `Node.self/0` and `Node.list/1`. 27 | 28 | """ 29 | @spec init(cache :: Cachex.t(), options :: Keyword.t()) :: [atom] 30 | def init(_cache, options) do 31 | options 32 | |> Keyword.get_lazy(:nodes, &Router.connected/0) 33 | |> Enum.uniq() 34 | |> Enum.sort() 35 | end 36 | 37 | @doc """ 38 | Retrieve the list of nodes from a modulo routing state. 39 | """ 40 | @spec nodes(nodes :: [atom]) :: [atom] 41 | def nodes(nodes), 42 | do: Enum.sort(nodes) 43 | 44 | @doc """ 45 | Route a key to a node in a modulo routing state. 46 | """ 47 | @spec route(nodes :: [atom], key :: any) :: atom 48 | def route(nodes, key) do 49 | slot = 50 | key 51 | |> :erlang.phash2() 52 | |> rem(length(nodes)) 53 | 54 | Enum.at(nodes, slot) 55 | end 56 | end 57 | -------------------------------------------------------------------------------- /lib/cachex/router/ring/monitor.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Router.Ring.Monitor do 2 | @moduledoc false 3 | # Small monitor implementation for ring nodes. 4 | # 5 | # This module will hook into `:net_kernel` to provide dynamic node 6 | # allocation within a Cachex ring router. 
There is very minimal 7 | # support for configuration in here; if you need to see options 8 | # please visit the parent `Cachex.Router.Ring`. 9 | # 10 | # All API in here should be considered private and not relied upon. 11 | use GenServer 12 | alias ExHashRing.Ring 13 | 14 | #################### 15 | # Server Callbacks # 16 | #################### 17 | 18 | @doc false 19 | # Initialize the monitor from the options. 20 | def init(options) do 21 | case Keyword.get(options, :monitor) do 22 | false -> 23 | :ignore 24 | 25 | true -> 26 | name = Keyword.get(options, :name) 27 | type = Keyword.get(options, :monitor_type) 28 | 29 | includes = Keyword.get(options, :monitor_includes, []) 30 | excludes = Keyword.get(options, :monitor_excludes, []) 31 | 32 | :ok = :net_kernel.monitor_nodes(true, node_type: type) 33 | 34 | {:ok, {name, {includes, excludes}}} 35 | end 36 | end 37 | 38 | @doc false 39 | # Adds newly detected nodes to the internal ring, if they match any 40 | # of the provided monitoring patterns inside the options listing. 41 | def handle_info({:nodeup, node, _info}, {ring, {includes, excludes}} = state) do 42 | if Cachex.Router.Ring.included?(node, includes, excludes) do 43 | Ring.add_node(ring, node) 44 | end 45 | 46 | {:noreply, state} 47 | end 48 | 49 | @doc false 50 | # Removes recently dropped nodes from the internal ring, regardless of 51 | # whether they currently exist in the ring, for consistency. 52 | def handle_info({:nodedown, node, _info}, {ring, _patterns} = state) do 53 | Ring.remove_node(ring, node) 54 | {:noreply, state} 55 | end 56 | end 57 | -------------------------------------------------------------------------------- /lib/cachex/services/incubator.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Services.Incubator do 2 | @moduledoc false 3 | # Parent module for all warmer definitions for a cache. 4 | # 5 | # The Incubator will control the supervision tree for all warmers that 6 | # are associated with a cache. This is very minimal supervision, with 7 | # no linking back except via the `Supervisor` access functions. 8 | import Cachex.Spec 9 | 10 | ############## 11 | # Public API # 12 | ############## 13 | 14 | @doc """ 15 | Starts a new incubation service for a cache. 16 | 17 | This will start a Supervisor to hold all warmer processes as defined in 18 | the provided cache record. If no warmers are attached in the cache record, 19 | this will skip creation to avoid unnecessary processes running. 20 | """ 21 | @spec start_link(Cachex.t()) :: Supervisor.on_start() 22 | def start_link(cache(warmers: [])), 23 | do: :ignore 24 | 25 | def start_link(cache(warmers: warmers) = cache) do 26 | warmers 27 | |> Enum.map(&spec(&1, cache)) 28 | |> Supervisor.start_link(strategy: :one_for_one) 29 | end 30 | 31 | ############### 32 | # Private API # 33 | ############### 34 | 35 | # Generates a Supervisor specification for a warmer.
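#
# The resulting map takes the shape below, where the warmer module name
# is hypothetical:
#
#     %{id: MyWarmer, start: {GenServer, :start_link, [MyWarmer, {cache, warmer}]}}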
36 | defp spec(warmer(module: module, name: name) = warmer, cache) do 37 | options = 38 | case name do 39 | nil -> [module, {cache, warmer}] 40 | val -> [module, {cache, warmer}, [name: val]] 41 | end 42 | 43 | %{id: module, start: {GenServer, :start_link, options}} 44 | end 45 | end 46 | -------------------------------------------------------------------------------- /lib/cachex/services/informant.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Services.Informant do 2 | @moduledoc false 3 | # Parent module for all child hook definitions for a cache. 4 | # 5 | # This module will control the supervision tree for all hooks that are 6 | # associated with a cache. The links inside will create a tree to hold 7 | # all hooks as children, as well as provide utility functions for new 8 | # notifications being sent to child hooks for a cache. 9 | alias Cachex.Hook 10 | import Cachex.Spec 11 | 12 | ############## 13 | # Public API # 14 | ############## 15 | 16 | @doc """ 17 | Starts a new Informant service for a cache. 18 | 19 | This will start a Supervisor to hold all hook processes as defined in 20 | the provided cache record. If no hooks are attached in the cache record, 21 | this will skip creating an unnecessary Supervisor process. 22 | """ 23 | @spec start_link(Cachex.t()) :: Supervisor.on_start() 24 | def start_link(cache(hooks: hooks)) do 25 | case Hook.concat(hooks) do 26 | [] -> 27 | :ignore 28 | 29 | li -> 30 | li 31 | |> Enum.map(&spec/1) 32 | |> Supervisor.start_link(strategy: :one_for_one) 33 | end 34 | end 35 | 36 | @doc """ 37 | Broadcasts an action to all pre-hooks in a cache. 38 | 39 | This will send a nil result, as the result does not yet exist. 40 | """ 41 | @spec broadcast(Cachex.t(), tuple) :: :ok 42 | def broadcast(cache(hooks: hooks(pre: pre)), action), 43 | do: broadcast_action(pre, action, nil) 44 | 45 | @doc """ 46 | Broadcasts an action and result to all post-hooks in a cache. 47 | """ 48 | @spec broadcast(Cachex.t(), tuple, any) :: :ok 49 | def broadcast(cache(hooks: hooks(post: post)), action, result), 50 | do: broadcast_action(post, action, result) 51 | 52 | @doc """ 53 | Notifies a set of hooks of the passed in data. 54 | 55 | This is the underlying implementation for `broadcast/2` and `broadcast/3`, 56 | but it's general purpose enough that it's exposed as part of the public API. 57 | """ 58 | @spec notify([Cachex.Spec.hook()], tuple, any) :: :ok 59 | def notify([], _action, _result), 60 | do: :ok 61 | 62 | def notify(hooks, action, result) when is_list(hooks) do 63 | # define the base payload, as all hooks get the same message 64 | message = {:cachex_notify, {action, result, [self() | callers()]}} 65 | 66 | # iterate hooks 67 | Enum.each(hooks, fn 68 | # not running, so skip 69 | hook(name: nil) -> 70 | nil 71 | 72 | # handling of running hooks 73 | hook(name: name, module: module) -> 74 | # skip notifying service hooks 75 | if module.type() != :service do 76 | case module.async?() do 77 | true -> send(name, message) 78 | false -> GenServer.call(name, message, :infinity) 79 | end 80 | end 81 | end) 82 | end 83 | 84 | ############### 85 | # Private API # 86 | ############### 87 | 88 | # Broadcasts an action to hooks listening for it. 89 | # 90 | # This will enforce the actions list inside a hook definition to ensure 91 | # that hooks only receive actions that they currently care about. 
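#
# For example, a hook whose `actions/0` returns `[:put]` will receive
# broadcasts for `:put` actions but never for `:get` actions.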
92 | defp broadcast_action(hooks, {action, _args} = msg, result) do 93 | actionable = 94 | Enum.filter(hooks, fn hook(module: module) -> 95 | case module.actions() do 96 | :all -> true 97 | enum -> action in enum 98 | end 99 | end) 100 | 101 | notify(actionable, msg, result) 102 | end 103 | 104 | # Generates a Supervisor specification for a hook. 105 | defp spec(hook(module: module, name: name, args: args)) do 106 | options = 107 | case name do 108 | nil -> [module, args] 109 | val -> [module, args, [name: val]] 110 | end 111 | 112 | %{id: module, start: {GenServer, :start_link, options}} 113 | end 114 | end 115 | -------------------------------------------------------------------------------- /lib/cachex/services/locksmith/queue.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Services.Locksmith.Queue do 2 | @moduledoc false 3 | # Transaction queue backing a cache instance. 4 | # 5 | # This has to live outside of the `Cachex.Services.Locksmith` global process 6 | # as otherwise caches would then compete with each other for resources which 7 | # is far from optimal. 8 | # 9 | # Each cache will therefore have their own queue process, represented in this 10 | # module, and will operate using the utilities provided in the main Locksmith 11 | # service module (rather than using this module directly). 12 | import Cachex.Spec 13 | import Cachex.Services.Locksmith 14 | 15 | ############## 16 | # Public API # 17 | ############## 18 | 19 | @doc """ 20 | Starts the internal server process backing this queue. 21 | 22 | This is little more than starting a GenServer process using this module, 23 | although it does use the provided cache record to name the new server. 24 | """ 25 | @spec start_link(Cachex.t()) :: GenServer.on_start() 26 | def start_link(cache(name: name) = cache), 27 | do: GenServer.start_link(__MODULE__, cache, name: name(name, :locksmith)) 28 | 29 | @doc """ 30 | Executes a function in a lock-free context. 31 | """ 32 | @spec execute(Cachex.t(), (-> any)) :: any 33 | def execute(cache() = cache, func) when is_function(func, 0), 34 | do: service_call(cache, :locksmith, {:exec, func, callers()}) 35 | 36 | @doc """ 37 | Executes a function in a transactional context. 38 | """ 39 | @spec transaction(Cachex.t(), [any], (-> any)) :: any 40 | def transaction(cache() = cache, keys, func) when is_list(keys) and is_function(func, 0), 41 | do: service_call(cache, :locksmith, {:transaction, keys, func, callers()}) 42 | 43 | #################### 44 | # Server Callbacks # 45 | #################### 46 | 47 | @doc false 48 | # Initializes the new server process instance. 49 | # 50 | # This will signal the process as transactional, which 51 | # is used by the main Locksmith service for optimizations. 52 | def init(cache) do 53 | # signal transactional 54 | start_transaction() 55 | # cache is state 56 | {:ok, cache} 57 | end 58 | 59 | @doc false 60 | # Executes a function in a lock-free context. 61 | # 62 | # Because locks are handled sequentially inside this process, this execution 63 | # can guarantee that there are no locks set on the table when it fires. 64 | def handle_call({:exec, func, callers}, {caller, _tag}, cache), 65 | do: {:reply, safe_exec(func, [caller | callers]), cache} 66 | 67 | @doc false 68 | # Executes a function in a transactional context. 69 | # 70 | # This will lock any required keys before executing any writes, and remove the 71 | # locks after execution. 
The key here is that the locks set on a key will stop 72 | # other processes from writing them, and force them to queue their writes 73 | # inside this queue process instead. 74 | def handle_call({:transaction, keys, func, callers}, {caller, _tag}, cache) do 75 | true = lock(cache, keys) 76 | val = safe_exec(func, [caller | callers]) 77 | true = unlock(cache, keys) 78 | 79 | {:reply, val, cache} 80 | end 81 | 82 | ############### 83 | # Private API # 84 | ############### 85 | 86 | # Wraps a function in a rescue clause to provide safety. 87 | # 88 | # Any errors which occur are rescued and returned in an 89 | # `:error` tagged Tuple to avoid crashing the process. 90 | defp safe_exec(fun, chain) do 91 | Process.put(:"$callers", chain) 92 | 93 | try do 94 | fun.() 95 | rescue 96 | e -> {:error, Exception.message(e)} 97 | end 98 | end 99 | end 100 | -------------------------------------------------------------------------------- /lib/cachex/services/steward.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Services.Steward do 2 | @moduledoc false 3 | # Service module overseeing cache provisions. 4 | # 5 | # This module controls state provision to Cachex components, such as hooks 6 | # and warmers. In previous versions of Cachex provisions were handled under 7 | # the `Cachex.Hook` behaviour, but the introduction of warmers meant that it 8 | # should be handled in a separate location. 9 | # 10 | # This service module will handle the provision of state to relevant components 11 | # attached to a cache, without the caller having to think about it. 12 | import Cachex.Spec 13 | 14 | # convenience alias 15 | alias Cachex.Hook 16 | alias Cachex.Services 17 | 18 | # recognised 19 | @provisions [ 20 | :cache 21 | ] 22 | 23 | ############## 24 | # Public API # 25 | ############## 26 | 27 | @doc """ 28 | Provides a state pair to relevant components. 29 | 30 | This will send updated state to all interested components, but does not 31 | wait for a response before returning. As provisions are handled in a 32 | base implementation, we can be sure they are handled safely here.
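
As a sketch, pushing a new cache record out to interested components looks
like the following (typically invoked from Cachex's internal services):

    Steward.provide(cache, {:cache, cache})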
33 | """ 34 | @spec provide(Cachex.t(), {atom, any}) :: :ok 35 | def provide(cache() = cache, {key, _} = provision) when key in @provisions do 36 | cache(hooks: hooks, warmers: warmers) = cache 37 | 38 | services = 39 | cache 40 | |> Services.list() 41 | |> Enum.filter(&filter_services/1) 42 | 43 | provisioned = 44 | hooks 45 | |> Hook.concat() 46 | |> Enum.concat(services) 47 | |> Enum.concat(warmers) 48 | |> Enum.map(&map_names/1) 49 | 50 | for {name, mod} <- provisioned, key in mod.provisions() do 51 | send(name, {:cachex_provision, provision}) 52 | end 53 | end 54 | 55 | ############## 56 | # Private API # 57 | ############## 58 | 59 | # Map a hook into the name and module tuple 60 | defp map_names(hook(name: name, module: module)), 61 | do: {name, module} 62 | 63 | # Map a warmer into the name and module tuple 64 | defp map_names(warmer(name: name, module: module)), 65 | do: {name, module} 66 | 67 | # Map a service into the name and module tuple 68 | defp map_names({module, name, _tag, _id}), 69 | do: {name, module} 70 | 71 | # Filter out service modules if they don't have provisions 72 | defp filter_services({module, _name, _tag, _id}) do 73 | :attributes 74 | |> module.__info__() 75 | |> Keyword.get_values(:behaviour) 76 | |> Enum.member?([Cachex.Provision]) 77 | end 78 | end 79 | -------------------------------------------------------------------------------- /mix.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Mixfile do 2 | use Mix.Project 3 | 4 | @version "4.1.0" 5 | @url_docs "http://hexdocs.pm/cachex" 6 | @url_github "https://github.com/whitfin/cachex" 7 | 8 | def project do 9 | [ 10 | app: :cachex, 11 | name: "Cachex", 12 | description: "Powerful in-memory key/value storage for Elixir", 13 | package: %{ 14 | files: [ 15 | "lib", 16 | "mix.exs", 17 | "LICENSE" 18 | ], 19 | licenses: ["MIT"], 20 | links: %{ 21 | "Docs" => @url_docs, 22 | "GitHub" => @url_github 23 | }, 24 | maintainers: ["Isaac Whitfield"] 25 | }, 26 | version: @version, 27 | elixir: "~> 1.7", 28 | deps: deps(), 29 | docs: [ 30 | main: "overview", 31 | source_ref: "v#{@version}", 32 | source_url: @url_github, 33 | extra_section: "guides", 34 | extras: [ 35 | "docs/extensions/custom-commands.md", 36 | "docs/extensions/execution-lifecycle.md", 37 | "docs/general/batching-actions.md", 38 | "docs/general/local-persistence.md", 39 | "docs/general/streaming-records.md", 40 | "docs/management/limiting-caches.md", 41 | "docs/management/expiring-records.md", 42 | "docs/management/stats-gathering.md", 43 | "docs/migrations/migrating-to-v4.md", 44 | "docs/migrations/migrating-to-v3.md", 45 | "docs/migrations/migrating-to-v2.md", 46 | "docs/routing/cache-routers.md", 47 | "docs/routing/distributed-caches.md", 48 | "docs/warming/reactive-warming.md", 49 | "docs/warming/proactive-warming.md", 50 | "docs/overview.md" 51 | ], 52 | groups_for_extras: [ 53 | General: Path.wildcard("docs/general/*.md"), 54 | Management: Path.wildcard("docs/management/*.md"), 55 | Routing: Path.wildcard("docs/routing/*.md"), 56 | Warming: Path.wildcard("docs/warming/*.md"), 57 | Extensions: Path.wildcard("docs/extensions/*.md"), 58 | Migration: Path.wildcard("docs/migrations/*.md") 59 | ] 60 | ], 61 | test_coverage: [ 62 | tool: ExCoveralls 63 | ], 64 | preferred_cli_env: [ 65 | docs: :docs, 66 | bench: :bench, 67 | credo: :lint, 68 | cachex: :test, 69 | coveralls: :cover, 70 | "coveralls.html": :cover, 71 | "coveralls.travis": :cover 72 | ], 73 | aliases: [ 74 | bench: "run 
benchmarks/main.exs", 75 | test: [&start_epmd/1, "test"] 76 | ] 77 | ] 78 | end 79 | 80 | # Configuration for the OTP application 81 | # 82 | # Type "mix help compile.app" for more information 83 | def application do 84 | [ 85 | mod: {Cachex.Application, []}, 86 | extra_applications: [:unsafe] 87 | ] 88 | end 89 | 90 | # Dependencies can be Hex packages: 91 | # 92 | # {:mydep, "~> 0.3.0"} 93 | # 94 | # Or git/path repositories: 95 | # 96 | # {:mydep, git: "https://github.com/elixir-lang/mydep.git", tag: "0.1.0"} 97 | # 98 | # Type "mix help deps" for more examples and options 99 | defp deps do 100 | [ 101 | # Production dependencies 102 | {:eternal, "~> 1.2"}, 103 | {:ex_hash_ring, "~> 6.0"}, 104 | {:jumper, "~> 1.0"}, 105 | {:sleeplocks, "~> 1.1"}, 106 | {:unsafe, "~> 1.0"}, 107 | # Testing dependencies 108 | {:excoveralls, "~> 0.15", optional: true, only: [:cover]}, 109 | {:local_cluster, "~> 2.1", optional: true, only: [:cover, :test]}, 110 | # Linting dependencies 111 | {:credo, "~> 1.7", optional: true, only: [:lint]}, 112 | # Benchmarking dependencies 113 | {:benchee, "~> 1.1", optional: true, only: [:bench]}, 114 | {:benchee_html, "~> 1.0", optional: true, only: [:bench]}, 115 | # Documentation dependencies 116 | {:ex_doc, "~> 0.29", optional: true, only: [:docs]} 117 | ] 118 | end 119 | 120 | # Start epmd before test cases are run. 121 | defp start_epmd(_) do 122 | {_, 0} = System.cmd("epmd", ["-daemon"]) 123 | :ok 124 | end 125 | end 126 | -------------------------------------------------------------------------------- /scripts/overview.exs: -------------------------------------------------------------------------------- 1 | root = 2 | __ENV__.file 3 | |> Path.dirname() 4 | |> Path.dirname() 5 | 6 | content = 7 | root 8 | |> Path.join("README.md") 9 | |> File.read!() 10 | |> String.split("\n## Benchmarks") 11 | |> List.first() 12 | |> String.split("\n", parts: 3) 13 | |> List.last() 14 | 15 | root 16 | |> Path.join("docs") 17 | |> Path.join("overview.md") 18 | |> File.write!("# Getting Started\n" <> content) 19 | -------------------------------------------------------------------------------- /test/cachex/actions/clear_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.ClearTest do 2 | use Cachex.Test.Case 3 | 4 | # This test verifies that a cache can be successfully cleared. We fill the cache 5 | # and clear it, verifying that the entries were removed successfully. We also 6 | # ensure that hooks were updated with the correct values. 
7 | test "clearing a cache of all items" do 8 | # create a forwarding hook 9 | hook = ForwardHook.create() 10 | 11 | # create a test cache 12 | cache = TestUtils.create_cache(hooks: [hook]) 13 | 14 | # fill with some items 15 | {:ok, true} = Cachex.put(cache, 1, 1) 16 | {:ok, true} = Cachex.put(cache, 2, 2) 17 | {:ok, true} = Cachex.put(cache, 3, 3) 18 | 19 | # clear all hook 20 | TestUtils.flush() 21 | 22 | # clear the cache 23 | result = Cachex.clear(cache) 24 | 25 | # 3 items should have been removed 26 | assert(result == {:ok, 3}) 27 | 28 | # verify the hooks were updated with the clear 29 | assert_receive({{:clear, [[]]}, ^result}) 30 | 31 | # verify the size call never notified 32 | refute_receive({{:size, [[]]}, ^result}) 33 | 34 | # retrieve all items 35 | value1 = Cachex.get(cache, 1) 36 | value2 = Cachex.get(cache, 2) 37 | value3 = Cachex.get(cache, 3) 38 | 39 | # verify the items are gone 40 | assert(value1 == {:ok, nil}) 41 | assert(value2 == {:ok, nil}) 42 | assert(value3 == {:ok, nil}) 43 | end 44 | 45 | # This test verifies that the distributed router correctly controls 46 | # the clear/2 action in such a way that it can clean both a local 47 | # node as well as a remote node. We don't have to check functionality 48 | # of the entire action; just the actual routing of the action to the 49 | # target node(s) is of interest here. 50 | @tag distributed: true 51 | test "clearing a cache cluster of all items" do 52 | # create a new cache cluster for cleaning 53 | {cache, _nodes, _cluster} = TestUtils.create_cache_cluster(2) 54 | 55 | # we know that 1 & 2 hash to different nodes 56 | {:ok, true} = Cachex.put(cache, 1, 1) 57 | {:ok, true} = Cachex.put(cache, 2, 2) 58 | 59 | # retrieve the cache size, should be 2 60 | {:ok, 2} = Cachex.size(cache) 61 | 62 | # clear just the local cache to start with 63 | clear1 = Cachex.clear(cache, local: true) 64 | clear2 = Cachex.clear(cache, local: false) 65 | 66 | # check the local removed 1 67 | assert(clear1 == {:ok, 1}) 68 | assert(clear2 == {:ok, 1}) 69 | end 70 | end 71 | -------------------------------------------------------------------------------- /test/cachex/actions/decr_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.DecrTest do 2 | use Cachex.Test.Case 3 | 4 | # This test covers various combinations of decrementing cache items, by tweaking 5 | # the options provided alongside the calls. We validate the flags and values 6 | # coming back, as well as the fact they're forwarded to the hooks correctly. 
7 | test "decrementing cache items" do 8 | # create a forwarding hook 9 | hook = ForwardHook.create() 10 | 11 | # create a test cache 12 | cache = TestUtils.create_cache(hooks: [hook]) 13 | 14 | # define write options 15 | opts1 = [default: 10] 16 | 17 | # decrement some items 18 | decr1 = Cachex.decr(cache, "key1") 19 | decr2 = Cachex.decr(cache, "key1", 2) 20 | decr3 = Cachex.decr(cache, "key2", 1, opts1) 21 | 22 | # the first result should be -1 23 | assert(decr1 == {:ok, -1}) 24 | 25 | # the second result should be -3 26 | assert(decr2 == {:ok, -3}) 27 | 28 | # the third result should be 9 29 | assert(decr3 == {:ok, 9}) 30 | 31 | # verify the hooks were updated with the decrement 32 | assert_receive({{:decr, ["key1", 1, []]}, ^decr1}) 33 | assert_receive({{:decr, ["key1", 2, []]}, ^decr2}) 34 | assert_receive({{:decr, ["key2", 1, ^opts1]}, ^decr3}) 35 | 36 | # retrieve all items 37 | value1 = Cachex.get(cache, "key1") 38 | value2 = Cachex.get(cache, "key2") 39 | 40 | # verify the items match 41 | assert(value1 == {:ok, -3}) 42 | assert(value2 == {:ok, 9}) 43 | end 44 | 45 | # This test covers the negative case where a value exists but is not an integer, 46 | # which naturally means we can't decrement it properly. We just check for an 47 | # error flag in this case. 48 | test "decrementing a non-numeric value" do 49 | # create a test cache 50 | cache = TestUtils.create_cache() 51 | 52 | # set a non-numeric value 53 | {:ok, true} = Cachex.put(cache, "key", "value") 54 | 55 | # try to increment the value 56 | result = Cachex.decr(cache, "key", 1) 57 | 58 | # we should receive an error 59 | assert(result == {:error, :non_numeric_value}) 60 | end 61 | 62 | # This test verifies that this action is correctly distributed across 63 | # a cache cluster, instead of just the local node. We're not concerned 64 | # about the actual behaviour here, only the routing of the action. 65 | @tag distributed: true 66 | test "decrementing items in a cache cluster" do 67 | # create a new cache cluster for cleaning 68 | {cache, _nodes, _cluster} = TestUtils.create_cache_cluster(2) 69 | 70 | # we know that 1 & 2 hash to different nodes 71 | {:ok, -1} = Cachex.decr(cache, 1, 1) 72 | {:ok, -2} = Cachex.decr(cache, 2, 2) 73 | 74 | # check the results of the calls across nodes 75 | size1 = Cachex.size(cache, local: true) 76 | size2 = Cachex.size(cache, local: false) 77 | 78 | # one local, two total 79 | assert(size1 == {:ok, 1}) 80 | assert(size2 == {:ok, 2}) 81 | end 82 | end 83 | -------------------------------------------------------------------------------- /test/cachex/actions/del_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.DelTest do 2 | use Cachex.Test.Case 3 | 4 | # This case tests that we can safely remove items from the cache. We test the 5 | # removal of both existing and missing keys, as the behaviour is the same for 6 | # both. We also ensure that hooks receive the delete notification successfully. 
7 | test "removing entries from a cache" do 8 | # create a forwarding hook 9 | hook = ForwardHook.create() 10 | 11 | # create a test cache 12 | cache = TestUtils.create_cache(hooks: [hook]) 13 | 14 | # add some cache entries 15 | {:ok, true} = Cachex.put(cache, 1, 1) 16 | 17 | # delete some entries 18 | result1 = Cachex.del(cache, 1) 19 | result2 = Cachex.del(cache, 2) 20 | 21 | # verify both are true 22 | assert(result1 == {:ok, true}) 23 | assert(result2 == {:ok, true}) 24 | 25 | # verify the hooks were updated with the delete 26 | assert_receive({{:del, [1, []]}, ^result1}) 27 | assert_receive({{:del, [2, []]}, ^result2}) 28 | 29 | # retrieve all items 30 | value1 = Cachex.get(cache, 1) 31 | value2 = Cachex.get(cache, 2) 32 | 33 | # verify the items are gone 34 | assert(value1 == {:ok, nil}) 35 | assert(value2 == {:ok, nil}) 36 | end 37 | 38 | # This test verifies that this action is correctly distributed across 39 | # a cache cluster, instead of just the local node. We're not concerned 40 | # about the actual behaviour here, only the routing of the action. 41 | @tag distributed: true 42 | test "removing entries from a cache cluster" do 43 | # create a new cache cluster for cleaning 44 | {cache, _nodes, _cluster} = TestUtils.create_cache_cluster(2) 45 | 46 | # we know that 1 & 2 hash to different nodes 47 | {:ok, true} = Cachex.put(cache, 1, 1) 48 | {:ok, true} = Cachex.put(cache, 2, 2) 49 | 50 | # check the results of the calls across nodes 51 | size1 = Cachex.size(cache, local: true) 52 | size2 = Cachex.size(cache, local: false) 53 | 54 | # one local, two total 55 | assert(size1 == {:ok, 1}) 56 | assert(size2 == {:ok, 2}) 57 | 58 | # delete each item from the cache cluster 59 | {:ok, true} = Cachex.del(cache, 1) 60 | {:ok, true} = Cachex.del(cache, 2) 61 | 62 | # check the results of the calls across nodes 63 | size3 = Cachex.size(cache, local: true) 64 | size4 = Cachex.size(cache, local: false) 65 | 66 | # no records are left 67 | assert(size3 == {:ok, 0}) 68 | assert(size4 == {:ok, 0}) 69 | end 70 | end 71 | -------------------------------------------------------------------------------- /test/cachex/actions/empty_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.EmptyTest do 2 | use Cachex.Test.Case 3 | 4 | # This test verifies that a cache is empty. We first check that it is before 5 | # adding any items, and after we add some we check that it's no longer empty. 6 | # Hook messages are represented as size calls, as empty is purely sugar on top 7 | # of the size functionality. 
8 | test "checking if a cache is empty" do 9 | # create a forwarding hook 10 | hook = ForwardHook.create() 11 | 12 | # create a test cache 13 | cache = TestUtils.create_cache(hooks: [hook]) 14 | 15 | # check if the cache is empty 16 | result1 = Cachex.empty?(cache) 17 | 18 | # it should be 19 | assert(result1 == {:ok, true}) 20 | 21 | # verify the hooks were updated with the message 22 | assert_receive({{:empty?, [[]]}, ^result1}) 23 | 24 | # add some cache entries 25 | {:ok, true} = Cachex.put(cache, 1, 1) 26 | 27 | # check if the cache is empty 28 | result2 = Cachex.empty?(cache) 29 | 30 | # it shouldn't be 31 | assert(result2 == {:ok, false}) 32 | 33 | # verify the hooks were updated with the message 34 | assert_receive({{:empty?, [[]]}, ^result2}) 35 | end 36 | 37 | # This test verifies that the distributed router correctly controls 38 | # the empty?/2 action in such a way that it can clean both a local 39 | # node as well as a remote node. We don't have to check functionality 40 | # of the entire action; just the actual routing of the action to the 41 | # target node(s) is of interest here. 42 | @tag distributed: true 43 | test "checking if a cache cluster is empty" do 44 | # create a new cache cluster for cleaning 45 | {cache, _nodes, _cluster} = TestUtils.create_cache_cluster(2) 46 | 47 | # we know that 1 & 2 hash to different nodes 48 | {:ok, true} = Cachex.put(cache, 1, 1) 49 | {:ok, true} = Cachex.put(cache, 2, 2) 50 | 51 | # check if the cache is empty, locally and remote 52 | empty1 = Cachex.empty?(cache, local: true) 53 | empty2 = Cachex.empty?(cache, local: false) 54 | 55 | # both should be non-empty 56 | assert(empty1 == {:ok, false}) 57 | assert(empty2 == {:ok, false}) 58 | 59 | # delete the key on the local node 60 | {:ok, 1} = Cachex.clear(cache, local: true) 61 | 62 | # check again as to whether the cache is empty 63 | empty3 = Cachex.empty?(cache, local: true) 64 | empty4 = Cachex.empty?(cache, local: false) 65 | 66 | # only the local node is now empty 67 | assert(empty3 == {:ok, true}) 68 | assert(empty4 == {:ok, false}) 69 | 70 | # finally delete all keys in the cluster 71 | {:ok, 1} = Cachex.clear(cache, local: false) 72 | 73 | # check again as to whether the cache is empty 74 | empty5 = Cachex.empty?(cache, local: true) 75 | empty6 = Cachex.empty?(cache, local: false) 76 | 77 | # both should now show empty 78 | assert(empty5 == {:ok, true}) 79 | assert(empty6 == {:ok, true}) 80 | end 81 | end 82 | -------------------------------------------------------------------------------- /test/cachex/actions/execute_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.ExecuteTest do 2 | use Cachex.Test.Case 3 | 4 | # This test just makes sure that execution blocks are handled correctly in that 5 | # they can carry out many actions and return a joint result without having to 6 | # go back to the cache table. 
7 | test "execution blocks can carry out many actions" do 8 | # create a test cache 9 | cache = TestUtils.create_cache() 10 | 11 | # start an execution block 12 | result = 13 | Cachex.execute(cache, fn cache -> 14 | [ 15 | Cachex.put!(cache, 1, 1), 16 | Cachex.put!(cache, 2, 2), 17 | Cachex.put!(cache, 3, 3) 18 | ] 19 | end) 20 | 21 | # verify the block returns correct values 22 | assert(result == {:ok, [true, true, true]}) 23 | end 24 | end 25 | -------------------------------------------------------------------------------- /test/cachex/actions/exists_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.ExistsTest do 2 | use Cachex.Test.Case 3 | 4 | # This test verifies whether a key exists in a cache. If it does, we return 5 | # true. If not we return false. If the key has expired, we return false and 6 | # evict it on demand using the generic read action. 7 | test "checking if a key exists" do 8 | # create a forwarding hook 9 | hook = ForwardHook.create() 10 | 11 | # create a test cache 12 | cache = TestUtils.create_cache(hooks: [hook]) 13 | 14 | # add some keys to the cache 15 | {:ok, true} = Cachex.put(cache, 1, 1) 16 | {:ok, true} = Cachex.put(cache, 2, 2, expire: 1) 17 | 18 | # let TTLs clear 19 | :timer.sleep(2) 20 | 21 | # clear messages 22 | TestUtils.flush() 23 | 24 | # check if several keys exist 25 | exists1 = Cachex.exists?(cache, 1) 26 | exists2 = Cachex.exists?(cache, 2) 27 | exists3 = Cachex.exists?(cache, 3) 28 | 29 | # the first result should exist 30 | assert(exists1 == {:ok, true}) 31 | 32 | # the next two should be missing 33 | assert(exists2 == {:ok, false}) 34 | assert(exists3 == {:ok, false}) 35 | 36 | # verify the hooks were updated with the message 37 | assert_receive({{:exists?, [1, []]}, ^exists1}) 38 | assert_receive({{:exists?, [2, []]}, ^exists2}) 39 | assert_receive({{:exists?, [3, []]}, ^exists3}) 40 | 41 | # check we received valid purge actions for the TTL 42 | assert_receive({{:purge, [[]]}, {:ok, 1}}) 43 | 44 | # retrieve all values from the cache 45 | value1 = Cachex.get(cache, 1) 46 | value2 = Cachex.get(cache, 2) 47 | value3 = Cachex.get(cache, 3) 48 | 49 | # verify the second was removed 50 | assert(value1 == {:ok, 1}) 51 | assert(value2 == {:ok, nil}) 52 | assert(value3 == {:ok, nil}) 53 | end 54 | 55 | # This test verifies that this action is correctly distributed across 56 | # a cache cluster, instead of just the local node. We're not concerned 57 | # about the actual behaviour here, only the routing of the action. 58 | @tag distributed: true 59 | test "checking if a key exists in a cluster" do 60 | # create a new cache cluster for cleaning 61 | {cache, _nodes, _cluster} = TestUtils.create_cache_cluster(2) 62 | 63 | # we know that 1 & 2 hash to different nodes 64 | {:ok, true} = Cachex.put(cache, 1, 1) 65 | {:ok, true} = Cachex.put(cache, 2, 2) 66 | 67 | # check the results of the calls across nodes 68 | exists1 = Cachex.exists?(cache, 1) 69 | exists2 = Cachex.exists?(cache, 2) 70 | 71 | # both exist in the cluster 72 | assert(exists1 == {:ok, true}) 73 | assert(exists2 == {:ok, true}) 74 | end 75 | end 76 | -------------------------------------------------------------------------------- /test/cachex/actions/expire_at_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.ExpireAtTest do 2 | use Cachex.Test.Case 3 | 4 | # This test updates the expire time on a key to expire at a given timestamp. 
5 | # We make sure that TTLs are updated accordingly. If a date in the past is 6 | # given, the key is immediately removed. We also make sure that we can handle 7 | # setting expire times on missing keys. 8 | test "setting a key to expire at a given time" do 9 | # create a forwarding hook 10 | hook = ForwardHook.create() 11 | 12 | # create a test cache 13 | cache = TestUtils.create_cache(hooks: [hook]) 14 | 15 | # add some keys to the cache 16 | {:ok, true} = Cachex.put(cache, 1, 1) 17 | {:ok, true} = Cachex.put(cache, 2, 2, expire: 10) 18 | {:ok, true} = Cachex.put(cache, 3, 3, expire: 10) 19 | 20 | # clear messages 21 | TestUtils.flush() 22 | 23 | # grab current time 24 | ctime = now() 25 | 26 | # set the expire time 27 | f_expire_time = ctime + 10000 28 | p_expire_time = ctime - 10000 29 | 30 | # expire several keys 31 | result1 = Cachex.expire_at(cache, 1, f_expire_time) 32 | result2 = Cachex.expire_at(cache, 2, f_expire_time) 33 | result3 = Cachex.expire_at(cache, 3, p_expire_time) 34 | result4 = Cachex.expire_at(cache, 4, f_expire_time) 35 | 36 | # the first two should succeed 37 | assert(result1 == {:ok, true}) 38 | assert(result2 == {:ok, true}) 39 | 40 | # the third should succeed and remove the key 41 | assert(result3 == {:ok, true}) 42 | 43 | # the last one is missing and should fail 44 | assert(result4 == {:ok, false}) 45 | 46 | # verify the hooks were updated with the message 47 | assert_receive({{:expire_at, [1, ^f_expire_time, []]}, ^result1}) 48 | assert_receive({{:expire_at, [2, ^f_expire_time, []]}, ^result2}) 49 | assert_receive({{:expire_at, [3, ^p_expire_time, []]}, ^result3}) 50 | assert_receive({{:expire_at, [4, ^f_expire_time, []]}, ^result4}) 51 | 52 | # check we received valid purge actions for the removed key 53 | assert_receive({{:purge, [[]]}, {:ok, 1}}) 54 | 55 | # retrieve all TTLs from the cache 56 | ttl1 = Cachex.ttl!(cache, 1) 57 | ttl2 = Cachex.ttl!(cache, 2) 58 | ttl3 = Cachex.ttl(cache, 3) 59 | ttl4 = Cachex.ttl(cache, 4) 60 | 61 | # verify the new TTL has taken effect 62 | assert_in_delta(ttl1, 10000, 25) 63 | assert_in_delta(ttl2, 10000, 25) 64 | 65 | # assert the last two keys don't exist 66 | assert(ttl3 == {:ok, nil}) 67 | assert(ttl4 == {:ok, nil}) 68 | end 69 | 70 | # This test verifies that this action is correctly distributed across 71 | # a cache cluster, instead of just the local node. We're not concerned 72 | # about the actual behaviour here, only the routing of the action. 
73 | @tag distributed: true 74 | test "setting a key to expire at a given time in a cluster" do 75 | # create a new cache cluster 76 | {cache, _nodes, _cluster} = TestUtils.create_cache_cluster(2) 77 | 78 | # we know that 1 & 2 hash to different nodes 79 | {:ok, true} = Cachex.put(cache, 1, 1) 80 | {:ok, true} = Cachex.put(cache, 2, 2) 81 | 82 | # set expirations on both keys 83 | {:ok, true} = Cachex.expire_at(cache, 1, now() + 5000) 84 | {:ok, true} = Cachex.expire_at(cache, 2, now() + 5000) 85 | 86 | # check the expiration of each key in the cluster 87 | {:ok, expiration1} = Cachex.ttl(cache, 1) 88 | {:ok, expiration2} = Cachex.ttl(cache, 2) 89 | 90 | # both have an expiration 91 | assert(expiration1 != nil) 92 | assert(expiration2 != nil) 93 | end 94 | end 95 | -------------------------------------------------------------------------------- /test/cachex/actions/expire_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.ExpireTest do 2 | use Cachex.Test.Case 3 | 4 | # This test updates the expire time on a key to expire after a given period. 5 | # We make sure that TTLs are updated accordingly. If the period is negative, 6 | # the key is immediately removed. We also make sure that we can handle setting 7 | # expire times on missing keys. 8 | test "setting a key to expire after a given period" do 9 | # create a forwarding hook 10 | hook = ForwardHook.create() 11 | 12 | # create a test cache 13 | cache = TestUtils.create_cache(hooks: [hook]) 14 | 15 | # add some keys to the cache 16 | {:ok, true} = Cachex.put(cache, 1, 1) 17 | {:ok, true} = Cachex.put(cache, 2, 2, expire: 10) 18 | {:ok, true} = Cachex.put(cache, 3, 3, expire: 10) 19 | 20 | # clear messages 21 | TestUtils.flush() 22 | 23 | # set the expire time 24 | f_expire_time = 10000 25 | p_expire_time = -10000 26 | 27 | # expire several keys 28 | result1 = Cachex.expire(cache, 1, f_expire_time) 29 | result2 = Cachex.expire(cache, 2, f_expire_time) 30 | result3 = Cachex.expire(cache, 3, p_expire_time) 31 | result4 = Cachex.expire(cache, 4, f_expire_time) 32 | 33 | # the first two should succeed 34 | assert(result1 == {:ok, true}) 35 | assert(result2 == {:ok, true}) 36 | 37 | # the third should succeed and remove the key 38 | assert(result3 == {:ok, true}) 39 | 40 | # the last one is missing and should fail 41 | assert(result4 == {:ok, false}) 42 | 43 | # verify the hooks were updated with the message 44 | assert_receive({{:expire, [1, ^f_expire_time, []]}, ^result1}) 45 | assert_receive({{:expire, [2, ^f_expire_time, []]}, ^result2}) 46 | assert_receive({{:expire, [3, ^p_expire_time, []]}, ^result3}) 47 | assert_receive({{:expire, [4, ^f_expire_time, []]}, ^result4}) 48 | 49 | # check we received valid purge actions for the removed key 50 | assert_receive({{:purge, [[]]}, {:ok, 1}}) 51 | 52 | # retrieve all TTLs from the cache 53 | ttl1 = Cachex.ttl!(cache, 1) 54 | ttl2 = Cachex.ttl!(cache, 2) 55 | ttl3 = Cachex.ttl(cache, 3) 56 | ttl4 = Cachex.ttl(cache, 4) 57 | 58 | # verify the new TTL has taken effect 59 | assert_in_delta(ttl1, 10000, 25) 60 | assert_in_delta(ttl2, 10000, 25) 61 | 62 | # assert the last two keys don't exist 63 | assert(ttl3 == {:ok, nil}) 64 | assert(ttl4 == {:ok, nil}) 65 | end 66 | 67 | # This test verifies that this action is correctly distributed across 68 | # a cache cluster, instead of just the local node. We're not concerned 69 | # about the actual behaviour here, only the routing of the action. 
70 | @tag distributed: true 71 | test "setting a key to expire after a given period in a cluster" do 72 | # create a new cache cluster 73 | {cache, _nodes, _cluster} = TestUtils.create_cache_cluster(2) 74 | 75 | # we know that 1 & 2 hash to different nodes 76 | {:ok, true} = Cachex.put(cache, 1, 1) 77 | {:ok, true} = Cachex.put(cache, 2, 2) 78 | 79 | # set expirations on both keys 80 | {:ok, true} = Cachex.expire(cache, 1, 5000) 81 | {:ok, true} = Cachex.expire(cache, 2, 5000) 82 | 83 | # check the expiration of each key in the cluster 84 | {:ok, expiration1} = Cachex.ttl(cache, 1) 85 | {:ok, expiration2} = Cachex.ttl(cache, 2) 86 | 87 | # both have an expiration 88 | assert(expiration1 != nil) 89 | assert(expiration2 != nil) 90 | end 91 | end 92 | -------------------------------------------------------------------------------- /test/cachex/actions/export_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.ExportTest do 2 | use Cachex.Test.Case 3 | 4 | # This test verifies that it's possible to export the entries from a cache. 5 | test "exporting records from a cache" do 6 | # create a test cache 7 | cache = TestUtils.create_cache() 8 | 9 | # fill with some items 10 | {:ok, true} = Cachex.put(cache, 1, 1) 11 | {:ok, true} = Cachex.put(cache, 2, 2) 12 | {:ok, true} = Cachex.put(cache, 3, 3) 13 | 14 | # export the items 15 | {:ok, export} = Cachex.export(cache) 16 | 17 | # check the exported count 18 | assert length(export) == 3 19 | end 20 | 21 | # This test verifies that the distributed router correctly controls 22 | # the export/2 action in such a way that it can clean both a local 23 | # node as well as a remote node. We don't have to check functionality 24 | # of the entire action; just the actual routing of the action to the 25 | # target node(s) is of interest here. 
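  # Routing sketch (hypothetical distributed cache :demo): the :local option
  # scopes the export to this node only, or fans out across the cluster.
  #
  #     {:ok, local_records} = Cachex.export(:demo, local: true)
  #     {:ok, all_records} = Cachex.export(:demo, local: false)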
26 | @tag distributed: true 27 | test "exporting records from a cache cluster" do 28 | # create a new cache cluster for cleaning 29 | {cache, _nodes, _cluster} = TestUtils.create_cache_cluster(2) 30 | 31 | # we know that 1 & 2 hash to different nodes 32 | {:ok, true} = Cachex.put(cache, 1, 1) 33 | {:ok, true} = Cachex.put(cache, 2, 2) 34 | 35 | # retrieve the records from both local & remote 36 | {:ok, export1} = Cachex.export(cache, local: true) 37 | {:ok, export2} = Cachex.export(cache, local: false) 38 | 39 | # local just one, cluster has two 40 | assert(length(export1) == 1) 41 | assert(length(export2) == 2) 42 | 43 | # delete the single local key 44 | {:ok, 1} = Cachex.clear(cache, local: true) 45 | 46 | # retrieve the records again from both local & remote 47 | {:ok, export3} = Cachex.export(cache, local: true) 48 | {:ok, export4} = Cachex.export(cache, local: false) 49 | 50 | # now local has no records 51 | assert(length(export3) == 0) 52 | assert(length(export4) == 1) 53 | 54 | # delete the remaining key inside the cluster 55 | {:ok, 1} = Cachex.clear(cache, local: false) 56 | 57 | # retrieve the records again from both local & remote 58 | {:ok, export5} = Cachex.export(cache, local: true) 59 | {:ok, export6} = Cachex.export(cache, local: false) 60 | 61 | # now neither side has any records 62 | assert(length(export5) == 0) 63 | assert(length(export6) == 0) 64 | end 65 | end 66 | -------------------------------------------------------------------------------- /test/cachex/actions/get_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.GetTest do 2 | use Cachex.Test.Case 3 | 4 | # This test verifies that we can retrieve keys from the cache. 5 | # If a key has expired, the value is not returned and the hooks 6 | # are updated with an eviction. If the key is missing, we return 7 | # a message stating as such. 8 | test "retrieving keys from a cache" do 9 | # create a forwarding hook 10 | hook = ForwardHook.create() 11 | 12 | # create a test cache 13 | cache1 = TestUtils.create_cache(hooks: [hook]) 14 | 15 | # set some keys in the cache 16 | {:ok, true} = Cachex.put(cache1, 1, 1) 17 | {:ok, true} = Cachex.put(cache1, 2, 2, expire: 1) 18 | 19 | # wait for the TTL to pass 20 | :timer.sleep(2) 21 | 22 | # flush all existing messages 23 | TestUtils.flush() 24 | 25 | # take the first and second key 26 | result1 = Cachex.get(cache1, 1) 27 | result2 = Cachex.get(cache1, 2) 28 | 29 | # take a missing key with no fallback 30 | result3 = Cachex.get(cache1, 3) 31 | 32 | # verify the first key is retrieved 33 | assert(result1 == {:ok, 1}) 34 | 35 | # verify the second and third keys are missing 36 | assert(result2 == {:ok, nil}) 37 | assert(result3 == {:ok, nil}) 38 | 39 | # assert we receive valid notifications 40 | assert_receive({{:get, [1, []]}, ^result1}) 41 | assert_receive({{:get, [2, []]}, ^result2}) 42 | assert_receive({{:get, [3, []]}, ^result3}) 43 | 44 | # check we received valid purge actions for the TTL 45 | assert_receive({{:purge, [[]]}, {:ok, 1}}) 46 | end 47 | 48 | # This test verifies that this action is correctly distributed across 49 | # a cache cluster, instead of just the local node. We're not concerned 50 | # about the actual behaviour here, only the routing of the action.
51 | @tag distributed: true 52 | test "retrieving keys from a cache cluster" do 53 | # create a new cache cluster for cleaning 54 | {cache, _nodes, _cluster} = TestUtils.create_cache_cluster(2) 55 | 56 | # we know that 1 & 2 hash to different nodes 57 | {:ok, true} = Cachex.put(cache, 1, 1) 58 | {:ok, true} = Cachex.put(cache, 2, 2) 59 | 60 | # try to retrieve both of the set keys 61 | get1 = Cachex.get(cache, 1) 62 | get2 = Cachex.get(cache, 2) 63 | 64 | # both should come back 65 | assert(get1 == {:ok, 1}) 66 | assert(get2 == {:ok, 2}) 67 | end 68 | end 69 | -------------------------------------------------------------------------------- /test/cachex/actions/import_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.ImportTest do 2 | use Cachex.Test.Case 3 | 4 | # This test verifies that it's possible to import entries into a cache. 5 | test "importing records into a cache" do 6 | # create a test cache 7 | cache = TestUtils.create_cache() 8 | start = now() 9 | 10 | # add some cache entries 11 | {:ok, true} = Cachex.put(cache, 1, 1) 12 | {:ok, true} = Cachex.put(cache, 2, 2, expire: 1) 13 | {:ok, true} = Cachex.put(cache, 3, 3, expire: 10_000) 14 | 15 | # export the cache to a list 16 | result1 = Cachex.export(cache) 17 | result2 = Cachex.clear(cache) 18 | result3 = Cachex.size(cache) 19 | 20 | # verify the clearance 21 | assert(result2 == {:ok, 3}) 22 | assert(result3 == {:ok, 0}) 23 | 24 | # wait a while before re-load 25 | :timer.sleep(50) 26 | 27 | # load the cache from the export 28 | result4 = Cachex.import(cache, elem(result1, 1)) 29 | result5 = Cachex.size(cache) 30 | result6 = Cachex.ttl!(cache, 3) 31 | 32 | # verify that the import was ok 33 | assert(result4 == {:ok, 2}) 34 | assert(result5 == {:ok, 2}) 35 | 36 | # verify TTL offsetting happens 37 | assert_in_delta(result6, 10_000 - (now() - start), 5) 38 | end 39 | end 40 | -------------------------------------------------------------------------------- /test/cachex/actions/incr_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.IncrTest do 2 | use Cachex.Test.Case 3 | 4 | # This test covers various combinations of incrementing cache items, by tweaking 5 | # the options provided alongside the calls. We validate the flags and values 6 | # coming back, as well as the fact they're forwarded to the hooks correctly. 
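  # A short sketch of the option under test (hypothetical cache :demo):
  # missing keys are seeded from :default before the increment is applied.
  #
  #     {:ok, 1} = Cachex.incr(:demo, "counter")
  #     {:ok, 11} = Cachex.incr(:demo, "seeded", 1, default: 10)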
7 | test "incrementing cache items" do 8 | # create a forwarding hook 9 | hook = ForwardHook.create() 10 | 11 | # create a test cache 12 | cache = TestUtils.create_cache(hooks: [hook]) 13 | 14 | # define write options 15 | opts1 = [default: 10] 16 | 17 | # increment some items 18 | incr1 = Cachex.incr(cache, "key1") 19 | incr2 = Cachex.incr(cache, "key1", 2) 20 | incr3 = Cachex.incr(cache, "key2", 1, opts1) 21 | 22 | # the first result should be 1 23 | assert(incr1 == {:ok, 1}) 24 | 25 | # the second result should be 3 26 | assert(incr2 == {:ok, 3}) 27 | 28 | # the third result should be 11 29 | assert(incr3 == {:ok, 11}) 30 | 31 | # verify the hooks were updated with the increment 32 | assert_receive({{:incr, ["key1", 1, []]}, ^incr1}) 33 | assert_receive({{:incr, ["key1", 2, []]}, ^incr2}) 34 | assert_receive({{:incr, ["key2", 1, ^opts1]}, ^incr3}) 35 | 36 | # retrieve all items 37 | value1 = Cachex.get(cache, "key1") 38 | value2 = Cachex.get(cache, "key2") 39 | 40 | # verify the items match 41 | assert(value1 == {:ok, 3}) 42 | assert(value2 == {:ok, 11}) 43 | end 44 | 45 | # This test covers the negative case where a value exists but is not an integer, 46 | # which naturally means we can't increment it properly. We just check for an 47 | # error flag in this case. 48 | test "incrementing a non-numeric value" do 49 | # create a test cache 50 | cache = TestUtils.create_cache() 51 | 52 | # set a non-numeric value 53 | {:ok, true} = Cachex.put(cache, "key", "value") 54 | 55 | # try to increment the value 56 | result = Cachex.incr(cache, "key") 57 | 58 | # we should receive an error 59 | assert(result == {:error, :non_numeric_value}) 60 | end 61 | 62 | # This test verifies that this action is correctly distributed across 63 | # a cache cluster, instead of just the local node. We're not concerned 64 | # about the actual behaviour here, only the routing of the action. 65 | @tag distributed: true 66 | test "incrementing items in a cache cluster" do 67 | # create a new cache cluster for cleaning 68 | {cache, _nodes, _cluster} = TestUtils.create_cache_cluster(2) 69 | 70 | # we know that 1 & 2 hash to different nodes 71 | {:ok, 1} = Cachex.incr(cache, 1, 1) 72 | {:ok, 2} = Cachex.incr(cache, 2, 2) 73 | 74 | # check the results of the calls across nodes 75 | size1 = Cachex.size(cache, local: true) 76 | size2 = Cachex.size(cache, local: false) 77 | 78 | # one local, two total 79 | assert(size1 == {:ok, 1}) 80 | assert(size2 == {:ok, 2}) 81 | end 82 | end 83 | -------------------------------------------------------------------------------- /test/cachex/actions/keys_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.KeysTest do 2 | use Cachex.Test.Case 3 | 4 | # This test verifies that it's possible to retrieve the keys inside a cache. 5 | # It should be noted that the keys function takes TTL into account and only 6 | # returns the keys of those records which have not expired. Order is not in 7 | # any way guaranteed, even with no cache modification. 
8 | test "retrieving the keys inside the cache" do 9 | # create a forwarding hook 10 | hook = ForwardHook.create() 11 | 12 | # create a test cache 13 | cache = TestUtils.create_cache(hooks: [hook]) 14 | 15 | # fill with some items 16 | {:ok, true} = Cachex.put(cache, 1, 1) 17 | {:ok, true} = Cachex.put(cache, 2, 2) 18 | {:ok, true} = Cachex.put(cache, 3, 3) 19 | 20 | # add some expired items 21 | {:ok, true} = Cachex.put(cache, 4, 4, expire: 1) 22 | {:ok, true} = Cachex.put(cache, 5, 5, expire: 1) 23 | {:ok, true} = Cachex.put(cache, 6, 6, expire: 1) 24 | 25 | # let entries expire 26 | :timer.sleep(2) 27 | 28 | # clear all hook 29 | TestUtils.flush() 30 | 31 | # retrieve the keys 32 | {status, keys} = Cachex.keys(cache) 33 | 34 | # ensure the status is ok 35 | assert(status == :ok) 36 | 37 | # sort the keys 38 | result = Enum.sort(keys) 39 | 40 | # only 3 items should come back 41 | assert(result == [1, 2, 3]) 42 | 43 | # verify the hooks were updated with the count 44 | assert_receive({{:keys, [[]]}, {^status, ^keys}}) 45 | end 46 | 47 | # This test verifies that the distributed router correctly controls 48 | # the keys/2 action in such a way that it can clean both a local 49 | # node as well as a remote node. We don't have to check functionality 50 | # of the entire action; just the actual routing of the action to the 51 | # target node(s) is of interest here. 52 | @tag distributed: true 53 | test "checking if a cache cluster is empty" do 54 | # create a new cache cluster for cleaning 55 | {cache, _nodes, _cluster} = TestUtils.create_cache_cluster(2) 56 | 57 | # we know that 1 & 2 hash to different nodes 58 | {:ok, true} = Cachex.put(cache, 1, 1) 59 | {:ok, true} = Cachex.put(cache, 2, 2) 60 | 61 | # retrieve the keys from both local & remote 62 | {:ok, keys1} = Cachex.keys(cache, local: true) 63 | {:ok, keys2} = Cachex.keys(cache, local: false) 64 | 65 | # local just one, cluster has two 66 | assert(length(keys1) == 1) 67 | assert(length(keys2) == 2) 68 | 69 | # delete the single local key 70 | {:ok, 1} = Cachex.clear(cache, local: true) 71 | 72 | # retrieve the keys again from both local & remote 73 | {:ok, keys3} = Cachex.keys(cache, local: true) 74 | {:ok, keys4} = Cachex.keys(cache, local: false) 75 | 76 | # now local has no keys 77 | assert(length(keys3) == 0) 78 | assert(length(keys4) == 1) 79 | 80 | # delete the remaining key inside the cluster 81 | {:ok, 1} = Cachex.clear(cache, local: false) 82 | 83 | # retrieve the keys again from both local & remote 84 | {:ok, keys5} = Cachex.keys(cache, local: true) 85 | {:ok, keys6} = Cachex.keys(cache, local: false) 86 | 87 | # now both don't have any keys 88 | assert(length(keys5) == 0) 89 | assert(length(keys6) == 0) 90 | end 91 | end 92 | -------------------------------------------------------------------------------- /test/cachex/actions/persist_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.PersistTest do 2 | use Cachex.Test.Case 3 | 4 | # This test just ensures that we can safely remove expiration times from a key. 5 | # We set a TTL on a key and then persist it and verify that there is then no 6 | # TTL associated with the key going forwards. 
7 | test "removing the TTL on a key" do 8 | # create a forwarding hook 9 | hook = ForwardHook.create() 10 | 11 | # create a test cache 12 | cache = TestUtils.create_cache(hooks: [hook]) 13 | 14 | # add some keys to the cache 15 | {:ok, true} = Cachex.put(cache, 1, 1) 16 | {:ok, true} = Cachex.put(cache, 2, 2, expire: 1000) 17 | 18 | # clear messages 19 | TestUtils.flush() 20 | 21 | # retrieve all TTLs from the cache 22 | ttl1 = Cachex.ttl!(cache, 1) 23 | ttl2 = Cachex.ttl!(cache, 2) 24 | 25 | # the first TTL should be nil 26 | assert(ttl1 == nil) 27 | 28 | # the second TTL should be roughly 1000 29 | assert_in_delta(ttl2, 995, 6) 30 | 31 | # remove the TTLs 32 | persist1 = Cachex.persist(cache, 1) 33 | persist2 = Cachex.persist(cache, 2) 34 | persist3 = Cachex.persist(cache, 3) 35 | 36 | # the first two writes should succeed 37 | assert(persist1 == {:ok, true}) 38 | assert(persist2 == {:ok, true}) 39 | 40 | # the third shouldn't, as it's missing 41 | assert(persist3 == {:ok, false}) 42 | 43 | # verify the hooks were updated with the message 44 | assert_receive({{:persist, [1, []]}, ^persist1}) 45 | assert_receive({{:persist, [2, []]}, ^persist2}) 46 | assert_receive({{:persist, [3, []]}, ^persist3}) 47 | 48 | # retrieve all TTLs from the cache 49 | ttl3 = Cachex.ttl!(cache, 1) 50 | ttl4 = Cachex.ttl!(cache, 2) 51 | 52 | # both TTLs should now be nil 53 | assert(ttl3 == nil) 54 | assert(ttl4 == nil) 55 | end 56 | 57 | # This test verifies that this action is correctly distributed across 58 | # a cache cluster, instead of just the local node. We're not concerned 59 | # about the actual behaviour here, only the routing of the action. 60 | @tag distributed: true 61 | test "removing the TTL on a key in a cluster" do 62 | # create a new cache cluster 63 | {cache, _nodes, _cluster} = TestUtils.create_cache_cluster(2) 64 | 65 | # we know that 1 & 2 hash to different nodes 66 | {:ok, true} = Cachex.put(cache, 1, 1, expire: 5000) 67 | {:ok, true} = Cachex.put(cache, 2, 2, expire: 5000) 68 | 69 | # remove expirations on both keys 70 | {:ok, true} = Cachex.persist(cache, 1) 71 | {:ok, true} = Cachex.persist(cache, 2) 72 | 73 | # check the expiration of each key in the cluster 74 | {:ok, expiration1} = Cachex.ttl(cache, 1) 75 | {:ok, expiration2} = Cachex.ttl(cache, 2) 76 | 77 | # both have an expiration 78 | assert(expiration1 == nil) 79 | assert(expiration2 == nil) 80 | end 81 | end 82 | -------------------------------------------------------------------------------- /test/cachex/actions/prune_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.PruneTest do 2 | use Cachex.Test.Case 3 | 4 | test "pruning a cache to a size" do 5 | # create a new test cache 6 | cache = TestUtils.create_cache() 7 | 8 | # insert 100 keys 9 | for i <- 1..100 do 10 | Cachex.put!(cache, i, i) 11 | end 12 | 13 | # guarantee we have 100 keys in the cache 14 | assert Cachex.size(cache) == {:ok, 100} 15 | 16 | # trigger a pruning down to 50 keys 17 | assert Cachex.prune(cache, 50) == {:ok, true} 18 | 19 | # verify that we're down to 50 keys 20 | assert Cachex.size(cache) == {:ok, 45} 21 | end 22 | 23 | test "pruning a cache to a size with a custom reclaim" do 24 | # create a new test cache 25 | cache = TestUtils.create_cache() 26 | 27 | # insert 100 keys 28 | for i <- 1..100 do 29 | Cachex.put!(cache, i, i) 30 | end 31 | 32 | # guarantee we have 100 keys in the cache 33 | assert Cachex.size(cache) == {:ok, 100} 34 | 35 | # trigger a pruning down to 50 keys, 
with the default reclaim disabled 36 | assert Cachex.prune(cache, 50, reclaim: 0) == {:ok, true} 37 | 38 | # verify that we're down to 50 keys 39 | assert Cachex.size(cache) == {:ok, 50} 40 | end 41 | 42 | # This test ensures that the cache eviction policy will evict any expired values 43 | # before removing the oldest. This is to make sure that we don't remove anything 44 | # without good reason. To verify this we add 50 keys with a TTL more recently 45 | # than 50 keys without one, and then cross the cache limit. We then validate that 46 | # all expired keys have been purged, and no other keys have been removed as the 47 | # purge takes the cache size back under the maximum size. 48 | test "evicting by removing expired keys" do 49 | # create a new test cache 50 | cache = TestUtils.create_cache() 51 | 52 | # retrieve the cache state 53 | state = Services.Overseer.retrieve(cache) 54 | 55 | # set 50 keys without ttl 56 | for x <- 1..50 do 57 | # set the key 58 | {:ok, true} = Cachex.put(state, x, x) 59 | 60 | # tick to make sure each has a new touch time 61 | :timer.sleep(1) 62 | end 63 | 64 | # set 50 more recent keys with a short TTL 65 | for x <- 51..100 do 66 | # set the key 67 | {:ok, true} = Cachex.put(state, x, x, expire: 1) 68 | 69 | # tick to make sure each has a new touch time 70 | :timer.sleep(1) 71 | end 72 | 73 | # retrieve the cache size 74 | size1 = Cachex.size!(cache) 75 | 76 | # verify the cache size 77 | assert(size1 == 100) 78 | 79 | # add a new key to the cache to trigger oversize 80 | {:ok, true} = Cachex.put(state, 101, 101) 81 | 82 | # trigger the cache pruning down to 100 records 83 | {:ok, true} = Cachex.prune(cache, 100, reclaim: 0.3, buffer: -1) 84 | 85 | # verify the cache shrinks to 51 entries, as purging the expired keys alone takes it under the limit 86 | assert Cachex.size(state) == {:ok, 51} 87 | 88 | # our validation step 89 | validate = fn range, expected -> 90 | # iterate all keys in the range 91 | for x <- range do 92 | # retrieve whether the key exists 93 | exists = Cachex."exists?!"(state, x) 94 | 95 | # verify whether it exists 96 | assert(exists == expected) 97 | end 98 | end 99 | 100 | # verify the first 50 keys are retained 101 | validate.(1..50, true) 102 | 103 | # verify the second 50 are removed 104 | validate.(51..100, false) 105 | 106 | # verify the last key added is retained 107 | validate.(101..101, true) 108 | end 109 | end 110 | -------------------------------------------------------------------------------- /test/cachex/actions/purge_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.PurgeTest do 2 | use Cachex.Test.Case 3 | 4 | # This test makes sure that we can manually purge expired records from the cache. 5 | # We attempt to purge before a key has expired and verify that it has not been 6 | # removed. We then wait until after the TTL has passed and ensure that it is 7 | # removed by the purge call. Finally we make sure to check the hook notifications.
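  # Behaviour sketch (hypothetical cache :demo): a purge reports the number
  # of expired entries it swept from the table.
  #
  #     {:ok, true} = Cachex.put(:demo, "a", 1, expire: 1)
  #     :timer.sleep(2)
  #     {:ok, 1} = Cachex.purge(:demo)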
8 | test "purging expired records in a cache" do 9 | # create a forwarding hook 10 | hook = ForwardHook.create() 11 | 12 | # create a test cache 13 | cache = TestUtils.create_cache(hooks: [hook]) 14 | 15 | # add a new cache entry 16 | {:ok, true} = Cachex.put(cache, "key", "value", expire: 25) 17 | 18 | # flush messages 19 | TestUtils.flush() 20 | 21 | # purge before the entry expires 22 | purge1 = Cachex.purge(cache) 23 | 24 | # verify that the purge removed nothing 25 | assert(purge1 == {:ok, 0}) 26 | 27 | # ensure we received a message 28 | assert_receive({{:purge, [[]]}, {:ok, 0}}) 29 | 30 | # wait until the entry has expired 31 | :timer.sleep(50) 32 | 33 | # purge after the entry expires 34 | purge2 = Cachex.purge(cache) 35 | 36 | # verify that the purge removed the key 37 | assert(purge2 == {:ok, 1}) 38 | 39 | # ensure we received a message 40 | assert_receive({{:purge, [[]]}, {:ok, 1}}) 41 | 42 | # check whether the key exists 43 | exists = Cachex.exists?(cache, "key") 44 | 45 | # verify that the key is gone 46 | assert(exists == {:ok, false}) 47 | end 48 | 49 | # This test verifies that the distributed router correctly controls 50 | # the purge/2 action in such a way that it can clean both a local 51 | # node as well as a remote node. We don't have to check functionality 52 | # of the entire action; just the actual routing of the action to the 53 | # target node(s) is of interest here. 54 | @tag distributed: true 55 | test "purging expired records in a cache cluster" do 56 | # create a new cache cluster for cleaning 57 | {cache, _nodes, _cluster} = TestUtils.create_cache_cluster(2) 58 | 59 | # we know that 1 & 2 hash to different nodes 60 | {:ok, true} = Cachex.put(cache, 1, 1, expire: 1) 61 | {:ok, true} = Cachex.put(cache, 2, 2, expire: 1) 62 | 63 | # retrieve the cache size, should be 2 64 | {:ok, 2} = Cachex.size(cache) 65 | 66 | # give it a few ms to expire... 67 | :timer.sleep(5) 68 | 69 | # purge just the local cache to start with 70 | purge1 = Cachex.purge(cache, local: true) 71 | purge2 = Cachex.purge(cache, local: false) 72 | 73 | # check the local removed 1 74 | assert(purge1 == {:ok, 1}) 75 | assert(purge2 == {:ok, 1}) 76 | end 77 | end 78 | -------------------------------------------------------------------------------- /test/cachex/actions/put_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.PutTest do 2 | use Cachex.Test.Case 3 | 4 | # This test verifies the addition of new entries to the cache. We ensure that 5 | # values can be added and can be given expiration values. We also test the case 6 | # in which a cache has a default expiration value, and the ability to override 7 | # this as necessary. 
8 | test "adding new values to the cache" do 9 | # create a forwarding hook 10 | hook = ForwardHook.create() 11 | 12 | # create a test cache 13 | cache1 = TestUtils.create_cache(hooks: [hook]) 14 | 15 | # create a test cache with a default ttl 16 | cache2 = 17 | TestUtils.create_cache( 18 | hooks: [hook], 19 | expiration: expiration(default: 10000) 20 | ) 21 | 22 | # set some values in the cache 23 | set1 = Cachex.put(cache1, 1, 1) 24 | set2 = Cachex.put(cache1, 2, 2, expire: 5000) 25 | set3 = Cachex.put(cache2, 1, 1) 26 | set4 = Cachex.put(cache2, 2, 2, expire: 5000) 27 | 28 | # ensure all set actions worked 29 | assert(set1 == {:ok, true}) 30 | assert(set2 == {:ok, true}) 31 | assert(set3 == {:ok, true}) 32 | assert(set4 == {:ok, true}) 33 | 34 | # verify the hooks were updated with the message 35 | assert_receive({{:put, [1, 1, []]}, ^set1}) 36 | assert_receive({{:put, [1, 1, []]}, ^set3}) 37 | assert_receive({{:put, [2, 2, [expire: 5000]]}, ^set2}) 38 | assert_receive({{:put, [2, 2, [expire: 5000]]}, ^set4}) 39 | 40 | # read back all values from the cache 41 | value1 = Cachex.get(cache1, 1) 42 | value2 = Cachex.get(cache1, 2) 43 | value3 = Cachex.get(cache2, 1) 44 | value4 = Cachex.get(cache2, 2) 45 | 46 | # verify all values exist 47 | assert(value1 == {:ok, 1}) 48 | assert(value2 == {:ok, 2}) 49 | assert(value3 == {:ok, 1}) 50 | assert(value4 == {:ok, 2}) 51 | 52 | # read back all key TTLs 53 | ttl1 = Cachex.ttl!(cache1, 1) 54 | ttl2 = Cachex.ttl!(cache1, 2) 55 | ttl3 = Cachex.ttl!(cache2, 1) 56 | ttl4 = Cachex.ttl!(cache2, 2) 57 | 58 | # the first should have no TTL 59 | assert(ttl1 == nil) 60 | 61 | # the second should have a TTL around 5s 62 | assert_in_delta(ttl2, 5000, 10) 63 | 64 | # the second should have a TTL around 10s 65 | assert_in_delta(ttl3, 10000, 10) 66 | 67 | # the fourth should have a TTL around 5s 68 | assert_in_delta(ttl4, 5000, 10) 69 | end 70 | 71 | # This test verifies that this action is correctly distributed across 72 | # a cache cluster, instead of just the local node. We're not concerned 73 | # about the actual behaviour here, only the routing of the action. 74 | @tag distributed: true 75 | test "adding new entries to a cache cluster" do 76 | # create a new cache cluster for cleaning 77 | {cache, _nodes, _cluster} = TestUtils.create_cache_cluster(2) 78 | 79 | # we know that 1 & 2 hash to different nodes 80 | {:ok, true} = Cachex.put(cache, 1, 1) 81 | {:ok, true} = Cachex.put(cache, 2, 2) 82 | 83 | # check the results of the calls across nodes 84 | size1 = Cachex.size(cache, local: true) 85 | size2 = Cachex.size(cache, local: false) 86 | 87 | # one local, two total 88 | assert(size1 == {:ok, 1}) 89 | assert(size2 == {:ok, 2}) 90 | end 91 | end 92 | -------------------------------------------------------------------------------- /test/cachex/actions/refresh_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.RefreshTest do 2 | use Cachex.Test.Case 3 | 4 | # This test verifies that we can reset the TTL time on a key. We check this 5 | # by settings keys with and without a TTL, waiting for some time to pass, and 6 | # then check and refresh the TTL. This ensures that the TTL is reset after we 7 | # refresh the key. 
8 | test "refreshing the TTL time on a key" do 9 | # create a forwarding hook 10 | hook = ForwardHook.create() 11 | 12 | # create a test cache 13 | cache = TestUtils.create_cache(hooks: [hook]) 14 | 15 | # add some keys to the cache 16 | {:ok, true} = Cachex.put(cache, 1, 1) 17 | {:ok, true} = Cachex.put(cache, 2, 2, expire: 1000) 18 | 19 | # clear messages 20 | TestUtils.flush() 21 | 22 | # wait for 25ms 23 | :timer.sleep(25) 24 | 25 | # retrieve all TTLs from the cache 26 | ttl1 = Cachex.ttl!(cache, 1) 27 | ttl2 = Cachex.ttl!(cache, 2) 28 | 29 | # the first TTL should be nil 30 | assert(ttl1 == nil) 31 | 32 | # the second TTL should be roughly 975 33 | assert_in_delta(ttl2, 970, 6) 34 | 35 | # refresh some TTLs 36 | refresh1 = Cachex.refresh(cache, 1) 37 | refresh2 = Cachex.refresh(cache, 2) 38 | refresh3 = Cachex.refresh(cache, 3) 39 | 40 | # the first two writes should succeed 41 | assert(refresh1 == {:ok, true}) 42 | assert(refresh2 == {:ok, true}) 43 | 44 | # the third shouldn't, as it's missing 45 | assert(refresh3 == {:ok, false}) 46 | 47 | # verify the hooks were updated with the message 48 | assert_receive({{:refresh, [1, []]}, ^refresh1}) 49 | assert_receive({{:refresh, [2, []]}, ^refresh2}) 50 | assert_receive({{:refresh, [3, []]}, ^refresh3}) 51 | 52 | # retrieve all TTLs from the cache 53 | ttl3 = Cachex.ttl!(cache, 1) 54 | ttl4 = Cachex.ttl!(cache, 2) 55 | 56 | # the first TTL should still be nil 57 | assert(ttl3 == nil) 58 | 59 | # the second TTL should be reset to 1000 60 | assert_in_delta(ttl4, 995, 10) 61 | end 62 | 63 | # This test verifies that this action is correctly distributed across 64 | # a cache cluster, instead of just the local node. We're not concerned 65 | # about the actual behaviour here, only the routing of the action. 66 | @tag distributed: true 67 | test "refreshing the TTL on a key in a cluster" do 68 | # create a new cache cluster 69 | {cache, _nodes, _cluster} = TestUtils.create_cache_cluster(2) 70 | 71 | # we know that 1 & 2 hash to different nodes 72 | {:ok, true} = Cachex.put(cache, 1, 1, expire: 500) 73 | {:ok, true} = Cachex.put(cache, 2, 2, expire: 500) 74 | 75 | # pause to reduce the TTL a little 76 | :timer.sleep(250) 77 | 78 | # check the expiration of each key in the cluster 79 | {:ok, expiration1} = Cachex.ttl(cache, 1) 80 | {:ok, expiration2} = Cachex.ttl(cache, 2) 81 | 82 | # check the delta changed 83 | assert(expiration1 < 300) 84 | assert(expiration2 < 300) 85 | 86 | # refresh the TTL on both keys 87 | refresh1 = Cachex.refresh(cache, 1) 88 | refresh2 = Cachex.refresh(cache, 2) 89 | 90 | # check the refresh results 91 | assert(refresh1 == {:ok, true}) 92 | assert(refresh2 == {:ok, true}) 93 | 94 | # check the expiration of each key in the cluster 95 | {:ok, expiration3} = Cachex.ttl(cache, 1) 96 | {:ok, expiration4} = Cachex.ttl(cache, 2) 97 | 98 | # check the time reset 99 | assert(expiration3 > 300) 100 | assert(expiration4 > 300) 101 | end 102 | end 103 | -------------------------------------------------------------------------------- /test/cachex/actions/restore_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.RestoreTest do 2 | use Cachex.Test.Case 3 | 4 | # This test covers the backing up of a cache to a local disk location. We set 5 | # a value, save to disk, then clear the cache. We then load the backup file to 6 | # verify that the values come back. 
We also verify that bad reads correctly pass 7 | # their errors straight back through to the calling function. 8 | test "restoring a cache backup from a local disk" do 9 | # locate the temporary directory 10 | tmp = System.tmp_dir!() 11 | 12 | # create a test cache 13 | cache = TestUtils.create_cache() 14 | start = now() 15 | 16 | # add some cache entries 17 | {:ok, true} = Cachex.put(cache, 1, 1) 18 | {:ok, true} = Cachex.put(cache, 2, 2, expire: 1) 19 | {:ok, true} = Cachex.put(cache, 3, 3, expire: 10_000) 20 | 21 | # create a local path to write to 22 | path = Path.join(tmp, TestUtils.gen_rand_bytes(8)) 23 | 24 | # save the cache to a local file 25 | result1 = Cachex.save(cache, path) 26 | result2 = Cachex.clear(cache) 27 | result3 = Cachex.size(cache) 28 | 29 | # verify the result and clearance 30 | assert(result1 == {:ok, true}) 31 | assert(result2 == {:ok, 3}) 32 | assert(result3 == {:ok, 0}) 33 | 34 | # wait a while before re-load 35 | :timer.sleep(50) 36 | 37 | # load the cache from the disk 38 | result4 = Cachex.restore(cache, path) 39 | result5 = Cachex.size(cache) 40 | result6 = Cachex.ttl!(cache, 3) 41 | 42 | # verify that the load was ok 43 | assert(result4 == {:ok, 2}) 44 | assert(result5 == {:ok, 2}) 45 | 46 | # verify TTL offsetting happens 47 | assert_in_delta(result6, 10_000 - (now() - start), 5) 48 | 49 | # reload a bad file from disk (should not be trusted) 50 | result7 = Cachex.restore(cache, tmp, trust: false) 51 | 52 | # verify the result failed 53 | assert(result7 == {:error, :unreachable_file}) 54 | end 55 | end 56 | -------------------------------------------------------------------------------- /test/cachex/actions/save_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.SaveTest do 2 | use Cachex.Test.Case 3 | 4 | # This test covers the backing up of a cache to a local disk location. We only 5 | # cover the happy path as there are separate tests covering issues with the 6 | # path provided to the save. We set a value, save to disk, then clear the cache. 7 | # We then load the backup file to verify that the values come back. 8 | test "saving a cache to a local disk" do 9 | # locate the temporary directory 10 | tmp = System.tmp_dir!() 11 | 12 | # create a test cache 13 | cache = TestUtils.create_cache() 14 | 15 | # add some cache entries 16 | {:ok, true} = Cachex.put(cache, 1, 1) 17 | 18 | # create a local path to write to 19 | path = Path.join(tmp, TestUtils.gen_rand_bytes(8)) 20 | 21 | # save the cache to a local file 22 | result1 = Cachex.save(cache, path) 23 | result2 = Cachex.clear(cache) 24 | result3 = Cachex.size(cache) 25 | 26 | # verify the result and clearance 27 | assert(result1 == {:ok, true}) 28 | assert(result2 == {:ok, 1}) 29 | assert(result3 == {:ok, 0}) 30 | 31 | # load the cache from the disk 32 | result4 = Cachex.restore(cache, path) 33 | result5 = Cachex.size(cache) 34 | 35 | # verify that the load was ok 36 | assert(result4 == {:ok, 1}) 37 | assert(result5 == {:ok, 1}) 38 | end 39 | 40 | # This test covers the backing up of a cache cluster to a local disk location. We 41 | # basically copy the local example, except that we run it against a cluster with 42 | # several nodes - the entire cluster should be backed up to the local disk. 
43 | @tag distributed: true 44 | test "backing up a cache cluster to a local disk" do 45 | # locate the temporary directory 46 | tmp = System.tmp_dir!() 47 | 48 | # create a new cache cluster for cleaning 49 | {cache, _nodes, _cluster} = TestUtils.create_cache_cluster(2) 50 | 51 | # we know that 1 & 2 hash to different nodes 52 | {:ok, true} = Cachex.put(cache, 1, 1) 53 | {:ok, true} = Cachex.put(cache, 2, 2) 54 | 55 | # create a local path to write to 56 | path1 = Path.join(tmp, TestUtils.gen_rand_bytes(8)) 57 | path2 = Path.join(tmp, TestUtils.gen_rand_bytes(8)) 58 | 59 | # save the cache to a local file for local/remote 60 | save1 = Cachex.save(cache, path1, local: true) 61 | save2 = Cachex.save(cache, path2, local: false) 62 | 63 | # verify the save results 64 | assert(save1 == {:ok, true}) 65 | assert(save2 == {:ok, true}) 66 | 67 | # clear the cache to remove all 68 | {:ok, 2} = Cachex.clear(cache) 69 | 70 | # load the local cache from the disk 71 | load1 = Cachex.restore(cache, path1) 72 | size1 = Cachex.size(cache) 73 | 74 | # verify that the load was ok 75 | assert(load1 == {:ok, 1}) 76 | assert(size1 == {:ok, 1}) 77 | 78 | # clear the cache again 79 | {:ok, 1} = Cachex.clear(cache) 80 | 81 | # load the full cache from the disk 82 | load2 = Cachex.restore(cache, path2) 83 | size2 = Cachex.size(cache) 84 | 85 | # verify that the load was ok 86 | assert(load2 == {:ok, 2}) 87 | assert(size2 == {:ok, 2}) 88 | end 89 | 90 | test "returning an error on invalid output path" do 91 | # create a test cache 92 | cache = TestUtils.create_cache() 93 | 94 | # verify that saving to the invalid path gives an error 95 | assert Cachex.save(cache, "") == {:error, :unreachable_file} 96 | end 97 | end 98 | -------------------------------------------------------------------------------- /test/cachex/actions/size_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.SizeTest do 2 | use Cachex.Test.Case 3 | 4 | # This test verifies the size of a cache, by checking both with 5 | # and without expired records. This is controlled by `:expired`. 6 | test "checking the total size of a cache" do 7 | # create a test cache 8 | cache = TestUtils.create_cache() 9 | 10 | # retrieve the cache size 11 | result1 = Cachex.size(cache) 12 | 13 | # it should be empty 14 | assert(result1 == {:ok, 0}) 15 | 16 | # add some cache entries 17 | {:ok, true} = Cachex.put(cache, 1, 1) 18 | {:ok, true} = Cachex.put(cache, 2, 2, expire: 1) 19 | 20 | # wait 2 ms to expire 21 | :timer.sleep(2) 22 | 23 | # retrieve the cache size 24 | result2 = Cachex.size(cache) 25 | result3 = Cachex.size(cache, expired: false) 26 | 27 | # expired entries count by default; excluding them leaves one 28 | assert(result2 == {:ok, 2}) 29 | assert(result3 == {:ok, 1}) 30 | end 31 | 32 | # This test verifies that the distributed router correctly controls 33 | # the size/2 action in such a way that it can count entries on both a local 34 | # node as well as a remote node. We don't have to check functionality 35 | # of the entire action; just the actual routing of the action to the 36 | # target node(s) is of interest here.
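  # Routing sketch (hypothetical distributed cache :demo): the :local option
  # counts entries on this node only, or across the whole cluster.
  #
  #     {:ok, local_count} = Cachex.size(:demo, local: true)
  #     {:ok, total_count} = Cachex.size(:demo, local: false)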
37 | @tag distributed: true 38 | test "checking the total size of a cache cluster" do 39 | # create a new cache cluster for cleaning 40 | {cache, _nodes, _cluster} = TestUtils.create_cache_cluster(2) 41 | 42 | # we know that 1 & 2 hash to different nodes 43 | {:ok, true} = Cachex.put(cache, 1, 1) 44 | {:ok, true} = Cachex.put(cache, 2, 2) 45 | 46 | # retrieve the cache size, should be 2 47 | size1 = Cachex.size(cache) 48 | 49 | # check the size of the cache 50 | assert(size1 == {:ok, 2}) 51 | 52 | # clear just the local cache to start with 53 | {:ok, 1} = Cachex.clear(cache, local: true) 54 | 55 | # fetch the size of local and remote 56 | size2 = Cachex.size(cache, local: true) 57 | size3 = Cachex.size(cache, local: false) 58 | 59 | # check that the local is 0, remote is 1 60 | assert(size2 == {:ok, 0}) 61 | assert(size3 == {:ok, 1}) 62 | 63 | # clear the entire cluster at this point 64 | {:ok, 1} = Cachex.clear(cache) 65 | 66 | # fetch the size of local and remote (again) 67 | size4 = Cachex.size(cache, local: true) 68 | size5 = Cachex.size(cache, local: false) 69 | 70 | # check that both are now 0 71 | assert(size4 == {:ok, 0}) 72 | assert(size5 == {:ok, 0}) 73 | end 74 | end 75 | -------------------------------------------------------------------------------- /test/cachex/actions/stream_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.StreamTest do 2 | use Cachex.Test.Case 3 | 4 | # This test ensures that a default cache stream will stream cache entries 5 | # back in record form back to the calling process. This test just makes sure 6 | # that the Stream correctly forms these Tuples using the default structure. 7 | test "streaming cache entries" do 8 | # create a test cache 9 | cache = TestUtils.create_cache() 10 | 11 | # add some keys to the cache 12 | {:ok, true} = Cachex.put(cache, "key1", "value1") 13 | {:ok, true} = Cachex.put(cache, "key2", "value2") 14 | {:ok, true} = Cachex.put(cache, "key3", "value3") 15 | 16 | # grab the raw versions of each record 17 | {:ok, entry1} = Cachex.inspect(cache, {:entry, "key1"}) 18 | {:ok, entry2} = Cachex.inspect(cache, {:entry, "key2"}) 19 | {:ok, entry3} = Cachex.inspect(cache, {:entry, "key3"}) 20 | 21 | # create a cache stream 22 | {:ok, stream} = Cachex.stream(cache) 23 | 24 | # consume the stream 25 | result = Enum.sort(stream) 26 | 27 | # verify the results are the ordered entries 28 | assert(result == [entry1, entry2, entry3]) 29 | end 30 | 31 | # This test covers the use case of custom match patterns, by testing various 32 | # pattern combinations. We stream custom record formats, as well as a single 33 | # field in order to test this properly. 
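# For reference, a custom output pattern is built and consumed roughly as
# follows (cache name illustrative):
#
#     query = Cachex.Query.build(
#       where: Cachex.Query.unexpired(),
#       output: {:key, :value}
#     )
#
#     {:ok, stream} = Cachex.stream(:my_cache, query)
#     pairs = Enum.to_list(stream)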
34 | test "streaming custom patterns" do 35 | # create a test cache 36 | cache = TestUtils.create_cache() 37 | 38 | # add some keys to the cache 39 | {:ok, true} = Cachex.put(cache, "key1", "value1") 40 | {:ok, true} = Cachex.put(cache, "key2", "value2") 41 | {:ok, true} = Cachex.put(cache, "key3", "value3") 42 | 43 | # create our query filter 44 | filter = Cachex.Query.unexpired() 45 | 46 | # create two test queries 47 | query1 = Cachex.Query.build(where: filter, output: {:key, :value}) 48 | query2 = Cachex.Query.build(where: filter, output: :key) 49 | 50 | # create cache streams 51 | {:ok, stream1} = Cachex.stream(cache, query1) 52 | {:ok, stream2} = Cachex.stream(cache, query2) 53 | 54 | # consume the streams 55 | result1 = Enum.sort(stream1) 56 | result2 = Enum.sort(stream2) 57 | 58 | # verify the first results 59 | assert( 60 | result1 == [ 61 | {"key1", "value1"}, 62 | {"key2", "value2"}, 63 | {"key3", "value3"} 64 | ] 65 | ) 66 | 67 | # verify the second results 68 | assert(result2 == ["key1", "key2", "key3"]) 69 | end 70 | 71 | # If an invalid match spec is provided in the of option, an error is returned. 72 | # We just ensure that this breaks accordingly and returns an invalid match error. 73 | test "streaming invalid patterns" do 74 | # create a test cache 75 | cache = TestUtils.create_cache() 76 | 77 | # create cache stream 78 | result = Cachex.stream(cache, {:invalid}) 79 | 80 | # verify the stream fails 81 | assert(result == {:error, :invalid_match}) 82 | end 83 | 84 | # This test verifies that this action is correctly disabled in a cluster, 85 | # as it's currently unsupported - so just check for disabled flags. 86 | @tag distributed: true 87 | test "streaming is disabled in a cache cluster" do 88 | # create a new cache cluster for cleaning 89 | {cache, _nodes, _cluster} = TestUtils.create_cache_cluster(2) 90 | 91 | # we shouldn't be able to stream a cache on multiple nodes 92 | assert Cachex.stream(cache) == {:error, :non_distributed} 93 | end 94 | 95 | # We can force local: true to get a stream against the local node 96 | @tag distributed: true 97 | test "streaming is enabled in a cache cluster with local: true" do 98 | # create a new cache cluster for cleaning 99 | {cache, _nodes, _cluster} = TestUtils.create_cache_cluster(2) 100 | 101 | # build a generic query to use later 102 | query = Cachex.Query.build() 103 | 104 | # create a cache stream with the local flag 105 | stream = Cachex.stream(cache, query, local: true) 106 | 107 | # we should be able to stream the local node 108 | assert stream != {:error, :non_distributed} 109 | end 110 | end 111 | -------------------------------------------------------------------------------- /test/cachex/actions/take_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.TakeTest do 2 | use Cachex.Test.Case 3 | 4 | # This test verifies that we can take keys from the cache. If a key has expired, 5 | # the value is not returned and the hooks are updated with an eviction. If the 6 | # key is missing, we return a message stating as such. 
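# Conceptually `take/2` behaves like a `get/2` and a `del/2` rolled into a
# single call; a small sketch (cache name illustrative):
#
#     {:ok, true} = Cachex.put(:my_cache, "key", "value")
#     {:ok, "value"} = Cachex.take(:my_cache, "key")
#     {:ok, false} = Cachex.exists?(:my_cache, "key")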
7 | test "taking keys from a cache" do 8 | # create a forwarding hook 9 | hook = ForwardHook.create() 10 | 11 | # create a test cache 12 | cache = TestUtils.create_cache(hooks: [hook]) 13 | 14 | # set some keys in the cache 15 | {:ok, true} = Cachex.put(cache, 1, 1) 16 | {:ok, true} = Cachex.put(cache, 2, 2, expire: 1) 17 | 18 | # wait for the TTL to pass 19 | :timer.sleep(2) 20 | 21 | # flush all existing messages 22 | TestUtils.flush() 23 | 24 | # take the first and second key 25 | result1 = Cachex.take(cache, 1) 26 | result2 = Cachex.take(cache, 2) 27 | 28 | # take a missing key 29 | result3 = Cachex.take(cache, 3) 30 | 31 | # verify the first key is retrieved 32 | assert(result1 == {:ok, 1}) 33 | 34 | # verify the second and third keys are missing 35 | assert(result2 == {:ok, nil}) 36 | assert(result3 == {:ok, nil}) 37 | 38 | # assert we receive valid notifications 39 | assert_receive({{:take, [1, []]}, ^result1}) 40 | assert_receive({{:take, [2, []]}, ^result2}) 41 | assert_receive({{:take, [3, []]}, ^result3}) 42 | 43 | # check we received valid purge actions for the TTL 44 | assert_receive({{:purge, [[]]}, {:ok, 1}}) 45 | 46 | # ensure that the keys no longer exist in the cache 47 | exists1 = Cachex.exists?(cache, 1) 48 | exists2 = Cachex.exists?(cache, 2) 49 | exists3 = Cachex.exists?(cache, 3) 50 | 51 | # none should exist 52 | assert(exists1 == {:ok, false}) 53 | assert(exists2 == {:ok, false}) 54 | assert(exists3 == {:ok, false}) 55 | end 56 | 57 | # This test verifies that this action is correctly distributed across 58 | # a cache cluster, instead of just the local node. We're not concerned 59 | # about the actual behaviour here, only the routing of the action. 60 | @tag distributed: true 61 | test "taking entries from a cache cluster" do 62 | # create a new cache cluster for cleaning 63 | {cache, _nodes, _cluster} = TestUtils.create_cache_cluster(2) 64 | 65 | # we know that 1 & 2 hash to different nodes 66 | {:ok, true} = Cachex.put(cache, 1, 1) 67 | {:ok, true} = Cachex.put(cache, 2, 2) 68 | 69 | # check the results of the calls across nodes 70 | size1 = Cachex.size(cache, local: true) 71 | size2 = Cachex.size(cache, local: false) 72 | 73 | # one local, two total 74 | assert(size1 == {:ok, 1}) 75 | assert(size2 == {:ok, 2}) 76 | 77 | # take each item from the cache cluster 78 | take1 = Cachex.take(cache, 1) 79 | take2 = Cachex.take(cache, 2) 80 | 81 | # check both records are taken 82 | assert(take1 == {:ok, 1}) 83 | assert(take2 == {:ok, 2}) 84 | 85 | # check the results of the calls across nodes 86 | size3 = Cachex.size(cache, local: true) 87 | size4 = Cachex.size(cache, local: false) 88 | 89 | # no records are left 90 | assert(size3 == {:ok, 0}) 91 | assert(size4 == {:ok, 0}) 92 | end 93 | end 94 | -------------------------------------------------------------------------------- /test/cachex/actions/ttl_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.TtlTest do 2 | use Cachex.Test.Case 3 | 4 | # This test verifies the responses of checking TTLs inside the cache. We make 5 | # sure that TTLs are calculated correctly based on nil and set TTLs. If the 6 | # key is missing, we return a tuple signalling such. 
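# A quick sketch of the shapes we expect back (values illustrative):
#
#     {:ok, true} = Cachex.put(:my_cache, "a", 1)
#     {:ok, true} = Cachex.put(:my_cache, "b", 2, expire: :timer.seconds(10))
#
#     {:ok, nil} = Cachex.ttl(:my_cache, "a")   # no expiration set
#     {:ok, _ms} = Cachex.ttl(:my_cache, "b")   # remaining time in milliseconds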
7 | test "retrieving a key TTL" do 8 | # create a test cache 9 | cache = TestUtils.create_cache() 10 | 11 | # set several keys in the cache 12 | {:ok, true} = Cachex.put(cache, 1, 1) 13 | {:ok, true} = Cachex.put(cache, 2, 2, expire: 10000) 14 | 15 | # verify the TTL of both keys 16 | ttl1 = Cachex.ttl(cache, 1) 17 | ttl2 = Cachex.ttl!(cache, 2) 18 | 19 | # verify the TTL of a missing key 20 | ttl3 = Cachex.ttl(cache, 3) 21 | 22 | # the first TTL should be nil 23 | assert(ttl1 == {:ok, nil}) 24 | 25 | # the second should be close to 10s 26 | assert_in_delta(ttl2, 10000, 10) 27 | 28 | # the third should return a missing value 29 | assert(ttl3 == {:ok, nil}) 30 | end 31 | 32 | # This test verifies that this action is correctly distributed across 33 | # a cache cluster, instead of just the local node. We're not concerned 34 | # about the actual behaviour here, only the routing of the action. 35 | @tag distributed: true 36 | test "retrieving a key TTL in a cluster" do 37 | # create a new cache cluster 38 | {cache, _nodes, _cluster} = TestUtils.create_cache_cluster(2) 39 | 40 | # we know that 1 & 2 hash to different nodes 41 | {:ok, true} = Cachex.put(cache, 1, 1, expire: 500) 42 | {:ok, true} = Cachex.put(cache, 2, 2, expire: 500) 43 | 44 | # check the expiration of each key in the cluster 45 | {:ok, expiration1} = Cachex.ttl(cache, 1) 46 | {:ok, expiration2} = Cachex.ttl(cache, 2) 47 | 48 | # check the delta changed 49 | assert(expiration1 > 450) 50 | assert(expiration2 > 450) 51 | end 52 | end 53 | -------------------------------------------------------------------------------- /test/cachex/actions/update_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.UpdateTest do 2 | use Cachex.Test.Case 3 | 4 | # This test just ensures that we can update the value associated with a key 5 | # when the value already exists inside the cache. We make sure that any TTL 6 | # associated with the key remains unchanged (as the record is being modified, 7 | # not overwritten). 8 | test "updates against an existing key" do 9 | # create a test cache 10 | cache = TestUtils.create_cache() 11 | 12 | # set a value with no TTL inside the cache 13 | {:ok, true} = Cachex.put(cache, 1, 1) 14 | 15 | # set a value with a TTL in the cache 16 | {:ok, true} = Cachex.put(cache, 2, 2, expire: 10000) 17 | 18 | # attempt to update both keys 19 | update1 = Cachex.update(cache, 1, 3) 20 | update2 = Cachex.update(cache, 2, 3) 21 | 22 | # ensure both succeeded 23 | assert(update1 == {:ok, true}) 24 | assert(update2 == {:ok, true}) 25 | 26 | # retrieve the modified keys 27 | value1 = Cachex.get(cache, 1) 28 | value2 = Cachex.get(cache, 2) 29 | 30 | # verify the updates 31 | assert(value1 == {:ok, 3}) 32 | assert(value2 == {:ok, 3}) 33 | 34 | # pull back the TTLs 35 | ttl1 = Cachex.ttl!(cache, 1) 36 | ttl2 = Cachex.ttl!(cache, 2) 37 | 38 | # the first TTL should still be unset 39 | assert(ttl1 == nil) 40 | 41 | # the second should still be set 42 | assert_in_delta(ttl2, 10000, 10) 43 | end 44 | 45 | # This test just verifies that we successfully return an error when we try to 46 | # update a value which does not exist inside the cache. 
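# In sketch form (cache name illustrative), an update only succeeds when the
# key already exists, so the boolean in the result tells the two cases apart:
#
#     {:ok, false} = Cachex.update(:my_cache, "missing", 1)
#
#     {:ok, true} = Cachex.put(:my_cache, "present", 1)
#     {:ok, true} = Cachex.update(:my_cache, "present", 2)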
47 | test "updates against a missing key" do 48 | # create a test cache 49 | cache = TestUtils.create_cache() 50 | 51 | # attempt to update a missing key in the cache 52 | update1 = Cachex.update(cache, 1, 3) 53 | update2 = Cachex.update(cache, 2, 3) 54 | 55 | # ensure both failed 56 | assert(update1 == {:ok, false}) 57 | assert(update2 == {:ok, false}) 58 | end 59 | 60 | # This test verifies that this action is correctly distributed across 61 | # a cache cluster, instead of just the local node. We're not concerned 62 | # about the actual behaviour here, only the routing of the action. 63 | @tag distributed: true 64 | test "updating a key in a cache cluster" do 65 | # create a new cache cluster 66 | {cache, _nodes, _cluster} = TestUtils.create_cache_cluster(2) 67 | 68 | # we know that 1 & 2 hash to different nodes 69 | {:ok, true} = Cachex.put(cache, 1, 1, expire: 500) 70 | {:ok, true} = Cachex.put(cache, 2, 2, expire: 500) 71 | 72 | # run updates against both keys 73 | {:ok, true} = Cachex.update(cache, 1, -1) 74 | {:ok, true} = Cachex.update(cache, 2, -2) 75 | 76 | # try to retrieve both of the set keys 77 | updated1 = Cachex.get(cache, 1) 78 | updated2 = Cachex.get(cache, 2) 79 | 80 | # check the update occurred 81 | assert(updated1 == {:ok, -1}) 82 | assert(updated2 == {:ok, -2}) 83 | end 84 | end 85 | -------------------------------------------------------------------------------- /test/cachex/actions/warm_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Actions.WarmTest do 2 | use Cachex.Test.Case 3 | 4 | # This test covers the basic case of manually rewarming a cache, 5 | # after manually clearing it but checking again before the schedule. 6 | test "manually warming a cache" do 7 | # create a test warmer to pass to the cache 8 | TestUtils.create_warmer(:manual_warmer1, fn _ -> 9 | {:ok, [{1, 1}]} 10 | end) 11 | 12 | # create a cache instance with a warmer 13 | cache = 14 | TestUtils.create_cache( 15 | warmers: [ 16 | warmer( 17 | module: :manual_warmer1, 18 | name: :manual_warmer1 19 | ) 20 | ] 21 | ) 22 | 23 | # check that the key was warmed 24 | assert Cachex.get!(cache, 1) == 1 25 | 26 | # clean out our cache entries 27 | assert Cachex.clear!(cache) == 1 28 | assert Cachex.get!(cache, 1) == nil 29 | 30 | # manually trigger a cache warming of all modules 31 | assert Cachex.warm(cache) == {:ok, [:manual_warmer1]} 32 | 33 | # wait for the warming 34 | :timer.sleep(50) 35 | 36 | # check that our key has been put back 37 | assert Cachex.get!(cache, 1) == 1 38 | end 39 | 40 | test "manually warming a cache and awaiting results" do 41 | # create a test warmer to pass to the cache 42 | TestUtils.create_warmer(:manual_warmer2, fn _ -> 43 | {:ok, [{1, 1}]} 44 | end) 45 | 46 | # create a cache instance with a warmer 47 | cache = 48 | TestUtils.create_cache( 49 | warmers: [ 50 | warmer( 51 | module: :manual_warmer2, 52 | name: :manual_warmer2 53 | ) 54 | ] 55 | ) 56 | 57 | # check that the key was warmed 58 | assert Cachex.get!(cache, 1) == 1 59 | 60 | # clean out our cache entries 61 | assert Cachex.clear!(cache) == 1 62 | assert Cachex.get!(cache, 1) == nil 63 | 64 | # manually trigger a cache warming of all modules 65 | assert Cachex.warm(cache, wait: true) == {:ok, [:manual_warmer2]} 66 | assert Cachex.get!(cache, 1) == 1 67 | end 68 | 69 | # This test covers the case where you manually specify a list of modules 70 | # to use for the warming. 
It also covers cases where no modules match the 71 | # provided list, and therefore no cache warming actually executes. 72 | test "manually warming a cache using specific warmers" do 73 | # create a test warmer to pass to the cache 74 | TestUtils.create_warmer(:manual_warmer3, fn _ -> 75 | {:ok, [{1, 1}]} 76 | end) 77 | 78 | # create a cache instance with a warmer 79 | cache = 80 | TestUtils.create_cache( 81 | warmers: [ 82 | warmer( 83 | module: :manual_warmer3, 84 | name: :manual_warmer3 85 | ) 86 | ] 87 | ) 88 | 89 | # check that the key was warmed 90 | assert Cachex.get!(cache, 1) == 1 91 | 92 | # clean out our cache entries 93 | assert Cachex.clear!(cache) == 1 94 | assert Cachex.get!(cache, 1) == nil 95 | 96 | # manually trigger a cache warming 97 | assert Cachex.warm(cache, only: []) == {:ok, []} 98 | 99 | # wait for the warming 100 | :timer.sleep(50) 101 | 102 | # check that our key was never put back 103 | assert Cachex.get!(cache, 1) == nil 104 | 105 | # manually trigger a cache warming, specifying our module 106 | assert Cachex.warm(cache, only: [:manual_warmer3]) == 107 | {:ok, [:manual_warmer3]} 108 | 109 | # wait for the warming 110 | :timer.sleep(50) 111 | 112 | # check that our key has been put back 113 | assert Cachex.get!(cache, 1) == 1 114 | end 115 | end 116 | -------------------------------------------------------------------------------- /test/cachex/error_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.ErrorTest do 2 | use Cachex.Test.Case 3 | 4 | # This test ensures the integrity of all Error functions and forms. We iterate 5 | # all errors and check that the long form of the error is as expected. 6 | test "error generation and mapping" do 7 | # define all recognised errors 8 | errors = [ 9 | cross_slot: "Target keys do not live on the same node", 10 | invalid_command: "Invalid command definition provided", 11 | invalid_expiration: "Invalid expiration definition provided", 12 | invalid_hook: "Invalid hook definition provided", 13 | invalid_limit: "Invalid limit fields provided", 14 | invalid_match: "Invalid match specification provided", 15 | invalid_name: "Invalid cache name provided", 16 | invalid_option: "Invalid option syntax provided", 17 | invalid_pairs: "Invalid insertion pairs provided", 18 | invalid_router: "Invalid router definition provided", 19 | invalid_warmer: "Invalid warmer definition provided", 20 | janitor_disabled: "Specified janitor process running", 21 | no_cache: "Specified cache not running", 22 | non_numeric_value: "Attempted arithmetic operations on a non-numeric value", 23 | non_distributed: "Attempted to use a local function across nodes", 24 | not_started: "Cache table not active, have you started the Cachex application?", 25 | stats_disabled: "Stats are not enabled for the specified cache", 26 | unreachable_file: "Unable to access provided file path" 27 | ] 28 | 29 | # validate all error pairs 30 | for {err, msg} <- errors do 31 | # retrieve the long form 32 | long_form = Cachex.Error.long_form(err) 33 | 34 | # verify the message returned 35 | assert(long_form == msg) 36 | end 37 | 38 | # make sure we're not missing any error definitions 39 | assert(length(Cachex.Error.known()) == length(errors)) 40 | end 41 | 42 | # This just ensures that unrecognised errors are simply 43 | # echoed back without change, in case of unknown errors. 
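# Sketched expectation: recognised atoms map to a long form message, while
# anything unrecognised is handed straight back:
#
#     iex> Cachex.Error.long_form(:no_cache)
#     "Specified cache not running"
#
#     iex> Cachex.Error.long_form(:some_unknown_error)
#     :some_unknown_error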
44 | test "unknown error echoing" do 45 | assert(Cachex.Error.long_form(:nodedown) == :nodedown) 46 | end 47 | 48 | # This test just validates the default error message against an Error. 49 | # There is nothing more to validate beyond the returned message. 50 | test "raising a default error" do 51 | raise Cachex.Error 52 | rescue 53 | e -> 54 | # capture the error message 55 | msg = Exception.message(e) 56 | 57 | # ensure the message is valid 58 | assert(msg == "Error during cache action") 59 | end 60 | end 61 | -------------------------------------------------------------------------------- /test/cachex/hook_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.HookTest do 2 | use Cachex.Test.Case 3 | 4 | setup_all do 5 | ForwardHook.bind( 6 | concat_hook_1: [type: :pre], 7 | concat_hook_2: [type: :post], 8 | concat_hook_3: [type: :service] 9 | ) 10 | 11 | :ok 12 | end 13 | 14 | test "concatenating hooks in a cache" do 15 | # create a set of 3 hooks to test with 16 | hook1 = ForwardHook.create(:concat_hook_1) 17 | hook2 = ForwardHook.create(:concat_hook_2) 18 | hook3 = ForwardHook.create(:concat_hook_3) 19 | 20 | # create a cache with our hooks 21 | cache = 22 | TestUtils.create_cache( 23 | hooks: [ 24 | hook1, 25 | hook2, 26 | hook3 27 | ] 28 | ) 29 | 30 | # turn the cache into a cache state 31 | cache1 = Services.Overseer.retrieve(cache) 32 | 33 | # compare the order and all hooks listed 34 | assert [ 35 | {:hook, :concat_hook_3, _, _}, 36 | {:hook, :concat_hook_2, _, _}, 37 | {:hook, :concat_hook_1, _, _} 38 | ] = Cachex.Hook.concat(cache1) 39 | end 40 | 41 | test "locating hooks in a cache" do 42 | # create a set of 3 hooks to test with 43 | hook1 = ForwardHook.create(:concat_hook_1) 44 | hook2 = ForwardHook.create(:concat_hook_2) 45 | hook3 = ForwardHook.create(:concat_hook_3) 46 | 47 | # create a cache with our hooks 48 | cache = 49 | TestUtils.create_cache( 50 | hooks: [ 51 | hook1, 52 | hook2, 53 | hook3 54 | ] 55 | ) 56 | 57 | # turn the cache into a cache state 58 | cache1 = Services.Overseer.retrieve(cache) 59 | 60 | # locate each of the hooks (as they're different types) 61 | locate1 = Cachex.Hook.locate(cache1, :concat_hook_1) 62 | locate2 = Cachex.Hook.locate(cache1, :concat_hook_1, :pre) 63 | locate3 = Cachex.Hook.locate(cache1, :concat_hook_2, :post) 64 | locate4 = Cachex.Hook.locate(cache1, :concat_hook_3, :service) 65 | 66 | # verify they all come back just as expected 67 | assert {:hook, :concat_hook_1, _, _} = locate1 68 | assert {:hook, :concat_hook_1, _, _} = locate2 69 | assert {:hook, :concat_hook_2, _, _} = locate3 70 | assert {:hook, :concat_hook_3, _, _} = locate4 71 | 72 | # check that locating with the wrong type finds nothing 73 | assert Cachex.Hook.locate(cache1, :concat_hook_1, :post) == nil 74 | end 75 | 76 | # This test covers whether $callers is correctly propagated through to hooks 77 | # when triggered by a parent process. We validate this by sending the value 78 | # back through to the test process to validate it contains the process identifier. 
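# For context: `$callers` is the process dictionary entry Elixir itself uses
# to track the chain of processes behind a call, so a hook can read it back
# directly (a sketch, not part of this suite):
#
#     callers = Process.get(:"$callers")
#
# The test below asserts that the test process appears in that list when the
# hook fires.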
79 | test "accessing $callers in hooks" do
80 | # create a test cache and execution hook
81 | cache = TestUtils.create_cache(hooks: [ExecuteHook.create()])
82 |
83 | # find the hook (with the populated runtime process identifier)
84 | cache(hooks: hooks(post: [hook])) = Services.Overseer.retrieve(cache)
85 |
86 | # notify and fetch callers in order to send them back to this parent process
87 | Services.Informant.notify([hook], {:exec, fn -> Process.get(:"$callers") end}, nil)
88 |
89 | # process chain
90 | parent = self()
91 |
92 | # check callers are just us
93 | assert_receive([^parent])
94 | end
95 | end
96 | -------------------------------------------------------------------------------- /test/cachex/limit/accessed_test.exs: --------------------------------------------------------------------------------
1 | defmodule Cachex.Limit.AccessedTest do
2 | use Cachex.Test.Case
3 |
4 | # Basic coverage of the access hook to ensure that modification
5 | # times are refreshed on a read call for a key. More specific
6 | # tests will be added as new test cases and issues arise.
7 | test "updating modification times on read access" do
8 | # create a test cache using the LRW access hook to refresh modification
9 | cache = TestUtils.create_cache(hooks: [hook(module: Cachex.Limit.Accessed)])
10 |
11 | # create a new key to check against
12 | {:ok, true} = Cachex.put(cache, "key", 1)
13 |
14 | # fetch the raw modification time of the cache entry
15 | entry(modified: modified1) = Cachex.inspect!(cache, {:entry, "key"})
16 |
17 | # wait a while...
18 | :timer.sleep(50)
19 |
20 | # fetch back the key again
21 | {:ok, 1} = Cachex.get(cache, "key")
22 |
23 | # the modification time should update...
24 | TestUtils.poll(250, true, fn ->
25 | cache
26 | |> Cachex.inspect!({:entry, "key"})
27 | |> entry(:modified) != modified1
28 | end)
29 | end
30 | end
31 | -------------------------------------------------------------------------------- /test/cachex/limit/evented_test.exs: --------------------------------------------------------------------------------
1 | defmodule Cachex.Limit.EventedTest do
2 | use Cachex.Test.Case
3 |
4 | # This test just ensures that there are no artificial limits placed on a cache
5 | # by adding 5000 keys and making sure they're not evicted. It simply serves as
6 | # validation that there are no bad defaults set anywhere.
7 | test "evicting with no upper bound" do
8 | # create a cache with no max size
9 | cache = TestUtils.create_cache()
10 |
11 | # retrieve the cache state
12 | state = Services.Overseer.retrieve(cache)
13 |
14 | # add 5000 keys to the cache
15 | for x <- 1..5000 do
16 | {:ok, true} = Cachex.put(state, x, x)
17 | end
18 |
19 | # retrieve the cache size
20 | count = Cachex.size!(state)
21 |
22 | # make sure all keys are there
23 | assert(count == 5000)
24 | end
25 |
26 | # This test ensures that a cache will cap caches at a given limit by trimming
27 | # caches by a given size once they cross a given threshold. We ensure that the
28 | # size is trimmed properly and the oldest entries are evicted first, with the
29 | # newest entries kept in the cache. Finally we make sure that all hooks are
30 | # notified of the evictions that occurred.
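# As a configuration sketch (the numbers here are illustrative), the policy is
# attached as a regular hook with a size limit plus pruning options:
#
#     hook(
#       module: Cachex.Limit.Evented,
#       args: {500, [buffer: 100, reclaim: 0.5]}
#     )
#
# Here `reclaim: 0.5` means roughly half of the entries (oldest first) are
# evicted whenever the limit is crossed, as the test below demonstrates.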
31 | test "evicting when a cache crosses a limit" do 32 | # create a forwarding hook 33 | hook = ForwardHook.create() 34 | 35 | # create a cache with a max size 36 | cache = 37 | TestUtils.create_cache( 38 | hooks: [ 39 | hook, 40 | hook( 41 | module: Cachex.Limit.Evented, 42 | args: { 43 | 100, 44 | [ 45 | buffer: 25, 46 | reclaim: 0.75 47 | ] 48 | } 49 | ) 50 | ] 51 | ) 52 | 53 | # retrieve the cache state 54 | state = Services.Overseer.retrieve(cache) 55 | 56 | # add 100 keys to the cache 57 | for x <- 1..100 do 58 | # add the entry to the cache 59 | {:ok, true} = Cachex.put(state, x, x) 60 | 61 | # tick to make sure each has a new touch time 62 | :timer.sleep(1) 63 | end 64 | 65 | # retrieve the cache size 66 | size1 = Cachex.size!(cache) 67 | 68 | # verify the cache size 69 | assert(size1 == 100) 70 | 71 | # flush all existing hook events 72 | TestUtils.flush() 73 | 74 | # run a no-op fetch to verify no change 75 | {:ignore, nil} = 76 | Cachex.fetch(state, 101, fn -> 77 | {:ignore, nil} 78 | end) 79 | 80 | # retrieve the cache size 81 | size2 = Cachex.size!(cache) 82 | 83 | # verify the cache size 84 | assert(size2 == 100) 85 | 86 | # add a new key to the cache to trigger evictions 87 | {:ok, true} = Cachex.put(state, 101, 101) 88 | 89 | # verify the cache shrinks to 25% 90 | TestUtils.poll(250, 25, fn -> 91 | Cachex.size!(state) 92 | end) 93 | 94 | # our validation step 95 | validate = fn range, expected -> 96 | # iterate all keys in the range 97 | for x <- range do 98 | # retrieve whether the key exists 99 | exists = Cachex."exists?!"(state, x) 100 | 101 | # verify whether it exists 102 | assert(exists == expected) 103 | end 104 | end 105 | 106 | # verify the first 76 keys are removed 107 | validate.(1..76, false) 108 | 109 | # verify the latest 25 are retained 110 | validate.(77..101, true) 111 | 112 | # finally, verify hooks are notified 113 | assert_receive({{:clear, [[]]}, {:ok, 76}}) 114 | 115 | # retrieve the policy hook definition 116 | cache(hooks: hooks(post: [hook1 | _])) = state 117 | 118 | # just ensure that notifying errors to the policy doesn't cause a crash 119 | Services.Informant.notify([hook1], {:action, []}, {:error, false}) 120 | end 121 | end 122 | -------------------------------------------------------------------------------- /test/cachex/limit/scheduled_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Limit.ScheduledTest do 2 | use Cachex.Test.Case 3 | 4 | # This test just ensures that there are no artificial limits placed on a cache 5 | # by adding 5000 keys and making sure they're not evicted. It simply serves as 6 | # validation that there are no bad defaults set anywhere. 7 | test "evicting with no upper bound" do 8 | # create a cache with no max size 9 | cache = TestUtils.create_cache() 10 | 11 | # retrieve the cache state 12 | state = Services.Overseer.retrieve(cache) 13 | 14 | # add 5000 keys to the cache 15 | for x <- 1..5000 do 16 | {:ok, true} = Cachex.put(state, x, x) 17 | end 18 | 19 | # retrieve the cache size 20 | count = Cachex.size!(state) 21 | 22 | # make sure all keys are there 23 | assert(count == 5000) 24 | end 25 | 26 | # This test ensures that a cache will cap caches at a given limit by trimming 27 | # caches by a given size once they cross a given threshold. We ensure that the 28 | # size is trimmed properly and the oldest entries are evicted first, with the 29 | # newest entries kept in the cache. 
Finally we make sure that all hooks are
30 | # notified of the evictions that occurred.
31 | test "evicting when a cache crosses a limit" do
32 | # create a forwarding hook
33 | hook = ForwardHook.create()
34 |
35 | # create a cache with a max size
36 | cache =
37 | TestUtils.create_cache(
38 | hooks: [
39 | hook,
40 | hook(
41 | module: Cachex.Limit.Scheduled,
42 | args: {
43 | 100,
44 | [
45 | buffer: 25,
46 | reclaim: 0.75
47 | ],
48 | [
49 | frequency: 100
50 | ]
51 | }
52 | )
53 | ]
54 | )
55 |
56 | # retrieve the cache state
57 | state = Services.Overseer.retrieve(cache)
58 |
59 | # add 100 keys to the cache
60 | for x <- 1..100 do
61 | # add the entry to the cache
62 | {:ok, true} = Cachex.put(state, x, x)
63 |
64 | # tick to make sure each has a new touch time
65 | :timer.sleep(1)
66 | end
67 |
68 | # retrieve the cache size
69 | size1 = Cachex.size!(cache)
70 |
71 | # verify the cache size
72 | assert(size1 == 100)
73 |
74 | # flush all existing hook events
75 | TestUtils.flush()
76 |
77 | # add a new key to the cache to trigger evictions
78 | {:ok, true} = Cachex.put(state, 101, 101)
79 |
80 | # verify the cache shrinks to 25%
81 | TestUtils.poll(250, 25, fn ->
82 | Cachex.size!(state)
83 | end)
84 |
85 | # our validation step
86 | validate = fn range, expected ->
87 | # iterate all keys in the range
88 | for x <- range do
89 | # retrieve whether the key exists
90 | exists = Cachex."exists?!"(state, x)
91 |
92 | # verify whether it exists
93 | assert(exists == expected)
94 | end
95 | end
96 |
97 | # verify the first 76 keys are removed
98 | validate.(1..76, false)
99 |
100 | # verify the latest 25 are retained
101 | validate.(77..101, true)
102 |
103 | # finally, verify hooks are notified
104 | assert_receive({{:clear, [[]]}, {:ok, 76}})
105 |
106 | # retrieve the policy hook definition
107 | cache(hooks: hooks(post: [hook1 | _])) = state
108 |
109 | # just ensure that notifying errors to the policy doesn't cause a crash
110 | Services.Informant.notify([hook1], {:action, []}, {:error, false})
111 | end
112 | end
113 | -------------------------------------------------------------------------------- /test/cachex/query_test.exs: --------------------------------------------------------------------------------
1 | defmodule Cachex.QueryTest do
2 | use Cachex.Test.Case
3 |
4 | test "creating basic queries" do
5 | # create a query with no filter
6 | query1 = Cachex.Query.build()
7 | query2 = Cachex.Query.build(output: :key)
8 |
9 | # verify the mapping of both queries
10 | assert [{{:_, :"$1", :"$2", :"$3", :"$4"}, _, _}] = query1
11 | assert [{{:_, :"$1", :"$2", :"$3", :"$4"}, _, _}] = query2
12 |
13 | # unpack clauses of both queries
14 | [{_, [true], _}] = query1
15 | [{_, [true], _}] = query2
16 |
17 | # verify the returns of both queries
18 | assert [{_, _, [:"$_"]}] = query1
19 | assert [{_, _, [:"$1"]}] = query2
20 | end
21 |
22 | test "creating expired queries" do
23 | # create base expired filter
24 | filter1 = Cachex.Query.expired()
25 | filter2 = Cachex.Query.expired(false)
26 |
27 | # create a couple of expired queries
28 | clause1 = Cachex.Query.build(where: filter1)
29 | clause2 = Cachex.Query.build(where: filter1, output: :key)
30 | clause3 = Cachex.Query.build(where: filter2)
31 |
32 | # verify the mapping of both queries
33 | assert [{{:_, :"$1", :"$2", :"$3", :"$4"}, _, _}] = clause1
34 | assert [{{:_, :"$1", :"$2", :"$3", :"$4"}, _, _}] = clause2
35 | assert [{{:_, :"$1", :"$2", :"$3", :"$4"}, _, _}] = clause3
36 |
37 | # unpack clauses of both queries
38 | [{_,
[{:not, c1}], _}] = clause1 39 | [{_, [{:not, c2}], _}] = clause2 40 | [{_, [{:andalso, {:not, c3}, false}], _}] = clause3 41 | 42 | # verify the queries of both queries 43 | assert {:orelse, {:==, :"$4", nil}, {:>, {:+, :"$3", :"$4"}, _now}} = c1 44 | assert {:orelse, {:==, :"$4", nil}, {:>, {:+, :"$3", :"$4"}, _now}} = c2 45 | assert {:orelse, {:==, :"$4", nil}, {:>, {:+, :"$3", :"$4"}, _now}} = c3 46 | 47 | # verify the returns of both queries 48 | assert [{_, _, [:"$_"]}] = clause1 49 | assert [{_, _, [:"$1"]}] = clause2 50 | assert [{_, _, [:"$_"]}] = clause3 51 | end 52 | 53 | test "creating unexpired queries" do 54 | # create base unexpired filter 55 | filter1 = Cachex.Query.unexpired() 56 | filter2 = Cachex.Query.unexpired(false) 57 | 58 | # create a couple of unexpired queries 59 | clause1 = Cachex.Query.build(where: filter1) 60 | clause2 = Cachex.Query.build(where: filter1, output: :key) 61 | clause3 = Cachex.Query.build(where: filter2) 62 | 63 | # verify the mapping of both queries 64 | assert [{{:_, :"$1", :"$2", :"$3", :"$4"}, _, _}] = clause1 65 | assert [{{:_, :"$1", :"$2", :"$3", :"$4"}, _, _}] = clause2 66 | assert [{{:_, :"$1", :"$2", :"$3", :"$4"}, _, _}] = clause3 67 | 68 | # unpack clauses of both queries 69 | [{_, [c1], _}] = clause1 70 | [{_, [c2], _}] = clause2 71 | [{_, [c3], _}] = clause3 72 | 73 | # verify the queries of all queries 74 | assert {:orelse, {:==, :"$4", nil}, {:>, {:+, :"$3", :"$4"}, _now}} = c1 75 | assert {:orelse, {:==, :"$4", nil}, {:>, {:+, :"$3", :"$4"}, _now}} = c2 76 | assert {:andalso, {:orelse, {:==, :"$4", nil}, {:>, {:+, :"$3", :"$4"}, _now}}, false} = c3 77 | 78 | # verify the returns of both queries 79 | assert [{_, _, [:"$_"]}] = clause1 80 | assert [{_, _, [:"$1"]}] = clause2 81 | assert [{_, _, [:"$_"]}] = clause3 82 | end 83 | end 84 | -------------------------------------------------------------------------------- /test/cachex/router/jump_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Router.JumpTest do 2 | use Cachex.Test.Case 3 | 4 | test "routing keys via a jump router" do 5 | # create a test cache cluster for nodes 6 | {cache, nodes, _cluster} = 7 | TestUtils.create_cache_cluster(3, 8 | router: Cachex.Router.Jump 9 | ) 10 | 11 | # convert the name to a cache and sort 12 | cache = Services.Overseer.retrieve(cache) 13 | nodes = Enum.sort(nodes) 14 | 15 | # fetch the router state after initialize 16 | cache(router: router(state: state)) = cache 17 | 18 | # test that we can route to expected nodes 19 | assert Cachex.Router.nodes(cache) == {:ok, nodes} 20 | assert Cachex.Router.Jump.route(state, "elixir") == Enum.at(nodes, 1) 21 | assert Cachex.Router.Jump.route(state, "erlang") == Enum.at(nodes, 2) 22 | end 23 | 24 | test "routing keys via a jump router with defined nodes" do 25 | # create our nodes 26 | nodes = [:a, :b, :c] 27 | 28 | # create router from nodes 29 | router = 30 | router( 31 | module: Cachex.Router.Jump, 32 | options: [nodes: nodes] 33 | ) 34 | 35 | # create a test cache and fetch back 36 | cache = TestUtils.create_cache(router: router) 37 | cache = Services.Overseer.retrieve(cache) 38 | 39 | # fetch the router state after initialize 40 | cache(router: router(state: state)) = cache 41 | 42 | # test that we can route to expected nodes 43 | assert Cachex.Router.nodes(cache) == {:ok, nodes} 44 | assert Cachex.Router.Jump.route(state, "elixir") == Enum.at(nodes, 1) 45 | assert Cachex.Router.Jump.route(state, "erlang") == Enum.at(nodes, 2) 46 | end 47 | 
end
48 | -------------------------------------------------------------------------------- /test/cachex/router/local_test.exs: --------------------------------------------------------------------------------
1 | defmodule Cachex.Router.LocalTest do
2 | use Cachex.Test.Case
3 |
4 | test "routing keys via a local router" do
5 | # create a test cache
6 | cache = TestUtils.create_cache(router: Cachex.Router.Local)
7 |
8 | # convert the name to a cache and sort
9 | cache = Services.Overseer.retrieve(cache)
10 |
11 | # fetch the router state after initialize
12 | cache(router: router(state: state)) = cache
13 |
14 | # test that we can route to expected nodes
15 | assert Cachex.Router.nodes(cache) == {:ok, [node()]}
16 | assert Cachex.Router.Local.route(state, "elixir") == node()
17 | assert Cachex.Router.Local.route(state, "erlang") == node()
18 | end
19 | end
20 | -------------------------------------------------------------------------------- /test/cachex/router/mod_test.exs: --------------------------------------------------------------------------------
1 | defmodule Cachex.Router.ModTest do
2 | use Cachex.Test.Case
3 |
4 | test "routing keys via a modulo router" do
5 | # create a test cache cluster for nodes
6 | {cache, nodes, _cluster} =
7 | TestUtils.create_cache_cluster(3,
8 | router: Cachex.Router.Mod
9 | )
10 |
11 | # convert the name to a cache and sort
12 | cache = Services.Overseer.retrieve(cache)
13 | nodes = Enum.sort(nodes)
14 |
15 | # fetch the router state after initialize
16 | cache(router: router(state: state)) = cache
17 |
18 | # test that we can route to expected nodes
19 | assert Cachex.Router.nodes(cache) == {:ok, nodes}
20 | assert Cachex.Router.Mod.route(state, "elixir") == Enum.at(nodes, 1)
21 | assert Cachex.Router.Mod.route(state, "erlang") == Enum.at(nodes, 0)
22 | end
23 |
24 | test "routing keys via a modulo router with defined nodes" do
25 | # create our nodes
26 | nodes = [:a, :b, :c]
27 |
28 | # create router from nodes
29 | router =
30 | router(
31 | module: Cachex.Router.Mod,
32 | options: [nodes: nodes]
33 | )
34 |
35 | # create a test cache and fetch back
36 | cache = TestUtils.create_cache(router: router)
37 | cache = Services.Overseer.retrieve(cache)
38 |
39 | # fetch the router state after initialize
40 | cache(router: router(state: state)) = cache
41 |
42 | # test that we can route to expected nodes
43 | assert Cachex.Router.nodes(cache) == {:ok, nodes}
44 | assert Cachex.Router.Mod.route(state, "elixir") == Enum.at(nodes, 1)
45 | assert Cachex.Router.Mod.route(state, "erlang") == Enum.at(nodes, 0)
46 | end
47 | end
48 | -------------------------------------------------------------------------------- /test/cachex/router_test.exs: --------------------------------------------------------------------------------
1 | defmodule Cachex.RouterTest do
2 | use Cachex.Test.Case
3 | end
4 | -------------------------------------------------------------------------------- /test/cachex/services/courier_test.exs: --------------------------------------------------------------------------------
1 | defmodule Cachex.Services.CourierTest do
2 | use Cachex.Test.Case
3 |
4 | test "dispatching tasks" do
5 | # start a new cache
6 | cache = TestUtils.create_cache()
7 | cache = Services.Overseer.retrieve(cache)
8 |
9 | # dispatch an arbitrary task
10 | result =
11 | Services.Courier.dispatch(cache, "my_key", fn ->
12 | "my_value"
13 | end)
14 |
15 | # check the returned value
16 | assert result == {:commit, "my_value"}
17 |
18 | # check the key was placed in the table
19 |
retrieved = Cachex.get(cache, "my_key") 20 | 21 | # the retrieved value should match 22 | assert retrieved == {:ok, "my_value"} 23 | end 24 | 25 | test "dispatching tasks from multiple processes" do 26 | # create a hook for forwarding 27 | {:ok, agent} = Agent.start_link(fn -> :ok end) 28 | 29 | # define our task function 30 | task = fn -> 31 | :timer.sleep(500) 32 | {:commit, "my_value", expire: :timer.seconds(60)} 33 | end 34 | 35 | # start a new cache 36 | cache = TestUtils.create_cache() 37 | cache = Services.Overseer.retrieve(cache) 38 | parent = self() 39 | 40 | # dispatch an arbitrary task from the agent process 41 | Agent.cast(agent, fn _ -> 42 | send(parent, Services.Courier.dispatch(cache, "my_key", task)) 43 | end) 44 | 45 | # dispatch an arbitrary task from the current process 46 | result = Services.Courier.dispatch(cache, "my_key", task) 47 | 48 | # check the returned value with the options set 49 | assert result == {:commit, "my_value"} 50 | 51 | # check the forwarded task completed (no options) 52 | assert_receive({:ok, "my_value"}) 53 | 54 | # check the key was placed in the table 55 | retrieved = Cachex.get(cache, "my_key") 56 | 57 | # the retrieved value should match 58 | assert retrieved == {:ok, "my_value"} 59 | end 60 | 61 | test "gracefully handling crashes inside tasks" do 62 | # start a new cache 63 | cache = TestUtils.create_cache() 64 | cache = Services.Overseer.retrieve(cache) 65 | 66 | # dispatch an arbitrary task 67 | result = 68 | Services.Courier.dispatch(cache, "my_key", fn -> 69 | raise ArgumentError 70 | end) 71 | 72 | # check the returned value contains the error and the stack trace 73 | assert match?({:error, %Cachex.Error{}}, result) 74 | assert elem(result, 1).message == "argument error" 75 | end 76 | 77 | test "recovering from failed tasks" do 78 | # start a new cache 79 | cache = TestUtils.create_cache() 80 | cache = Services.Overseer.retrieve(cache) 81 | 82 | # kill in flight task 83 | parent = 84 | spawn(fn -> 85 | receive do 86 | pid -> Process.exit(pid, :kill) 87 | end 88 | end) 89 | 90 | # dispatch a long running task 91 | result = 92 | Services.Courier.dispatch(cache, "my_key", fn -> 93 | send(parent, self()) 94 | :timer.sleep(60000) 95 | end) 96 | 97 | # check we caught the killed task 98 | assert result == {:error, :killed} 99 | end 100 | end 101 | -------------------------------------------------------------------------------- /test/cachex/services/janitor_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Services.JanitorTest do 2 | use Cachex.Test.Case 3 | 4 | # We have a common utility to check whether a TTL has passed or not based on 5 | # an input of a write time and a TTL length. This test ensures that this returns 6 | # true or false based on whether we should expire or not. There's also additional 7 | # logic that a cache can have expiration disabled, and so if we pass a state with 8 | # it disabled, it should return false regardless of the date deltas. 
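# In rough sketch form, the arithmetic behind the check is simply (names
# illustrative, `now_ms` being the current system time in milliseconds):
#
#     expired? = expiration != nil and modified + expiration <= now_ms
#
# with the cache-level `expiration(lazy: ...)` flag able to veto the result
# when lazy expiration has been disabled.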
9 | test "checking whether an expiration has passed" do 10 | # this combination has expired 11 | modified1 = 5000 12 | expiration1 = 5000 13 | 14 | # this combination has not 15 | modified2 = :os.system_time(:milli_seconds) 16 | expiration2 = 100_000_000 17 | 18 | # define both an enabled and disabled state 19 | state1 = cache(expiration: expiration(lazy: true)) 20 | state2 = cache(expiration: expiration(lazy: false)) 21 | 22 | # expired combination regardless of state 23 | result1 = 24 | Services.Janitor.expired?(entry(modified: modified1, expiration: expiration1)) 25 | 26 | # unexpired combination regardless of state 27 | result2 = 28 | Services.Janitor.expired?(entry(modified: modified2, expiration: expiration2)) 29 | 30 | # expired combination with state enabled 31 | result3 = 32 | Services.Janitor.expired?( 33 | state1, 34 | entry(modified: modified1, expiration: expiration1) 35 | ) 36 | 37 | # expired combination with state disabled 38 | result4 = 39 | Services.Janitor.expired?( 40 | state2, 41 | entry(modified: modified1, expiration: expiration1) 42 | ) 43 | 44 | # only the first and third should have expired 45 | assert(result1) 46 | assert(result3) 47 | 48 | # the second and fourth should not have 49 | refute(result2) 50 | refute(result4) 51 | end 52 | 53 | # The Janitor process can run on a schedule too, to automatically purge records. 54 | # This test should verify a Janitor running on a schedule, as well as make sure 55 | # that the Janitor sends a notification to hooks whenever the process removes 56 | # some keys, as Janitor actions should be subscribable. This test will also 57 | # verify that the metadata of the last run is updated alongside the changes. 58 | test "purging records on a schedule" do 59 | # create our forwarding hook 60 | hooks = ForwardHook.create() 61 | 62 | # set our interval values 63 | ttl_interval = 50 64 | ttl_value = div(ttl_interval, 2) 65 | ttl_wait = round(ttl_interval * 1.5) 66 | 67 | # create a test cache 68 | cache = 69 | TestUtils.create_cache( 70 | hooks: hooks, 71 | expiration: expiration(interval: ttl_interval) 72 | ) 73 | 74 | cache = Services.Overseer.retrieve(cache) 75 | 76 | # add a new cache entry 77 | {:ok, true} = Cachex.put(cache, "key", "value", expire: ttl_value) 78 | 79 | # check that the key exists 80 | exists1 = Cachex.exists?(cache, "key") 81 | 82 | # before the schedule, the key should exist 83 | assert(exists1 == {:ok, true}) 84 | 85 | # wait for the schedule 86 | :timer.sleep(ttl_wait) 87 | 88 | # check that the key exists 89 | exists2 = Cachex.exists?(cache, "key") 90 | 91 | # the key should have been removed 92 | assert(exists2 == {:ok, false}) 93 | 94 | # retrieve the metadata 95 | {:ok, metadata1} = Services.Janitor.last_run(cache) 96 | 97 | # verify the count was updated 98 | assert(metadata1[:count] == 1) 99 | 100 | # verify the duration is valid 101 | assert(is_integer(metadata1[:duration])) 102 | 103 | # windows will round to nearest millis (0) 104 | assert(metadata1[:duration] >= 0) 105 | 106 | # verify the start time was set 107 | assert(is_integer(metadata1[:started])) 108 | assert(metadata1[:started] > 0) 109 | assert(metadata1[:started] <= :os.system_time(:milli_seconds)) 110 | 111 | # ensure we receive(d) the hook notification 112 | assert_receive({{:purge, [[{:local, true}]]}, {:ok, 1}}) 113 | end 114 | end 115 | -------------------------------------------------------------------------------- /test/cachex/services/steward_test.exs: 
-------------------------------------------------------------------------------- 1 | defmodule Cachex.Services.StewardTest do 2 | use Cachex.Test.Case 3 | 4 | test "provisioning cache state" do 5 | # bind our hook 6 | ForwardHook.bind( 7 | steward_forward_hook_provisions: [ 8 | provisions: [:cache] 9 | ] 10 | ) 11 | 12 | # create our hook with the provisions forwarded through to it 13 | hook = ForwardHook.create(:steward_forward_hook_provisions) 14 | 15 | # start a new cache using our forwarded hook 16 | cache = TestUtils.create_cache(hooks: [hook]) 17 | cache = Services.Overseer.retrieve(cache) 18 | 19 | # the provisioned value should match 20 | assert_receive({:cache, ^cache}) 21 | end 22 | end 23 | -------------------------------------------------------------------------------- /test/cachex/services_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Cachex.ServicesTest do 2 | use Cachex.Test.Case 3 | 4 | test "generating application service definitions" do 5 | assert [ 6 | %{id: Services.Overseer, start: {Services.Overseer, _, _}}, 7 | %{id: Services.Locksmith, start: {Services.Locksmith, _, _}} 8 | ] = Services.app_spec() 9 | end 10 | 11 | test "generating default cache specifications" do 12 | # generate the test cache state 13 | name = TestUtils.create_cache() 14 | cache = Services.Overseer.retrieve(name) 15 | 16 | # validate the services 17 | assert [ 18 | %{id: Eternal, start: {Eternal, _, _}}, 19 | %{ 20 | id: Services.Locksmith.Queue, 21 | start: {Services.Locksmith.Queue, _, _} 22 | }, 23 | %{id: Services.Informant, start: {Services.Informant, _, _}}, 24 | %{id: Services.Incubator, start: {Services.Incubator, _, _}}, 25 | %{id: Services.Courier, start: {Services.Courier, _, _}}, 26 | %{id: Services.Janitor, start: {Services.Janitor, _, _}} 27 | ] = Services.cache_spec(cache) 28 | end 29 | 30 | test "generating cache specifications with routing" do 31 | # generate the test cache state using an async router 32 | name = TestUtils.create_cache(router: Cachex.Router.Ring) 33 | cache = Services.Overseer.retrieve(name) 34 | 35 | # validate the services 36 | assert [ 37 | %{id: Eternal, start: {Eternal, _, _}}, 38 | %{id: ExHashRing.Ring, start: {ExHashRing.Ring, _, _}}, 39 | %{ 40 | id: Cachex.Router.Ring.Monitor, 41 | start: {GenServer, _, _} 42 | }, 43 | %{ 44 | id: Services.Locksmith.Queue, 45 | start: {Services.Locksmith.Queue, _, _} 46 | }, 47 | %{id: Services.Informant, start: {Services.Informant, _, _}}, 48 | %{id: Services.Incubator, start: {Services.Incubator, _, _}}, 49 | %{id: Services.Courier, start: {Services.Courier, _, _}}, 50 | %{id: Services.Janitor, start: {Services.Janitor, _, _}} 51 | ] = Services.cache_spec(cache) 52 | end 53 | 54 | test "skipping cache janitor specifications" do 55 | # generate the test cache state with the Janitor disabled 56 | name = TestUtils.create_cache(expiration: expiration(interval: nil)) 57 | cache = Services.Overseer.retrieve(name) 58 | 59 | # validate the services 60 | assert [ 61 | %{id: Eternal, start: {Eternal, _, _}}, 62 | %{ 63 | id: Services.Locksmith.Queue, 64 | start: {Services.Locksmith.Queue, _, _} 65 | }, 66 | %{id: Services.Informant, start: {Services.Informant, _, _}}, 67 | %{id: Services.Incubator, start: {Services.Incubator, _, _}}, 68 | %{id: Services.Courier, start: {Services.Courier, _, _}} 69 | ] = Services.cache_spec(cache) 70 | end 71 | 72 | test "locating running services" do 73 | # generate the test cache state with the Janitor disabled 74 | name = 
TestUtils.create_cache(expiration: expiration(interval: nil)) 75 | cache = Services.Overseer.retrieve(name) 76 | 77 | # validate the service locations 78 | assert Services.locate(cache, Services.Courier) != nil 79 | assert Services.locate(cache, Services.Janitor) == nil 80 | end 81 | end 82 | -------------------------------------------------------------------------------- /test/cachex/test/case.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Test.Case do 2 | @doc false 3 | defmacro __using__(_) do 4 | quote location: :keep do 5 | use ExUnit.Case, async: false 6 | 7 | alias Cachex.Test.Hook.Execute, as: ExecuteHook 8 | alias Cachex.Test.Hook.Forward, as: ForwardHook 9 | alias Cachex.Test.Utils, as: TestUtils 10 | alias Cachex.Services 11 | 12 | import Cachex.Spec 13 | import Cachex.Error 14 | import ExUnit.CaptureLog 15 | 16 | require ExecuteHook 17 | require ForwardHook 18 | require TestUtils 19 | end 20 | end 21 | end 22 | -------------------------------------------------------------------------------- /test/cachex/test/hook/execute.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Test.Hook.Execute do 2 | @moduledoc false 3 | # This module provides a Cachex hook interface which will execute 4 | # forwarded functions. 5 | # 6 | # This is useful to testing timeouts inside hooks, as well as any 7 | # error handling and crash states (from inside the hook). 8 | use Cachex.Hook 9 | import Cachex.Spec 10 | 11 | @doc """ 12 | Returns a hook definition for the default execute hook. 13 | """ 14 | def create, 15 | do: create(:default_execute_hook) 16 | 17 | @doc """ 18 | Returns a hook definition for a custom execute hook. 19 | """ 20 | def create(module, name \\ nil) when is_atom(module), 21 | do: hook(module: module, args: self(), name: name) 22 | 23 | @doc """ 24 | Binds a module for a given name and provided overrides. 25 | 26 | This is used to generate module definitions with a custom 27 | implementation and option set for the hook interfaces. 28 | """ 29 | defmacro bind(pairs) do 30 | for {name, opts} <- pairs do 31 | # pull out all options, allowing their defaults 32 | async = Keyword.get(opts, :async, true) 33 | actions = Keyword.get(opts, :actions, :all) 34 | provisions = Keyword.get(opts, :provisions, []) 35 | timeout = Keyword.get(opts, :timeout, nil) 36 | type = Keyword.get(opts, :type, :post) 37 | 38 | quote do 39 | # define the module by name 40 | defmodule unquote(name) do 41 | use Cachex.Hook 42 | 43 | def async?, 44 | do: unquote(async) 45 | 46 | def actions, 47 | do: unquote(actions) 48 | 49 | def provisions, 50 | do: unquote(provisions) 51 | 52 | def timeout, 53 | do: unquote(timeout) 54 | 55 | def type, 56 | do: unquote(type) 57 | 58 | @doc """ 59 | Executes a received function and forwards to the state process. 60 | """ 61 | def handle_notify({_tag, fun}, _results, proc) when is_function(fun, 0), 62 | do: handle_info(fun.(), proc) && {:ok, proc} 63 | 64 | @doc """ 65 | Forwards received messages to the state process. 
66 | """ 67 | def handle_info(msg, proc), 68 | do: send(proc, msg) && {:noreply, proc} 69 | end 70 | end 71 | end 72 | end 73 | end 74 | -------------------------------------------------------------------------------- /test/cachex/test/hook/forward.ex: -------------------------------------------------------------------------------- 1 | defmodule Cachex.Test.Hook.Forward do 2 | @moduledoc false 3 | # This module provides a Cachex hook interface which simply forwards all messages 4 | # to the calling process. 5 | # 6 | # This is useful to validate that messages sent actually do arrive as intended, 7 | # without having to trust assertions inside the hooks themselves. 8 | use Cachex.Hook 9 | import Cachex.Spec 10 | 11 | @doc """ 12 | Returns a hook definition for the default forward hook. 13 | """ 14 | def create, 15 | do: create(:default_forward_hook) 16 | 17 | @doc """ 18 | Returns a hook definition for a custom forward hook. 19 | """ 20 | def create(module, name \\ nil) when is_atom(module), 21 | do: hook(module: module, args: self(), name: name) 22 | 23 | @doc """ 24 | Binds a module for a given name and provided overrides. 25 | 26 | This is used to generate module definitions with a custom 27 | implementation and option set for the hook interfaces. 28 | """ 29 | defmacro bind(pairs) do 30 | for {name, opts} <- pairs do 31 | # pull out all options, allowing their defaults 32 | async = Keyword.get(opts, :async, true) 33 | actions = Keyword.get(opts, :actions, :all) 34 | provisions = Keyword.get(opts, :provisions, []) 35 | timeout = Keyword.get(opts, :timeout, nil) 36 | type = Keyword.get(opts, :type, :post) 37 | 38 | quote do 39 | # define the module by name 40 | defmodule unquote(name) do 41 | use Cachex.Hook 42 | 43 | def async?, 44 | do: unquote(async) 45 | 46 | def actions, 47 | do: unquote(actions) 48 | 49 | def provisions, 50 | do: unquote(provisions) 51 | 52 | def timeout, 53 | do: unquote(timeout) 54 | 55 | def type, 56 | do: unquote(type) 57 | 58 | @doc """ 59 | Forwards received messages to the state process. 60 | """ 61 | def handle_notify(msg, results, proc) do 62 | {:ok, handle_info({msg, results}, proc) && proc} 63 | end 64 | 65 | @doc """ 66 | Forwards received messages to the state process. 67 | """ 68 | def handle_provision(provision, proc) do 69 | {:ok, handle_info(provision, proc) && proc} 70 | end 71 | 72 | @doc """ 73 | Forwards received messages to the state process. 74 | """ 75 | def handle_info(msg, proc) do 76 | {:noreply, send(proc, msg) && proc} 77 | end 78 | end 79 | end 80 | end 81 | end 82 | end 83 | -------------------------------------------------------------------------------- /test/test_helper.exs: -------------------------------------------------------------------------------- 1 | # ensure that Cachex has been started 2 | Application.ensure_all_started(:cachex) 3 | 4 | # pattern to find test library files 5 | "#{Path.dirname(__ENV__.file)}/**/*.ex" 6 | # locate via Path 7 | |> Path.wildcard() 8 | # roughly sort by most nested 9 | |> Enum.sort_by(&String.length/1) 10 | # nested first 11 | |> Enum.reverse() 12 | # only attempt to import files 13 | |> Enum.filter(&(!File.dir?(&1))) 14 | # load each found module via Code 15 | |> Enum.each(&Code.require_file/1) 16 | 17 | # start ExUnit with skips 18 | ExUnit.start(exclude: [:skip]) 19 | --------------------------------------------------------------------------------