├── .pongo └── pongorc ├── .busted ├── .gitignore ├── .editorconfig ├── .luacheckrc ├── spec ├── kong_tests.conf ├── 01-schema_spec.lua ├── 05-cache_key_spec.lua ├── 04-invalidations_spec.lua ├── 03-api_spec.lua └── 02-access_spec.lua ├── kong-proxy-cache-redis-plugin-2.0.1-0.rockspec ├── .travis.yml ├── kong └── plugins │ └── proxy-cache-redis │ ├── api.lua │ ├── schema.lua │ ├── cache_key.lua │ ├── redis.lua │ └── handler.lua ├── README.md └── LICENSE /.pongo/pongorc: -------------------------------------------------------------------------------- 1 | --postgres 2 | --cassandra 3 | -------------------------------------------------------------------------------- /.busted: -------------------------------------------------------------------------------- 1 | return { 2 | default = { 3 | verbose = true, 4 | coverage = false, 5 | output = "gtest", 6 | }, 7 | } 8 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # servroot is typically the nginx/Kong workingdirectory when testing 2 | servroot 3 | 4 | # packed distribution format for LuaRocks 5 | *.rock 6 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | end_of_line = lf 5 | insert_final_newline = true 6 | trim_trailing_whitespace = true 7 | charset = utf-8 8 | 9 | [*.lua] 10 | indent_style = space 11 | indent_size = 2 12 | 13 | [kong/templates/nginx*] 14 | indent_style = space 15 | indent_size = 4 16 | 17 | [*.template] 18 | indent_style = space 19 | indent_size = 4 20 | 21 | [Makefile] 22 | indent_style = tab 23 | -------------------------------------------------------------------------------- /.luacheckrc: -------------------------------------------------------------------------------- 1 | std = "ngx_lua" 2 | unused_args = false 3 | redefined = false 4 | max_line_length = false 5 | 6 | 7 | globals = { 8 | "_KONG", 9 | "kong", 10 | "ngx.IS_CLI", 11 | } 12 | 13 | 14 | not_globals = { 15 | "string.len", 16 | "table.getn", 17 | } 18 | 19 | 20 | ignore = { 21 | "6.", -- ignore whitespace warnings 22 | } 23 | 24 | 25 | exclude_files = { 26 | --"spec/fixtures/invalid-module.lua", 27 | --"spec-old-api/fixtures/invalid-module.lua", 28 | } 29 | 30 | 31 | files["spec/**/*.lua"] = { 32 | std = "ngx_lua+busted", 33 | } 34 | -------------------------------------------------------------------------------- /spec/kong_tests.conf: -------------------------------------------------------------------------------- 1 | # 1st digit is 9 for our test instances 2 | admin_listen = 127.0.0.1:9001 3 | proxy_listen = 0.0.0.0:9000, 0.0.0.0:9443 ssl 4 | stream_listen = off 5 | 6 | ssl_cert = spec/fixtures/kong_spec.crt 7 | ssl_cert_key = spec/fixtures/kong_spec.key 8 | 9 | admin_ssl_cert = spec/fixtures/kong_spec.crt 10 | admin_ssl_cert_key = spec/fixtures/kong_spec.key 11 | 12 | dns_resolver = 8.8.8.8 13 | database = postgres 14 | pg_host = 127.0.0.1 15 | pg_port = 5432 16 | pg_timeout = 10000 17 | pg_database = kong_tests 18 | cassandra_keyspace = kong_tests 19 | cassandra_timeout = 10000 20 | anonymous_reports = off 21 | 22 | dns_hostsfile = spec/fixtures/hosts 23 | 24 | nginx_worker_processes = 1 25 | nginx_optimizations = off 26 | 27 | plugins=bundled,dummy,rewriter 28 | custom_plugins = proxy-cache-redis 29 | 30 | prefix = servroot 31 | log_level = debug 32 | 
lua_package_path=./spec/fixtures/custom_plugins/?.lua 33 | -------------------------------------------------------------------------------- /kong-proxy-cache-redis-plugin-2.0.1-0.rockspec: -------------------------------------------------------------------------------- 1 | package = "kong-proxy-cache-redis-plugin" 2 | version = "2.0.1-0" 3 | 4 | source = { 5 | url = "git://github.com/ligreman/kong-proxy-cache-redis-plugin" 6 | } 7 | 8 | supported_platforms = {"linux", "macosx"} 9 | 10 | description = { 11 | summary = "HTTP Redis Proxy Caching for Kong", 12 | license = "Apache 2.0", 13 | } 14 | 15 | dependencies = { 16 | "lua >= 5.1", 17 | } 18 | 19 | build = { 20 | type = "builtin", 21 | modules = { 22 | ["kong.plugins.proxy-cache-redis.handler"] = "kong/plugins/proxy-cache-redis/handler.lua", 23 | ["kong.plugins.proxy-cache-redis.cache_key"] = "kong/plugins/proxy-cache-redis/cache_key.lua", 24 | ["kong.plugins.proxy-cache-redis.schema"] = "kong/plugins/proxy-cache-redis/schema.lua", 25 | ["kong.plugins.proxy-cache-redis.api"] = "kong/plugins/proxy-cache-redis/api.lua", 26 | ["kong.plugins.proxy-cache-redis.redis"] = "kong/plugins/proxy-cache-redis/redis.lua", 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | dist: bionic 2 | 3 | jobs: 4 | include: 5 | - name: Kong CE 2.0.x 6 | env: KONG_VERSION=2.0.x 7 | - name: Nightly EE-master 8 | env: KONG_VERSION=nightly-ee POSTGRES=latest CASSANDRA=latest 9 | #- name: Nightly CE-master 10 | # env: KONG_VERSION=nightly POSTGRES=latest CASSANDRA=latest 11 | 12 | env: 13 | global: 14 | # for Enterprise images 15 | - BINTRAY_USERNAME=kong-automation@kong 16 | - BINTRAY_REPO=kong 17 | - secure: FInjXOxflcTleTRu+fk/51rjkwV8U7rbzr/I8ApEe5IV6QqnpAlQcFkf5Qq31p34xgDOpqdT8+N6xrVdGtD1BhGobT/lzYiqg1sNWRJo/Wacxyj9E4oZxhwfZbG9ymKtyGxAn2eBce5AF8mkXkxXjwpMGmF5cQfZFh5PuaC7CQqUHIlI0hVp5lcnQDb61lI/vXUzXuq0sx8cW5GOBxbWHfCDO4OWHrWKuPQHy41IVy2E0LRn5fWDahfLL6IkvFgvME1pFxQfgDSs4qgrZQzvJAkGTSZ7FIVmAimEV89/BS5ZFkkWm6zj8vv5BELd+wuoipYO2E/126TEVrjsmmyrEiw3Ga7M7eUIcPwy9ozr/tHmzaKM5PK5GpdbwUtGXS2WIu+RzmmZC7sfX5mK+NPOV7h52G5j+h4PNOLXi9eRD0DLs7e/TqoJ6yep/fDql5aAN/kQmyJC/eDhpPy99ttiA/M0sxBLZQ5aN/6Rv+Y/0oKDYk7Ojpo1GZrwQZRF4tueRRMup8F8UHq3FN1oOc+XBzKYIiDdyWNoROVeLIXzVkHaxvmx/wEZagAOSTwR6lNsSrzqX12XOfWBjsnq/9yBBrASK/OKLAo+o2Mfc2XqGdbEH2i7mrfAPFol1YH6BW9LLH0P4WN61BDAeyl22NbYtZBHEQo+N0qDn6yaa74LnCA= 18 | # for Nightly Enterprise images 19 | - NIGHTLY_EE_USER=kong-automation@kong 20 | - secure: HtfwBLd/EDGAz8vm1zz5CDCzD21xr/jOF8zQwnZ36XdLU2qASJX+RU+Mq8/lElZ6kjTmMYMPoi2Xp09TBvRJlj7nbJ5ckcc08mlUbjM0zRP2qCU8q/ZqLzTTOcaqF0RgmtCe5nMjpfF0bZJAIZmjbc51/LYm10rVuqWBtrogaAdHCqFXD9iYAD0DOEuJrZ81KaeiLIjjrw2uLRuzniK8Onax3/b8prQFkxN2Q2YxPyHP7UPfe24DVREmQQwKghBWlkvMrb6oZ+/jRsEIXOokUwAqQiEGJaVB6HfgmxgIGCYTKHU4grjzIt+AOuNJz4QaIAjlWM9817FNgV0IqKkiCYUAgtjNYahXHToWSP6eL6lrEiaPtrsrGs5FG3CAeIXBiCqj8hyXds64noFcnnyimrGGi2rOSXF7OLzbGFmvZUrESMXnTAM4XnRB36RyfTd03EuQ7EL839nWZ9Ptn/eJVpRNLAOA5YVMgobXR+i6/HpK6aoGJ8CReV9iiUKf4enS9h9gZVjjWq5jc/L+dtiIkHbqp0AsFDllyOZrgKVB0KusOWConXHhbUOlSG3+iX7muN3FConDJB5P3S6g2j67wZVhtKfJB/15kdEz/qTY/hNsuJzZqTVnPbb33gQFDddKHGNPY59hVu1Y8QQZam8LPf3F5ZZYxVnB5TvBMiCQIoA= 21 | 22 | install: 23 | - git clone --single-branch https://github.com/Kong/kong-pongo ../kong-pongo 24 | - "../kong-pongo/pongo.sh up" 25 | - "../kong-pongo/pongo.sh build" 26 | 27 | script: 28 | - "../kong-pongo/pongo.sh lint" 29 | - "../kong-pongo/pongo.sh run" 30 | 31 | 
notifications: 32 | slack: 33 | if: branch = master AND type != pull_request 34 | on_success: change 35 | on_failure: always 36 | rooms: 37 | secure: T6F7yqSoaYd9umahw2ATntEVaVJrrZVxkxmZAhdczlV5WYqTstRJay+sw11wGfR5wHTGRhOJ3NJdD2JQwUA2eMFWhL3u3g8/fw1cfnJ9fx/8ezsknzK9cpl2Xtv5Md22C8FzWyzZsEmnMzqkQBEyWvEq6z+9fhnESgbs+uXL1HbxOVTtL8RPsr3gzu1W5rbwgXrAdC6FN6lEMmKiL4BuIL4oHfqYz4sLAQFx8jZ5BwD7mSPOkoM82pGjI/bKsQcgP/vYdkK4Y0K/6D1cEPwEsTd2PA0nuAbRXunICn6hkjvHzCaH3VN/Jep5b6z1UxWULRRphiIE3PZgUL1h/eDnp2hjjptjfQefKgrF/fXZtxX0ssDTgZjQi0io8IWU4y1hIs6cB1Lm+Zw4Xrp+IB/RoTxT1hCFznAjKTLFLXMqxZrGoP6+Mi3cJTuFjMFVwVe+OcrWVs4I8D/XgsCXI41v8Xv6iuSbYuMIzRy1r+85GVZS/5/y5hykC3/dpSYXjnQofKZW1rJWLVfL3iuSlobb0JIJ3HRpuCdQ1aeaogMl5e9fLbARFWI7/s7+hlJRVKj7YbLLQNdaqqIRUd+GS4cK/IDcoxLFFMwWid9+V+6po587dQNWlnmYqDrCWGcAgLgLr+6lTHispiUuGZgAFvvNdqkH2b4iEwGpvV1R0wZl+cM= 38 | -------------------------------------------------------------------------------- /kong/plugins/proxy-cache-redis/api.lua: -------------------------------------------------------------------------------- 1 | local redis = require "kong.plugins.proxy-cache-redis.redis" 2 | local kong = kong 3 | 4 | return { 5 | ["/plugins/:plugin_id/proxy-cache-redis"] = { 6 | 7 | DELETE = function(self) 8 | -- Busco el plugin 9 | local plugin, errp = kong.db.plugins:select({ id = self.params.plugin_id }) 10 | 11 | if errp then 12 | kong.log.err("Error retrieving the plugin: " .. errp) 13 | return nil 14 | end 15 | 16 | if not plugin then 17 | kong.log.err("Could not find plugin.") 18 | return nil 19 | end 20 | 21 | local ok, err = redis:flush(plugin.config) 22 | if not ok then 23 | return kong.response.exit(500, { message = err }) 24 | end 25 | 26 | return kong.response.exit(204) 27 | end 28 | }, 29 | ["/plugins/:plugin_id/proxy-cache-redis/:cache_key"] = { 30 | 31 | GET = function(self) 32 | -- Busco el plugin 33 | local plugin, errp = kong.db.plugins:select({ id = self.params.plugin_id }) 34 | 35 | if errp then 36 | kong.log.err("Error retrieving the plugin: " .. errp) 37 | return nil 38 | end 39 | 40 | if not plugin then 41 | kong.log.err("Could not find plugin.") 42 | return nil 43 | end 44 | 45 | local cache_val, err = redis:fetch(plugin.config, self.params.cache_key) 46 | if err and err ~= "request object not in cache" then 47 | return kong.response.exit(500, err) 48 | end 49 | 50 | if cache_val then 51 | return kong.response.exit(200, cache_val) 52 | end 53 | 54 | -- fell through, not found 55 | return kong.response.exit(404) 56 | end, 57 | 58 | DELETE = function(self) 59 | -- Busco el plugin 60 | local plugin, errp = kong.db.plugins:select({ id = self.params.plugin_id }) 61 | 62 | if errp then 63 | kong.log.err("Error retrieving the plugin: " .. 
errp) 64 | return nil 65 | end 66 | 67 | if not plugin then 68 | kong.log.err("Could not find plugin.") 69 | return nil 70 | end 71 | 72 | local cache_val, err = redis:fetch(plugin.config, self.params.cache_key) 73 | if err and err ~= "request object not in cache" then 74 | return kong.response.exit(500, err) 75 | end 76 | 77 | if cache_val then 78 | local _, err2 = redis:purge(plugin.config, self.params.cache_key) 79 | if err2 then 80 | return kong.response.exit(500, err2) 81 | end 82 | 83 | return kong.response.exit(204) 84 | end 85 | 86 | -- fell through, not found 87 | return kong.response.exit(404) 88 | end, 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /kong/plugins/proxy-cache-redis/schema.lua: -------------------------------------------------------------------------------- 1 | return { 2 | name = "proxy-cache-redis", 3 | fields = { 4 | { config = { 5 | type = "record", 6 | fields = { 7 | { response_code = { 8 | type = "array", 9 | default = { 200, 301, 404 }, 10 | elements = { type = "integer", between = { 100, 900 } }, 11 | len_min = 1, 12 | required = true, 13 | } }, 14 | { request_method = { 15 | type = "array", 16 | default = { "GET", "HEAD" }, 17 | elements = { 18 | type = "string", 19 | one_of = { "HEAD", "GET", "POST", "PATCH", "PUT" }, 20 | }, 21 | required = true 22 | } }, 23 | { allow_force_cache_header = { 24 | type = "boolean", 25 | default = false, 26 | required = true, 27 | } }, 28 | { content_type = { 29 | type = "array", 30 | default = { "text/plain", "application/json" }, 31 | elements = { type = "string" }, 32 | required = true, 33 | } }, 34 | { cache_ttl = { 35 | type = "integer", 36 | default = 300, 37 | required = true, 38 | gt = 0, 39 | } }, 40 | { cache_control = { 41 | type = "boolean", 42 | default = false, 43 | required = true, 44 | } }, 45 | { storage_ttl = { 46 | type = "integer", 47 | gt = 0, 48 | } }, 49 | { vary_query_params = { 50 | type = "array", 51 | elements = { type = "string" }, 52 | } }, 53 | { vary_headers = { 54 | type = "array", 55 | elements = { type = "string" }, 56 | } }, 57 | { vary_body_json_fields = { 58 | type = "array", 59 | elements = { type = "string" }, 60 | } }, 61 | { redis_host = { 62 | type = "string", 63 | required = true, 64 | } }, 65 | { redis_port = { 66 | between = { 0, 65535 }, 67 | type = "integer", 68 | default = 6379, 69 | } }, 70 | { redis_password = { 71 | type = "string", 72 | len_min = 0, 73 | } }, 74 | { redis_timeout = { 75 | type = "number", 76 | default = 2000, 77 | } }, 78 | { redis_database = { 79 | type = "integer", 80 | default = 0, 81 | } }, 82 | }, 83 | } 84 | }, 85 | }, 86 | 87 | entity_checks = { 88 | }, 89 | } 90 | -------------------------------------------------------------------------------- /spec/01-schema_spec.lua: -------------------------------------------------------------------------------- 1 | local proxy_cache_schema = require "kong.plugins.proxy-cache-redis.schema" 2 | local v = require("spec.helpers").validate_plugin_config_schema 3 | 4 | describe("proxy-cache-redis schema", function() 5 | it("accepts a minimal config", function() 6 | local entity, err = v({ 7 | strategy = "memory", 8 | }, proxy_cache_schema) 9 | 10 | assert.is_nil(err) 11 | assert.is_truthy(entity) 12 | end) 13 | 14 | it("defines default content-type values", function() 15 | local config = {strategy = "memory"} 16 | local entity, err = v(config, proxy_cache_schema) 17 | assert.is_nil(err) 18 | assert.is_truthy(entity) 19 | assert.same(entity.config.content_type, 
{"text/plain", "application/json"}) 20 | end) 21 | 22 | it("accepts a config with custom values", function() 23 | local entity, err = v({ 24 | strategy = "memory", 25 | response_code = { 200, 301 }, 26 | request_method = { "GET" }, 27 | content_type = { "application/json" }, 28 | }, proxy_cache_schema) 29 | 30 | assert.is_nil(err) 31 | assert.is_truthy(entity) 32 | end) 33 | 34 | it("accepts an array of numbers as strings", function() 35 | local entity, err = v({ 36 | strategy = "memory", 37 | response_code = {123, 200}, 38 | }, proxy_cache_schema) 39 | 40 | assert.is_nil(err) 41 | assert.is_truthy(entity) 42 | end) 43 | 44 | it("errors with invalid response_code", function() 45 | local entity, err = v({ 46 | strategy = "memory", 47 | response_code = { 99 }, 48 | }, proxy_cache_schema) 49 | 50 | assert.same({ "value should be between 100 and 900" }, err.config.response_code) 51 | assert.is_falsy(entity) 52 | end) 53 | 54 | it("errors if response_code is an empty array", function() 55 | local entity, err = v({ 56 | strategy = "memory", 57 | response_code = {}, 58 | }, proxy_cache_schema) 59 | 60 | assert.same("length must be at least 1", err.config.response_code) 61 | assert.is_falsy(entity) 62 | end) 63 | 64 | it("errors if response_code is a string", function() 65 | local entity, err = v({ 66 | strategy = "memory", 67 | response_code = "", 68 | }, proxy_cache_schema) 69 | 70 | assert.same("expected an array", err.config.response_code) 71 | assert.is_falsy(entity) 72 | end) 73 | 74 | it("errors if response_code has non-numeric values", function() 75 | local entity, err = v({ 76 | strategy = "memory", 77 | response_code = {true, "alo", 123}, 78 | }, proxy_cache_schema) 79 | 80 | assert.same({ "expected an integer", "expected an integer" }, 81 | err.config.response_code) 82 | assert.is_falsy(entity) 83 | end) 84 | 85 | it("errors if response_code has float value", function() 86 | local entity, err = v({ 87 | strategy = "memory", 88 | response_code = {123.5}, 89 | }, proxy_cache_schema) 90 | 91 | assert.same({ "expected an integer" }, err.config.response_code) 92 | assert.is_falsy(entity) 93 | end) 94 | 95 | it("errors with invalid ttl", function() 96 | local entity, err = v({ 97 | strategy = "memory", 98 | cache_ttl = -1 99 | }, proxy_cache_schema) 100 | 101 | assert.same("value must be greater than 0", err.config.cache_ttl) 102 | assert.is_falsy(entity) 103 | end) 104 | 105 | it("supports vary_query_params values", function() 106 | local entity, err = v({ 107 | strategy = "memory", 108 | vary_query_params = { "foo" }, 109 | }, proxy_cache_schema) 110 | 111 | assert.is_nil(err) 112 | assert.is_truthy(entity) 113 | end) 114 | 115 | it("supports vary_headers values", function() 116 | local entity, err = v({ 117 | strategy = "memory", 118 | vary_headers = { "foo" }, 119 | }, proxy_cache_schema) 120 | 121 | assert.is_nil(err) 122 | assert.is_truthy(entity) 123 | end) 124 | end) 125 | -------------------------------------------------------------------------------- /kong/plugins/proxy-cache-redis/cache_key.lua: -------------------------------------------------------------------------------- 1 | local fmt = string.format 2 | local ipairs = ipairs 3 | local md5 = ngx.md5 4 | local type = type 5 | local pairs = pairs 6 | local sort = table.sort 7 | local insert = table.insert 8 | local concat = table.concat 9 | 10 | local _M = {} 11 | 12 | local EMPTY = {} 13 | 14 | local function keys(t) 15 | local res = {} 16 | for k, _ in pairs(t) do 17 | res[#res + 1] = k 18 | end 19 | 20 | return res 21 | end 22 
| 23 | 24 | -- Return a string with the format "key=value(:key=value)*" of the 25 | -- actual keys and values in args that are in vary_fields. 26 | -- 27 | -- The elements are sorted so we get consistent cache actual_keys no matter 28 | -- the order in which params came in the request 29 | -- 30 | -- is_json: boolean that indicates that args are a JSON object (converted into a lua table) 31 | local function generate_key_from(args, vary_fields, is_json) 32 | local cache_key = {} 33 | 34 | for _, field in ipairs(vary_fields or {}) do 35 | local arg = args[field] 36 | if arg then 37 | if is_json == true and type(arg) == "table" then 38 | insert(cache_key, field .. "=" .. "#") 39 | 40 | elseif type(arg) == "table" then 41 | sort(arg) 42 | insert(cache_key, field .. "=" .. concat(arg, ",")) 43 | 44 | elseif arg == true then 45 | insert(cache_key, field) 46 | 47 | else 48 | insert(cache_key, field .. "=" .. tostring(arg)) 49 | end 50 | end 51 | end 52 | 53 | return concat(cache_key, ":") 54 | end 55 | 56 | 57 | -- Return the component of cache_key for vary_query_params in params 58 | -- 59 | -- If no vary_query_params are configured in the plugin, return 60 | -- all of them. 61 | local function params_key(params, plugin_config) 62 | if not (plugin_config.vary_query_params or EMPTY)[1] then 63 | local actual_keys = keys(params) 64 | sort(actual_keys) 65 | return generate_key_from(params, actual_keys, false) 66 | end 67 | 68 | return generate_key_from(params, plugin_config.vary_query_params, false) 69 | end 70 | _M.params_key = params_key 71 | 72 | 73 | -- Return the component of cache_key for vary_headers in params 74 | -- 75 | -- If no vary_headers are configured in the plugin, return 76 | -- the empty string. 77 | local function headers_key(headers, plugin_config) 78 | if not (plugin_config.vary_headers or EMPTY)[1] then 79 | return "" 80 | end 81 | 82 | return generate_key_from(headers, plugin_config.vary_headers, false) 83 | end 84 | _M.headers_key = headers_key 85 | 86 | 87 | -- Return the component of cache_key for vary_body_json_fields in params 88 | -- 89 | -- If no vary_body_json_fields are configured in the plugin, return 90 | -- the empty string. 
91 | local function json_body_key(json_body, plugin_config) 92 | if not (plugin_config.vary_body_json_fields or EMPTY)[1] then 93 | return "" 94 | end 95 | 96 | return generate_key_from(json_body, plugin_config.vary_body_json_fields, true) 97 | end 98 | _M.json_body_key = json_body_key 99 | 100 | 101 | local function prefix_uuid(consumer_id, route_id) 102 | 103 | -- authenticated route 104 | if consumer_id and route_id then 105 | return fmt("%s:%s", consumer_id, route_id) 106 | end 107 | 108 | -- unauthenticated route 109 | if route_id then 110 | return route_id 111 | end 112 | 113 | -- global default 114 | return "default" 115 | end 116 | _M.prefix_uuid = prefix_uuid 117 | 118 | 119 | function _M.build_cache_key(consumer_id, route_id, method, uri, params_table, headers_table, json_body_table, conf) 120 | 121 | -- obtain cache key components 122 | local prefix_digest = prefix_uuid(consumer_id, route_id) 123 | local params_digest = params_key(params_table, conf) 124 | local headers_digest = headers_key(headers_table, conf) 125 | local json_body_digest = json_body_key(json_body_table, conf) 126 | 127 | return md5(fmt("%s|%s|%s|%s|%s|%s", prefix_digest, method, uri, params_digest, headers_digest, json_body_digest)) 128 | end 129 | 130 | return _M 131 | -------------------------------------------------------------------------------- /spec/05-cache_key_spec.lua: -------------------------------------------------------------------------------- 1 | local utils = require "kong.tools.utils" 2 | local key_utils = require "kong.plugins.proxy-cache-redis.cache_key" 3 | 4 | 5 | describe("prefix_uuid", function() 6 | local consumer1_uuid = utils.uuid() 7 | local consumer2_uuid = utils.uuid() 8 | local route1_uuid = utils.uuid() 9 | local route2_uuid = utils.uuid() 10 | 11 | it("returns distinct prefixes for a consumer on different routes", function() 12 | local prefix1 = assert(key_utils.prefix_uuid(consumer1_uuid, route1_uuid)) 13 | local prefix2 = assert(key_utils.prefix_uuid(consumer1_uuid, route2_uuid)) 14 | 15 | assert.not_equal(prefix1, prefix2) 16 | assert.not_equal("default", prefix1) 17 | assert.not_equal("default", prefix2) 18 | end) 19 | 20 | it("returns distinct prefixes for different consumers on a route", function() 21 | local prefix1 = assert(key_utils.prefix_uuid(consumer1_uuid, route1_uuid)) 22 | local prefix2 = assert(key_utils.prefix_uuid(consumer2_uuid, route1_uuid)) 23 | 24 | assert.not_equal(prefix1, prefix2) 25 | assert.not_equal("default", prefix1) 26 | assert.not_equal("default", prefix2) 27 | end) 28 | 29 | it("returns the same prefix for a route with no consumer", function() 30 | local prefix1 = assert(key_utils.prefix_uuid(nil, route1_uuid)) 31 | local prefix2 = assert(key_utils.prefix_uuid(nil, route1_uuid)) 32 | 33 | assert.equal(prefix1, prefix2) 34 | assert.not_equal("default", prefix1) 35 | end) 36 | 37 | it("returns a consumer-specific prefix for routes", function() 38 | local prefix1 = assert(key_utils.prefix_uuid(nil, route1_uuid)) 39 | local prefix2 = assert(key_utils.prefix_uuid(consumer1_uuid, route1_uuid)) 40 | 41 | assert.not_equal(prefix1, prefix2) 42 | end) 43 | 44 | describe("returns 'default' if", function() 45 | it("no consumer_id, api_id, or route_id was given", function() 46 | assert.equal("default", key_utils.prefix_uuid()) 47 | end) 48 | it("only consumer_id was given", function() 49 | assert.equal("default", key_utils.prefix_uuid(consumer1_uuid)) 50 | end) 51 | end) 52 | 53 | describe("does not return 'default' if", function() 54 | it("route_id is 
non-nil", function() 55 | assert.not_equal("default", key_utils.prefix_uuid(nil, route1_uuid)) 56 | end) 57 | end) 58 | end) 59 | 60 | describe("params_key", function() 61 | it("defaults to all", function() 62 | assert.equal("a=1:b=2", key_utils.params_key({a = 1, b = 2},{})) 63 | end) 64 | 65 | it("empty query_string returns empty", function() 66 | assert.equal("", key_utils.params_key({},{})) 67 | end) 68 | 69 | it("empty query_string returns empty with vary query_params", function() 70 | assert.equal("", key_utils.params_key({},{"a"})) 71 | end) 72 | 73 | it("sorts the arguments", function() 74 | for i = 1, 100 do 75 | local s1 = "a" .. utils.random_string() 76 | local s2 = "b" .. utils.random_string() 77 | assert.equal(s1.."=1:".. s2 .. "=2", key_utils.params_key({[s2] = 2, [s1] = 1},{})) 78 | end 79 | end) 80 | 81 | it("uses only params specified in vary", function() 82 | assert.equal("a=1", key_utils.params_key({a = 1, b = 2}, 83 | {vary_query_params = {"a"}})) 84 | end) 85 | 86 | it("deals with multiple params with same name", function() 87 | assert.equal("a=1,2", key_utils.params_key({a = {1, 2}}, 88 | {vary_query_params = {"a"}})) 89 | end) 90 | 91 | it("deals with multiple params with same name and sorts", function() 92 | assert.equal("a=1,2", key_utils.params_key({a = {2, 1}}, 93 | {vary_query_params = {"a"}})) 94 | end) 95 | 96 | it("discards params in config that are not in the request", function() 97 | assert.equal("a=1,2:b=2", key_utils.params_key({a = {1, 2}, b = 2}, 98 | {vary_query_params = {"a", "b", "c"}})) 99 | end) 100 | end) 101 | 102 | describe("headers_key", function() 103 | it("defaults to none", function() 104 | assert.equal("", key_utils.headers_key({a = 1, b = 2},{})) 105 | end) 106 | 107 | it("sorts the arguments", function() 108 | for i = 1, 100 do 109 | local s1 = "a" .. utils.random_string() 110 | local s2 = "b" .. utils.random_string() 111 | assert.equal(s1.."=1:".. s2 .. 
"=2", key_utils.params_key({[s2] = 2, [s1] = 1}, 112 | {vary_headers = {"a", "b"}})) 113 | end 114 | end) 115 | 116 | it("uses only params specified in vary", function() 117 | assert.equal("a=1", key_utils.headers_key({a = 1, b = 2}, 118 | {vary_headers = {"a"}})) 119 | end) 120 | 121 | it("deals with multiple params with same name", function() 122 | assert.equal("a=1,2", key_utils.headers_key({a = {1, 2}}, 123 | {vary_headers = {"a"}})) 124 | end) 125 | 126 | it("deals with multiple params with same name and sorts", function() 127 | assert.equal("a=1,2", key_utils.headers_key({a = {2, 1}}, 128 | {vary_headers = {"a"}})) 129 | end) 130 | 131 | it("discards params in config that are not in the request", function() 132 | assert.equal("a=1,2:b=2", key_utils.headers_key({a = {1, 2}, b = 2}, 133 | {vary_headers = {"a", "b", "c"}})) 134 | end) 135 | end) 136 | -------------------------------------------------------------------------------- /kong/plugins/proxy-cache-redis/redis.lua: -------------------------------------------------------------------------------- 1 | local cjson = require "cjson.safe" 2 | local redis = require "resty.redis" 3 | 4 | local ngx = ngx 5 | local type = type 6 | 7 | local function is_present(str) 8 | return str and str ~= "" and str ~= null 9 | end 10 | 11 | local _M = {} 12 | 13 | -- Conecta a redis 14 | local function red_connect(opts) 15 | local red, err_redis = redis:new() 16 | 17 | if err_redis then 18 | kong.log.err("error connecting to Redis: ", err_redis); 19 | return nil, err_redis 20 | end 21 | 22 | local redis_opts = {} 23 | -- use a special pool name only if database is set to non-zero 24 | -- otherwise use the default pool name host:port 25 | redis_opts.pool = opts.redis_database and opts.redis_host .. ":" .. opts.redis_port .. ":" .. 
opts.redis_database 26 | 27 | red:set_timeout(opts.redis_timeout) 28 | 29 | -- connect 30 | local ok, err = red:connect(opts.redis_host, opts.redis_port, redis_opts) 31 | if not ok then 32 | kong.log.err("failed to connect to Redis: ", err) 33 | return nil, err 34 | end 35 | 36 | local times, err2 = red:get_reused_times() 37 | if err2 then 38 | kong.log.err("failed to get connect reused times: ", err2) 39 | return nil, err2 40 | end 41 | 42 | if times == 0 then 43 | if is_present(opts.redis_password) then 44 | local ok3, err3 = red:auth(opts.redis_password) 45 | if not ok3 then 46 | kong.log.err("failed to auth Redis: ", err3) 47 | return nil, err3 48 | end 49 | end 50 | 51 | if opts.redis_database ~= 0 then 52 | -- Only call select first time, since we know the connection is shared 53 | -- between instances that use the same redis database 54 | local ok4, err4 = red:select(opts.redis_database) 55 | if not ok4 then 56 | kong.log.err("failed to change Redis database: ", err4) 57 | return nil, err4 58 | end 59 | end 60 | end 61 | 62 | return red 63 | end 64 | 65 | -- Fetches a value from Redis 66 | function _M:fetch(conf, key) 67 | local red, err_redis = red_connect(conf) 68 | 69 | -- Check that the Redis connection was established 70 | if not red then 71 | kong.log.err("failed to get the Redis connection: ", err_redis) 72 | return nil, "there is no Redis connection established" 73 | end 74 | 75 | if type(key) ~= "string" then 76 | return nil, "key must be a string" 77 | end 78 | 79 | -- retrieve the object from Redis 80 | local req_json, err = red:get(key) 81 | if req_json == ngx.null then 82 | if not err then 83 | -- return nil, but signal that the object is simply not in the cache 84 | -- rather than a real error; the response will then have to be stored 85 | return nil, "request object not in cache" 86 | else 87 | return nil, err 88 | end 89 | end 90 | 91 | local ok, err2 = red:set_keepalive(10000, 100) 92 | if not ok then 93 | kong.log.err("failed to set Redis keepalive: ", err2) 94 | return nil, err2 95 | end 96 | 97 | -- decode object from JSON to table 98 | local req_obj = cjson.decode(req_json) 99 | 100 | if not req_obj then 101 | return nil, "could not decode request object" 102 | end 103 | 104 | return req_obj 105 | end 106 |
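-- A minimal usage sketch of this strategy module (illustrative only; `conf` stands for
-- the plugin configuration record and `key` for an md5 cache key, both assumed to exist):
--
--   local strategy = require "kong.plugins.proxy-cache-redis.redis"
--   strategy:store(conf, key, { status = 200, body = "cached body" }, conf.cache_ttl)
--   local obj, err = strategy:fetch(conf, key)  -- err == "request object not in cache" on a miss
--   strategy:purge(conf, key)                   -- delete a single cached entry
--   strategy:flush(conf)                        -- FLUSHDB ASYNC on the configured database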
107 | -- Stores a value in Redis 108 | function _M:store(conf, key, req_obj, req_ttl) 109 | local red, err_redis = red_connect(conf) 110 | 111 | -- Check that the Redis connection was established 112 | if not red then 113 | kong.log.err("failed to get the Redis connection: ", err_redis) 114 | return nil, "there is no Redis connection established" 115 | end 116 | 117 | local ttl = req_ttl or conf.cache_ttl 118 | 119 | if type(key) ~= "string" then 120 | return nil, "key must be a string" 121 | end 122 | 123 | -- encode request table representation as JSON 124 | local req_json = cjson.encode(req_obj) 125 | if not req_json then 126 | return nil, "could not encode request object" 127 | end 128 | 129 | -- Persist the value 130 | -- start the pipeline 131 | red:init_pipeline() 132 | -- set the value 133 | red:set(key, req_json) 134 | -- TTL 135 | red:expire(key, ttl) 136 | 137 | -- execute the pipeline 138 | local _, err = red:commit_pipeline() 139 | if err then 140 | kong.log.err("failed to commit the cache value to Redis: ", err) 141 | return nil, err 142 | end 143 | 144 | -- connection keepalive: max idle timeout, connection pool size 145 | local ok, err2 = red:set_keepalive(10000, 100) 146 | if not ok then 147 | kong.log.err("failed to set Redis keepalive: ", err2) 148 | return nil, err2 149 | end 150 | 151 | return true and req_json or nil, err 152 | end 153 | 154 | 155 | -- Deletes a key 156 | function _M:purge(conf, key) 157 | local red, err_redis = red_connect(conf) 158 | 159 | -- Check that the Redis connection was established 160 | if not red then 161 | kong.log.err("failed to get the Redis connection: ", err_redis) 162 | return nil, "there is no Redis connection established" 163 | end 164 | 165 | if type(key) ~= "string" then 166 | return nil, "key must be a string" 167 | end 168 | 169 | -- delete the entry from Redis 170 | local deleted, err = red:del(key) 171 | if err then 172 | kong.log.err("failed to delete the key from Redis: ", err) 173 | return nil, err 174 | end 175 | 176 | local ok, err2 = red:set_keepalive(10000, 100) 177 | if not ok then 178 | kong.log.err("failed to set Redis keepalive: ", err2) 179 | return nil, err2 180 | end 181 | 182 | return true 183 | end 184 | 185 | -- Deletes every entry in the database 186 | function _M:flush(conf) 187 | local red, err_redis = red_connect(conf) 188 | 189 | -- Check that the Redis connection was established 190 | if not red then 191 | kong.log.err("failed to get the Redis connection: ", err_redis) 192 | return nil, "there is no Redis connection established" 193 | end 194 | 195 | -- flush the whole Redis cache asynchronously 196 | local flushed, err = red:flushdb("async") 197 | if err then 198 | kong.log.err("failed to flush the database from Redis: ", err) 199 | return nil, err 200 | end 201 | 202 | local ok, err2 = red:set_keepalive(10000, 100) 203 | if not ok then 204 | kong.log.err("failed to set Redis keepalive: ", err2) 205 | return nil, err2 206 | end 207 | 208 | return true 209 | end 210 | 211 | return _M 212 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Kong proxy-cache-redis plugin 2 | 3 | HTTP Proxy Redis Caching for Kong 4 | 5 | ## Synopsis 6 | 7 | This plugin provides a reverse proxy cache implementation for Kong. It caches 8 | response entities based on configurable response code and content type, as 9 | well as request method. It can cache per-Consumer or per-API. Cache entities 10 | are stored for a configurable period of time, after which subsequent requests 11 | to the same resource will re-fetch and re-store the resource. Cache entities 12 | can also be forcefully purged via the Admin API prior to their expiration 13 | time. 14 | 15 | It caches all responses in a Redis server. 16 | 17 | ## Kong Plugin Priority 18 | 19 | This plugin has its priority modified to be executed before the rate-limiting plugin. This way, a request that is answered by the cache (a cache Hit) does not count against the rate limit. 20 | 21 | The original proxy-cache plugin from Kong Hub has a priority of 101. This plugin has a priority of 902, right before the rate-limiting plugin at 901. 22 | 23 | If you want a proxy-cache-redis plugin with the original priority so that it is executed after the rate-limiting one, just modify the code and change the priority in handler.lua. 
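For reference, a minimal sketch of what that declaration typically looks like in a Kong plugin handler table (illustrative only; the actual handler.lua contents may differ):

```lua
-- kong/plugins/proxy-cache-redis/handler.lua (sketch, not the actual file)
local ProxyCacheRedisHandler = {
  VERSION  = "2.0.1",
  PRIORITY = 902,  -- run just before rate-limiting (priority 901) so cache hits are not rate limited
}
```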
24 | 25 | ## Cache TTL 26 | 27 | TTL for serving the cached data. Kong sends an `X-Cache-Status` header with one of these values: 28 | 29 | - `Refresh` if the resource was found in cache, but could not satisfy the request, due to Cache-Control behaviors or reaching its hard-coded cache_ttl threshold. 30 | - `ByPass` if the request was not cacheable. 31 | - `Hit` if the request was cacheable, and a cached value was found and returned. 32 | - `Miss` if the request was cacheable, and no cached value was found. 33 | 34 | ## Storage TTL 35 | 36 | Kong can store resource entities in the storage engine longer than the prescribed cache_ttl or Cache-Control values indicate. This allows Kong to maintain a cached copy of a resource past its expiration. This allows clients capable of using max-age and max-stale headers to request stale copies of data if necessary. 37 | 38 | ## vary_body_json_fields 39 | 40 | The plugin allows passing JSON field/property names to be considered in the cache key generation process. Simple string or number fields can be used, and will be taken into account in the cache key hash creation. 41 | 42 | ## allow_force_cache_header 43 | 44 | This boolean allows forcing the cache mechanism even if the request method is not allowed to be cached by configuration. If the client sends a request with the header `X-Proxy-Cache-Redis-Force=true`, the request will bypass the method check and can be eligible for caching. 45 | 46 | It also bypasses the no-cache and no-store directives. The other checks, like response codes, are still applied. 47 | 48 | ## Documentation 49 | 50 | The plugin works in the same way as the official `proxy-cache` plugin, in terms of how it generates the cache key and how it is assigned to a service or route. [Documentation for the Proxy Cache plugin](https://docs.konghq.com/hub/kong-inc/proxy-cache/) 51 | 52 | ## Configuration 53 | 54 | |Parameter|Type|Required|Default|Description| 55 | |---|---|---|---|---| 56 | `name`|string|*required*| |The name of the plugin to use, in this case: `proxy-cache-redis` 57 | `service.id`|string|*optional*| |The ID of the Service the plugin targets. 58 | `route.id`|string|*optional*| |The ID of the Route the plugin targets. 59 | `consumer.id`|string|*optional*| |The ID of the Consumer the plugin targets. 60 | `enabled`|boolean|*optional*|true|Whether this plugin will be applied. 61 | `config.response_code`|array of integers|*required*|[200, 301, 404]|Upstream response status codes considered cacheable. 62 | `config.request_method`|array of strings|*required*|["GET","HEAD"]|Downstream request methods considered cacheable. 63 | `config.allow_force_cache_header`|boolean|*required*|false|If true, clients can send the header "X-Proxy-Cache-Redis-Force" with value true, in order to force the request to be cached, even if its method is not among the request methods allowed to be cached. 64 | `config.content_type`|array of strings|*required*|["text/plain", "application/json"]|Upstream response content types considered cacheable. The plugin performs an exact match against each specified value; for example, if the upstream is expected to respond with an application/json; charset=utf-8 content-type, the plugin configuration must contain said value or a Bypass cache status is returned. 65 | `config.vary_headers`|array of strings|*optional*| |Relevant headers considered for the cache key. If undefined, none of the headers are taken into consideration. 66 | `config.vary_body_json_fields`|array of strings|*optional*| |Relevant JSON fields in the body of the request, to be considered for the cache key. If undefined, none of the fields in the body are taken into consideration. Note: only works on string or number fields, not on fields containing arrays or objects. 67 | `config.vary_query_params`|array of strings|*optional*| |Relevant query parameters considered for the cache key. If undefined, all params are taken into consideration. 68 | `config.cache_ttl`|integer|*required*|300|TTL, in seconds, of cache resources. May be overridden if `cache_control` is true and the client sends `s-maxage` or `max-age` in Cache-Control headers. 69 | `config.cache_control`|boolean|*required*|false|When enabled, respect the Cache-Control behaviors defined in RFC7234. It allows the use of the Cache-Control header with its values (no-store, no-cache, private, only-if-cached, max-age...). Read more info below. 70 | `config.storage_ttl`|integer|*optional*| |Number of seconds to keep resources in the storage backend. This value is independent of cache_ttl or resource TTLs defined by Cache-Control behaviors. The resources may be stored for up to `storage_ttl` secs but served only for `cache_ttl`. 71 | `config.redis_host`|string|*required*| |The hostname or IP address of the redis server. 72 | `config.redis_port`|integer|*optional*|6379|The port of the redis server. 73 | `config.redis_timeout`|integer|*optional*|2000|The timeout in milliseconds for the redis connection. 74 | `config.redis_password`|string|*optional*| |The password (if required) to authenticate to the redis server. 75 | `config.redis_database`|integer|*optional*|0|The Redis database to use for caching the resources. 76 |
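As an illustration, a configuration using the schema defaults plus the one required Redis field might look like this when declared from Lua (for example through the test helpers used in the specs; the values shown are placeholders):

```lua
-- illustrative plugin configuration; redis_host is an assumed example value
config = {
  response_code  = { 200, 301, 404 },
  request_method = { "GET", "HEAD" },
  content_type   = { "text/plain", "application/json" },
  cache_ttl      = 300,
  cache_control  = false,
  redis_host     = "127.0.0.1",
  redis_port     = 6379,
  redis_database = 0,
}
```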
77 | ## Cache-Control header 78 | 79 | When `cache_control` is true in the configuration of the plugin, it reads the following Cache-Control directives: 80 | 81 | - `no-cache` or `no-store` 82 | - This plugin manages both values the same way. A request with any (or both) of these Cache-Control header values will not be cached or stored. 83 | - `private` 84 | - The response will not be cached, but the server may answer with a previously cached response. 85 | - `max-age=<seconds>` 86 | - The maximum amount of time a resource is considered fresh. Unlike Expires, this directive is relative to the time of the request. 87 | - `max-stale[=<seconds>]` 88 | - Indicates the client will accept a stale response. An optional value in seconds indicates the upper limit of staleness the client will accept. 89 | - `min-fresh=<seconds>` 90 | - Indicates the client wants a response that will still be fresh for at least the specified number of seconds. 91 | - `only-if-cached` 92 | - Set by the client to indicate "do not use the network" for the response. The cache should either respond using a stored response, or respond with a 504 status code. 93 | 94 | 95 | 96 | Example of headers: 97 | ``` 98 | Cache-Control: max-age=<seconds> 99 | Cache-Control: max-stale[=<seconds>] 100 | Cache-Control: min-fresh=<seconds> 101 | Cache-Control: no-cache 102 | Cache-Control: no-store 103 | ``` 104 | 105 | More info: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control 106 | -------------------------------------------------------------------------------- /spec/04-invalidations_spec.lua: -------------------------------------------------------------------------------- 1 | local helpers = require "spec.helpers" 2 | 3 | 4 | local POLL_INTERVAL = 0.3 5 | 6 | 7 | for _, strategy in helpers.each_strategy() do 8 | describe("proxy-cache invalidations via: " .. 
strategy, function() 9 | 10 | local client_1 11 | local client_2 12 | local admin_client_1 13 | local admin_client_2 14 | local route1 15 | local route2 16 | local plugin1 17 | local plugin2 18 | local bp 19 | 20 | setup(function() 21 | bp = helpers.get_db_utils(strategy, nil, {"proxy-cache-redis"}) 22 | 23 | route1 = assert(bp.routes:insert { 24 | hosts = { "route-1.com" }, 25 | }) 26 | 27 | route2 = assert(bp.routes:insert { 28 | hosts = { "route-2.com" }, 29 | }) 30 | 31 | plugin1 = assert(bp.plugins:insert { 32 | name = "proxy-cache-redis", 33 | route = { id = route1.id }, 34 | config = { 35 | strategy = "memory", 36 | content_type = { "text/plain", "application/json" }, 37 | memory = { 38 | dictionary_name = "kong", 39 | }, 40 | }, 41 | }) 42 | 43 | plugin2 = assert(bp.plugins:insert { 44 | name = "proxy-cache-redis", 45 | route = { id = route2.id }, 46 | config = { 47 | strategy = "memory", 48 | content_type = { "text/plain", "application/json" }, 49 | memory = { 50 | dictionary_name = "kong", 51 | }, 52 | }, 53 | }) 54 | 55 | local db_update_propagation = strategy == "cassandra" and 3 or 0 56 | 57 | assert(helpers.start_kong { 58 | log_level = "debug", 59 | prefix = "servroot1", 60 | database = strategy, 61 | proxy_listen = "0.0.0.0:8000", 62 | proxy_listen_ssl = "0.0.0.0:8443", 63 | admin_listen = "0.0.0.0:8001", 64 | admin_gui_listen = "0.0.0.0:8002", 65 | admin_ssl = false, 66 | admin_gui_ssl = false, 67 | db_update_frequency = POLL_INTERVAL, 68 | db_update_propagation = db_update_propagation, 69 | plugins = "proxy-cache-redis", 70 | nginx_conf = "spec/fixtures/custom_nginx.template", 71 | }) 72 | 73 | assert(helpers.start_kong { 74 | log_level = "debug", 75 | prefix = "servroot2", 76 | database = strategy, 77 | proxy_listen = "0.0.0.0:9000", 78 | proxy_listen_ssl = "0.0.0.0:9443", 79 | admin_listen = "0.0.0.0:9001", 80 | admin_gui_listen = "0.0.0.0:9002", 81 | admin_ssl = false, 82 | admin_gui_ssl = false, 83 | db_update_frequency = POLL_INTERVAL, 84 | db_update_propagation = db_update_propagation, 85 | plugins = "proxy-cache-redis", 86 | }) 87 | 88 | client_1 = helpers.http_client("127.0.0.1", 8000) 89 | client_2 = helpers.http_client("127.0.0.1", 9000) 90 | admin_client_1 = helpers.http_client("127.0.0.1", 8001) 91 | admin_client_2 = helpers.http_client("127.0.0.1", 9001) 92 | end) 93 | 94 | teardown(function() 95 | helpers.stop_kong("servroot1", true) 96 | helpers.stop_kong("servroot2", true) 97 | end) 98 | 99 | before_each(function() 100 | client_1 = helpers.http_client("127.0.0.1", 8000) 101 | client_2 = helpers.http_client("127.0.0.1", 9000) 102 | admin_client_1 = helpers.http_client("127.0.0.1", 8001) 103 | admin_client_2 = helpers.http_client("127.0.0.1", 9001) 104 | end) 105 | 106 | after_each(function() 107 | client_1:close() 108 | client_2:close() 109 | admin_client_1:close() 110 | admin_client_2:close() 111 | end) 112 | 113 | describe("cache purge", function() 114 | local cache_key, cache_key2 115 | 116 | setup(function() 117 | -- prime cache entries on both instances 118 | local res_1 = assert(client_1:send { 119 | method = "GET", 120 | path = "/get", 121 | headers = { 122 | Host = "route-1.com", 123 | }, 124 | }) 125 | 126 | assert.res_status(200, res_1) 127 | assert.same("Miss", res_1.headers["X-Cache-Status"]) 128 | cache_key = res_1.headers["X-Cache-Key"] 129 | 130 | local res_2 = assert(client_2:send { 131 | method = "GET", 132 | path = "/get", 133 | headers = { 134 | host = "route-1.com", 135 | }, 136 | }) 137 | 138 | assert.res_status(200, res_2) 139 | 
assert.same("Miss", res_2.headers["X-Cache-Status"]) 140 | assert.same(cache_key, res_2.headers["X-Cache-Key"]) 141 | 142 | res_1 = assert(client_1:send { 143 | method = "GET", 144 | path = "/get", 145 | headers = { 146 | host = "route-2.com", 147 | }, 148 | }) 149 | 150 | assert.res_status(200, res_1) 151 | assert.same("Miss", res_1.headers["X-Cache-Status"]) 152 | cache_key2 = res_1.headers["X-Cache-Key"] 153 | assert.not_same(cache_key, cache_key2) 154 | 155 | res_2 = assert(client_2:send { 156 | method = "GET", 157 | path = "/get", 158 | headers = { 159 | host = "route-2.com", 160 | }, 161 | }) 162 | 163 | assert.res_status(200, res_2) 164 | assert.same("Miss", res_2.headers["X-Cache-Status"]) 165 | end) 166 | 167 | it("propagates purges via cluster events mechanism", function() 168 | local res_1 = assert(client_1:send { 169 | method = "GET", 170 | path = "/get", 171 | headers = { 172 | host = "route-1.com", 173 | }, 174 | }) 175 | 176 | assert.res_status(200, res_1) 177 | assert.same("Hit", res_1.headers["X-Cache-Status"]) 178 | 179 | local res_2 = assert(client_2:send { 180 | method = "GET", 181 | path = "/get", 182 | headers = { 183 | host = "route-1.com", 184 | }, 185 | }) 186 | 187 | assert.res_status(200, res_2) 188 | assert.same("Hit", res_2.headers["X-Cache-Status"]) 189 | 190 | -- now purge the entry 191 | local res = assert(admin_client_1:send { 192 | method = "DELETE", 193 | path = "/proxy-cache-redis/" .. plugin1.id .. "/caches/" .. cache_key, 194 | }) 195 | 196 | assert.res_status(204, res) 197 | 198 | helpers.wait_until(function() 199 | -- assert that the entity was purged from the second instance 200 | res = assert(admin_client_2:send { 201 | method = "GET", 202 | path = "/proxy-cache-redis/" .. plugin1.id .. "/caches/" .. cache_key, 203 | }) 204 | res:read_body() 205 | return res.status == 404 206 | end, 10) 207 | 208 | -- refresh and purge with our second endpoint 209 | res_1 = assert(client_1:send { 210 | method = "GET", 211 | path = "/get", 212 | headers = { 213 | Host = "route-1.com", 214 | }, 215 | }) 216 | 217 | assert.res_status(200, res_1) 218 | assert.same("Miss", res_1.headers["X-Cache-Status"]) 219 | 220 | res_2 = assert(client_2:send { 221 | method = "GET", 222 | path = "/get", 223 | headers = { 224 | host = "route-1.com", 225 | }, 226 | }) 227 | 228 | assert.res_status(200, res_2) 229 | assert.same("Miss", res_2.headers["X-Cache-Status"]) 230 | assert.same(cache_key, res_2.headers["X-Cache-Key"]) 231 | 232 | -- now purge the entry 233 | res = assert(admin_client_1:send { 234 | method = "DELETE", 235 | path = "/proxy-cache-redis/" .. cache_key, 236 | }) 237 | 238 | assert.res_status(204, res) 239 | 240 | admin_client_2:close() 241 | admin_client_2 = helpers.http_client("127.0.0.1", 9001) 242 | 243 | helpers.wait_until(function() 244 | -- assert that the entity was purged from the second instance 245 | res = assert(admin_client_2:send { 246 | method = "GET", 247 | path = "/proxy-cache-redis/" .. cache_key, 248 | }) 249 | res:read_body() 250 | return res.status == 404 251 | end, 10) 252 | end) 253 | 254 | it("does not affect cache entries under other plugin instances", function() 255 | local res = assert(admin_client_1:send { 256 | method = "GET", 257 | path = "/proxy-cache-redis/" .. plugin2.id .. "/caches/" .. cache_key2, 258 | }) 259 | 260 | assert.res_status(200, res) 261 | 262 | res = assert(admin_client_2:send { 263 | method = "GET", 264 | path = "/proxy-cache-redis/" .. plugin2.id .. "/caches/" .. 
cache_key2, 265 | }) 266 | 267 | assert.res_status(200, res) 268 | end) 269 | 270 | it("propagates global purges", function() 271 | do 272 | local res = assert(admin_client_1:send { 273 | method = "DELETE", 274 | path = "/proxy-cache-redis/", 275 | }) 276 | 277 | assert.res_status(204, res) 278 | end 279 | 280 | helpers.wait_until(function() 281 | local res = assert(admin_client_1:send { 282 | method = "GET", 283 | path = "/proxy-cache-redis/" .. plugin2.id .. "/caches/" .. cache_key2, 284 | }) 285 | res:read_body() 286 | return res.status == 404 287 | end, 10) 288 | 289 | helpers.wait_until(function() 290 | local res = assert(admin_client_2:send { 291 | method = "GET", 292 | path = "/proxy-cache-redis/" .. plugin2.id .. "/caches/" .. cache_key2, 293 | }) 294 | res:read_body() 295 | return res.status == 404 296 | end, 10) 297 | end) 298 | end) 299 | end) 300 | end 301 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2019 Kong Inc. 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /kong/plugins/proxy-cache-redis/handler.lua: -------------------------------------------------------------------------------- 1 | local require = require 2 | local cache_key = require "kong.plugins.proxy-cache-redis.cache_key" 3 | local redis = require "kong.plugins.proxy-cache-redis.redis" 4 | local tab_new = require("table.new") 5 | 6 | local ngx = ngx 7 | local kong = kong 8 | local type = type 9 | local pairs = pairs 10 | local tostring = tostring 11 | local tonumber = tonumber 12 | local max = math.max 13 | local floor = math.floor 14 | local lower = string.lower 15 | local concat = table.concat 16 | local time = ngx.time 17 | local resp_get_headers = ngx.resp and ngx.resp.get_headers 18 | local ngx_re_gmatch = ngx.re.gmatch 19 | local ngx_re_sub = ngx.re.gsub 20 | local ngx_re_match = ngx.re.match 21 | local parse_http_time = ngx.parse_http_time 22 | 23 | local CACHE_VERSION = 1 24 | local EMPTY = {} 25 | 26 | 27 | -- http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.5.1 28 | -- note content-length is not strictly hop-by-hop but we will be 29 | -- adjusting it here anyhow 30 | local hop_by_hop_headers = { 31 | ["connection"] = true, 32 | ["keep-alive"] = true, 33 | ["proxy-authenticate"] = true, 34 | ["proxy-authorization"] = true, 35 | ["te"] = true, 36 | ["trailers"] = true, 37 | ["transfer-encoding"] = true, 38 | ["upgrade"] = true, 39 | ["content-length"] = true, 40 | } 41 | 42 | local function overwritable_header(header) 43 | local n_header = lower(header) 44 | 45 | return not hop_by_hop_headers[n_header] 46 | and not ngx_re_match(n_header, "ratelimit-remaining") 47 | end 48 | 49 | local function parse_directive_header(h) 50 | if not h then 51 | return EMPTY 52 | end 53 | 54 | if type(h) == "table" then 55 | h = concat(h, ", ") 56 | end 57 | 58 | local t = {} 59 | local res = tab_new(3, 0) 60 | local iter = ngx_re_gmatch(h, "([^,]+)", "oj") 61 | 62 | local m = iter() 63 | while m do 64 | local _, err = ngx_re_match(m[0], [[^\s*([^=]+)(?:=(.+))?]], "oj", nil, res) 65 | if err then 66 | kong.log.err(err) 67 | end 68 | 69 | -- store the directive token as a numeric value if it looks like a number; 70 | -- otherwise, store the string value. 
for directives without a token, we just
71 |     -- set the key to true
72 |     t[lower(res[1])] = tonumber(res[2]) or res[2] or true
73 | 
74 |     m = iter()
75 |   end
76 | 
77 |   return t
78 | end
79 | 
80 | local function req_cc()
81 |   return parse_directive_header(ngx.var.http_cache_control)
82 | end
83 | 
84 | local function res_cc()
85 |   return parse_directive_header(ngx.var.sent_http_cache_control)
86 | end
87 | 
88 | local function resource_ttl(res_cc)
89 |   local max_age = res_cc["s-maxage"] or res_cc["max-age"]
90 | 
91 |   if not max_age then
92 |     local expires = ngx.var.sent_http_expires
93 | 
94 |     -- if multiple Expires headers are present, last one wins
95 |     if type(expires) == "table" then
96 |       expires = expires[#expires]
97 |     end
98 | 
99 |     local exp_time = parse_http_time(tostring(expires))
100 |     if exp_time then
101 |       max_age = exp_time - time()
102 |     end
103 |   end
104 | 
105 |   return max_age and max(max_age, 0) or 0
106 | end
107 | 
108 | -- Check whether the request is cacheable
109 | local function cacheable_request(conf, cc)
110 |   do
111 |     -- check whether forcing the cache is allowed and whether the force header is present
112 |     local forceHeader = kong.request.get_header("X-Proxy-Cache-Redis-Force")
113 |     if conf.allow_force_cache_header and forceHeader == "true" then
114 |       return true
115 |     end
116 | 
117 |     -- check request method
118 |     local method = kong.request.get_method()
119 |     local method_match = false
120 |     for i = 1, #conf.request_method do
121 |       if conf.request_method[i] == method then
122 |         method_match = true
123 |         break
124 |       end
125 |     end
126 | 
127 |     if not method_match then
128 |       return false
129 |     end
130 |   end
131 | 
132 |   -- check for explicit disallow directives
133 |   if conf.cache_control and (cc["no-store"] or cc["no-cache"] or ngx.var.authorization) then
134 |     return false
135 |   end
136 | 
137 |   return true
138 | end
139 | 
140 | -- Check whether the response is cacheable
141 | local function cacheable_response(conf, cc)
142 |   do
143 |     local status = kong.response.get_status()
144 |     local status_match = false
145 | 
146 |     for i = 1, #conf.response_code do
147 |       if conf.response_code[i] == status then
148 |         status_match = true
149 |         break
150 |       end
151 |     end
152 | 
153 |     if not status_match then
154 |       return false
155 |     end
156 |   end
157 | 
158 |   do
159 |     local content_type = ngx.var.sent_http_content_type
160 | 
161 |     -- bail if we cannot examine this content type
162 |     if not content_type or type(content_type) == "table" or content_type == "" then
163 |       return false
164 |     end
165 | 
166 |     local content_match = false
167 |     for i = 1, #conf.content_type do
168 |       if conf.content_type[i] == content_type then
169 |         content_match = true
170 |         break
171 |       end
172 |     end
173 | 
174 |     if not content_match then
175 |       return false
176 |     end
177 |   end
178 | 
179 |   if conf.cache_control and (cc["private"] or cc["no-store"] or cc["no-cache"])
180 |   then
181 |     return false
182 |   end
183 | 
184 |   if conf.cache_control and resource_ttl(cc) <= 0 then
185 |     return false
186 |   end
187 | 
188 |   return true
189 | end
190 | 
191 | 
192 | -- indicate that we should attempt to cache the response to this request
193 | -- (i.e. try to store this response in the cache later on)
194 | local function signal_cache_req(ctx, this_cache_key, cache_status)
195 |   ctx.proxy_cache_redis = {
196 |     cache_key = this_cache_key,
197 |   }
198 | 
199 |   kong.response.set_header("X-Cache-Status", cache_status or "Miss")
200 | end
201 | 
202 | -- Store a value in the cache backend (Redis)
203 | local function store_cache_value(premature, conf, req_body, status, proxy_cache)
204 | 
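    -- the table below is the full object persisted to Redis: response status,
    -- headers, body and body length, the capture timestamp, the computed TTL,
    -- the cache format version, and the request body used by the vary-by-body
    -- feature. The storage TTL resolves to conf.storage_ttl when set, otherwise
    -- to the response TTL when cache_control is enabled, and finally to
    -- conf.cache_ttl.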
205 |   local res = {
206 |     status = status,
207 |     headers = proxy_cache.res_headers,
208 |     body = proxy_cache.res_body,
209 |     body_len = #proxy_cache.res_body,
210 |     timestamp = time(),
211 |     ttl = proxy_cache.res_ttl,
212 |     version = CACHE_VERSION,
213 |     req_body = req_body,
214 |   }
215 | 
216 |   local ttl = conf.storage_ttl or conf.cache_control and proxy_cache.res_ttl or conf.cache_ttl
217 | 
218 |   -- store the response and its metadata in the cache
219 |   local ok, err = redis:store(conf, proxy_cache.cache_key, res, ttl)
220 |   if not ok then
221 |     kong.log.err(err)
222 |   end
223 | end
224 | 
225 | local ProxyCacheHandler = {
226 |   VERSION = "2.0.1-0",
227 |   PRIORITY = 902,
228 | }
229 | 
230 | 
231 | -- Executed upon every Nginx worker process's startup.
232 | function ProxyCacheHandler:init_worker()
233 | end
234 | 
235 | 
236 | -- Executed for every request from a client, before it is proxied to the upstream service.
237 | function ProxyCacheHandler:access(conf)
238 |   kong.ctx.shared.plugin_configuration = conf
239 | 
240 |   local cc = req_cc()
241 | 
242 |   -- if we know this request is not cacheable, bail out
243 |   if not cacheable_request(conf, cc) then
244 |     kong.response.set_header("X-Cache-Status", "Bypass")
245 |     return
246 |   end
247 | 
248 |   -- if the configuration tells us to take the JSON body into account
249 |   local theBody = ""
250 |   if (conf.vary_body_json_fields or EMPTY)[1] then
251 |     -- if the body is JSON, include it in the cache key
252 |     local body, err6, mimetype = kong.request.get_body('application/json')
253 |     if not err6 and mimetype == 'application/json' then
254 |       theBody = body
255 |     end
256 |   end
257 | 
258 | 
259 |   -- build the key (hash) for this request
260 |   local consumer = kong.client.get_consumer()
261 |   local route = kong.router.get_route()
262 |   local uri = ngx_re_sub(ngx.var.request, "\\?.*", "", "oj")
263 |   local the_cache_key = cache_key.build_cache_key(
264 |     consumer and consumer.id,
265 |     route and route.id,
266 |     kong.request.get_method(),
267 |     uri,
268 |     kong.request.get_query(),
269 |     kong.request.get_headers(),
270 |     theBody,
271 |     conf)
272 | 
273 |   kong.response.set_header("X-Cache-Key", the_cache_key)
274 | 
275 |   -- try to fetch the cached object from the computed cache key
276 |   local ctx = kong.ctx.plugin
277 |   -- look up the cache entry corresponding to this key
278 |   local res, err = redis:fetch(conf, the_cache_key)
279 |   -- the fetch reports that there is no cached entry for this request
280 |   if err == "request object not in cache" then
281 | 
282 |     -- this request wasn't found in the data store, but the client only wanted
283 |     -- cache data. see https://tools.ietf.org/html/rfc7234#section-5.2.1.7
284 |     if conf.cache_control and cc["only-if-cached"] then
285 |       return kong.response.exit(ngx.HTTP_GATEWAY_TIMEOUT)
286 |     end
287 | 
288 |     ctx.req_body = kong.request.get_raw_body()
289 | 
290 |     -- this request is cacheable but wasn't found in the data store
291 |     -- make a note that we should store it in cache later,
292 |     -- and pass the request upstream
293 |     return signal_cache_req(ctx, the_cache_key)
294 | 
295 |   elseif err then
296 |     kong.log.err(err)
297 |     return
298 |   end
299 | 
300 |   -- if the cached data's version differs from the current one, purge it (to avoid errors)
301 |   if res.version ~= CACHE_VERSION then
302 |     kong.log.notice("cache format mismatch, purging ", the_cache_key)
303 |     redis:purge(conf, the_cache_key)
304 |     return signal_cache_req(ctx, the_cache_key, "Bypass")
305 |   end
306 | 
307 |   -- figure out if the client will accept our cache value
308 |   if conf.cache_control then
309 |     if cc["max-age"] and time() - res.timestamp > cc["max-age"] then
310 |       return signal_cache_req(ctx, the_cache_key, "Refresh")
311 |     end
312 | 
313 |     if cc["max-stale"] and time() - res.timestamp - res.ttl > cc["max-stale"]
314 |     then
315 |       return signal_cache_req(ctx, the_cache_key, "Refresh")
316 |     end
317 | 
318 |     if cc["min-fresh"] and res.ttl - (time() - res.timestamp) < cc["min-fresh"]
319 |     then
320 |       return signal_cache_req(ctx, the_cache_key, "Refresh")
321 |     end
322 | 
323 |   else
324 |     -- don't serve stale data; res may be stored for up to `conf.storage_ttl` secs but served only for conf.cache_ttl
325 |     -- (the entry remains in storage for conf.storage_ttl seconds,
326 |     -- but it is only served during conf.cache_ttl)
327 |     if time() - res.timestamp > conf.cache_ttl then
328 |       return signal_cache_req(ctx, the_cache_key, "Refresh")
329 |     end
330 |   end
331 | 
332 |   -- we have cache data yo!
333 |   -- expose response data for logging plugins
334 |   local response_data = {
335 |     res = res,
336 |     req = {
337 |       body = res.req_body,
338 |     },
339 |     server_addr = ngx.var.server_addr,
340 |   }
341 | 
342 |   kong.ctx.shared.proxy_cache_hit = response_data
343 | 
344 |   local nctx = ngx.ctx
345 |   nctx.proxy_cache_hit = response_data
346 |   nctx.KONG_PROXIED = true
347 | 
348 |   for k in pairs(res.headers) do
349 |     if not overwritable_header(k) then
350 |       res.headers[k] = nil
351 |     end
352 |   end
353 | 
354 |   res.headers["Age"] = floor(time() - res.timestamp)
355 |   res.headers["X-Cache-Status"] = "Hit"
356 | 
357 |   return kong.response.exit(res.status, res.body, res.headers)
358 | end
359 | 
360 | 
361 | -- Executed when all response header bytes have been received from the upstream service.
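-- If the access phase flagged this request for caching (ctx.proxy_cache_redis is set)
-- and the response itself is cacheable, this phase captures the upstream response
-- headers and decides the TTL to store with; otherwise it reports
-- X-Cache-Status: Bypass and clears the flag.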
362 | function ProxyCacheHandler:header_filter(conf)
363 |   local ctx = kong.ctx.plugin
364 |   local proxy_cache = ctx.proxy_cache_redis
365 |   -- don't look at our headers if
366 |   -- a) the request wasn't cacheable, or
367 |   -- b) the request was served from cache
368 |   if not proxy_cache then
369 |     return
370 |   end
371 | 
372 |   local cc = res_cc()
373 | 
374 |   -- if this is a cacheable request, gather the headers and mark it so
375 |   if cacheable_response(conf, cc) then
376 |     proxy_cache.res_headers = resp_get_headers(0, true)
377 |     proxy_cache.res_ttl = conf.cache_control and resource_ttl(cc) or conf.cache_ttl
378 |   else
379 |     kong.response.set_header("X-Cache-Status", "Bypass")
380 |     ctx.proxy_cache_redis = nil
381 |   end
382 | 
383 |   -- TODO handle Vary header
384 | end
385 | 
386 | 
387 | -- Executed for each chunk of the response body received from the upstream service. Since the response is streamed back to the client,
388 | -- it can exceed the buffer size and be streamed chunk by chunk; hence this method can be called multiple times if the response is large.
389 | function ProxyCacheHandler:body_filter(conf)
390 |   local ctx = kong.ctx.plugin
391 |   local proxy_cache = ctx.proxy_cache_redis
392 |   if not proxy_cache then
393 |     return
394 |   end
395 | 
396 |   local chunk = ngx.arg[1]
397 |   local eof = ngx.arg[2]
398 | 
399 |   proxy_cache.res_body = (proxy_cache.res_body or "") .. (chunk or "")
400 | 
401 |   if eof then
402 |     -- defer the store operation: cosocket connections (needed to reach Redis) are not available in body_filter
403 |     ngx.timer.at(0, store_cache_value, conf, ctx.req_body, kong.response.get_status(), proxy_cache)
404 |   end
405 | end
406 | 
407 | return ProxyCacheHandler
408 | 
--------------------------------------------------------------------------------
/spec/03-api_spec.lua:
--------------------------------------------------------------------------------
1 | local helpers = require "spec.helpers"
2 | local cjson = require "cjson"
3 | 
4 | 
5 | describe("Plugin: proxy-cache-redis", function()
6 |   local bp
7 |   local proxy_client, admin_client, cache_key, plugin1, route1
8 | 
9 |   setup(function()
10 |     bp = helpers.get_db_utils(nil, nil, {"proxy-cache-redis"})
11 | 
12 |     route1 = assert(bp.routes:insert {
13 |       hosts = { "route-1.com" },
14 |     })
15 |     plugin1 = assert(bp.plugins:insert {
16 |       name = "proxy-cache-redis",
17 |       route = { id = route1.id },
18 |       config = {
19 |         strategy = "memory",
20 |         content_type = { "text/plain", "application/json" },
21 |         memory = {
22 |           dictionary_name = "kong",
23 |         },
24 |       },
25 |     })
26 | 
27 |     -- an additional plugin does not interfere with the iteration in
28 |     -- the global /proxy-cache API handler: regression test for
29 |     -- https://github.com/Kong/kong-plugin-proxy-cache/issues/12
30 |     assert(bp.plugins:insert {
31 |       name = "request-transformer",
32 |     })
33 | 
34 |     local route2 = assert(bp.routes:insert {
35 |       hosts = { "route-2.com" },
36 |     })
37 | 
38 |     assert(bp.plugins:insert {
39 |       name = "proxy-cache-redis",
40 |       route = { id = route2.id },
41 |       config = {
42 |         strategy = "memory",
43 |         content_type = { "text/plain", "application/json" },
44 |         memory = {
45 |           dictionary_name = "kong",
46 |         },
47 |       },
48 |     })
49 | 
50 |     assert(helpers.start_kong({
51 |       plugins = "proxy-cache-redis,request-transformer",
52 |       nginx_conf = "spec/fixtures/custom_nginx.template",
53 |     }))
54 | 
55 |     if admin_client then
56 |       admin_client:close()
57 |     end
58 |     if proxy_client then
59 |       proxy_client:close()
60 |     end
61 | 
62 |     admin_client = 
helpers.admin_client() 63 | proxy_client = helpers.proxy_client() 64 | end) 65 | 66 | teardown(function() 67 | helpers.stop_kong(nil, true) 68 | end) 69 | 70 | describe("(schema)", function() 71 | local body 72 | 73 | it("accepts an array of numbers as strings", function() 74 | local res = assert(admin_client:send { 75 | method = "POST", 76 | path = "/plugins", 77 | body = { 78 | name = "proxy-cache-redis", 79 | config = { 80 | strategy = "memory", 81 | memory = { 82 | dictionary_name = "kong", 83 | }, 84 | response_code = {123, 200}, 85 | cache_ttl = 600, 86 | request_method = { "GET" }, 87 | content_type = { "text/json" }, 88 | }, 89 | }, 90 | headers = { 91 | ["Content-Type"] = "application/json", 92 | }, 93 | }) 94 | body = assert.res_status(201, res) 95 | end) 96 | it("casts an array of response_code values to number types", function() 97 | local json = cjson.decode(body) 98 | for _, v in ipairs(json.config.response_code) do 99 | assert.is_number(v) 100 | end 101 | end) 102 | it("errors if response_code is an empty array", function() 103 | local res = assert(admin_client:send { 104 | method = "POST", 105 | path = "/plugins", 106 | body = { 107 | name = "proxy-cache-redis", 108 | config = { 109 | strategy = "memory", 110 | memory = { 111 | dictionary_name = "kong", 112 | }, 113 | response_code = {}, 114 | cache_ttl = 600, 115 | request_method = { "GET" }, 116 | content_type = { "text/json" }, 117 | }, 118 | }, 119 | headers = { 120 | ["Content-Type"] = "application/json", 121 | }, 122 | }) 123 | local body = assert.res_status(400, res) 124 | local json_body = cjson.decode(body) 125 | assert.same("length must be at least 1", json_body.fields.config.response_code) 126 | end) 127 | it("errors if response_code is a string", function() 128 | local res = assert(admin_client:send { 129 | method = "POST", 130 | path = "/plugins", 131 | body = { 132 | name = "proxy-cache-redis", 133 | config = { 134 | strategy = "memory", 135 | memory = { 136 | dictionary_name = "kong", 137 | }, 138 | response_code = {}, 139 | cache_ttl = 600, 140 | request_method = "GET", 141 | content_type = "text/json", 142 | }, 143 | }, 144 | headers = { 145 | ["Content-Type"] = "application/json", 146 | }, 147 | }) 148 | local body = assert.res_status(400, res) 149 | local json_body = cjson.decode(body) 150 | assert.same("length must be at least 1", json_body.fields.config.response_code) 151 | end) 152 | it("errors if response_code has non-numeric values", function() 153 | local res = assert(admin_client:send { 154 | method = "POST", 155 | path = "/plugins", 156 | body = { 157 | name = "proxy-cache-redis", 158 | config = { 159 | strategy = "memory", 160 | memory = { 161 | dictionary_name = "kong", 162 | }, 163 | response_code = {true, "alo", 123}, 164 | cache_ttl = 600, 165 | request_method = "GET", 166 | content_type = "text/json", 167 | }, 168 | }, 169 | headers = { 170 | ["Content-Type"] = "application/json", 171 | }, 172 | }) 173 | local body = assert.res_status(400, res) 174 | local json_body = cjson.decode(body) 175 | assert.same( { "expected an integer", "expected an integer" }, 176 | json_body.fields.config.response_code) 177 | end) 178 | it("errors if response_code has float value", function() 179 | local res = assert(admin_client:send { 180 | method = "POST", 181 | path = "/plugins", 182 | body = { 183 | name = "proxy-cache-redis", 184 | config = { 185 | strategy = "memory", 186 | memory = { 187 | dictionary_name = "kong", 188 | }, 189 | response_code = {90}, 190 | cache_ttl = 600, 191 | request_method = "GET", 
192 | content_type = "text/json", 193 | }, 194 | }, 195 | headers = { 196 | ["Content-Type"] = "application/json", 197 | }, 198 | }) 199 | local body = assert.res_status(400, res) 200 | local json_body = cjson.decode(body) 201 | assert.same({ "value should be between 100 and 900" }, 202 | json_body.fields.config.response_code) 203 | end) 204 | end) 205 | describe("(API)", function() 206 | describe("DELETE", function() 207 | it("delete a cache entry", function() 208 | local res = assert(proxy_client:send { 209 | method = "GET", 210 | path = "/get", 211 | headers = { 212 | host = "route-1.com", 213 | } 214 | }) 215 | 216 | assert.res_status(200, res) 217 | assert.same("Miss", res.headers["X-Cache-Status"]) 218 | 219 | -- cache key is an md5sum of the prefix uuid, method, and $request 220 | local cache_key1 = res.headers["X-Cache-Key"] 221 | assert.matches("^[%w%d]+$", cache_key1) 222 | assert.equals(32, #cache_key1) 223 | cache_key = cache_key1 224 | 225 | res = assert(proxy_client:send { 226 | method = "GET", 227 | path = "/get", 228 | headers = { 229 | host = "route-1.com", 230 | } 231 | }) 232 | 233 | assert.res_status(200, res) 234 | assert.same("Hit", res.headers["X-Cache-Status"]) 235 | local cache_key2 = res.headers["X-Cache-Key"] 236 | assert.same(cache_key1, cache_key2) 237 | 238 | -- delete the key 239 | res = assert(admin_client:send { 240 | method = "DELETE", 241 | path = "/proxy-cache-redis/" .. plugin1.id .. "/caches/" .. cache_key, 242 | }) 243 | assert.res_status(204, res) 244 | 245 | local res = assert(proxy_client:send { 246 | method = "GET", 247 | path = "/get", 248 | headers = { 249 | host = "route-1.com", 250 | } 251 | }) 252 | 253 | assert.res_status(200, res) 254 | assert.same("Miss", res.headers["X-Cache-Status"]) 255 | 256 | -- delete directly, having to look up all proxy-cache instances 257 | res = assert(admin_client:send { 258 | method = "DELETE", 259 | path = "/proxy-cache-redis/" .. 
cache_key, 260 | }) 261 | assert.res_status(204, res) 262 | 263 | local res = assert(proxy_client:send { 264 | method = "GET", 265 | path = "/get", 266 | headers = { 267 | host = "route-1.com", 268 | } 269 | }) 270 | 271 | assert.res_status(200, res) 272 | assert.same("Miss", res.headers["X-Cache-Status"]) 273 | end) 274 | it("purge all the cache entries", function() 275 | -- make a `Hit` request to `route-1` 276 | local res = assert(proxy_client:send { 277 | method = "GET", 278 | path = "/get", 279 | headers = { 280 | host = "route-1.com", 281 | } 282 | }) 283 | assert.res_status(200, res) 284 | assert.same("Hit", res.headers["X-Cache-Status"]) 285 | 286 | -- make a `Miss` request to `route-2` 287 | local res = assert(proxy_client:send { 288 | method = "GET", 289 | path = "/get", 290 | headers = { 291 | host = "route-2.com", 292 | } 293 | }) 294 | 295 | assert.res_status(200, res) 296 | assert.same("Miss", res.headers["X-Cache-Status"]) 297 | 298 | -- cache key is an md5sum of the prefix uuid, method, and $request 299 | local cache_key1 = res.headers["X-Cache-Key"] 300 | assert.matches("^[%w%d]+$", cache_key1) 301 | assert.equals(32, #cache_key1) 302 | 303 | -- make a `Hit` request to `route-1` 304 | res = assert(proxy_client:send { 305 | method = "GET", 306 | path = "/get", 307 | headers = { 308 | host = "route-2.com", 309 | } 310 | }) 311 | 312 | assert.res_status(200, res) 313 | assert.same("Hit", res.headers["X-Cache-Status"]) 314 | local cache_key2 = res.headers["X-Cache-Key"] 315 | assert.same(cache_key1, cache_key2) 316 | 317 | -- delete all the cache keys 318 | res = assert(admin_client:send { 319 | method = "DELETE", 320 | path = "/proxy-cache-redis", 321 | }) 322 | assert.res_status(204, res) 323 | 324 | local res = assert(proxy_client:send { 325 | method = "GET", 326 | path = "/get", 327 | headers = { 328 | host = "route-1.com", 329 | } 330 | }) 331 | 332 | assert.res_status(200, res) 333 | assert.same("Miss", res.headers["X-Cache-Status"]) 334 | 335 | local res = assert(proxy_client:send { 336 | method = "GET", 337 | path = "/get", 338 | headers = { 339 | host = "route-2.com", 340 | } 341 | }) 342 | 343 | assert.res_status(200, res) 344 | assert.same("Miss", res.headers["X-Cache-Status"]) 345 | end) 346 | it("delete a non-existing cache key", function() 347 | -- delete all the cache keys 348 | local res = assert(admin_client:send { 349 | method = "DELETE", 350 | path = "/proxy-cache-redis", 351 | }) 352 | assert.res_status(204, res) 353 | 354 | local res = assert(admin_client:send { 355 | method = "DELETE", 356 | path = "/proxy-cache-redis/" .. plugin1.id .. "/caches/" .. "123", 357 | }) 358 | assert.res_status(404, res) 359 | end) 360 | it("delete a non-existing plugins's cache key", function() 361 | -- delete all the cache keys 362 | local res = assert(admin_client:send { 363 | method = "DELETE", 364 | path = "/proxy-cache-redis", 365 | }) 366 | assert.res_status(204, res) 367 | 368 | local res = assert(admin_client:send { 369 | method = "DELETE", 370 | path = "/proxy-cache-redis/" .. route1.id .. "/caches/" .. "123", 371 | }) 372 | assert.res_status(404, res) 373 | end) 374 | end) 375 | describe("GET", function() 376 | it("get a non-existing cache", function() 377 | -- delete all the cache keys 378 | local res = assert(admin_client:send { 379 | method = "DELETE", 380 | path = "/proxy-cache-redis", 381 | }) 382 | assert.res_status(204, res) 383 | 384 | local res = assert(admin_client:send { 385 | method = "GET", 386 | path = "/proxy-cache-redis/" .. plugin1.id .. 
"/caches/" .. cache_key, 387 | }) 388 | assert.res_status(404, res) 389 | 390 | -- attempt to list an entry directly via cache key 391 | local res = assert(admin_client:send { 392 | method = "GET", 393 | path = "/proxy-cache-redis/" .. cache_key, 394 | }) 395 | assert.res_status(404, res) 396 | end) 397 | it("get a existing cache", function() 398 | -- add request to cache 399 | local res = assert(proxy_client:send { 400 | method = "GET", 401 | path = "/get", 402 | headers = { 403 | host = "route-1.com", 404 | } 405 | }) 406 | assert.res_status(200, res) 407 | 408 | local res = assert(admin_client:send { 409 | method = "GET", 410 | path = "/proxy-cache-redis/" .. plugin1.id .. "/caches/" .. cache_key, 411 | }) 412 | local body = assert.res_status(200, res) 413 | local json_body = cjson.decode(body) 414 | assert.same(cache_key, json_body.headers["X-Cache-Key"]) 415 | 416 | -- list an entry directly via cache key 417 | local res = assert(admin_client:send { 418 | method = "GET", 419 | path = "/proxy-cache-redis/" .. cache_key, 420 | }) 421 | local body = assert.res_status(200, res) 422 | local json_body = cjson.decode(body) 423 | assert.same(cache_key, json_body.headers["X-Cache-Key"]) 424 | end) 425 | end) 426 | end) 427 | end) 428 | -------------------------------------------------------------------------------- /spec/02-access_spec.lua: -------------------------------------------------------------------------------- 1 | local helpers = require "spec.helpers" 2 | local strategies = require("kong.plugins.proxy-cache-redis.strategies") 3 | 4 | 5 | --local TIMEOUT = 10 -- default timeout for non-memory strategies 6 | 7 | -- use wait_until spec helper only on async strategies 8 | --local function strategy_wait_until(strategy, func, timeout) 9 | -- if strategies.DELAY_STRATEGY_STORE[strategy] then 10 | -- helpers.wait_until(func, timeout) 11 | -- end 12 | --end 13 | 14 | 15 | do 16 | local policy = "memory" 17 | describe("proxy-cache-redis access with policy: " .. 
policy, function() 18 | local client, admin_client 19 | --local cache_key 20 | local policy_config = { dictionary_name = "kong", } 21 | 22 | local strategy = strategies({ 23 | strategy_name = policy, 24 | strategy_opts = policy_config, 25 | }) 26 | 27 | setup(function() 28 | 29 | local bp = helpers.get_db_utils(nil, nil, {"proxy-cache-redis"}) 30 | strategy:flush(true) 31 | 32 | local route1 = assert(bp.routes:insert { 33 | hosts = { "route-1.com" }, 34 | }) 35 | local route2 = assert(bp.routes:insert { 36 | hosts = { "route-2.com" }, 37 | }) 38 | assert(bp.routes:insert { 39 | hosts = { "route-3.com" }, 40 | }) 41 | assert(bp.routes:insert { 42 | hosts = { "route-4.com" }, 43 | }) 44 | local route5 = assert(bp.routes:insert { 45 | hosts = { "route-5.com" }, 46 | }) 47 | local route6 = assert(bp.routes:insert { 48 | hosts = { "route-6.com" }, 49 | }) 50 | local route7 = assert(bp.routes:insert { 51 | hosts = { "route-7.com" }, 52 | }) 53 | local route8 = assert(bp.routes:insert { 54 | hosts = { "route-8.com" }, 55 | }) 56 | local route9 = assert(bp.routes:insert { 57 | hosts = { "route-9.com" }, 58 | }) 59 | local route10 = assert(bp.routes:insert { 60 | hosts = { "route-10.com" }, 61 | }) 62 | local route11 = assert(bp.routes:insert { 63 | hosts = { "route-11.com" }, 64 | }) 65 | local route12 = assert(bp.routes:insert { 66 | hosts = { "route-12.com" }, 67 | }) 68 | local route13 = assert(bp.routes:insert { 69 | hosts = { "route-13.com" }, 70 | }) 71 | local route14 = assert(bp.routes:insert { 72 | hosts = { "route-14.com" }, 73 | }) 74 | local route15 = assert(bp.routes:insert({ 75 | hosts = { "route-15.com" }, 76 | })) 77 | local route16 = assert(bp.routes:insert({ 78 | hosts = { "route-16.com" }, 79 | })) 80 | 81 | local consumer1 = assert(bp.consumers:insert { 82 | username = "bob", 83 | }) 84 | assert(bp.keyauth_credentials:insert { 85 | key = "bob", 86 | consumer = { id = consumer1.id }, 87 | }) 88 | local consumer2 = assert(bp.consumers:insert { 89 | username = "alice", 90 | }) 91 | assert(bp.keyauth_credentials:insert { 92 | key = "alice", 93 | consumer = { id = consumer2.id }, 94 | }) 95 | assert(bp.plugins:insert { 96 | name = "key-auth", 97 | route = { id = route5.id }, 98 | config = {}, 99 | }) 100 | assert(bp.plugins:insert { 101 | name = "key-auth", 102 | route = { id = route13.id }, 103 | config = {}, 104 | }) 105 | assert(bp.plugins:insert { 106 | name = "key-auth", 107 | route = { id = route14.id }, 108 | config = {}, 109 | }) 110 | assert(bp.plugins:insert { 111 | name = "key-auth", 112 | route = { id = route15.id }, 113 | config = {}, 114 | }) 115 | assert(bp.plugins:insert { 116 | name = "key-auth", 117 | route = { id = route16.id }, 118 | config = {}, 119 | }) 120 | 121 | assert(bp.plugins:insert { 122 | name = "proxy-cache-redis", 123 | route = { id = route1.id }, 124 | config = { 125 | strategy = policy, 126 | content_type = { "text/plain", "application/json" }, 127 | [policy] = policy_config, 128 | }, 129 | }) 130 | 131 | assert(bp.plugins:insert { 132 | name = "proxy-cache-redis", 133 | route = { id = route2.id }, 134 | config = { 135 | strategy = policy, 136 | content_type = { "text/plain", "application/json" }, 137 | [policy] = policy_config, 138 | }, 139 | }) 140 | 141 | -- global plugin for routes 3 and 4 142 | assert(bp.plugins:insert { 143 | name = "proxy-cache-redis", 144 | config = { 145 | strategy = policy, 146 | content_type = { "text/plain", "application/json" }, 147 | [policy] = policy_config, 148 | }, 149 | }) 150 | 151 | assert(bp.plugins:insert { 
152 | name = "proxy-cache-redis", 153 | route = { id = route5.id }, 154 | config = { 155 | strategy = policy, 156 | content_type = { "text/plain", "application/json" }, 157 | [policy] = policy_config, 158 | }, 159 | }) 160 | 161 | assert(bp.plugins:insert { 162 | name = "proxy-cache-redis", 163 | route = { id = route6.id }, 164 | config = { 165 | strategy = policy, 166 | content_type = { "text/plain", "application/json" }, 167 | [policy] = policy_config, 168 | cache_ttl = 2, 169 | }, 170 | }) 171 | 172 | assert(bp.plugins:insert { 173 | name = "proxy-cache-redis", 174 | route = { id = route7.id }, 175 | config = { 176 | strategy = policy, 177 | content_type = { "text/plain", "application/json" }, 178 | [policy] = policy_config, 179 | cache_control = true, 180 | }, 181 | }) 182 | 183 | assert(bp.plugins:insert { 184 | name = "proxy-cache-redis", 185 | route = { id = route8.id }, 186 | config = { 187 | strategy = policy, 188 | content_type = { "text/plain", "application/json" }, 189 | [policy] = policy_config, 190 | cache_control = true, 191 | storage_ttl = 600, 192 | }, 193 | }) 194 | 195 | assert(bp.plugins:insert { 196 | name = "proxy-cache-redis", 197 | route = { id = route9.id }, 198 | config = { 199 | strategy = policy, 200 | content_type = { "text/plain", "application/json" }, 201 | [policy] = policy_config, 202 | cache_ttl = 2, 203 | storage_ttl = 60, 204 | }, 205 | }) 206 | 207 | assert(bp.plugins:insert { 208 | name = "proxy-cache-redis", 209 | route = { id = route10.id }, 210 | config = { 211 | strategy = policy, 212 | content_type = { "text/html; charset=utf-8", "application/json" }, 213 | response_code = { 200, 417 }, 214 | request_method = { "GET", "HEAD", "POST" }, 215 | [policy] = policy_config, 216 | }, 217 | }) 218 | 219 | assert(bp.plugins:insert { 220 | name = "proxy-cache-redis", 221 | route = { id = route11.id }, 222 | config = { 223 | strategy = policy, 224 | [policy] = policy_config, 225 | content_type = { "text/plain", "application/json" }, 226 | response_code = { 200 }, 227 | request_method = { "GET", "HEAD", "POST" }, 228 | vary_headers = {"foo"} 229 | }, 230 | }) 231 | 232 | assert(bp.plugins:insert { 233 | name = "proxy-cache-redis", 234 | route = { id = route12.id }, 235 | config = { 236 | strategy = policy, 237 | [policy] = policy_config, 238 | content_type = { "text/plain", "application/json" }, 239 | response_code = { 200 }, 240 | request_method = { "GET", "HEAD", "POST" }, 241 | vary_query_params = {"foo"} 242 | }, 243 | }) 244 | 245 | assert(helpers.start_kong({ 246 | plugins = "bundled,proxy-cache-redis", 247 | nginx_conf = "spec/fixtures/custom_nginx.template", 248 | })) 249 | end) 250 | 251 | 252 | before_each(function() 253 | if client then 254 | client:close() 255 | end 256 | if admin_client then 257 | admin_client:close() 258 | end 259 | client = helpers.proxy_client() 260 | admin_client = helpers.admin_client() 261 | end) 262 | 263 | 264 | teardown(function() 265 | if client then 266 | client:close() 267 | end 268 | 269 | if admin_client then 270 | admin_client:close() 271 | end 272 | 273 | helpers.stop_kong(nil, true) 274 | end) 275 | 276 | it("caches a simple request", function() 277 | local res = assert(client:send { 278 | method = "GET", 279 | path = "/get", 280 | headers = { 281 | host = "route-1.com", 282 | } 283 | }) 284 | 285 | local body1 = assert.res_status(200, res) 286 | assert.same("Miss", res.headers["X-Cache-Status"]) 287 | 288 | -- cache key is an md5sum of the prefix uuid, method, and $request 289 | local cache_key1 = 
res.headers["X-Cache-Key"] 290 | assert.matches("^[%w%d]+$", cache_key1) 291 | assert.equals(32, #cache_key1) 292 | 293 | -- wait until the underlying strategy converges 294 | --strategy_wait_until(policy, function() 295 | -- return strategy:fetch(cache_key1) ~= nil 296 | --end, TIMEOUT) 297 | 298 | local res = client:send { 299 | method = "GET", 300 | path = "/get", 301 | headers = { 302 | host = "route-1.com", 303 | } 304 | } 305 | 306 | local body2 = assert.res_status(200, res) 307 | assert.same("Hit", res.headers["X-Cache-Status"]) 308 | local cache_key2 = res.headers["X-Cache-Key"] 309 | assert.same(cache_key1, cache_key2) 310 | 311 | -- assert that response bodies are identical 312 | assert.same(body1, body2) 313 | 314 | -- examine this cache key against another plugin's cache key for the same req 315 | --cache_key = cache_key1 316 | end) 317 | 318 | it("respects cache ttl", function() 319 | local res = assert(client:send { 320 | method = "GET", 321 | path = "/get", 322 | headers = { 323 | host = "route-6.com", 324 | } 325 | }) 326 | 327 | --local cache_key2 = res.headers["X-Cache-Key"] 328 | assert.res_status(200, res) 329 | assert.same("Miss", res.headers["X-Cache-Status"]) 330 | 331 | -- wait until the underlying strategy converges 332 | --strategy_wait_until(policy, function() 333 | -- return strategy:fetch(cache_key2) ~= nil 334 | --end, TIMEOUT) 335 | 336 | res = client:send { 337 | method = "GET", 338 | path = "/get", 339 | headers = { 340 | host = "route-6.com", 341 | } 342 | } 343 | 344 | assert.res_status(200, res) 345 | assert.same("Hit", res.headers["X-Cache-Status"]) 346 | --local cache_key = res.headers["X-Cache-Key"] 347 | 348 | -- if strategy is local, it's enough to simply use a sleep 349 | if strategies.LOCAL_DATA_STRATEGIES[policy] then 350 | ngx.sleep(3) 351 | end 352 | 353 | -- wait until the strategy expires the object for the given 354 | -- cache key 355 | --strategy_wait_until(policy, function() 356 | -- return strategy:fetch(cache_key) == nil 357 | --end, TIMEOUT) 358 | 359 | -- and go through the cycle again 360 | res = assert(client:send { 361 | method = "GET", 362 | path = "/get", 363 | headers = { 364 | host = "route-6.com", 365 | } 366 | }) 367 | 368 | assert.res_status(200, res) 369 | assert.same("Miss", res.headers["X-Cache-Status"]) 370 | --cache_key = res.headers["X-Cache-Key"] 371 | 372 | -- wait until the underlying strategy converges 373 | --strategy_wait_until(policy, function() 374 | -- return strategy:fetch(cache_key) ~= nil 375 | --end, TIMEOUT) 376 | 377 | res = assert(client:send { 378 | method = "GET", 379 | path = "/get", 380 | headers = { 381 | host = "route-6.com", 382 | } 383 | }) 384 | 385 | assert.res_status(200, res) 386 | assert.same("Hit", res.headers["X-Cache-Status"]) 387 | 388 | -- examine the behavior of keeping cache in memory for longer than ttl 389 | res = assert(client:send { 390 | method = "GET", 391 | path = "/get", 392 | headers = { 393 | host = "route-9.com", 394 | } 395 | }) 396 | 397 | assert.res_status(200, res) 398 | assert.same("Miss", res.headers["X-Cache-Status"]) 399 | --cache_key = res.headers["X-Cache-Key"] 400 | 401 | -- wait until the underlying strategy converges 402 | --strategy_wait_until(policy, function() 403 | -- return strategy:fetch(cache_key) ~= nil 404 | --end, TIMEOUT) 405 | 406 | res = assert(client:send { 407 | method = "GET", 408 | path = "/get", 409 | headers = { 410 | host = "route-9.com", 411 | } 412 | }) 413 | 414 | assert.res_status(200, res) 415 | assert.same("Hit", 
res.headers["X-Cache-Status"]) 416 | 417 | -- if strategy is local, it's enough to simply use a sleep 418 | if strategies.LOCAL_DATA_STRATEGIES[policy] then 419 | ngx.sleep(3) 420 | end 421 | 422 | -- give ourselves time to expire 423 | -- as storage_ttl > cache_ttl, the object still remains in storage 424 | -- in an expired state 425 | --strategy_wait_until(policy, function() 426 | -- local obj = strategy:fetch(cache_key) 427 | -- return ngx.time() - obj.timestamp > obj.ttl 428 | --end, TIMEOUT) 429 | 430 | -- and go through the cycle again 431 | res = assert(client:send { 432 | method = "GET", 433 | path = "/get", 434 | headers = { 435 | host = "route-9.com", 436 | } 437 | }) 438 | 439 | assert.res_status(200, res) 440 | assert.same("Refresh", res.headers["X-Cache-Status"]) 441 | 442 | res = assert(client:send { 443 | method = "GET", 444 | path = "/get", 445 | headers = { 446 | host = "route-9.com", 447 | } 448 | }) 449 | 450 | assert.res_status(200, res) 451 | assert.same("Hit", res.headers["X-Cache-Status"]) 452 | end) 453 | 454 | it("respects cache ttl via cache control", function() 455 | local res = assert(client:send { 456 | method = "GET", 457 | path = "/cache/2", 458 | headers = { 459 | host = "route-7.com", 460 | } 461 | }) 462 | 463 | assert.res_status(200, res) 464 | assert.same("Miss", res.headers["X-Cache-Status"]) 465 | --local cache_key = res.headers["X-Cache-Key"] 466 | 467 | -- wait until the underlying strategy converges 468 | --strategy_wait_until(policy, function() 469 | -- return strategy:fetch(cache_key) ~= nil 470 | --end, TIMEOUT) 471 | 472 | res = assert(client:send { 473 | method = "GET", 474 | path = "/cache/2", 475 | headers = { 476 | host = "route-7.com", 477 | } 478 | }) 479 | 480 | assert.res_status(200, res) 481 | assert.same("Hit", res.headers["X-Cache-Status"]) 482 | 483 | -- if strategy is local, it's enough to simply use a sleep 484 | if strategies.LOCAL_DATA_STRATEGIES[policy] then 485 | ngx.sleep(3) 486 | end 487 | 488 | -- give ourselves time to expire 489 | --strategy_wait_until(policy, function() 490 | -- return strategy:fetch(cache_key) == nil 491 | --end, TIMEOUT) 492 | 493 | -- and go through the cycle again 494 | res = assert(client:send { 495 | method = "GET", 496 | path = "/cache/2", 497 | headers = { 498 | host = "route-7.com", 499 | } 500 | }) 501 | 502 | assert.res_status(200, res) 503 | assert.same("Miss", res.headers["X-Cache-Status"]) 504 | 505 | -- wait until the underlying strategy converges 506 | --strategy_wait_until(policy, function() 507 | -- return strategy:fetch(cache_key) ~= nil 508 | --end, TIMEOUT) 509 | 510 | res = assert(client:send { 511 | method = "GET", 512 | path = "/cache/2", 513 | headers = { 514 | host = "route-7.com", 515 | } 516 | }) 517 | 518 | assert.res_status(200, res) 519 | assert.same("Hit", res.headers["X-Cache-Status"]) 520 | 521 | -- assert that max-age=0 never results in caching 522 | res = assert(client:send { 523 | method = "GET", 524 | path = "/cache/0", 525 | headers = { 526 | host = "route-7.com", 527 | } 528 | }) 529 | 530 | assert.res_status(200, res) 531 | assert.same("Bypass", res.headers["X-Cache-Status"]) 532 | 533 | res = assert(client:send { 534 | method = "GET", 535 | path = "/cache/0", 536 | headers = { 537 | host = "route-7.com", 538 | } 539 | }) 540 | 541 | assert.res_status(200, res) 542 | assert.same("Bypass", res.headers["X-Cache-Status"]) 543 | end) 544 | 545 | it("public not present in Cache-Control, but max-age is", function() 546 | -- httpbin's /cache endpoint always sets 
"Cache-Control: public" 547 | -- necessary to set it manually using /response-headers instead 548 | local res = assert(client:send { 549 | method = "GET", 550 | path = "/response-headers?Cache-Control=max-age%3D604800", 551 | headers = { 552 | host = "route-7.com", 553 | } 554 | }) 555 | 556 | assert.res_status(200, res) 557 | assert.same("Miss", res.headers["X-Cache-Status"]) 558 | end) 559 | 560 | it("Cache-Control contains s-maxage only", function() 561 | local res = assert(client:send { 562 | method = "GET", 563 | path = "/response-headers?Cache-Control=s-maxage%3D604800", 564 | headers = { 565 | host = "route-7.com", 566 | } 567 | }) 568 | 569 | assert.res_status(200, res) 570 | assert.same("Miss", res.headers["X-Cache-Status"]) 571 | end) 572 | 573 | it("Expires present, Cache-Control absent", function() 574 | local httpdate = ngx.escape_uri(os.date("!%a, %d %b %Y %X %Z", os.time()+5000)) 575 | local res = assert(client:send { 576 | method = "GET", 577 | path = "/response-headers", 578 | query = "Expires=" .. httpdate, 579 | headers = { 580 | host = "route-7.com", 581 | } 582 | }) 583 | 584 | assert.res_status(200, res) 585 | assert.same("Miss", res.headers["X-Cache-Status"]) 586 | end) 587 | 588 | describe("respects cache-control", function() 589 | it("min-fresh", function() 590 | -- bypass via unsatisfied min-fresh 591 | local res = assert(client:send { 592 | method = "GET", 593 | path = "/cache/2", 594 | headers = { 595 | host = "route-7.com", 596 | ["Cache-Control"] = "min-fresh=30" 597 | } 598 | }) 599 | 600 | assert.res_status(200, res) 601 | assert.same("Refresh", res.headers["X-Cache-Status"]) 602 | end) 603 | 604 | it("max-age", function() 605 | local res = assert(client:send { 606 | method = "GET", 607 | path = "/cache/10", 608 | headers = { 609 | host = "route-7.com", 610 | ["Cache-Control"] = "max-age=2" 611 | } 612 | }) 613 | 614 | assert.res_status(200, res) 615 | assert.same("Miss", res.headers["X-Cache-Status"]) 616 | --local cache_key = res.headers["X-Cache-Key"] 617 | 618 | -- wait until the underlying strategy converges 619 | --strategy_wait_until(policy, function() 620 | -- return strategy:fetch(cache_key) ~= nil 621 | --end, TIMEOUT) 622 | 623 | res = assert(client:send { 624 | method = "GET", 625 | path = "/cache/10", 626 | headers = { 627 | host = "route-7.com", 628 | ["Cache-Control"] = "max-age=2" 629 | } 630 | }) 631 | 632 | assert.res_status(200, res) 633 | assert.same("Hit", res.headers["X-Cache-Status"]) 634 | --local cache_key = res.headers["X-Cache-Key"] 635 | 636 | -- if strategy is local, it's enough to simply use a sleep 637 | if strategies.LOCAL_DATA_STRATEGIES[policy] then 638 | ngx.sleep(3) 639 | end 640 | 641 | -- wait until max-age 642 | --strategy_wait_until(policy, function() 643 | -- local obj = strategy:fetch(cache_key) 644 | -- return ngx.time() - obj.timestamp > 2 645 | --end, TIMEOUT) 646 | 647 | res = assert(client:send { 648 | method = "GET", 649 | path = "/cache/10", 650 | headers = { 651 | host = "route-7.com", 652 | ["Cache-Control"] = "max-age=2" 653 | } 654 | }) 655 | 656 | assert.res_status(200, res) 657 | assert.same("Refresh", res.headers["X-Cache-Status"]) 658 | end) 659 | 660 | it("max-stale", function() 661 | local res = assert(client:send { 662 | method = "GET", 663 | path = "/cache/2", 664 | headers = { 665 | host = "route-8.com", 666 | } 667 | }) 668 | 669 | assert.res_status(200, res) 670 | assert.same("Miss", res.headers["X-Cache-Status"]) 671 | --local cache_key = res.headers["X-Cache-Key"] 672 | 673 | -- wait until the 
underlying strategy converges 674 | --strategy_wait_until(policy, function() 675 | -- return strategy:fetch(cache_key) ~= nil 676 | --end, TIMEOUT) 677 | 678 | res = assert(client:send { 679 | method = "GET", 680 | path = "/cache/2", 681 | headers = { 682 | host = "route-8.com", 683 | } 684 | }) 685 | 686 | assert.res_status(200, res) 687 | assert.same("Hit", res.headers["X-Cache-Status"]) 688 | 689 | -- if strategy is local, it's enough to simply use a sleep 690 | if strategies.LOCAL_DATA_STRATEGIES[policy] then 691 | ngx.sleep(4) 692 | end 693 | 694 | -- wait for longer than max-stale below 695 | --strategy_wait_until(policy, function() 696 | -- local obj = strategy:fetch(cache_key) 697 | -- return ngx.time() - obj.timestamp - obj.ttl > 2 698 | --end, TIMEOUT) 699 | 700 | res = assert(client:send { 701 | method = "GET", 702 | path = "/cache/2", 703 | headers = { 704 | host = "route-8.com", 705 | ["Cache-Control"] = "max-stale=1", 706 | } 707 | }) 708 | 709 | assert.res_status(200, res) 710 | assert.same("Refresh", res.headers["X-Cache-Status"]) 711 | end) 712 | 713 | it("#o only-if-cached", function() 714 | local res = assert(client:send { 715 | method = "GET", 716 | path = "/get?not=here", 717 | headers = { 718 | host = "route-8.com", 719 | ["Cache-Control"] = "only-if-cached", 720 | } 721 | }) 722 | 723 | assert.res_status(504, res) 724 | end) 725 | end) 726 | 727 | it("caches a streaming request", function() 728 | local res = assert(client:send { 729 | method = "GET", 730 | path = "/stream/3", 731 | headers = { 732 | host = "route-1.com", 733 | } 734 | }) 735 | 736 | local body1 = assert.res_status(200, res) 737 | assert.same("Miss", res.headers["X-Cache-Status"]) 738 | assert.is_nil(res.headers["Content-Length"]) 739 | --local cache_key = res.headers["X-Cache-Key"] 740 | 741 | -- wait until the underlying strategy converges 742 | --strategy_wait_until(policy, function() 743 | -- return strategy:fetch(cache_key) ~= nil 744 | --end, TIMEOUT) 745 | 746 | res = assert(client:send { 747 | method = "GET", 748 | path = "/stream/3", 749 | headers = { 750 | host = "route-1.com", 751 | } 752 | }) 753 | 754 | local body2 = assert.res_status(200, res) 755 | assert.same("Hit", res.headers["X-Cache-Status"]) 756 | assert.same(body1, body2) 757 | end) 758 | 759 | it("uses a separate cache key for the same consumer between routes", function() 760 | local res = assert(client:send { 761 | method = "GET", 762 | path = "/get", 763 | headers = { 764 | host = "route-13.com", 765 | apikey = "bob", 766 | } 767 | }) 768 | assert.res_status(200, res) 769 | local cache_key1 = res.headers["X-Cache-Key"] 770 | 771 | local res = assert(client:send { 772 | method = "GET", 773 | path = "/get", 774 | headers = { 775 | host = "route-14.com", 776 | apikey = "bob", 777 | } 778 | }) 779 | assert.res_status(200, res) 780 | local cache_key2 = res.headers["X-Cache-Key"] 781 | 782 | assert.not_equal(cache_key1, cache_key2) 783 | end) 784 | 785 | it("uses a separate cache key for the same consumer between routes/services", function() 786 | local res = assert(client:send { 787 | method = "GET", 788 | path = "/get", 789 | headers = { 790 | host = "route-15.com", 791 | apikey = "bob", 792 | } 793 | }) 794 | assert.res_status(200, res) 795 | local cache_key1 = res.headers["X-Cache-Key"] 796 | 797 | local res = assert(client:send { 798 | method = "GET", 799 | path = "/get", 800 | headers = { 801 | host = "route-16.com", 802 | apikey = "bob", 803 | } 804 | }) 805 | assert.res_status(200, res) 806 | local cache_key2 = 
res.headers["X-Cache-Key"] 807 | 808 | assert.not_equal(cache_key1, cache_key2) 809 | end) 810 | 811 | it("uses an separate cache key between routes-specific and a global plugin", function() 812 | local res = assert(client:send { 813 | method = "GET", 814 | path = "/get", 815 | headers = { 816 | host = "route-3.com", 817 | } 818 | }) 819 | 820 | assert.res_status(200, res) 821 | assert.same("Miss", res.headers["X-Cache-Status"]) 822 | 823 | local cache_key1 = res.headers["X-Cache-Key"] 824 | assert.matches("^[%w%d]+$", cache_key1) 825 | assert.equals(32, #cache_key1) 826 | 827 | res = assert(client:send { 828 | method = "GET", 829 | path = "/get", 830 | headers = { 831 | host = "route-4.com", 832 | } 833 | }) 834 | 835 | assert.res_status(200, res) 836 | 837 | assert.same("Miss", res.headers["X-Cache-Status"]) 838 | local cache_key2 = res.headers["X-Cache-Key"] 839 | assert.not_same(cache_key1, cache_key2) 840 | end) 841 | 842 | it("#o differentiates caches between instances", function() 843 | local res = assert(client:send { 844 | method = "GET", 845 | path = "/get", 846 | headers = { 847 | host = "route-2.com", 848 | } 849 | }) 850 | 851 | assert.res_status(200, res) 852 | assert.same("Miss", res.headers["X-Cache-Status"]) 853 | 854 | local cache_key1 = res.headers["X-Cache-Key"] 855 | assert.matches("^[%w%d]+$", cache_key1) 856 | assert.equals(32, #cache_key1) 857 | 858 | -- wait until the underlying strategy converges 859 | --strategy_wait_until(policy, function() 860 | -- return strategy:fetch(cache_key1) ~= nil 861 | --end, TIMEOUT) 862 | 863 | res = assert(client:send { 864 | method = "GET", 865 | path = "/get", 866 | headers = { 867 | host = "route-2.com", 868 | } 869 | }) 870 | 871 | local cache_key2 = res.headers["X-Cache-Key"] 872 | assert.res_status(200, res) 873 | assert.same("Hit", res.headers["X-Cache-Status"]) 874 | assert.same(cache_key1, cache_key2) 875 | end) 876 | 877 | it("uses request params as part of the cache key", function() 878 | local res = assert(client:send { 879 | method = "GET", 880 | path = "/get?a=b&b=c", 881 | headers = { 882 | host = "route-1.com", 883 | } 884 | }) 885 | 886 | assert.res_status(200, res) 887 | assert.same("Miss", res.headers["X-Cache-Status"]) 888 | 889 | res = assert(client:send { 890 | method = "GET", 891 | path = "/get?a=c", 892 | headers = { 893 | host = "route-1.com", 894 | } 895 | }) 896 | 897 | assert.res_status(200, res) 898 | 899 | assert.same("Miss", res.headers["X-Cache-Status"]) 900 | 901 | res = assert(client:send { 902 | method = "GET", 903 | path = "/get?b=c&a=b", 904 | headers = { 905 | host = "route-1.com", 906 | } 907 | }) 908 | 909 | assert.res_status(200, res) 910 | assert.same("Hit", res.headers["X-Cache-Status"]) 911 | 912 | res = assert(client:send { 913 | method = "GET", 914 | path = "/get?a&b", 915 | headers = { 916 | host = "route-1.com", 917 | } 918 | }) 919 | assert.res_status(200, res) 920 | assert.same("Miss", res.headers["X-Cache-Status"]) 921 | 922 | res = assert(client:send { 923 | method = "GET", 924 | path = "/get?a&b", 925 | headers = { 926 | host = "route-1.com", 927 | } 928 | }) 929 | assert.res_status(200, res) 930 | assert.same("Hit", res.headers["X-Cache-Status"]) 931 | end) 932 | 933 | it("can focus only in a subset of the query arguments", function() 934 | local res = assert(client:send { 935 | method = "GET", 936 | path = "/get?foo=b&b=c", 937 | headers = { 938 | host = "route-12.com", 939 | } 940 | }) 941 | 942 | assert.res_status(200, res) 943 | assert.same("Miss", 
res.headers["X-Cache-Status"]) 944 | 945 | --local cache_key = res.headers["X-Cache-Key"] 946 | 947 | -- wait until the underlying strategy converges 948 | --strategy_wait_until(policy, function() 949 | -- return strategy:fetch(cache_key) ~= nil 950 | --end, TIMEOUT) 951 | 952 | res = assert(client:send { 953 | method = "GET", 954 | path = "/get?b=d&foo=b", 955 | headers = { 956 | host = "route-12.com", 957 | } 958 | }) 959 | 960 | assert.res_status(200, res) 961 | 962 | assert.same("Hit", res.headers["X-Cache-Status"]) 963 | end) 964 | 965 | it("uses headers if instructed to do so", function() 966 | local res = assert(client:send { 967 | method = "GET", 968 | path = "/get", 969 | headers = { 970 | host = "route-11.com", 971 | foo = "bar" 972 | } 973 | }) 974 | assert.res_status(200, res) 975 | assert.same("Miss", res.headers["X-Cache-Status"]) 976 | --local cache_key = res.headers["X-Cache-Key"] 977 | 978 | -- wait until the underlying strategy converges 979 | --strategy_wait_until(policy, function() 980 | -- return strategy:fetch(cache_key) ~= nil 981 | --end, TIMEOUT) 982 | 983 | res = assert(client:send { 984 | method = "GET", 985 | path = "/get", 986 | headers = { 987 | host = "route-11.com", 988 | foo = "bar" 989 | } 990 | }) 991 | assert.res_status(200, res) 992 | assert.same("Hit", res.headers["X-Cache-Status"]) 993 | 994 | res = assert(client:send { 995 | method = "GET", 996 | path = "/get", 997 | headers = { 998 | host = "route-11.com", 999 | foo = "baz" 1000 | } 1001 | }) 1002 | assert.res_status(200, res) 1003 | assert.same("Miss", res.headers["X-Cache-Status"]) 1004 | end) 1005 | 1006 | describe("handles authenticated routes", function() 1007 | it("by ignoring cache if the request is unauthenticated", function() 1008 | local res = assert(client:send { 1009 | method = "GET", 1010 | path = "/get", 1011 | headers = { 1012 | host = "route-5.com", 1013 | } 1014 | }) 1015 | 1016 | assert.res_status(401, res) 1017 | assert.is_nil(res.headers["X-Cache-Status"]) 1018 | end) 1019 | 1020 | it("by maintaining a separate cache per consumer", function() 1021 | local res = assert(client:send { 1022 | method = "GET", 1023 | path = "/get", 1024 | headers = { 1025 | host = "route-5.com", 1026 | apikey = "bob", 1027 | } 1028 | }) 1029 | 1030 | assert.res_status(200, res) 1031 | assert.same("Miss", res.headers["X-Cache-Status"]) 1032 | 1033 | res = assert(client:send { 1034 | method = "GET", 1035 | path = "/get", 1036 | headers = { 1037 | host = "route-5.com", 1038 | apikey = "bob", 1039 | } 1040 | }) 1041 | 1042 | assert.res_status(200, res) 1043 | assert.same("Hit", res.headers["X-Cache-Status"]) 1044 | 1045 | local res = assert(client:send { 1046 | method = "GET", 1047 | path = "/get", 1048 | headers = { 1049 | host = "route-5.com", 1050 | apikey = "alice", 1051 | } 1052 | }) 1053 | 1054 | assert.res_status(200, res) 1055 | assert.same("Miss", res.headers["X-Cache-Status"]) 1056 | 1057 | res = assert(client:send { 1058 | method = "GET", 1059 | path = "/get", 1060 | headers = { 1061 | host = "route-5.com", 1062 | apikey = "alice", 1063 | } 1064 | }) 1065 | 1066 | assert.res_status(200, res) 1067 | assert.same("Hit", res.headers["X-Cache-Status"]) 1068 | 1069 | end) 1070 | end) 1071 | 1072 | describe("bypasses cache for uncacheable requests: ", function() 1073 | it("request method", function() 1074 | local res = assert(client:send { 1075 | method = "POST", 1076 | path = "/post", 1077 | headers = { 1078 | host = "route-1.com", 1079 | ["Content-Type"] = "application/json", 1080 | }, 1081 | { 1082 
| foo = "bar", 1083 | }, 1084 | }) 1085 | 1086 | assert.res_status(200, res) 1087 | assert.same("Bypass", res.headers["X-Cache-Status"]) 1088 | end) 1089 | end) 1090 | 1091 | describe("bypasses cache for uncacheable responses:", function() 1092 | it("response status", function() 1093 | local res = assert(client:send { 1094 | method = "GET", 1095 | path = "/status/418", 1096 | headers = { 1097 | host = "route-1.com", 1098 | }, 1099 | }) 1100 | 1101 | assert.res_status(418, res) 1102 | assert.same("Bypass", res.headers["X-Cache-Status"]) 1103 | end) 1104 | 1105 | it("response content type", function() 1106 | local res = assert(client:send { 1107 | method = "GET", 1108 | path = "/xml", 1109 | headers = { 1110 | host = "route-1.com", 1111 | }, 1112 | }) 1113 | 1114 | assert.res_status(200, res) 1115 | assert.same("Bypass", res.headers["X-Cache-Status"]) 1116 | end) 1117 | end) 1118 | 1119 | describe("caches non-default", function() 1120 | it("request methods", function() 1121 | local res = assert(client:send { 1122 | method = "POST", 1123 | path = "/post", 1124 | headers = { 1125 | host = "route-10.com", 1126 | ["Content-Type"] = "application/json", 1127 | }, 1128 | { 1129 | foo = "bar", 1130 | }, 1131 | }) 1132 | 1133 | assert.res_status(200, res) 1134 | assert.same("Miss", res.headers["X-Cache-Status"]) 1135 | --local cache_key = res.headers["X-Cache-Key"] 1136 | 1137 | -- wait until the underlying strategy converges 1138 | --strategy_wait_until(policy, function() 1139 | -- return strategy:fetch(cache_key) ~= nil 1140 | --end, TIMEOUT) 1141 | 1142 | res = assert(client:send { 1143 | method = "POST", 1144 | path = "/post", 1145 | headers = { 1146 | host = "route-10.com", 1147 | ["Content-Type"] = "application/json", 1148 | }, 1149 | { 1150 | foo = "bar", 1151 | }, 1152 | }) 1153 | 1154 | assert.res_status(200, res) 1155 | assert.same("Hit", res.headers["X-Cache-Status"]) 1156 | end) 1157 | 1158 | it("response status", function() 1159 | local res = assert(client:send { 1160 | method = "GET", 1161 | path = "/status/417", 1162 | headers = { 1163 | host = "route-10.com", 1164 | }, 1165 | }) 1166 | 1167 | assert.res_status(417, res) 1168 | assert.same("Miss", res.headers["X-Cache-Status"]) 1169 | 1170 | res = assert(client:send { 1171 | method = "GET", 1172 | path = "/status/417", 1173 | headers = { 1174 | host = "route-10.com", 1175 | }, 1176 | }) 1177 | 1178 | assert.res_status(417, res) 1179 | assert.same("Hit", res.headers["X-Cache-Status"]) 1180 | end) 1181 | 1182 | end) 1183 | 1184 | describe("displays Kong core headers:", function() 1185 | it("X-Kong-Proxy-Latency", function() 1186 | local res = assert(client:send { 1187 | method = "GET", 1188 | path = "/get?show-me=proxy-latency", 1189 | headers = { 1190 | host = "route-1.com", 1191 | } 1192 | }) 1193 | 1194 | assert.res_status(200, res) 1195 | assert.same("Miss", res.headers["X-Cache-Status"]) 1196 | assert.matches("^%d+$", res.headers["X-Kong-Proxy-Latency"]) 1197 | 1198 | res = assert(client:send { 1199 | method = "GET", 1200 | path = "/get?show-me=proxy-latency", 1201 | headers = { 1202 | host = "route-1.com", 1203 | } 1204 | }) 1205 | 1206 | assert.res_status(200, res) 1207 | assert.same("Hit", res.headers["X-Cache-Status"]) 1208 | assert.matches("^%d+$", res.headers["X-Kong-Proxy-Latency"]) 1209 | end) 1210 | 1211 | it("X-Kong-Upstream-Latency", function() 1212 | local res = assert(client:send { 1213 | method = "GET", 1214 | path = "/get?show-me=upstream-latency", 1215 | headers = { 1216 | host = "route-1.com", 1217 | } 1218 | }) 
1219 | 1220 | assert.res_status(200, res) 1221 | assert.same("Miss", res.headers["X-Cache-Status"]) 1222 | assert.matches("^%d+$", res.headers["X-Kong-Upstream-Latency"]) 1223 | --cache_key = res.headers["X-Cache-Key"] 1224 | 1225 | -- wait until the underlying strategy converges 1226 | --strategy_wait_until(policy, function() 1227 | -- return strategy:fetch(cache_key) ~= nil 1228 | --end, TIMEOUT) 1229 | 1230 | res = assert(client:send { 1231 | method = "GET", 1232 | path = "/get?show-me=upstream-latency", 1233 | headers = { 1234 | host = "route-1.com", 1235 | } 1236 | }) 1237 | 1238 | assert.res_status(200, res) 1239 | assert.same("Hit", res.headers["X-Cache-Status"]) 1240 | assert.matches("^%d+$", res.headers["X-Kong-Upstream-Latency"]) 1241 | end) 1242 | end) 1243 | end) 1244 | end 1245 | --------------------------------------------------------------------------------
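A minimal usage sketch of the force-cache feature seen in handler.lua's cacheable_request(): when a plugin instance is configured with allow_force_cache_header = true, a request carrying the X-Proxy-Cache-Redis-Force: true header is treated as cacheable even if its method is not listed in request_method. The host route-force.com and its plugin configuration are illustrative assumptions written in the style of the specs above, not fixtures that exist in this repository.

local helpers = require "spec.helpers"

-- assumes a route for "route-force.com" with a proxy-cache-redis plugin whose
-- config sets allow_force_cache_header = true (illustrative, not a real fixture)
local client = helpers.proxy_client()

local res = assert(client:send {
  method = "POST",    -- POST is bypassed by default (see the "request method" test above),
  path = "/post",     -- so only the force header makes this request eligible for caching
  headers = {
    host = "route-force.com",
    ["Content-Type"] = "application/json",
    ["X-Proxy-Cache-Redis-Force"] = "true",
  },
})
assert.res_status(200, res)
-- assuming the response status and content type fall within the plugin's
-- response_code / content_type settings, the first call should report
-- X-Cache-Status: Miss and an identical second call should report Hit
client:close()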