├── .busted ├── .editorconfig ├── .gitignore ├── .luacheckrc ├── .pongo ├── pongo-setup.sh └── pongorc ├── .travis.yml ├── CHANGELOG.md ├── LICENSE ├── README.md ├── grafana ├── README.md └── kong-official.json ├── kong-prometheus-plugin-1.3.0-1.rockspec ├── kong └── plugins │ └── prometheus │ ├── api.lua │ ├── enterprise │ └── exporter.lua │ ├── exporter.lua │ ├── handler.lua │ ├── prometheus.lua │ ├── schema.lua │ ├── serve.lua │ └── status_api.lua └── spec ├── 01-api_spec.lua ├── 02-access_spec.lua ├── 03-custom-serve_spec.lua ├── 04-status_api_spec.lua ├── 05-enterprise-exporter_spec.lua └── fixtures └── prometheus ├── custom_nginx.template └── metrics.conf /.busted: -------------------------------------------------------------------------------- 1 | return { 2 | default = { 3 | verbose = true, 4 | coverage = false, 5 | output = "gtest", 6 | }, 7 | } 8 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | end_of_line = lf 5 | insert_final_newline = true 6 | trim_trailing_whitespace = true 7 | charset = utf-8 8 | 9 | [*.lua] 10 | indent_style = space 11 | indent_size = 2 12 | 13 | [kong/templates/nginx*] 14 | indent_style = space 15 | indent_size = 4 16 | 17 | [*.template] 18 | indent_style = space 19 | indent_size = 4 20 | 21 | [Makefile] 22 | indent_style = tab 23 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.rock 2 | *.tar.gz 3 | servroot 4 | -------------------------------------------------------------------------------- /.luacheckrc: -------------------------------------------------------------------------------- 1 | std = "ngx_lua" 2 | 3 | files["spec"] = { 4 | std = "+busted"; 5 | } 6 | max_line_length = false 7 | unused_args = false 8 | redefined = false 9 | 10 | 
globals = { 11 | "kong", 12 | } 13 | -------------------------------------------------------------------------------- /.pongo/pongo-setup.sh: -------------------------------------------------------------------------------- 1 | # due to makefile omission in Kong grpcurl will not get installed 2 | # on 1.3 through 2.0. So add manually if not installed already. 3 | # see: https://github.com/Kong/kong/pull/5857 4 | 5 | if [ ! -f /kong/bin/grpcurl ]; then 6 | echo grpcurl not found, now adding... 7 | curl -s -S -L https://github.com/fullstorydev/grpcurl/releases/download/v1.3.0/grpcurl_1.3.0_linux_x86_64.tar.gz | tar xz -C /kong/bin; 8 | fi 9 | 10 | # install rockspec, dependencies only 11 | find /kong-plugin -maxdepth 1 -type f -name '*.rockspec' -exec luarocks install --only-deps {} \; 12 | -------------------------------------------------------------------------------- /.pongo/pongorc: -------------------------------------------------------------------------------- 1 | --postgres 2 | --no-cassandra 3 | --grpcbin 4 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | dist: bionic 2 | 3 | jobs: 4 | include: 5 | - name: Kong CE 2.1.x 6 | env: KONG_VERSION=2.1.x 7 | - name: Kong CE 2.2.x 8 | env: KONG_VERSION=2.2.x 9 | - name: Kong CE 2.3.x 10 | env: KONG_VERSION=2.3.x 11 | - name: Kong CE Master 12 | env: KONG_VERSION=nightly 13 | # - name: Kong EE 1.5.0.x # FT to look into failure 14 | # env: KONG_VERSION=1.5.0.x 15 | - name: Kong EE 2.4.1.x 16 | env: KONG_VERSION=2.4.1.x 17 | - name: Kong Enterprise nightly 18 | env: KONG_VERSION=nightly-ee 19 | 20 | install: 21 | - git clone --single-branch https://github.com/Kong/kong-pongo ../kong-pongo 22 | - "../kong-pongo/pongo.sh up" 23 | - "../kong-pongo/pongo.sh build" 24 | 25 | script: 26 | - "../kong-pongo/pongo.sh lint" 27 | - "../kong-pongo/pongo.sh run" 28 | 29 | notifications: 30 | slack: 31 | 
if: branch = master AND type != pull_request 32 | on_success: change 33 | on_failure: always 34 | rooms: 35 | secure: HiNnqUzWUG9RFfxSrjb2K9Vzi3ZdTUZYO7Zvjb+lJwP/pwA68RcHpmEvhBa3cMhBokl15w506Mcp8xm/Uj+HUV8HzZEigJ0d2tc7XRVix+gVh4L0wyATHvZ3+GOW3le1CQVVEKLGaanl/lObN0dCP12h4uVyObQRpHEjTf6KtyhW2Atvs5iqOFpfeT/u2oPV9aQ/VRsGjA/72VfVfwggX6KeTLw5+xSDxG7GSt2mGOzmLreOzbaEEeg7FKnN00bMoY/T9lNhNttEQ44VauddSWzOmtCfpR35O1ruIyX5HxOPDt+FD+wMikWtomGm0solOlEEKhlmEi7noTCNb4VW2nhjCEw1O8uQr2Y0MUOPdVQ7/d1FoS4l91X87+/qWAKZzfCnEQfXmM/tSC5OTWGzeRCGAuQKvOuBy9yXuQ2PqedugK+SIgB39CuX5uSwVo23MxNtXDO5c7pAAQQ7MqWjmRSNW92Qp/lKRBIHgfVXgpr4FG6fa9MTnXNB/VGLV7e3QN5evtZQ7c0bwnW+T6Npren2U9IWgkc1dlo/UqBreC3hKVnRf5C+U9RLKM9z6dOiDvLSpS/RF6fl1d9j7UxRsrqP2WCzAHX9rXQtRqEUHtY1x1fZXu2EXdo5Y5zYEtgWMbm6Rh+NzcLy+NKC6bBz/bakhTFto/MeRrkV/vaW+rQ= 36 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Table of Contents 2 | 3 | - [1.3.0](#130---20210527) 4 | - [1.2.1](#121---20210415) 5 | - [1.2.0](#120---20210324) 6 | - [1.1.0](#110---20210303) 7 | - [1.0.0](#100---20200820) 8 | - [0.9.0](#090---20200617) 9 | - [0.8.0](#080---20200424) 10 | - [0.7.1](#071---20200105) 11 | - [0.7.0](#070---20191204) 12 | - [0.6.0](#060---20190929) 13 | - [0.5.0](#050---20190916) 14 | - [0.4.1](#041---20190801) 15 | - [0.4.0](#040---20190605) 16 | - [0.3.4](#034---20181217) 17 | - [0.3.3](#033---20181214) 18 | - [0.3.2](#032---20181101) 19 | - [0.3.1](#031---20181017) 20 | - [0.3.0](#030---20181015) 21 | - [0.2.0](#020---20180924) 22 | - [0.1.0](#010---20180615) 23 | 24 | ## [1.3.0] - 2021/05/27 25 | 26 | - Fix exporter to attach subsystem label to memory stats 27 | [#118](https://github.com/Kong/kong-plugin-prometheus/pull/118) 28 | - Expose dataplane status on control plane, new metrics `data_plane_last_seen`, 29 | `data_plane_config_hash` and `data_plane_version_compatible` are added. 
30 | [#98](https://github.com/Kong/kong-plugin-prometheus/pull/98) 31 | 32 | ## [1.2.1] - 2021/04/15 33 | 34 | - Fix an issue where the Content-Length header could be potentially mispelled 35 | [#124](https://github.com/Kong/kong-plugin-prometheus/pull/124) 36 | 37 | ## [1.2.0] - 2021/03/24 38 | 39 | - Fix an issue where there's no stream listener or stream API is not available, 40 | /metrics endpoint may timeout [#108](https://github.com/Kong/kong-plugin-prometheus/pull/108) 41 | - Export per-consumer status [#115](https://github.com/Kong/kong-plugin-prometheus/pull/115) 42 | (Thanks, [samsk](https://github.com/samsk)!) 43 | 44 | ## [1.1.0] - 2021/03/03 45 | 46 | - Export Kong Enterprise Edition licensing information. 47 | [#110](https://github.com/Kong/kong-plugin-prometheus/pull/110) 48 | 49 | ## [1.0.0] - 2020/08/20 50 | 51 | - Change handler to use Kong PDK function kong.log.serialize instead of using 52 | a deprecated basic serializer. 53 | 54 | ## [0.9.0] - 2020/06/17 55 | 56 | - Expose healthiness of upstream targets 57 | (Thanks, [carnei-ro](https://github.com/carnei-ro)!) 58 | [#88](https://github.com/Kong/kong-plugin-prometheus/pull/88) 59 | - Fix a typo on the dashboard 60 | (Thanks, [Monska85](https://github.com/Monska85)!) 61 | 62 | ## [0.8.0] - 2020/04/24 63 | 64 | - Expose the `prometheus` object for custom metrics 65 | [#78](https://github.com/Kong/kong-plugin-prometheus/pull/78) 66 | - Significant performance enhancements; expect manifolds improvements in 67 | Kong's throughput while using the plugin and reduction in CPU usage while 68 | memory usage is expected to go up. 69 | [#79](https://github.com/Kong/kong-plugin-prometheus/pull/79) 70 | 71 | ## [0.7.1] - 2020/01/05 72 | 73 | - Fix `full_metric_name` function was not accessible 74 | - Fix linting issues 75 | 76 | ## [0.7.0] - 2019/12/04 77 | 78 | - **Performance improvements:** Reduced the number of writes (and hence locks) 79 | to the shared dictionary using lua-resty-counter library. 
80 | (Status API is being shipped with Kong 1.4). 81 | [#69](https://github.com/Kong/kong-plugin-prometheus/pull/69) 82 | - Update schema for the plugin for Kong 2.0 compatibility 83 | [#72](https://github.com/Kong/kong-plugin-prometheus/pull/72) 84 | 85 | ## [0.6.0] - 2019/09/29 86 | 87 | - **Metrics on Status API:** Metrics are now be available on the Status API 88 | (Status API is being shipped with Kong 1.4). 89 | [#66](https://github.com/Kong/kong-plugin-prometheus/pull/66) 90 | 91 | ## [0.5.0] - 2019/09/16 92 | 93 | - **Route based metrics:** All proxy metrics now contain a tag with the name 94 | or ID of the route. 95 | [#40](https://github.com/Kong/kong-plugin-prometheus/issues/40) 96 | - **New metrics releated to Kong's memory usage:** 97 | New metrics related to Kong's shared dictionaries 98 | and Lua VMs are now available 99 | [#62](https://github.com/Kong/kong-plugin-prometheus/pull/62): 100 | - per worker Lua VM allocated bytes (`kong_memory_workers_lua_vms_bytes`) 101 | - shm capacity and bytes allocated (`kong_memory_lua_shared_dict_bytes` and 102 | `kong_memory_lua_shared_dict_total_bytes`) 103 | - Performance has been improved by avoiding unnecessary timer creation. 104 | This will lower the impact of the plugin on Kong's overall latency. 105 | [#60](https://github.com/Kong/kong-plugin-prometheus/pull/60) 106 | - Tests to ensure gRPC compatibility have been added. 107 | [#57](https://github.com/Kong/kong-plugin-prometheus/pull/57) 108 | 109 | ## [0.4.1] - 2019/08/01 110 | 111 | - Fix issue where the plugin's shared dictionary would not be properly 112 | initialized 113 | 114 | ## [0.4.0] - 2019/06/05 115 | 116 | - Remove BasePlugin inheritance (not needed anymore) 117 | 118 | ## [0.3.4] - 2018/12/17 119 | 120 | - Drop the use of `kong.tools.responses` module for 121 | Kong 1.0 compatibility. 
122 | [#34](https://github.com/Kong/kong-plugin-prometheus/pull/34) 123 | 124 | ## [0.3.3] - 2018/12/14 125 | 126 | - Do not attempt to send HTTP status code after the body has been sent 127 | while serving `/metrics`. This would result in error being logged in Kong. 128 | [#33](https://github.com/Kong/kong-plugin-prometheus/pull/33) 129 | 130 | ## [0.3.2] - 2018/11/01 131 | 132 | - Fix a nil pointer de-reference bug when no routes are matched in Kong. 133 | [#28](https://github.com/Kong/kong-plugin-prometheus/pull/28) 134 | 135 | ## [0.3.1] - 2018/10/17 136 | 137 | - Fix bugs introduced in 0.3.0 due to incorrect PDK function calls 138 | Thank you @kikito for the fix! 139 | [#26](https://github.com/Kong/kong-plugin-prometheus/pull/26) 140 | 141 | ## [0.3.0] - 2018/10/15 142 | 143 | - This release has no user facing changes but has under the hood 144 | changes for upcoming Kong 1.0.0 release. 145 | - Migrated schema and API endpoint of the plugin to the new DAO and 146 | use PDK functions where possible. 147 | Thank you @kikito for the contribution! 148 | [#24](https://github.com/Kong/kong-plugin-prometheus/pull/24) 149 | 150 | ## [0.2.0] - 2018/09/24 151 | 152 | - :warning: Dropped metrics that were aggregated across services in Kong. 153 | These metrics can be obtained much more efficiently using queries in Prometheus. 154 | [#8](https://github.com/Kong/kong-plugin-prometheus/pull/8) 155 | 156 | ## [0.1.0] - 2018/06/15 157 | 158 | - Initial release of Prometheus plugin for Kong. 
159 | 160 | [1.3.0]: https://github.com/Kong/kong-plugin-prometheus/compare/1.2.1...1.3.0 161 | [1.2.1]: https://github.com/Kong/kong-plugin-prometheus/compare/1.2.0...1.2.1 162 | [1.2.0]: https://github.com/Kong/kong-plugin-prometheus/compare/1.1.0...1.2.0 163 | [1.1.0]: https://github.com/Kong/kong-plugin-prometheus/compare/1.0.0...1.1.0 164 | [1.0.0]: https://github.com/Kong/kong-plugin-prometheus/compare/0.9.0...1.0.0 165 | [0.9.0]: https://github.com/Kong/kong-plugin-prometheus/compare/0.8.0...0.9.0 166 | [0.8.0]: https://github.com/Kong/kong-plugin-prometheus/compare/0.7.1...0.8.0 167 | [0.7.1]: https://github.com/Kong/kong-plugin-prometheus/compare/0.7.0...0.7.1 168 | [0.7.0]: https://github.com/Kong/kong-plugin-prometheus/compare/0.6.0...0.7.0 169 | [0.6.0]: https://github.com/Kong/kong-plugin-prometheus/compare/0.5.0...0.6.0 170 | [0.5.0]: https://github.com/Kong/kong-plugin-prometheus/compare/0.4.1...0.5.0 171 | [0.4.1]: https://github.com/Kong/kong-plugin-prometheus/compare/0.4.0...0.4.1 172 | [0.4.0]: https://github.com/Kong/kong-plugin-prometheus/compare/0.3.4...0.4.0 173 | [0.3.4]: https://github.com/Kong/kong-plugin-prometheus/compare/0.3.3...0.3.4 174 | [0.3.3]: https://github.com/Kong/kong-plugin-prometheus/compare/0.3.2...0.3.3 175 | [0.3.2]: https://github.com/Kong/kong-plugin-prometheus/compare/0.3.1...0.3.2 176 | [0.3.1]: https://github.com/Kong/kong-plugin-prometheus/compare/0.3.0...0.3.1 177 | [0.3.0]: https://github.com/Kong/kong-plugin-prometheus/compare/0.2.0...0.3.0 178 | [0.2.0]: https://github.com/Kong/kong-plugin-prometheus/compare/0.1.0...0.2.0 179 | [0.1.0]: https://github.com/Kong/kong-plugin-prometheus/commit/dc81ea15bd2b331beb8f59176e3ce0fd9007ec03 180 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND 
CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. 
For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. 
Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright 2018 Kong Inc. 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Kong Prometheus Plugin 2 | 3 | ## :warning: This plugin is now maintained as part of [Kong](https://github.com/Kong/kong). Please open Issues and PRs in that repository 4 | 5 | ## :open_book: For documentation, please visit https://docs.konghq.com/hub/kong-inc/prometheus 6 | -------------------------------------------------------------------------------- /grafana/README.md: -------------------------------------------------------------------------------- 1 | # Grafana integration 2 | 3 | kong-official.json is the source of the Grafana dashboard at 4 | https://grafana.com/grafana/dashboards/7424 5 | 6 | The copy in this repository and the copy on Grafana Labs should be kept in 7 | sync. Currently, this must be handled manually: if you make changes here, you 8 | will need to log in to Grafana labs and upload the new version, or vice-versa. 
9 | -------------------------------------------------------------------------------- /kong-prometheus-plugin-1.3.0-1.rockspec: -------------------------------------------------------------------------------- 1 | package = "kong-prometheus-plugin" 2 | version = "1.3.0-1" 3 | 4 | source = { 5 | url = "git://github.com/Kong/kong-plugin-prometheus", 6 | tag = "1.3.0" 7 | } 8 | 9 | supported_platforms = {"linux", "macosx"} 10 | description = { 11 | summary = "Prometheus metrics for Kong and upstreams configured in Kong", 12 | license = "Apache 2.0", 13 | } 14 | 15 | dependencies = { 16 | "lua-resty-counter >= 0.2.0", 17 | } 18 | 19 | build = { 20 | type = "builtin", 21 | modules = { 22 | ["kong.plugins.prometheus.api"] = "kong/plugins/prometheus/api.lua", 23 | ["kong.plugins.prometheus.status_api"] = "kong/plugins/prometheus/status_api.lua", 24 | ["kong.plugins.prometheus.exporter"] = "kong/plugins/prometheus/exporter.lua", 25 | ["kong.plugins.prometheus.enterprise.exporter"] = "kong/plugins/prometheus/enterprise/exporter.lua", 26 | ["kong.plugins.prometheus.handler"] = "kong/plugins/prometheus/handler.lua", 27 | ["kong.plugins.prometheus.prometheus"] = "kong/plugins/prometheus/prometheus.lua", 28 | ["kong.plugins.prometheus.serve"] = "kong/plugins/prometheus/serve.lua", 29 | ["kong.plugins.prometheus.schema"] = "kong/plugins/prometheus/schema.lua", 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /kong/plugins/prometheus/api.lua: -------------------------------------------------------------------------------- 1 | local prometheus = require "kong.plugins.prometheus.exporter" 2 | 3 | local printable_metric_data = function() 4 | return table.concat(prometheus.metric_data(), "") 5 | end 6 | 7 | return { 8 | ["/metrics"] = { 9 | GET = function() 10 | prometheus.collect() 11 | end, 12 | }, 13 | 14 | _stream = ngx.config.subsystem == "stream" and printable_metric_data or nil, 15 | } 16 | 
-------------------------------------------------------------------------------- /kong/plugins/prometheus/enterprise/exporter.lua: -------------------------------------------------------------------------------- 1 | local kong = kong 2 | local sub = string.sub 3 | local split = require('kong.tools.utils').split 4 | 5 | local metrics = {} 6 | 7 | 8 | local function init(prometheus) 9 | metrics.license_errors = prometheus:counter("enterprise_license_errors", 10 | "Errors when collecting license info") 11 | metrics.license_signature = prometheus:gauge("enterprise_license_signature", 12 | "Last 32 bytes of the license signature in number") 13 | metrics.license_expiration = prometheus:gauge("enterprise_license_expiration", 14 | "Unix epoch time when the license expires, " .. 15 | "the timestamp is substracted by 24 hours ".. 16 | "to avoid difference in timezone") 17 | metrics.license_features = prometheus:gauge("enterprise_license_features", 18 | "License features features", 19 | { "feature" }) 20 | 21 | prometheus.dict:set("enterprise_license_errors", 0) 22 | end 23 | 24 | local function license_date_to_unix(yyyy_mm_dd) 25 | local date_t = split(yyyy_mm_dd, "-") 26 | 27 | local ok, res = pcall(os.time, { 28 | year = tonumber(date_t[1]), 29 | month = tonumber(date_t[2]), 30 | day = tonumber(date_t[3]) 31 | }) 32 | if ok then 33 | return res 34 | end 35 | 36 | return nil, res 37 | end 38 | 39 | local function metric_data() 40 | if not metrics then 41 | kong.log.err("prometheus: plugin is not initialized, please make sure ", 42 | " 'prometheus_metrics' shared dict is present in nginx template") 43 | return kong.response.exit(500, { message = "An unexpected error occurred" }) 44 | end 45 | 46 | if not kong.license or not kong.license.license then 47 | metrics.license_errors:inc() 48 | kong.log.err("cannot read kong.license when collecting license info") 49 | return 50 | end 51 | 52 | local lic = kong.license.license 53 | 54 | if tonumber(lic.version) ~= 1 then 55 | 
metrics.license_errors:inc() 56 | kong.log.err("enterprise license version (" .. (lic.version or "nil") .. ") unsupported") 57 | return 58 | end 59 | 60 | local sig = lic.signature 61 | if not sig then 62 | metrics.license_errors:inc() 63 | kong.log.err("cannot read license signature when collecting license info") 64 | return 65 | end 66 | -- last 32 bytes as an int32 67 | metrics.license_signature:set(tonumber("0x" .. sub(sig, #sig-33, #sig))) 68 | 69 | local expiration = lic.payload and lic.payload.license_expiration_date 70 | if not expiration then 71 | metrics.license_errors:inc() 72 | kong.log.err("cannot read license expiration when collecting license info") 73 | return 74 | end 75 | local tm, err = license_date_to_unix(expiration) 76 | if not tm then 77 | metrics.license_errors:inc() 78 | kong.log.err("cannot parse license expiration when collecting license info ", err) 79 | return 80 | end 81 | -- substract it by 24h so everyone one earth is happy monitoring it 82 | metrics.license_expiration:set(tm - 86400) 83 | 84 | 85 | metrics.license_features:set(kong.licensing:can("ee_plugins") and 1 or 0, 86 | { "ee_plugins" }) 87 | 88 | metrics.license_features:set(kong.licensing:can("write_admin_api") and 1 or 0, 89 | { "write_admin_api" }) 90 | end 91 | 92 | 93 | return { 94 | init = init, 95 | metric_data = metric_data, 96 | } 97 | -------------------------------------------------------------------------------- /kong/plugins/prometheus/exporter.lua: -------------------------------------------------------------------------------- 1 | local kong = kong 2 | local ngx = ngx 3 | local find = string.find 4 | local lower = string.lower 5 | local concat = table.concat 6 | local select = select 7 | local balancer = require("kong.runloop.balancer") 8 | local get_all_upstreams = balancer.get_all_upstreams 9 | if not balancer.get_all_upstreams then -- API changed since after Kong 2.5 10 | get_all_upstreams = require("kong.runloop.balancer.upstreams").get_all_upstreams 11 | 
end

local CLUSTERING_SYNC_STATUS = require("kong.constants").CLUSTERING_SYNC_STATUS

-- stream_api only exists on Kong versions that support collecting metrics
-- from the stream subsystem; degrade gracefully when it is absent.
local stream_available, stream_api = pcall(require, "kong.tools.stream_api")

local role = kong.configuration.role

-- Latency histogram buckets, in milliseconds (1ms .. 60s).
local DEFAULT_BUCKETS = { 1, 2, 5, 7, 10, 15, 20, 25, 30, 40, 50, 60, 70,
                          80, 90, 100, 200, 300, 400, 500, 1000,
                          2000, 5000, 10000, 30000, 60000 }
-- registered metric objects, populated by init()
local metrics = {}
-- prometheus.lua instance
local prometheus

-- use the same counter library shipped with Kong
package.loaded['prometheus_resty_counter'] = require("resty.counter")

-- enterprise exporter is loaded only when the EE licensing module exists
local enterprise
local pok = pcall(require, "kong.enterprise_edition.licensing")
if pok then
  enterprise = require("kong.plugins.prometheus.enterprise.exporter")
end

local kong_subsystem = ngx.config.subsystem

-- Register all metrics in the "prometheus_metrics" shared dict.
-- Must run before init_worker(); logs an error and leaves the module
-- uninitialized when the shared dict is not declared in the nginx config.
local function init()
  local shm = "prometheus_metrics"
  -- look the dict up via the variable instead of a second hard-coded
  -- name, so the two can never drift apart
  if not ngx.shared[shm] then
    kong.log.err("prometheus: ngx shared dict '" .. shm .. "' not found")
    return
  end

  prometheus = require("kong.plugins.prometheus.prometheus").init(shm, "kong_")

  -- global metrics
  if kong_subsystem == "http" then
    metrics.connections = prometheus:gauge("nginx_http_current_connections",
                                           "Number of HTTP connections",
                                           {"state"})
  else
    metrics.connections = prometheus:gauge("nginx_stream_current_connections",
                                           "Number of Stream connections",
                                           {"state"})
  end
  metrics.db_reachable = prometheus:gauge("datastore_reachable",
                                          "Datastore reachable from Kong, " ..
                                          "0 is unreachable")
  metrics.upstream_target_health = prometheus:gauge("upstream_target_health",
                                          "Health status of targets of upstream. " ..
                                          "States = healthchecks_off|healthy|unhealthy|dns_error, " ..
                                          "value is 1 when state is populated.",
                                          {"upstream", "target", "address", "state"})

  local memory_stats = {}
  memory_stats.worker_vms = prometheus:gauge("memory_workers_lua_vms_bytes",
                                             "Allocated bytes in worker Lua VM",
                                             {"pid", "kong_subsystem"})
  memory_stats.shms = prometheus:gauge("memory_lua_shared_dict_bytes",
                                       "Allocated slabs in bytes in a shared_dict",
                                       {"shared_dict", "kong_subsystem"})
  memory_stats.shm_capacity = prometheus:gauge("memory_lua_shared_dict_total_bytes",
                                               "Total capacity in bytes of a shared_dict",
                                               {"shared_dict", "kong_subsystem"})

  -- shared dict capacities are fixed at nginx start, so export them once here
  local res = kong.node.get_memory_stats()
  for shm_name, value in pairs(res.lua_shared_dicts) do
    memory_stats.shm_capacity:set(value.capacity, { shm_name, kong_subsystem })
  end

  metrics.memory_stats = memory_stats

  -- per service/route
  if kong_subsystem == "http" then
    metrics.status = prometheus:counter("http_status",
                                        "HTTP status codes per service/route in Kong",
                                        {"service", "route", "code"})
  else
    metrics.status = prometheus:counter("stream_status",
                                        "Stream status codes per service/route in Kong",
                                        {"service", "route", "code"})
  end
  metrics.latency = prometheus:histogram("latency",
                                         "Latency added by Kong, total " ..
                                         "request time and upstream latency " ..
                                         "for each service/route in Kong",
                                         {"service", "route", "type"},
                                         DEFAULT_BUCKETS) -- TODO make this configurable
  metrics.bandwidth = prometheus:counter("bandwidth",
                                         "Total bandwidth in bytes " ..
                                         "consumed per service/route in Kong",
                                         {"service", "route", "type"})
  metrics.consumer_status = prometheus:counter("http_consumer_status",
                                               "HTTP status codes for customer per service/route in Kong",
                                               {"service", "route", "code", "consumer"})

  if enterprise then
    enterprise.init(prometheus)
  end


  -- Hybrid mode status
  if role == "control_plane" then
    metrics.data_plane_last_seen = prometheus:gauge("data_plane_last_seen",
                                                    "Last time data plane contacted control plane",
                                                    {"node_id", "hostname", "ip"})
    metrics.data_plane_config_hash = prometheus:gauge("data_plane_config_hash",
                                                      "Config hash numeric value of the data plane",
                                                      {"node_id", "hostname", "ip"})

    metrics.data_plane_version_compatible = prometheus:gauge("data_plane_version_compatible",
                                                             "Version compatible status of the data plane, 0 is incompatible",
                                                             {"node_id", "hostname", "ip", "kong_version"})
  end
end

-- Start the per-worker counter sync timers; must run in init_worker phase.
local function init_worker()
  prometheus:init_worker()
end

-- Convert the MD5 hex string to its numeric representation
-- Note the following will be represented as a float instead of int64 since luajit
-- don't like int64. Good news is prometheus uses float instead of int64 as well
local function config_hash_to_number(hash_str)
  return tonumber("0x" ..
hash_str)
end

-- Since in the prometheus library we create a new table for each diverged label
-- so putting the "more dynamic" label at the end will save us some memory
local labels_table = {0, 0, 0}
local labels_table4 = {0, 0, 0, 0}
local upstream_target_addr_health_table = {
  { value = 0, labels = { 0, 0, 0, "healthchecks_off" } },
  { value = 0, labels = { 0, 0, 0, "healthy" } },
  { value = 0, labels = { 0, 0, 0, "unhealthy" } },
  { value = 0, labels = { 0, 0, 0, "dns_error" } },
}

-- Emit a 1/0 gauge sample for every known health state of one address.
-- `rows` is one of the reusable {value, labels} tables above; the entry
-- whose 4th label equals `status` gets value 1, all others get 0.
-- (parameter renamed from `table`, which shadowed the standard `table` library)
local function set_healthiness_metrics(rows, upstream, target, address, status, metrics_bucket)
  for i = 1, #rows do
    rows[i]['labels'][1] = upstream
    rows[i]['labels'][2] = target
    rows[i]['labels'][3] = address
    rows[i]['value'] = (status == rows[i]['labels'][4]) and 1 or 0
    metrics_bucket:set(rows[i]['value'], rows[i]['labels'])
  end
end


local log

if kong_subsystem == "http" then
  -- `message` is kong.log.serialize() output; `serialized` carries the extra
  -- fields the handler opted into (currently only `consumer`).
  function log(message, serialized)
    if not metrics then
      kong.log.err("prometheus: can not log metrics because of an initialization "
                   .. "error, please make sure that you've declared "
                   .. "'prometheus_metrics' shared dict in your nginx template")
      return
    end

    local service_name
    if message and message.service then
      service_name = message.service.name or message.service.host
    else
      -- do not record any stats if the service is not present
      return
    end

    local route_name
    if message and message.route then
      route_name = message.route.name or message.route.id
    end

    labels_table[1] = service_name
    labels_table[2] = route_name
    labels_table[3] = message.response.status
    metrics.status:inc(1, labels_table)

    local request_size = tonumber(message.request.size)
    if request_size and request_size > 0 then
      labels_table[3] = "ingress"
      metrics.bandwidth:inc(request_size, labels_table)
    end

    local response_size = tonumber(message.response.size)
    if response_size and response_size > 0 then
      labels_table[3] = "egress"
      metrics.bandwidth:inc(response_size, labels_table)
    end

    local request_latency = message.latencies.request
    if request_latency and request_latency >= 0 then
      labels_table[3] = "request"
      metrics.latency:observe(request_latency, labels_table)
    end

    local upstream_latency = message.latencies.proxy
    if upstream_latency ~= nil and upstream_latency >= 0 then
      labels_table[3] = "upstream"
      metrics.latency:observe(upstream_latency, labels_table)
    end

    local kong_proxy_latency = message.latencies.kong
    if kong_proxy_latency ~= nil and kong_proxy_latency >= 0 then
      labels_table[3] = "kong"
      metrics.latency:observe(kong_proxy_latency, labels_table)
    end

    if serialized.consumer ~= nil then
      labels_table4[1] = labels_table[1]
      labels_table4[2] = labels_table[2]
      labels_table4[3] = message.response.status
      labels_table4[4] = serialized.consumer
      metrics.consumer_status:inc(1, labels_table4)
    end
  end

else
  -- stream subsystem variant: session-based sizes/latencies, no consumer label
  function log(message)
    if not metrics then
      kong.log.err("prometheus: can not log metrics because of an initialization "
                   .. "error, please make sure that you've declared "
                   .. "'prometheus_metrics' shared dict in your nginx template")
      return
    end

    local service_name
    if message and message.service then
      service_name = message.service.name or message.service.host
    else
      -- do not record any stats if the service is not present
      return
    end

    local route_name
    if message and message.route then
      route_name = message.route.name or message.route.id
    end

    labels_table[1] = service_name
    labels_table[2] = route_name
    labels_table[3] = message.session.status
    metrics.status:inc(1, labels_table)

    local ingress_size = tonumber(message.session.received)
    if ingress_size and ingress_size > 0 then
      labels_table[3] = "ingress"
      metrics.bandwidth:inc(ingress_size, labels_table)
    end

    local egress_size = tonumber(message.session.sent)
    if egress_size and egress_size > 0 then
      labels_table[3] = "egress"
      metrics.bandwidth:inc(egress_size, labels_table)
    end

    local session_latency = message.latencies.session
    if session_latency and session_latency >= 0 then
      labels_table[3] = "request"
      metrics.latency:observe(session_latency, labels_table)
    end

    local kong_proxy_latency = message.latencies.kong
    if kong_proxy_latency ~= nil and kong_proxy_latency >= 0 then
      labels_table[3] = "kong"
      metrics.latency:observe(kong_proxy_latency, labels_table)
    end
  end
end


-- Collect every metric and render the exposition text for /metrics.
local function metric_data()
  if not prometheus or not metrics then
    kong.log.err("prometheus: plugin is not initialized, please make sure ",
                 " 'prometheus_metrics' shared dict is present 
in nginx template")
    return kong.response.exit(500, { message = "An unexpected error occurred" })
  end

  if ngx.location then
    local r = ngx.location.capture "/nginx_status"

    if r.status ~= 200 then
      kong.log.warn("prometheus: failed to retrieve /nginx_status ",
                    "while processing /metrics endpoint")

    else
      local accepted, handled, total = select(3, find(r.body,
                                              "accepts handled requests\n (%d*) (%d*) (%d*)"))
      metrics.connections:set(accepted, { "accepted" })
      metrics.connections:set(handled, { "handled" })
      metrics.connections:set(total, { "total" })
    end
  end

  metrics.connections:set(ngx.var.connections_active or 0, { "active" })
  metrics.connections:set(ngx.var.connections_reading or 0, { "reading" })
  metrics.connections:set(ngx.var.connections_writing or 0, { "writing" })
  metrics.connections:set(ngx.var.connections_waiting or 0, { "waiting" })

  -- db reachable?
  local ok, err = kong.db.connector:connect()
  if ok then
    metrics.db_reachable:set(1)

  else
    metrics.db_reachable:set(0)
    -- trailing space added so the rendered message reads
    -- "... while processing /metrics ..." instead of "processing/metrics"
    kong.log.err("prometheus: failed to reach database while processing ",
                 "/metrics endpoint: ", err)
  end

  -- erase all target/upstream metrics, prevent exposing old metrics
  metrics.upstream_target_health:reset()

  -- upstream targets accessible?
  local upstreams_dict = get_all_upstreams()
  for key, upstream_id in pairs(upstreams_dict) do
    -- keys look like "<workspace>:<upstream name>"; fall back to the raw key
    local _, upstream_name = key:match("^([^:]*):(.-)$")
    upstream_name = upstream_name and upstream_name or key
    -- based on logic from kong.db.dao.targets
    local health_info
    health_info, err = balancer.get_upstream_health(upstream_id)
    if err then
      kong.log.err("failed getting upstream health: ", err)
    end

    if health_info then
      for target_name, target_info in pairs(health_info) do
        if target_info ~= nil and target_info.addresses ~= nil and
          #target_info.addresses > 0 then
          -- healthchecks_off|healthy|unhealthy
          for _, address in ipairs(target_info.addresses) do
            local address_label = concat({address.ip, ':', address.port})
            local status = lower(address.health)
            set_healthiness_metrics(upstream_target_addr_health_table, upstream_name, target_name, address_label, status, metrics.upstream_target_health)
          end
        else
          -- dns_error
          set_healthiness_metrics(upstream_target_addr_health_table, upstream_name, target_name, '', 'dns_error', metrics.upstream_target_health)
        end
      end
    end
  end

  -- memory stats
  local res = kong.node.get_memory_stats()
  for shm_name, value in pairs(res.lua_shared_dicts) do
    metrics.memory_stats.shms:set(value.allocated_slabs, { shm_name, kong_subsystem })
  end
  for i = 1, #res.workers_lua_vms do
    metrics.memory_stats.worker_vms:set(res.workers_lua_vms[i].http_allocated_gc,
                                        { res.workers_lua_vms[i].pid, kong_subsystem })
  end

  if enterprise then
    enterprise.metric_data()
  end

  -- Hybrid mode status
  if role == "control_plane" then
    -- Cleanup old metrics
    metrics.data_plane_last_seen:reset()
    metrics.data_plane_config_hash:reset()

    for data_plane, err in kong.db.clustering_data_planes:each() do
      if err then
        kong.log.err("failed to list data planes: ", err)
        goto next_data_plane
      end

      local labels = { data_plane.id, data_plane.hostname, data_plane.ip }

      metrics.data_plane_last_seen:set(data_plane.last_seen, labels)
      metrics.data_plane_config_hash:set(config_hash_to_number(data_plane.config_hash), labels)

      labels[4] = data_plane.version
      local compatible = 1

      if data_plane.sync_status == CLUSTERING_SYNC_STATUS.KONG_VERSION_INCOMPATIBLE
        or data_plane.sync_status == CLUSTERING_SYNC_STATUS.PLUGIN_SET_INCOMPATIBLE
        or data_plane.sync_status == CLUSTERING_SYNC_STATUS.PLUGIN_VERSION_INCOMPATIBLE then

        compatible = 0
      end
      metrics.data_plane_version_compatible:set(compatible, labels)

      ::next_data_plane::
    end
  end

  return prometheus:metric_data()
end

-- Print HTTP metrics, then append stream-subsystem metrics fetched over the
-- stream API. `with_stream` is accepted for API compatibility but unused here.
local function collect(with_stream)
  ngx.header["Content-Type"] = "text/plain; charset=UTF-8"

  ngx.print(metric_data())

  -- only gather stream metrics if stream_api module is available
  -- and user has configured at least one stream listener
  if stream_available and #kong.configuration.stream_listeners > 0 then
    local res, err = stream_api.request("prometheus", "")
    if err then
      kong.log.err("failed to collect stream metrics: ", err)
    else
      ngx.print(res)
    end
  end
end

-- Accessor for the underlying prometheus instance (used by serve/status API).
-- Logs (but does not raise) when init() has not run successfully.
local function get_prometheus()
  if not prometheus then
    kong.log.err("prometheus: plugin is not initialized, please make sure ",
                 " 'prometheus_metrics' shared dict is present in nginx template")
  end
  return prometheus
end

return {
  init = init,
  init_worker = init_worker,
  log = log,
  metric_data = metric_data,
  collect = collect,
  get_prometheus = get_prometheus,
}
--------------------------------------------------------------------------------
-- /kong/plugins/prometheus/handler.lua:
--------------------------------------------------------------------------------
local prometheus = require "kong.plugins.prometheus.exporter"
local kong = kong


-- Register all metrics at module load time, so the shared-dict presence
-- check happens before any request is served.
prometheus.init()


local PrometheusHandler = {
  PRIORITY = 13,
  VERSION = "1.3.0",
}

-- Start the exporter's per-worker counter sync.
function PrometheusHandler.init_worker()
  prometheus.init_worker()
end


-- Log phase: serialize the request context and feed it to the exporter.
-- `serialized` only carries the consumer username, and only when the plugin
-- is configured with per_consumer = true and a consumer was authenticated.
function PrometheusHandler.log(self, conf)
  local message = kong.log.serialize()

  local serialized = {}
  if conf.per_consumer and message.consumer ~= nil then
    serialized.consumer = message.consumer.username
  end

  prometheus.log(message, serialized)
end


return PrometheusHandler
--------------------------------------------------------------------------------
-- /kong/plugins/prometheus/prometheus.lua:
--------------------------------------------------------------------------------
--- @module Prometheus
--
-- vim: ts=2:sw=2:sts=2:expandtab:textwidth=80
-- This module uses a single dictionary shared between Nginx workers to keep
-- all metrics. Each metric is stored as a separate entry in that dictionary.
--
-- In addition, each worker process has a separate set of counters within
-- its lua runtime that are used to track increments to counter metrics, and
-- are regularly flushed into the main shared dictionary. This is a performance
-- optimization that allows counters to be incremented without locking the
-- shared dictionary. It also means that counter increments are "eventually
-- consistent"; it can take up to a single counter sync interval (which
-- defaults to 1 second) for counter values to be visible for collection.
14 | -- 15 | -- Prometheus requires that (a) all samples for a given metric are presented 16 | -- as one uninterrupted group, and (b) buckets of a histogram appear in 17 | -- increasing numerical order. We satisfy that by carefully constructing full 18 | -- metric names (i.e. metric name along with all labels) so that they meet 19 | -- those requirements while being sorted alphabetically. In particular: 20 | -- 21 | -- * all labels for a given metric are presented in reproducible order (the one 22 | -- used when labels were declared). "le" label for histogram metrics always 23 | -- goes last; 24 | -- * bucket boundaries (which are exposed as values of the "le" label) are 25 | -- stored as floating point numbers with leading and trailing zeroes, 26 | -- and those zeros would be removed just before we expose the metrics; 27 | -- * internally "+Inf" bucket is stored as "Inf" (to make it appear after 28 | -- all numeric buckets), and gets replaced by "+Inf" just before we 29 | -- expose the metrics. 30 | -- 31 | -- For example, if you define your bucket boundaries as {0.00005, 10, 1000} 32 | -- then we will keep the following samples for a metric `m1` with label 33 | -- `site` set to `site1`: 34 | -- 35 | -- m1_bucket{site="site1",le="0000.00005"} 36 | -- m1_bucket{site="site1",le="0010.00000"} 37 | -- m1_bucket{site="site1",le="1000.00000"} 38 | -- m1_bucket{site="site1",le="Inf"} 39 | -- m1_count{site="site1"} 40 | -- m1_sum{site="site1"} 41 | -- 42 | -- And when exposing the metrics, their names will be changed to: 43 | -- 44 | -- m1_bucket{site="site1",le="0.00005"} 45 | -- m1_bucket{site="site1",le="10"} 46 | -- m1_bucket{site="site1",le="1000"} 47 | -- m1_bucket{site="site1",le="+Inf"} 48 | -- m1_count{site="site1"} 49 | -- m1_sum{site="site1"} 50 | -- 51 | -- You can find the latest version and documentation at 52 | -- https://github.com/knyar/nginx-lua-prometheus 53 | -- Released under MIT license. 

-- This library provides per-worker counters used to store counter metric
-- increments. Copied from https://github.com/Kong/lua-resty-counter
local resty_counter_lib = require("prometheus_resty_counter")

local Prometheus = {}
local mt = { __index = Prometheus }

-- Metric type flags, stored on each metric object as `typ`.
local TYPE_COUNTER = 0x1
local TYPE_GAUGE = 0x2
local TYPE_HISTOGRAM = 0x4
-- Human-readable type names keyed by the flags above.
local TYPE_LITERAL = {
  [TYPE_COUNTER] = "counter",
  [TYPE_GAUGE] = "gauge",
  [TYPE_HISTOGRAM] = "histogram",
}

-- Default name for error metric incremented by this library.
local DEFAULT_ERROR_METRIC_NAME = "nginx_metric_errors_total"

-- Default value for per-worker counter sync interval (seconds).
local DEFAULT_SYNC_INTERVAL = 1

-- Default set of latency buckets, 5ms to 10s:
local DEFAULT_BUCKETS = {0.005, 0.01, 0.02, 0.03, 0.05, 0.075, 0.1, 0.2, 0.3,
                         0.4, 0.5, 0.75, 1, 1.5, 2, 3, 4, 5, 10}


-- Accepted range of byte values for trailing (continuation) bytes of utf8
-- strings.
-- This is defined outside of the validate_utf8_string function as a const
-- variable to avoid creating and destroying table frequently.
-- Values in this table (and in validate_utf8_string) are from table 3-7 of
-- www.unicode.org/versions/Unicode6.2.0/UnicodeStandard-6.2.pdf
local accept_range = {
  {lo = 0x80, hi = 0xBF},
  {lo = 0xA0, hi = 0xBF},
  {lo = 0x80, hi = 0x9F},
  {lo = 0x90, hi = 0xBF},
  {lo = 0x80, hi = 0x8F}
}

-- Validate utf8 string for label values.
--
-- Args:
--   str: string
--
-- Returns:
--   (bool) whether the input string is a valid utf8 string.
--   (number) position of the first invalid byte.
local function validate_utf8_string(str)
  local i, n = 1, #str
  local first, byte, left_size, range_idx
  while i <= n do
    first = string.byte(str, i)
    -- ASCII bytes (< 0x80) need no continuation-byte check
    if first >= 0x80 then
      range_idx = 1
      if first >= 0xC2 and first <= 0xDF then -- 2 bytes
        left_size = 1
      elseif first >= 0xE0 and first <= 0xEF then -- 3 bytes
        left_size = 2
        -- 0xE0/0xED lead bytes restrict the FIRST continuation byte range
        -- (rows 2 and 3 of accept_range) to reject overlong/surrogate forms
        if first == 0xE0 then
          range_idx = 2
        elseif first == 0xED then
          range_idx = 3
        end
      elseif first >= 0xF0 and first <= 0xF4 then -- 4 bytes
        left_size = 3
        if first == 0xF0 then
          range_idx = 4
        elseif first == 0xF4 then
          range_idx = 5
        end
      else
        -- invalid lead byte (0x80-0xC1, 0xF5-0xFF)
        return false, i
      end

      -- truncated multi-byte sequence at end of string
      if i + left_size > n then
        return false, i
      end

      for j = 1, left_size do
        byte = string.byte(str, i + j)
        if byte < accept_range[range_idx].lo or byte > accept_range[range_idx].hi then
          return false, i
        end
        -- only the first continuation byte has a restricted range;
        -- subsequent ones always use row 1 (0x80-0xBF)
        range_idx = 1
      end
      i = i + left_size
    end
    i = i + 1
  end
  return true
end

-- Generate full metric name that includes all labels.
--
-- Args:
--   name: string
--   label_names: (array) a list of label keys.
--   label_values: (array) a list of label values.
--
-- Returns:
--   (string) full metric name.
-- Generate full metric name that includes all labels.
--
-- Args:
--   name: string
--   label_names: (array) a list of label keys.
--   label_values: (array) a list of label values.
--
-- Returns:
--   (string) full metric name, e.g. `name{key1="v1",key2="v2"}`.
local function full_metric_name(name, label_names, label_values)
  if not label_names then
    return name
  end

  local rendered = {}
  for idx, label_name in ipairs(label_names) do
    local raw = label_values[idx]
    local value
    if type(raw) ~= "string" then
      value = tostring(raw)
    else
      -- truncate the value at its first invalid utf8 byte, then escape
      -- backslashes and double quotes for the exposition format
      local valid, pos = validate_utf8_string(raw)
      if not valid then
        raw = string.sub(raw, 1, pos - 1)
      end
      value = raw:gsub("\\", "\\\\"):gsub('"', '\\"')
    end
    rendered[idx] = label_name .. '="' .. value .. '"'
  end
  return name .. "{" .. table.concat(rendered, ",") .. "}"
end

-- Extract short metric name from the full one.
--
-- This function is only used by Prometheus:metric_data.
--
-- Args:
--   full_name: (string) full metric name that can include labels.
--
-- Returns:
--   (string) short metric name with no labels. For a `*_bucket` metric of
--     histogram the _bucket suffix will be removed.
local function short_metric_name(full_name)
  local labels_start = full_name:find("{")
  if not labels_start then
    -- no labels at all: the full name already is the short name
    return full_name
  end

  -- Histogram detection: `_bucket` sorts alphabetically before the other
  -- histogram suffixes (`_count` and `_sum`), so checking it is sufficient.
  local bucket_suffix = full_name:find("_bucket{")
  if bucket_suffix and full_name:find("le=") then
    return full_name:sub(1, bucket_suffix - 1)
  end

  return full_name:sub(1, labels_start - 1)
end

-- Check metric name and label names for correctness.
--
-- Regular expressions to validate metric and label names are
-- documented in https://prometheus.io/docs/concepts/data_model/
--
-- Args:
--   metric_name: (string) metric name.
--   label_names: label names (array of strings).
--
-- Returns:
--   Either an error string, or nil if no errors were found.
local function check_metric_and_label_names(metric_name, label_names)
  if not metric_name:match("^[a-zA-Z_:][a-zA-Z0-9_:]*$") then
    return "Metric name '" .. metric_name .. "' is invalid"
  end

  for _, label_name in ipairs(label_names or {}) do
    -- "le" is reserved for histogram bucket boundaries
    if label_name == "le" then
      return "Invalid label name 'le' in " .. metric_name
    end
    if not label_name:match("^[a-zA-Z_][a-zA-Z0-9_]*$") then
      return "Metric '" .. metric_name .. "' label name '" .. label_name ..
             "' is invalid"
    end
  end
end

-- Construct bucket format for a list of buckets.
--
-- This receives a list of buckets and returns a sprintf template that should
-- be used for bucket boundaries to make them come in increasing order when
-- sorted alphabetically.
--
-- To re-phrase, this is where we detect how many leading and trailing zeros we
-- need.
--
-- Args:
--   buckets: a list of buckets
--
-- Returns:
--   (string) a sprintf template, e.g. "%010.5f".
local function construct_bucket_format(buckets)
  local max_order = 1
  local max_precision = 1
  for _, bucket in ipairs(buckets) do
    assert(type(bucket) == "number", "bucket boundaries should be numeric")
    -- floating point number with all trailing zeros removed
    local as_string = string.format("%f", bucket):gsub("0*$", "")
    local dot_idx = as_string:find(".", 1, true)
    -- digits before the dot drive the leading-zero padding,
    -- digits after it drive the precision
    local order = dot_idx - 1
    local precision = as_string:len() - dot_idx
    if order > max_order then
      max_order = order
    end
    if precision > max_precision then
      max_precision = precision
    end
  end
  return "%0" .. (max_order + max_precision + 1) .. "." .. max_precision .. "f"
end

-- Format bucket format when exposing metrics.
--
-- This function removes leading and trailing zeroes from `le` label values,
-- and rewrites the internal "Inf" sentinel to the "+Inf" Prometheus expects.
--
-- Args:
--   key: the metric key
--
-- Returns:
--   (string) the formatted key
local function fix_histogram_bucket_labels(key)
  local prefix, le_value, suffix = key:match('(.*[,{]le=")(.*)(".*)')
  if prefix == nil then
    -- no "le" label present; nothing to rewrite
    return key
  end

  if le_value == "Inf" then
    return table.concat({prefix, "+Inf", suffix})
  end
  -- round-trip through tonumber to strip the zero padding
  return table.concat({prefix, tostring(tonumber(le_value)), suffix})
end

-- Return a full metric name for a given metric+label combination.
--
-- This function calculates a full metric name (or, in case of a histogram
-- metric, several metric names) for a given combination of label values. It
-- stores the result in a tree of tables used as a cache (self.lookup) and
-- uses that cache to return results faster.
--
-- Args:
--   self: a `metric` object, created by register().
--   label_values: a list of label values.
--
-- Returns:
--   - If `self` is a counter or a gauge: full metric name as a string.
--   - If `self` is a histogram metric: a list of strings:
--     [0]: full name of the _count histogram metric;
--     [1]: full name of the _sum histogram metric;
--     [...]: full names of each _bucket metrics.
local function lookup_or_create(self, label_values)
  -- If one of the `label_values` is nil, #label_values will return the number
  -- of non-nil labels in the beginning of the list. This will make us return an
  -- error here as well.
  local cnt = label_values and #label_values or 0
  -- specially, if first element is nil, # will treat it as "non-empty"
  if cnt ~= self.label_count or (self.label_count > 0 and not label_values[1]) then
    return nil, string.format("inconsistent labels count, expected %d, got %d",
                              self.label_count, cnt)
  end
  -- walk (and extend) the cache tree, one level per label value
  local t = self.lookup
  if label_values then
    -- Don't use ipairs here to avoid inner loop generates trace first
    -- Otherwise the inner for loop below is likely to get JIT compiled before
    -- the outer loop which include `lookup_or_create`, in this case the trace
    -- for outer loop will be aborted. By not using ipairs, we will be able to
    -- compile longer traces as possible.
    local label
    for i=1, self.label_count do
      label = label_values[i]
      if not t[label] then
        t[label] = {}
      end
      t = t[label]
    end
  end

  -- `mt` is used as the leaf key because it can never collide with a label value
  local LEAF_KEY = mt -- key used to store full metric names in leaf tables.
  local full_name = t[LEAF_KEY]
  if full_name then
    return full_name
  end

  if self.typ == TYPE_HISTOGRAM then
    -- Pass empty metric name to full_metric_name to just get the formatted
    -- labels ({key1="value1",key2="value2",...}).
    local labels = full_metric_name("", self.label_names, label_values)
    full_name = {
      self.name .. "_count" .. labels,
      self.name .. "_sum" .. labels,
    }

    local bucket_pref
    if self.label_count > 0 then
      -- strip last }
      bucket_pref = self.name .. "_bucket" .. string.sub(labels, 1, #labels-1) .. ","
    else
      bucket_pref = self.name .. "_bucket{"
    end

    -- bucket boundaries are zero-padded via bucket_format so that they
    -- sort alphabetically in increasing numeric order
    for i, buc in ipairs(self.buckets) do
      full_name[i+2] = string.format("%sle=\"%s\"}", bucket_pref, self.bucket_format:format(buc))
    end
    -- Last bucket. Note, that the label value is "Inf" rather than "+Inf"
    -- required by Prometheus. This is necessary for this bucket to be the last
    -- one when all metrics are lexicographically sorted. "Inf" will get replaced
    -- by "+Inf" in Prometheus:metric_data().
    full_name[self.bucket_count+3] = string.format("%sle=\"Inf\"}", bucket_pref)
  else
    full_name = full_metric_name(self.name, self.label_names, label_values)
  end
  t[LEAF_KEY] = full_name
  return full_name
end

-- Increment a gauge metric.
--
-- Gauges are incremented in the dictionary directly to provide strong ordering
-- of inc() and set() operations.
--
-- Args:
--   self: a `metric` object, created by register().
--   value: numeric value to increment by. Can be negative.
--   label_values: a list of label values, in the same order as label keys.
local function inc_gauge(self, value, label_values)
  local k, err, _
  k, err = lookup_or_create(self, label_values)
  if err then
    self._log_error(err)
    return
  end

  -- incr with a 0 init value creates the key if it does not exist yet
  _, err, _ = self._dict:incr(k, value, 0)
  if err then
    self._log_error_kv(k, value, err)
  end
end

local ERR_MSG_COUNTER_NOT_INITIALIZED = "counter not initialized! " ..
  "Have you called Prometheus:init() from the " ..
  "init_worker_by_lua_block nginx phase?"

-- Increment a counter metric.
--
-- Counters are incremented in the per-worker counter, which will eventually get
-- flushed into the global shared dictionary.
--
-- Args:
--   self: a `metric` object, created by register().
--   value: numeric value to increment by. Negative values are rejected
--     (counters are monotonic; see the guard in inc_counter).
--   label_values: a list of label values, in the same order as label keys.
local function inc_counter(self, value, label_values)
  -- counter is not allowed to decrease
  if value and value < 0 then
    self._log_error_kv(self.name, value, "Value should not be negative")
    return
  end

  local full_name, lookup_err = lookup_or_create(self, label_values)
  if lookup_err then
    self._log_error(lookup_err)
    return
  end

  -- lazily resolve and cache the per-worker counter; it is created on the
  -- parent Prometheus object by init(), possibly after this metric existed
  local counter = self._counter or self.parent._counter
  if not counter then
    self._log_error(ERR_MSG_COUNTER_NOT_INITIALIZED)
    return
  end
  self._counter = counter

  counter:incr(full_name, value)
end

-- Delete a counter or a gauge metric.
--
-- Args:
--   self: a `metric` object, created by register().
--   label_values: a list of label values, in the same order as label keys.
local function del(self, label_values)
  local full_name, lookup_err = lookup_or_create(self, label_values)
  if lookup_err then
    self._log_error(lookup_err)
    return
  end

  -- `del` might be called immediately after a configuration change that stops
  -- a given metric from being used, so other workers may still hold unflushed
  -- counter increments for it. Wait one sync_interval so those values get
  -- synced (and removed from worker-local counters) before the key is deleted.
  -- Gauge metrics are written straight to the dict, so no wait is needed.
  if self.typ ~= TYPE_GAUGE then
    ngx.log(ngx.INFO, "waiting ", self.parent.sync_interval, "s for counter to sync")
    ngx.sleep(self.parent.sync_interval)
  end

  local _, delete_err = self._dict:delete(full_name)
  if delete_err then
    self._log_error("Error deleting key: ".. full_name .. ": " .. delete_err)
  end
end

-- Set the value of a gauge metric.
463 | -- 464 | -- Args: 465 | -- self: a `metric` object, created by register(). 466 | -- value: numeric value. 467 | -- label_values: a list of label values, in the same order as label keys. 468 | local function set(self, value, label_values) 469 | if not value then 470 | self._log_error("No value passed for " .. self.name) 471 | return 472 | end 473 | 474 | local k, _, err 475 | k, err = lookup_or_create(self, label_values) 476 | if err then 477 | self._log_error(err) 478 | return 479 | end 480 | _, err = self._dict:safe_set(k, value) 481 | if err then 482 | self._log_error_kv(k, value, err) 483 | end 484 | end 485 | 486 | -- Record a given value in a histogram. 487 | -- 488 | -- Args: 489 | -- self: a `metric` object, created by register(). 490 | -- value: numeric value to record. Should be defined. 491 | -- label_values: a list of label values, in the same order as label keys. 492 | local function observe(self, value, label_values) 493 | if not value then 494 | self._log_error("No value passed for " .. self.name) 495 | return 496 | end 497 | 498 | local keys, err = lookup_or_create(self, label_values) 499 | if err then 500 | self._log_error(err) 501 | return 502 | end 503 | 504 | local c = self._counter 505 | if not c then 506 | c = self.parent._counter 507 | if not c then 508 | self._log_error(ERR_MSG_COUNTER_NOT_INITIALIZED) 509 | return 510 | end 511 | self._counter = c 512 | end 513 | 514 | -- _count metric. 515 | c:incr(keys[1], 1) 516 | 517 | -- _sum metric. 518 | c:incr(keys[2], value) 519 | 520 | local seen = false 521 | -- check in reverse order, otherwise we will always 522 | -- need to traverse the whole table. 523 | for i=self.bucket_count, 1, -1 do 524 | if value <= self.buckets[i] then 525 | c:incr(keys[2+i], 1) 526 | seen = true 527 | elseif seen then 528 | break 529 | end 530 | end 531 | -- the last bucket (le="Inf"). 532 | c:incr(keys[self.bucket_count+3], 1) 533 | end 534 | 535 | -- Delete all metrics for a given gauge, counter or a histogram. 
--
-- This is like `del`, but will delete all time series for all previously
-- recorded label values.
--
-- Args:
--   self: a `metric` object, created by register().
local function reset(self)
  -- Wait for other worker threads to sync their counters before removing the
  -- metric (please see `del` for a more detailed comment).
  -- Gauge metrics don't use per-worker counters, so for gauges we don't need to
  -- wait for the counter to sync.
  if self.typ ~= TYPE_GAUGE then
    ngx.log(ngx.INFO, "waiting ", self.parent.sync_interval, "s for counter to sync")
    ngx.sleep(self.parent.sync_interval)
  end

  local keys = self._dict:get_keys(0)
  -- Map of full-name prefix -> prefix length. Any dictionary key starting
  -- with one of these prefixes belongs to this metric and must be removed.
  local name_prefixes = {}
  local name_prefix_length_base = #self.name
  if self.typ == TYPE_HISTOGRAM then
    if self.label_count == 0 then
      name_prefixes[self.name .. "_count"] = name_prefix_length_base + 6
      name_prefixes[self.name .. "_sum"] = name_prefix_length_base + 4
    else
      name_prefixes[self.name .. "_count{"] = name_prefix_length_base + 7
      name_prefixes[self.name .. "_sum{"] = name_prefix_length_base + 5
    end
    name_prefixes[self.name .. "_bucket{"] = name_prefix_length_base + 8
  else
    name_prefixes[self.name .. "{"] = name_prefix_length_base + 1
  end

  for _, key in ipairs(keys) do
    local value, err = self._dict:get(key)
    if value then
      -- For a metric to be deleted its name should either match exactly, or
      -- have a prefix listed in `name_prefixes` (which ensures deletion of
      -- metrics with label values).
      local remove = key == self.name
      if not remove then
        for name_prefix, name_prefix_length in pairs(name_prefixes) do
          if name_prefix == string.sub(key, 1, name_prefix_length) then
            remove = true
            break
          end
        end
      end
      if remove then
        local _, set_err = self._dict:safe_set(key, nil)
        if set_err then
          self._log_error("Error resetting '", key, "': ", set_err)
        end
      end
    else
      self._log_error("Error getting '", key, "': ", err)
    end
  end

  -- Clean up the full metric name lookup table as well.
  self.lookup = {}
end

-- Initialize the module.
--
-- This should be called once from the `init_by_lua` section in nginx
-- configuration.
--
-- Args:
--   dict_name: (string) name of the nginx shared dictionary which will be
--     used to store all metrics
--   options_or_prefix: (optional) either an options table (supported keys:
--     `prefix`, `error_metric_name`, `sync_interval`), or a plain string
--     which is used as a prefix added to all metric names on output
--
-- Returns:
--   an object that should be used to register metrics.
function Prometheus.init(dict_name, options_or_prefix)
  if ngx.get_phase() ~= 'init' and ngx.get_phase() ~= 'init_worker' and
      ngx.get_phase() ~= 'timer' then
    error('Prometheus.init can only be called from ' ..
      'init_by_lua_block, init_worker_by_lua_block or timer' , 2)
  end

  local self = setmetatable({}, mt)
  dict_name = dict_name or "prometheus_metrics"
  self.dict_name = dict_name
  self.dict = ngx.shared[dict_name]
  if self.dict == nil then
    error("Dictionary '" .. dict_name .. "' does not seem to exist. " ..
      "Please define the dictionary using `lua_shared_dict`.", 2)
  end

  -- Second argument may be either an options table or a legacy prefix string.
  if type(options_or_prefix) == "table" then
    self.prefix = options_or_prefix.prefix or ''
    self.error_metric_name = options_or_prefix.error_metric_name or
      DEFAULT_ERROR_METRIC_NAME
    self.sync_interval = options_or_prefix.sync_interval or
      DEFAULT_SYNC_INTERVAL
  else
    self.prefix = options_or_prefix or ''
    self.error_metric_name = DEFAULT_ERROR_METRIC_NAME
    self.sync_interval = DEFAULT_SYNC_INTERVAL
  end

  self.registry = {}

  self.initialized = true

  -- Pre-register the error counter and force it to appear on the metrics
  -- page even before the first error happens.
  self:counter(self.error_metric_name, "Number of nginx-lua-prometheus errors")
  self.dict:set(self.error_metric_name, 0)

  if ngx.get_phase() == 'init_worker' then
    self:init_worker(self.sync_interval)
  end
  return self
end

-- Initialize the worker counter.
--
-- You can call this function from `init_worker_by_lua` if you are calling
-- Prometheus.init() from `init_by_lua`, but this is deprecated. Instead, just
-- call Prometheus.init() from `init_worker_by_lua_block` and pass sync_interval
-- as part of the `options` argument if you need.
--
-- Args:
--   sync_interval: per-worker counter sync interval (in seconds).
function Prometheus:init_worker(sync_interval)
  if ngx.get_phase() ~= 'init_worker' then
    error('Prometheus:init_worker can only be called in ' ..
      'init_worker_by_lua_block', 2)
  end
  if self._counter then
    ngx.log(ngx.WARN, 'init_worker() has been called twice. ' ..
      'Please do not explicitly call init_worker. ' ..
      'Instead, call Prometheus:init() in the init_worker_by_lua_block')
    return
  end
  self.sync_interval = sync_interval or DEFAULT_SYNC_INTERVAL
  local counter_instance, err = resty_counter_lib.new(
      self.dict_name, self.sync_interval)
  if err then
    error(err, 2)
  end
  self._counter = counter_instance
end

-- Register a new metric.
--
-- Args:
--   self: a Prometheus object.
--   name: (string) name of the metric. Required.
--   help: (string) description of the metric. Will be used for the HELP
--     comment on the metrics page. Optional.
--   label_names: array of strings, defining a list of metrics. Optional.
--   buckets: array of numbers, defining bucket boundaries. Only used for
--     histogram metrics.
--   typ: metric type (one of the TYPE_* constants).
--
-- Returns:
--   a new metric object.
local function register(self, name, help, label_names, buckets, typ)
  if not self.initialized then
    ngx.log(ngx.ERR, "Prometheus module has not been initialized")
    return
  end

  local err = check_metric_and_label_names(name, label_names)
  if err then
    self:log_error(err)
    return
  end

  -- Strip histogram suffixes so that e.g. registering "latency_sum" clashes
  -- with an existing "latency" histogram (and vice versa, see below).
  local name_maybe_histogram = name:gsub("_bucket$", "")
                                   :gsub("_count$", "")
                                   :gsub("_sum$", "")
  if (typ ~= TYPE_HISTOGRAM and (
      self.registry[name] or self.registry[name_maybe_histogram]
    )) or
    (typ == TYPE_HISTOGRAM and (
      self.registry[name] or
      self.registry[name .. "_count"] or
      self.registry[name .. "_sum"] or self.registry[name .. "_bucket"]
    )) then

    self:log_error("Duplicate metric " .. name)
    return
  end

  local metric = {
    name = name,
    help = help,
    typ = typ,
    label_names = label_names,
    label_count = label_names and #label_names or 0,
    -- Lookup is a tree of lua tables that contain label values, with leaf
    -- tables containing full metric names. For example, given a metric
    -- `http_count` and labels `host` and `status`, it might contain the
    -- following values:
    -- ['me.com']['200'][LEAF_KEY] = 'http_count{host="me.com",status="200"}'
    -- ['me.com']['500'][LEAF_KEY] = 'http_count{host="me.com",status="500"}'
    -- ['my.net']['200'][LEAF_KEY] = 'http_count{host="my.net",status="200"}'
    -- ['my.net']['500'][LEAF_KEY] = 'http_count{host="my.net",status="500"}'
    lookup = {},
    parent = self,
    -- Store a reference for logging functions for faster lookup.
    _log_error = function(...) self:log_error(...) end,
    _log_error_kv = function(...) self:log_error_kv(...) end,
    _dict = self.dict,
    reset = reset,
  }
  if typ < TYPE_HISTOGRAM then
    if typ == TYPE_GAUGE then
      metric.set = set
      metric.inc = inc_gauge
    else
      metric.inc = inc_counter
    end
    metric.del = del
  else
    metric.observe = observe
    metric.buckets = buckets or DEFAULT_BUCKETS
    metric.bucket_count = #metric.buckets
    metric.bucket_format = construct_bucket_format(metric.buckets)
  end

  self.registry[name] = metric
  return metric
end

-- Public function to register a counter.
function Prometheus:counter(name, help, label_names)
  return register(self, name, help, label_names, nil, TYPE_COUNTER)
end

-- Public function to register a gauge.
function Prometheus:gauge(name, help, label_names)
  return register(self, name, help, label_names, nil, TYPE_GAUGE)
end

-- Public function to register a histogram.
function Prometheus:histogram(name, help, label_names, buckets)
  return register(self, name, help, label_names, buckets, TYPE_HISTOGRAM)
end

-- Prometheus compatible metric data as an array of strings.
--
-- Returns:
--   Array of strings with all metrics in a text format compatible with
--   Prometheus.
function Prometheus:metric_data()
  if not self.initialized then
    ngx.log(ngx.ERR, "Prometheus module has not been initialized")
    return
  end

  -- Force a manual sync of counter local state (mostly to make tests work).
  self._counter:sync()

  local keys = self.dict:get_keys(0)
  -- Prometheus server expects buckets of a histogram to appear in increasing
  -- numerical order of their label values.
  table.sort(keys)

  -- Emit a HELP/TYPE header the first time each short metric name is seen.
  local header_emitted = {}
  local output = {}
  for _, key in ipairs(keys) do
    local value, err = self.dict:get(key)
    if not value then
      self:log_error("Error getting '", key, "': ", err)
    else
      local short_name = short_metric_name(key)
      if not header_emitted[short_name] then
        header_emitted[short_name] = true
        local metric = self.registry[short_name]
        if metric then
          if metric.help then
            output[#output + 1] = string.format("# HELP %s%s %s\n",
              self.prefix, short_name, metric.help)
          end
          if metric.typ then
            output[#output + 1] = string.format("# TYPE %s%s %s\n",
              self.prefix, short_name, TYPE_LITERAL[metric.typ])
          end
        end
      end
      output[#output + 1] = string.format("%s%s %s\n",
        self.prefix, fix_histogram_bucket_labels(key), value)
    end
  end
  return output
end

-- Present all metrics in a text format compatible with Prometheus.
--
-- This function should be used to expose the metrics on a separate HTTP page.
830 | -- It will get the metrics from the dictionary, sort them, and expose them 831 | -- aling with TYPE and HELP comments. 832 | function Prometheus:collect() 833 | ngx.header["Content-Type"] = "text/plain" 834 | ngx.print(self:metric_data()) 835 | end 836 | 837 | -- Log an error, incrementing the error counter. 838 | function Prometheus:log_error(...) 839 | ngx.log(ngx.ERR, ...) 840 | self.dict:incr(self.error_metric_name, 1, 0) 841 | end 842 | 843 | -- Log an error that happened while setting up a dictionary key. 844 | function Prometheus:log_error_kv(key, value, err) 845 | self:log_error( 846 | "Error while setting '", key, "' to '", value, "': '", err, "'") 847 | end 848 | 849 | return Prometheus 850 | -------------------------------------------------------------------------------- /kong/plugins/prometheus/schema.lua: -------------------------------------------------------------------------------- 1 | local function validate_shared_dict() 2 | if not ngx.shared.prometheus_metrics then 3 | return nil, 4 | "ngx shared dict 'prometheus_metrics' not found" 5 | end 6 | return true 7 | end 8 | 9 | 10 | return { 11 | name = "prometheus", 12 | fields = { 13 | { config = { 14 | type = "record", 15 | fields = { 16 | { per_consumer = { type = "boolean", default = false }, }, 17 | }, 18 | custom_validator = validate_shared_dict, 19 | }, }, 20 | }, 21 | } 22 | -------------------------------------------------------------------------------- /kong/plugins/prometheus/serve.lua: -------------------------------------------------------------------------------- 1 | local lapis = require "lapis" 2 | local prometheus = require "kong.plugins.prometheus.exporter" 3 | 4 | 5 | local kong = kong 6 | 7 | 8 | local app = lapis.Application() 9 | 10 | 11 | app.default_route = function(self) 12 | local path = self.req.parsed_url.path:match("^(.*)/$") 13 | 14 | if path and self.app.router:resolve(path, self) then 15 | return 16 | 17 | elseif self.app.router:resolve(self.req.parsed_url.path .. 
"/", self) then 18 | return 19 | end 20 | 21 | return self.app.handle_404(self) 22 | end 23 | 24 | 25 | app.handle_404 = function(self) -- luacheck: ignore 212 26 | local body = '{"message":"Not found"}' 27 | ngx.status = 404 28 | ngx.header["Content-Type"] = "application/json; charset=utf-8" 29 | ngx.header["Content-Length"] = #body + 1 30 | ngx.say(body) 31 | end 32 | 33 | 34 | app:match("/", function() 35 | kong.response.exit(200, "Kong Prometheus exporter, visit /metrics") 36 | end) 37 | 38 | 39 | app:match("/metrics", function() 40 | prometheus:collect() 41 | end) 42 | 43 | 44 | return { 45 | prometheus_server = function() 46 | return lapis.serve(app) 47 | end, 48 | } 49 | -------------------------------------------------------------------------------- /kong/plugins/prometheus/status_api.lua: -------------------------------------------------------------------------------- 1 | local prometheus = require "kong.plugins.prometheus.exporter" 2 | 3 | 4 | return { 5 | ["/metrics"] = { 6 | GET = function() 7 | prometheus.collect() 8 | end, 9 | }, 10 | } 11 | -------------------------------------------------------------------------------- /spec/01-api_spec.lua: -------------------------------------------------------------------------------- 1 | local cjson = require "cjson" 2 | local helpers = require "spec.helpers" 3 | 4 | describe("Plugin: prometheus (API)",function() 5 | local admin_client 6 | 7 | describe("with no 'prometheus_metrics' shm defined", function() 8 | setup(function() 9 | helpers.get_db_utils() 10 | assert(helpers.start_kong({ 11 | plugins = "bundled, prometheus", 12 | })) 13 | 14 | admin_client = helpers.admin_client() 15 | end) 16 | teardown(function() 17 | if admin_client then 18 | admin_client:close() 19 | end 20 | 21 | helpers.stop_kong() 22 | end) 23 | 24 | -- skipping since Kong always injected a `prometheus_metrics` shm when 25 | -- prometheus plugin is loaded into memory 26 | pending("prometheus plugin cannot be configured", function() 27 | 
local res = assert(admin_client:send { 28 | method = "POST", 29 | path = "/plugins", 30 | body = { 31 | name = "prometheus" 32 | }, 33 | headers = { 34 | ["Content-Type"] = "application/json" 35 | } 36 | }) 37 | local body = assert.res_status(400, res) 38 | local json = cjson.decode(body) 39 | assert.equal(json.config, "ngx shared dict 'prometheus_metrics' not found") 40 | end) 41 | end) 42 | 43 | describe("with 'prometheus_metrics' defined", function() 44 | setup(function() 45 | helpers.get_db_utils() 46 | assert(helpers.start_kong({ 47 | plugins = "bundled, prometheus", 48 | })) 49 | 50 | admin_client = helpers.admin_client() 51 | end) 52 | teardown(function() 53 | if admin_client then 54 | admin_client:close() 55 | end 56 | 57 | helpers.stop_kong() 58 | end) 59 | 60 | it("prometheus plugin can be configured", function() 61 | local res = assert(admin_client:send { 62 | method = "POST", 63 | path = "/plugins", 64 | body = { 65 | name = "prometheus" 66 | }, 67 | headers = { 68 | ["Content-Type"] = "application/json" 69 | } 70 | }) 71 | assert.res_status(201, res) 72 | end) 73 | end) 74 | end) 75 | -------------------------------------------------------------------------------- /spec/02-access_spec.lua: -------------------------------------------------------------------------------- 1 | local helpers = require "spec.helpers" 2 | 3 | local TCP_SERVICE_PORT = 8189 4 | local TCP_PROXY_PORT = 9007 5 | 6 | -- Note: remove the below hack when https://github.com/Kong/kong/pull/6952 is merged 7 | local stream_available, _ = pcall(require, "kong.tools.stream_api") 8 | 9 | local spec_path = debug.getinfo(1).source:match("@?(.*/)") 10 | 11 | local nginx_conf 12 | if stream_available then 13 | nginx_conf = spec_path .. 
"/fixtures/prometheus/custom_nginx.template" 14 | else 15 | nginx_conf = "./spec/fixtures/custom_nginx.template" 16 | end 17 | -- Note ends 18 | 19 | describe("Plugin: prometheus (access)", function() 20 | local proxy_client 21 | local admin_client 22 | local proxy_client_grpc 23 | local proxy_client_grpcs 24 | 25 | setup(function() 26 | local bp = helpers.get_db_utils() 27 | 28 | local service = bp.services:insert { 29 | name = "mock-service", 30 | host = helpers.mock_upstream_host, 31 | port = helpers.mock_upstream_port, 32 | protocol = helpers.mock_upstream_protocol, 33 | } 34 | 35 | bp.routes:insert { 36 | protocols = { "http" }, 37 | name = "http-route", 38 | paths = { "/" }, 39 | methods = { "GET" }, 40 | service = service, 41 | } 42 | 43 | local grpc_service = bp.services:insert { 44 | name = "mock-grpc-service", 45 | url = "grpc://grpcbin:9000", 46 | } 47 | 48 | bp.routes:insert { 49 | protocols = { "grpc" }, 50 | name = "grpc-route", 51 | hosts = { "grpc" }, 52 | service = grpc_service, 53 | } 54 | 55 | local grpcs_service = bp.services:insert { 56 | name = "mock-grpcs-service", 57 | url = "grpcs://grpcbin:9001", 58 | } 59 | 60 | bp.routes:insert { 61 | protocols = { "grpcs" }, 62 | name = "grpcs-route", 63 | hosts = { "grpcs" }, 64 | service = grpcs_service, 65 | } 66 | 67 | local tcp_service = bp.services:insert { 68 | name = "tcp-service", 69 | url = "tcp://127.0.0.1:" .. TCP_SERVICE_PORT, 70 | } 71 | 72 | bp.routes:insert { 73 | protocols = { "tcp" }, 74 | name = "tcp-route", 75 | service = tcp_service, 76 | destinations = { { port = TCP_PROXY_PORT } }, 77 | } 78 | 79 | bp.plugins:insert { 80 | protocols = { "http", "https", "grpc", "grpcs", "tcp", "tls" }, 81 | name = "prometheus" 82 | } 83 | 84 | helpers.tcp_server(TCP_SERVICE_PORT) 85 | assert(helpers.start_kong { 86 | nginx_conf = nginx_conf, 87 | plugins = "bundled, prometheus", 88 | stream_listen = "127.0.0.1:" .. 
TCP_PROXY_PORT, 89 | }) 90 | proxy_client = helpers.proxy_client() 91 | admin_client = helpers.admin_client() 92 | proxy_client_grpc = helpers.proxy_client_grpc() 93 | proxy_client_grpcs = helpers.proxy_client_grpcs() 94 | end) 95 | 96 | teardown(function() 97 | helpers.kill_tcp_server(TCP_SERVICE_PORT) 98 | if proxy_client then 99 | proxy_client:close() 100 | end 101 | if admin_client then 102 | admin_client:close() 103 | end 104 | 105 | helpers.stop_kong() 106 | end) 107 | 108 | it("increments the count for proxied requests", function() 109 | local res = assert(proxy_client:send { 110 | method = "GET", 111 | path = "/status/200", 112 | headers = { 113 | host = helpers.mock_upstream_host, 114 | } 115 | }) 116 | assert.res_status(200, res) 117 | 118 | helpers.wait_until(function() 119 | local res = assert(admin_client:send { 120 | method = "GET", 121 | path = "/metrics", 122 | }) 123 | local body = assert.res_status(200, res) 124 | assert.matches('kong_nginx_metric_errors_total 0', body, nil, true) 125 | 126 | return body:find('kong_http_status{service="mock-service",route="http-route",code="200"} 1', nil, true) 127 | end) 128 | 129 | res = assert(proxy_client:send { 130 | method = "GET", 131 | path = "/status/400", 132 | headers = { 133 | host = helpers.mock_upstream_host, 134 | } 135 | }) 136 | assert.res_status(400, res) 137 | 138 | helpers.wait_until(function() 139 | local res = assert(admin_client:send { 140 | method = "GET", 141 | path = "/metrics", 142 | }) 143 | local body = assert.res_status(200, res) 144 | assert.matches('kong_nginx_metric_errors_total 0', body, nil, true) 145 | 146 | return body:find('kong_http_status{service="mock-service",route="http-route",code="400"} 1', nil, true) 147 | end) 148 | end) 149 | 150 | it("increments the count for proxied grpc requests", function() 151 | local ok, resp = proxy_client_grpc({ 152 | service = "hello.HelloService.SayHello", 153 | body = { 154 | greeting = "world!" 
155 | }, 156 | opts = { 157 | ["-authority"] = "grpc", 158 | } 159 | }) 160 | assert(ok, resp) 161 | assert.truthy(resp) 162 | 163 | helpers.wait_until(function() 164 | local res = assert(admin_client:send { 165 | method = "GET", 166 | path = "/metrics", 167 | }) 168 | local body = assert.res_status(200, res) 169 | assert.matches('kong_nginx_metric_errors_total 0', body, nil, true) 170 | 171 | return body:find('kong_http_status{service="mock-grpc-service",route="grpc-route",code="200"} 1', nil, true) 172 | end) 173 | 174 | ok, resp = proxy_client_grpcs({ 175 | service = "hello.HelloService.SayHello", 176 | body = { 177 | greeting = "world!" 178 | }, 179 | opts = { 180 | ["-authority"] = "grpcs", 181 | } 182 | }) 183 | assert(ok, resp) 184 | assert.truthy(resp) 185 | 186 | helpers.wait_until(function() 187 | local res = assert(admin_client:send { 188 | method = "GET", 189 | path = "/metrics", 190 | }) 191 | local body = assert.res_status(200, res) 192 | assert.matches('kong_nginx_metric_errors_total 0', body, nil, true) 193 | 194 | return body:find('kong_http_status{service="mock-grpcs-service",route="grpcs-route",code="200"} 1', nil, true) 195 | end) 196 | end) 197 | 198 | pending("increments the count for proxied TCP streams", function() 199 | local conn = assert(ngx.socket.connect("127.0.0.1", TCP_PROXY_PORT)) 200 | 201 | assert(conn:send("hi there!\n")) 202 | local gotback = assert(conn:receive("*a")) 203 | assert.equal("hi there!\n", gotback) 204 | 205 | conn:close() 206 | 207 | helpers.wait_until(function() 208 | local res = assert(admin_client:send { 209 | method = "GET", 210 | path = "/metrics", 211 | }) 212 | local body = assert.res_status(200, res) 213 | assert.matches('kong_nginx_metric_errors_total 0', body, nil, true) 214 | 215 | return body:find('kong_stream_status{service="tcp-service",route="tcp-route",code="200"} 1', nil, true) 216 | end) 217 | end) 218 | 219 | it("does not log error if no service was matched", function() 220 | -- cleanup logs 221 | 
os.execute(":> " .. helpers.test_conf.nginx_err_logs) 222 | 223 | local res = assert(proxy_client:send { 224 | method = "POST", 225 | path = "/no-route-match-in-kong", 226 | }) 227 | assert.res_status(404, res) 228 | 229 | -- make sure no errors 230 | assert.logfile().has.no.line("[error]", true, 10) 231 | end) 232 | 233 | it("does not log error during a scrape", function() 234 | -- cleanup logs 235 | os.execute(":> " .. helpers.test_conf.nginx_err_logs) 236 | 237 | local res = assert(admin_client:send { 238 | method = "GET", 239 | path = "/metrics", 240 | }) 241 | local body = assert.res_status(200, res) 242 | 243 | -- make sure no errors 244 | assert.logfile().has.no.line("[error]", true, 10) 245 | 246 | assert.matches('kong_nginx_metric_errors_total 0', body, nil, true) 247 | end) 248 | 249 | it("scrape response has metrics and comments only", function() 250 | local res = assert(admin_client:send { 251 | method = "GET", 252 | path = "/metrics", 253 | }) 254 | local body = assert.res_status(200, res) 255 | 256 | for line in body:gmatch("[^\r\n]+") do 257 | assert.matches("^[#|kong]", line) 258 | end 259 | 260 | assert.matches('kong_nginx_metric_errors_total 0', body, nil, true) 261 | end) 262 | 263 | it("exposes db reachability metrics", function() 264 | local res = assert(admin_client:send { 265 | method = "GET", 266 | path = "/metrics", 267 | }) 268 | local body = assert.res_status(200, res) 269 | assert.matches('kong_datastore_reachable 1', body, nil, true) 270 | 271 | assert.matches('kong_nginx_metric_errors_total 0', body, nil, true) 272 | end) 273 | 274 | it("exposes Lua worker VM stats", function() 275 | local res = assert(admin_client:send { 276 | method = "GET", 277 | path = "/metrics", 278 | }) 279 | local body = assert.res_status(200, res) 280 | assert.matches('kong_memory_workers_lua_vms_bytes{pid="%d+",kong_subsystem="http"} %d+', body) 281 | if stream_available then 282 | 
assert.matches('kong_memory_workers_lua_vms_bytes{pid="%d+",kong_subsystem="stream"} %d+', body) 283 | end 284 | 285 | assert.matches('kong_nginx_metric_errors_total 0', body, nil, true) 286 | end) 287 | 288 | it("exposes lua_shared_dict metrics", function() 289 | local res = assert(admin_client:send { 290 | method = "GET", 291 | path = "/metrics", 292 | }) 293 | local body = assert.res_status(200, res) 294 | assert.matches('kong_memory_lua_shared_dict_total_bytes' .. 295 | '{shared_dict="prometheus_metrics",kong_subsystem="http"} %d+', body) 296 | -- TODO: uncomment below once the ngx.shared iterrator in stream is fixed 297 | -- if stream_available then 298 | -- assert.matches('kong_memory_lua_shared_dict_total_bytes' .. 299 | -- '{shared_dict="stream_prometheus_metrics",kong_subsystem="stream"} %d+', body) 300 | -- end 301 | 302 | assert.matches('kong_nginx_metric_errors_total 0', body, nil, true) 303 | end) 304 | 305 | it("does not expose per consumer metrics by default", function() 306 | local res = assert(admin_client:send { 307 | method = "GET", 308 | path = "/metrics", 309 | }) 310 | local body = assert.res_status(200, res) 311 | assert.not_match('http_consumer_status', body, nil, true) 312 | 313 | assert.matches('kong_nginx_metric_errors_total 0', body, nil, true) 314 | end) 315 | end) 316 | 317 | local test_f 318 | if stream_available then 319 | test_f = describe 320 | else 321 | test_f = pending 322 | end 323 | test_f("Plugin: prometheus (access) no stream listeners", function() 324 | local admin_client 325 | 326 | setup(function() 327 | local bp = helpers.get_db_utils() 328 | 329 | bp.plugins:insert { 330 | protocols = { "http", "https", "grpc", "grpcs", "tcp", "tls" }, 331 | name = "prometheus" 332 | } 333 | 334 | assert(helpers.start_kong { 335 | plugins = "bundled, prometheus", 336 | stream_listen = "off", 337 | }) 338 | admin_client = helpers.admin_client() 339 | end) 340 | 341 | teardown(function() 342 | if admin_client then 343 | 
admin_client:close() 344 | end 345 | 346 | helpers.stop_kong() 347 | end) 348 | 349 | it("exposes Lua worker VM stats only for http subsystem", function() 350 | local res = assert(admin_client:send { 351 | method = "GET", 352 | path = "/metrics", 353 | }) 354 | local body = assert.res_status(200, res) 355 | assert.matches('kong_memory_workers_lua_vms_bytes{pid="%d+",kong_subsystem="http"}', body) 356 | assert.not_matches('kong_memory_workers_lua_vms_bytes{pid="%d+",kong_subsystem="stream"}', body) 357 | 358 | assert.matches('kong_nginx_metric_errors_total 0', body, nil, true) 359 | end) 360 | 361 | it("exposes lua_shared_dict metrics only for http subsystem", function() 362 | local res = assert(admin_client:send { 363 | method = "GET", 364 | path = "/metrics", 365 | }) 366 | local body = assert.res_status(200, res) 367 | assert.matches('kong_memory_lua_shared_dict_total_bytes' .. 368 | '{shared_dict="prometheus_metrics",kong_subsystem="http"} %d+', body) 369 | 370 | assert.not_matches('kong_memory_lua_shared_dict_bytes' .. 
371 | '{shared_dict="stream_prometheus_metric",kong_subsystem="stream"} %d+', body) 372 | assert.matches('kong_nginx_metric_errors_total 0', body, nil, true) 373 | end) 374 | end) 375 | 376 | describe("Plugin: prometheus (access) per-consumer metrics", function() 377 | local proxy_client 378 | local admin_client 379 | 380 | setup(function() 381 | local bp = helpers.get_db_utils() 382 | 383 | local service = bp.services:insert { 384 | name = "mock-service", 385 | host = helpers.mock_upstream_host, 386 | port = helpers.mock_upstream_port, 387 | protocol = helpers.mock_upstream_protocol, 388 | } 389 | 390 | local route = bp.routes:insert { 391 | protocols = { "http" }, 392 | name = "http-route", 393 | paths = { "/" }, 394 | methods = { "GET" }, 395 | service = service, 396 | } 397 | 398 | bp.plugins:insert { 399 | protocols = { "http", "https", "grpc", "grpcs", "tcp", "tls" }, 400 | name = "prometheus", 401 | config = { 402 | per_consumer = true, 403 | } 404 | } 405 | 406 | bp.plugins:insert { 407 | name = "key-auth", 408 | route = route, 409 | } 410 | 411 | local consumer = bp.consumers:insert { 412 | username = "alice", 413 | } 414 | 415 | bp.keyauth_credentials:insert { 416 | key = "alice-key", 417 | consumer = consumer, 418 | } 419 | 420 | assert(helpers.start_kong { 421 | nginx_conf = nginx_conf, 422 | plugins = "bundled, prometheus", 423 | }) 424 | proxy_client = helpers.proxy_client() 425 | admin_client = helpers.admin_client() 426 | end) 427 | 428 | teardown(function() 429 | if proxy_client then 430 | proxy_client:close() 431 | end 432 | if admin_client then 433 | admin_client:close() 434 | end 435 | 436 | helpers.stop_kong() 437 | end) 438 | 439 | it("increments the count for proxied requests", function() 440 | local res = assert(proxy_client:send { 441 | method = "GET", 442 | path = "/status/200", 443 | headers = { 444 | host = helpers.mock_upstream_host, 445 | apikey = 'alice-key', 446 | } 447 | }) 448 | assert.res_status(200, res) 449 | 450 | 
helpers.wait_until(function() 451 | local res = assert(admin_client:send { 452 | method = "GET", 453 | path = "/metrics", 454 | }) 455 | local body = assert.res_status(200, res) 456 | assert.matches('kong_nginx_metric_errors_total 0', body, nil, true) 457 | 458 | return body:find('kong_http_consumer_status{service="mock-service",route="http-route",code="200",consumer="alice"} 1', nil, true) 459 | end) 460 | 461 | res = assert(proxy_client:send { 462 | method = "GET", 463 | path = "/status/400", 464 | headers = { 465 | host = helpers.mock_upstream_host, 466 | apikey = 'alice-key', 467 | } 468 | }) 469 | assert.res_status(400, res) 470 | 471 | helpers.wait_until(function() 472 | local res = assert(admin_client:send { 473 | method = "GET", 474 | path = "/metrics", 475 | }) 476 | local body = assert.res_status(200, res) 477 | assert.matches('kong_nginx_metric_errors_total 0', body, nil, true) 478 | 479 | return body:find('kong_http_consumer_status{service="mock-service",route="http-route",code="400",consumer="alice"} 1', nil, true) 480 | end) 481 | end) 482 | 483 | it("behave correctly if consumer is not found", function() 484 | local res = assert(proxy_client:send { 485 | method = "GET", 486 | path = "/status/200", 487 | headers = { 488 | host = helpers.mock_upstream_host, 489 | } 490 | }) 491 | assert.res_status(401, res) 492 | 493 | local body 494 | helpers.wait_until(function() 495 | local res = assert(admin_client:send { 496 | method = "GET", 497 | path = "/metrics", 498 | }) 499 | body = assert.res_status(200, res) 500 | return body:find('kong_http_status{service="mock-service",route="http-route",code="200"} 1', nil, true) 501 | end) 502 | 503 | assert.not_match('kong_http_consumer_status{service="mock-service",route="http-route",code="401",consumer="alice"} 1', body, nil, true) 504 | assert.matches('kong_http_status{service="mock-service",route="http-route",code="401"} 1', body, nil, true) 505 | 506 | assert.matches('kong_nginx_metric_errors_total 0', body, nil, 
true) 507 | end) 508 | end) 509 | -------------------------------------------------------------------------------- /spec/03-custom-serve_spec.lua: -------------------------------------------------------------------------------- 1 | local helpers = require "spec.helpers" 2 | 3 | describe("Plugin: prometheus (custom server)",function() 4 | local proxy_client 5 | 6 | describe("with custom nginx server block", function() 7 | setup(function() 8 | local bp = helpers.get_db_utils() 9 | 10 | local service = bp.services:insert { 11 | name = "mock-service", 12 | host = helpers.mock_upstream_host, 13 | port = helpers.mock_upstream_port, 14 | protocol = helpers.mock_upstream_protocol, 15 | } 16 | 17 | bp.routes:insert { 18 | protocols = { "http" }, 19 | name = "http-route", 20 | paths = { "/" }, 21 | service = service, 22 | } 23 | 24 | bp.plugins:insert { 25 | name = "prometheus" 26 | } 27 | 28 | assert(helpers.start_kong({ 29 | nginx_http_include = "../spec/fixtures/prometheus/metrics.conf", 30 | nginx_conf = "spec/fixtures/custom_nginx.template", 31 | plugins = "bundled, prometheus", 32 | })) 33 | 34 | proxy_client = helpers.proxy_client() 35 | end) 36 | teardown(function() 37 | if proxy_client then 38 | proxy_client:close() 39 | end 40 | 41 | helpers.stop_kong() 42 | end) 43 | 44 | it("metrics can be read from a different port", function() 45 | local res = assert(proxy_client:send { 46 | method = "GET", 47 | path = "/status/200", 48 | headers = { 49 | host = helpers.mock_upstream_host, 50 | } 51 | }) 52 | assert.res_status(200, res) 53 | 54 | local client = helpers.http_client("127.0.0.1", 9542) 55 | res = assert(client:send { 56 | method = "GET", 57 | path = "/metrics", 58 | }) 59 | local body = assert.res_status(200, res) 60 | assert.matches('kong_http_status{service="mock-service",route="http-route",code="200"} 1', body, nil, true) 61 | end) 62 | it("custom port returns 404 for anything other than /metrics", function() 63 | local client = helpers.http_client("127.0.0.1", 
9542) 64 | local res = assert(client:send { 65 | method = "GET", 66 | path = "/does-not-exists", 67 | }) 68 | local body = assert.res_status(404, res) 69 | assert.matches('{"message":"Not found"}', body, nil, true) 70 | end) 71 | end) 72 | end) 73 | -------------------------------------------------------------------------------- /spec/04-status_api_spec.lua: -------------------------------------------------------------------------------- 1 | local helpers = require "spec.helpers" 2 | 3 | local TCP_PROXY_PORT = 9007 4 | 5 | -- Note: remove the below hack when https://github.com/Kong/kong/pull/6952 is merged 6 | local stream_available, _ = pcall(require, "kong.tools.stream_api") 7 | 8 | local spec_path = debug.getinfo(1).source:match("@?(.*/)") 9 | 10 | local nginx_conf 11 | if stream_available then 12 | nginx_conf = spec_path .. "/fixtures/prometheus/custom_nginx.template" 13 | else 14 | nginx_conf = "./spec/fixtures/custom_nginx.template" 15 | end 16 | -- Note ends 17 | 18 | describe("Plugin: prometheus (access via status API)", function() 19 | local proxy_client 20 | local status_client 21 | local proxy_client_grpc 22 | local proxy_client_grpcs 23 | 24 | setup(function() 25 | local bp = helpers.get_db_utils() 26 | 27 | local upstream_hc_off = bp.upstreams:insert({ 28 | name = "mock-upstream-healthchecksoff", 29 | }) 30 | bp.targets:insert { 31 | target = helpers.mock_upstream_host .. ':' .. 
helpers.mock_upstream_port, 32 | weight = 1000, 33 | upstream = { id = upstream_hc_off.id }, 34 | } 35 | 36 | local upstream = bp.upstreams:insert({ 37 | name = "mock-upstream", 38 | }) 39 | 40 | upstream.healthchecks = { 41 | active = { 42 | concurrency = 10, 43 | healthy = { 44 | http_statuses = { 200, 302 }, 45 | interval = 0.1, 46 | successes = 2 47 | }, 48 | http_path = "/status/200", 49 | https_verify_certificate = true, 50 | timeout = 1, 51 | type = "http", 52 | unhealthy = { 53 | http_failures = 1, 54 | http_statuses = { 429, 404, 500, 501, 502, 503, 504, 505 }, 55 | interval = 0.1, 56 | tcp_failures = 1, 57 | timeouts = 1 58 | } 59 | }, 60 | passive = { 61 | healthy = { 62 | http_statuses = { 200, 201, 202, 203, 204, 205, 206, 207, 208, 226, 300, 301, 302, 303, 304, 305, 306, 307, 308 }, 63 | successes = 1 64 | }, 65 | type = "http", 66 | unhealthy = { 67 | http_failures = 1, 68 | http_statuses = { 429, 500, 503 }, 69 | tcp_failures = 1, 70 | timeouts = 1 71 | } 72 | } 73 | } 74 | 75 | upstream = bp.upstreams:update({ id = upstream.id }, { healthchecks = upstream.healthchecks }) 76 | 77 | bp.targets:insert { 78 | target = helpers.mock_upstream_host .. ':' .. helpers.mock_upstream_port, 79 | weight = 1000, 80 | upstream = { id = upstream.id }, 81 | } 82 | 83 | bp.targets:insert { 84 | target = helpers.mock_upstream_host .. 
':8001', 85 | weight = 1, 86 | upstream = { id = upstream.id }, 87 | } 88 | 89 | bp.targets:insert { 90 | target = 'some-random-dns:80', 91 | weight = 1, 92 | upstream = { id = upstream.id }, 93 | } 94 | 95 | local service = bp.services:insert { 96 | name = "mock-service", 97 | host = upstream.name, 98 | port = helpers.mock_upstream_port, 99 | protocol = helpers.mock_upstream_protocol, 100 | } 101 | 102 | bp.routes:insert { 103 | protocols = { "http" }, 104 | name = "http-route", 105 | paths = { "/" }, 106 | methods = { "GET" }, 107 | service = service, 108 | } 109 | 110 | local grpc_service = bp.services:insert { 111 | name = "mock-grpc-service", 112 | url = "grpc://grpcbin:9000", 113 | } 114 | 115 | bp.routes:insert { 116 | protocols = { "grpc" }, 117 | name = "grpc-route", 118 | hosts = { "grpc" }, 119 | service = grpc_service, 120 | } 121 | 122 | local grpcs_service = bp.services:insert { 123 | name = "mock-grpcs-service", 124 | url = "grpcs://grpcbin:9001", 125 | } 126 | 127 | bp.routes:insert { 128 | protocols = { "grpcs" }, 129 | name = "grpcs-route", 130 | hosts = { "grpcs" }, 131 | service = grpcs_service, 132 | } 133 | 134 | bp.plugins:insert { 135 | name = "prometheus" 136 | } 137 | 138 | assert(helpers.start_kong { 139 | nginx_conf = nginx_conf, 140 | plugins = "bundled, prometheus", 141 | status_listen = "0.0.0.0:9500", 142 | stream_listen = "127.0.0.1:" .. 
TCP_PROXY_PORT, 143 | }) 144 | proxy_client = helpers.proxy_client() 145 | status_client = helpers.http_client("127.0.0.1", 9500, 20000) 146 | proxy_client_grpc = helpers.proxy_client_grpc() 147 | proxy_client_grpcs = helpers.proxy_client_grpcs() 148 | 149 | require("socket").sleep(1) -- wait 1 second until healthchecks run 150 | end) 151 | 152 | teardown(function() 153 | if proxy_client then 154 | proxy_client:close() 155 | end 156 | if status_client then 157 | status_client:close() 158 | end 159 | 160 | helpers.stop_kong() 161 | end) 162 | 163 | it("increments the count for proxied requests", function() 164 | local res = assert(proxy_client:send { 165 | method = "GET", 166 | path = "/status/200", 167 | headers = { 168 | host = helpers.mock_upstream_host, 169 | } 170 | }) 171 | assert.res_status(200, res) 172 | 173 | helpers.wait_until(function() 174 | local res = assert(status_client:send { 175 | method = "GET", 176 | path = "/metrics", 177 | }) 178 | local body = assert.res_status(200, res) 179 | return body:find('kong_http_status{service="mock-service",route="http-route",code="200"} 1', nil, true) 180 | end) 181 | 182 | res = assert(proxy_client:send { 183 | method = "GET", 184 | path = "/status/400", 185 | headers = { 186 | host = helpers.mock_upstream_host, 187 | } 188 | }) 189 | assert.res_status(400, res) 190 | 191 | helpers.wait_until(function() 192 | local res = assert(status_client:send { 193 | method = "GET", 194 | path = "/metrics", 195 | }) 196 | local body = assert.res_status(200, res) 197 | return body:find('kong_http_status{service="mock-service",route="http-route",code="400"} 1', nil, true) 198 | end) 199 | end) 200 | 201 | it("increments the count for proxied grpc requests", function() 202 | local ok, resp = proxy_client_grpc({ 203 | service = "hello.HelloService.SayHello", 204 | body = { 205 | greeting = "world!" 
206 | }, 207 | opts = { 208 | ["-authority"] = "grpc", 209 | } 210 | }) 211 | assert(ok, resp) 212 | assert.truthy(resp) 213 | 214 | helpers.wait_until(function() 215 | local res = assert(status_client:send { 216 | method = "GET", 217 | path = "/metrics", 218 | }) 219 | local body = assert.res_status(200, res) 220 | return body:find('kong_http_status{service="mock-grpc-service",route="grpc-route",code="200"} 1', nil, true) 221 | end) 222 | 223 | ok, resp = proxy_client_grpcs({ 224 | service = "hello.HelloService.SayHello", 225 | body = { 226 | greeting = "world!" 227 | }, 228 | opts = { 229 | ["-authority"] = "grpcs", 230 | } 231 | }) 232 | assert(ok, resp) 233 | assert.truthy(resp) 234 | 235 | helpers.wait_until(function() 236 | local res = assert(status_client:send { 237 | method = "GET", 238 | path = "/metrics", 239 | }) 240 | local body = assert.res_status(200, res) 241 | return body:find('kong_http_status{service="mock-grpcs-service",route="grpcs-route",code="200"} 1', nil, true) 242 | end) 243 | end) 244 | 245 | it("does not log error if no service was matched", function() 246 | -- cleanup logs 247 | os.execute(":> " .. helpers.test_conf.nginx_err_logs) 248 | 249 | local res = assert(proxy_client:send { 250 | method = "POST", 251 | path = "/no-route-match-in-kong", 252 | }) 253 | assert.res_status(404, res) 254 | 255 | -- make sure no errors 256 | assert.logfile().has.no.line("[error]", true, 10) 257 | end) 258 | 259 | it("does not log error during a scrape", function() 260 | -- cleanup logs 261 | os.execute(":> " .. 
helpers.test_conf.nginx_err_logs) 262 | 263 | local res = assert(status_client:send { 264 | method = "GET", 265 | path = "/metrics", 266 | }) 267 | assert.res_status(200, res) 268 | 269 | -- make sure no errors 270 | assert.logfile().has.no.line("[error]", true, 10) 271 | end) 272 | 273 | it("scrape response has metrics and comments only", function() 274 | local res = assert(status_client:send { 275 | method = "GET", 276 | path = "/metrics", 277 | }) 278 | local body = assert.res_status(200, res) 279 | 280 | for line in body:gmatch("[^\r\n]+") do 281 | assert.matches("^[#|kong]", line) 282 | end 283 | 284 | end) 285 | 286 | it("exposes db reachability metrics", function() 287 | local res = assert(status_client:send { 288 | method = "GET", 289 | path = "/metrics", 290 | }) 291 | local body = assert.res_status(200, res) 292 | assert.matches('kong_datastore_reachable 1', body, nil, true) 293 | end) 294 | 295 | it("exposes upstream's target health metrics - healthchecks-off", function() 296 | local body 297 | helpers.wait_until(function() 298 | local res = assert(status_client:send { 299 | method = "GET", 300 | path = "/metrics", 301 | }) 302 | 303 | body = assert.res_status(200, res) 304 | return body:find('kong_upstream_target_health{upstream="mock-upstream-healthchecksoff",target="' .. helpers.mock_upstream_host .. ':' .. helpers.mock_upstream_port .. '",address="' .. helpers.mock_upstream_host .. ':' .. helpers.mock_upstream_port .. '",state="healthchecks_off"} 1', nil, true) 305 | end) 306 | assert.matches('kong_upstream_target_health{upstream="mock-upstream-healthchecksoff",target="' .. helpers.mock_upstream_host .. ':' .. helpers.mock_upstream_port .. '",address="' .. helpers.mock_upstream_host .. ':' .. helpers.mock_upstream_port .. '",state="healthy"} 0', body, nil, true) 307 | assert.matches('kong_upstream_target_health{upstream="mock-upstream-healthchecksoff",target="' .. helpers.mock_upstream_host .. ':' .. helpers.mock_upstream_port .. '",address="' .. 
helpers.mock_upstream_host .. ':' .. helpers.mock_upstream_port .. '",state="unhealthy"} 0', body, nil, true) 308 | assert.matches('kong_upstream_target_health{upstream="mock-upstream-healthchecksoff",target="' .. helpers.mock_upstream_host .. ':' .. helpers.mock_upstream_port .. '",address="' .. helpers.mock_upstream_host .. ':' .. helpers.mock_upstream_port .. '",state="dns_error"} 0', body, nil, true) 309 | end) 310 | 311 | it("exposes upstream's target health metrics - healthy", function() 312 | local body 313 | helpers.wait_until(function() 314 | local res = assert(status_client:send { 315 | method = "GET", 316 | path = "/metrics", 317 | }) 318 | 319 | body = assert.res_status(200, res) 320 | return body:find('kong_upstream_target_health{upstream="mock-upstream",target="' .. helpers.mock_upstream_host .. ':' .. helpers.mock_upstream_port .. '",address="' .. helpers.mock_upstream_host .. ':' .. helpers.mock_upstream_port .. '",state="healthy"} 1', nil, true) 321 | end) 322 | assert.matches('kong_upstream_target_health{upstream="mock-upstream",target="' .. helpers.mock_upstream_host .. ':' .. helpers.mock_upstream_port .. '",address="' .. helpers.mock_upstream_host .. ':' .. helpers.mock_upstream_port .. '",state="healthchecks_off"} 0', body, nil, true) 323 | assert.matches('kong_upstream_target_health{upstream="mock-upstream",target="' .. helpers.mock_upstream_host .. ':' .. helpers.mock_upstream_port .. '",address="' .. helpers.mock_upstream_host .. ':' .. helpers.mock_upstream_port .. '",state="unhealthy"} 0', body, nil, true) 324 | assert.matches('kong_upstream_target_health{upstream="mock-upstream",target="' .. helpers.mock_upstream_host .. ':' .. helpers.mock_upstream_port .. '",address="' .. helpers.mock_upstream_host .. ':' .. helpers.mock_upstream_port .. 
'",state="dns_error"} 0', body, nil, true) 325 | end) 326 | 327 | it("exposes upstream's target health metrics - unhealthy", function() 328 | local body 329 | helpers.wait_until(function() 330 | local res = assert(status_client:send { 331 | method = "GET", 332 | path = "/metrics", 333 | }) 334 | 335 | body = assert.res_status(200, res) 336 | return body:find('kong_upstream_target_health{upstream="mock-upstream",target="' .. helpers.mock_upstream_host .. ':8001",address="' .. helpers.mock_upstream_host .. ':8001",state="unhealthy"} 1', nil, true) 337 | end) 338 | assert.matches('kong_upstream_target_health{upstream="mock-upstream",target="' .. helpers.mock_upstream_host .. ':8001",address="' .. helpers.mock_upstream_host .. ':8001",state="healthy"} 0', body, nil, true) 339 | assert.matches('kong_upstream_target_health{upstream="mock-upstream",target="' .. helpers.mock_upstream_host .. ':8001",address="' .. helpers.mock_upstream_host .. ':8001",state="healthchecks_off"} 0', body, nil, true) 340 | assert.matches('kong_upstream_target_health{upstream="mock-upstream",target="' .. helpers.mock_upstream_host .. ':8001",address="' .. helpers.mock_upstream_host .. 
':8001",state="dns_error"} 0', body, nil, true) 341 | end) 342 | 343 | it("exposes upstream's target health metrics - dns_error", function() 344 | local body 345 | helpers.wait_until(function() 346 | local res = assert(status_client:send { 347 | method = "GET", 348 | path = "/metrics", 349 | }) 350 | 351 | body = assert.res_status(200, res) 352 | return body:find('kong_upstream_target_health{upstream="mock-upstream",target="some-random-dns:80",address="",state="dns_error"} 1', nil, true) 353 | end) 354 | assert.matches('kong_upstream_target_health{upstream="mock-upstream",target="some-random-dns:80",address="",state="healthy"} 0', body, nil, true) 355 | assert.matches('kong_upstream_target_health{upstream="mock-upstream",target="some-random-dns:80",address="",state="unhealthy"} 0', body, nil, true) 356 | assert.matches('kong_upstream_target_health{upstream="mock-upstream",target="some-random-dns:80",address="",state="healthchecks_off"} 0', body, nil, true) 357 | end) 358 | 359 | it("remove metrics from deleted upstreams and targets", function() 360 | local admin_client = helpers.admin_client() 361 | admin_client:send { 362 | method = "DELETE", 363 | path = "/upstreams/mock-upstream-healthchecksoff", 364 | } 365 | admin_client:send { 366 | method = "DELETE", 367 | path = "/upstreams/mock-upstream/targets/some-random-dns:80", 368 | } 369 | admin_client:close() 370 | 371 | local body 372 | helpers.wait_until(function() 373 | local res = assert(status_client:send { 374 | method = "GET", 375 | path = "/metrics", 376 | }) 377 | body = assert.res_status(200, res) 378 | return not body:find('kong_upstream_target_health{upstream="mock-upstream-healthchecksoff"', nil, true) 379 | end) 380 | assert.not_match('kong_upstream_target_health{upstream="mock-upstream",target="some-random-dns:80"', body, nil, true) 381 | end) 382 | 383 | it("exposes Lua worker VM stats", function() 384 | local res = assert(status_client:send { 385 | method = "GET", 386 | path = "/metrics", 387 | }) 
388 | local body = assert.res_status(200, res) 389 | assert.matches('kong_memory_workers_lua_vms_bytes{pid="%d+",kong_subsystem="http"}', body) 390 | if stream_available then 391 | assert.matches('kong_memory_workers_lua_vms_bytes{pid="%d+",kong_subsystem="stream"}', body) 392 | end 393 | 394 | assert.matches('kong_nginx_metric_errors_total 0', body, nil, true) 395 | end) 396 | 397 | it("exposes lua_shared_dict metrics", function() 398 | local res = assert(status_client:send { 399 | method = "GET", 400 | path = "/metrics", 401 | }) 402 | local body = assert.res_status(200, res) 403 | assert.matches('kong_memory_lua_shared_dict_total_bytes' .. 404 | '{shared_dict="prometheus_metrics",kong_subsystem="http"} %d+', body) 405 | -- TODO: uncomment below once the ngx.shared iterrator in stream is fixed 406 | -- if stream_available then 407 | -- assert.matches('kong_memory_lua_shared_dict_total_bytes' .. 408 | -- '{shared_dict="prometheus_metrics",kong_subsystem="stream"} %d+', body) 409 | -- end 410 | 411 | assert.matches('kong_nginx_metric_errors_total 0', body, nil, true) 412 | end) 413 | end) 414 | -------------------------------------------------------------------------------- /spec/05-enterprise-exporter_spec.lua: -------------------------------------------------------------------------------- 1 | local helpers = require "spec.helpers" 2 | 3 | -- Note: remove the below hack when https://github.com/Kong/kong/pull/6952 is merged 4 | local stream_available, _ = pcall(require, "kong.tools.stream_api") 5 | 6 | local spec_path = debug.getinfo(1).source:match("@?(.*/)") 7 | 8 | local nginx_conf 9 | if stream_available then 10 | nginx_conf = spec_path .. 
"/fixtures/prometheus/custom_nginx.template" 11 | else 12 | nginx_conf = "./spec/fixtures/custom_nginx.template" 13 | end 14 | -- Note ends 15 | 16 | local t = pending 17 | local pok = pcall(require, "kong.enterprise_edition.licensing") 18 | if pok then 19 | t = describe 20 | end 21 | 22 | t("Plugin: prometheus (exporter) enterprise licenses", function() 23 | local admin_client 24 | 25 | setup(function() 26 | local bp = helpers.get_db_utils() 27 | 28 | bp.plugins:insert { 29 | protocols = { "http", "https", "grpc", "grpcs", "tcp", "tls" }, 30 | name = "prometheus", 31 | } 32 | 33 | assert(helpers.start_kong { 34 | nginx_conf = nginx_conf, 35 | plugins = "bundled, prometheus", 36 | }) 37 | admin_client = helpers.admin_client() 38 | end) 39 | 40 | teardown(function() 41 | if admin_client then 42 | admin_client:close() 43 | end 44 | 45 | helpers.stop_kong() 46 | end) 47 | 48 | it("exports enterprise licenses", function() 49 | 50 | local res = assert(admin_client:send { 51 | method = "GET", 52 | path = "/metrics", 53 | }) 54 | local body = assert.res_status(200, res) 55 | 56 | assert.matches('kong_enterprise_license_signature %d+', body) 57 | assert.matches('kong_enterprise_license_expiration %d+', body) 58 | assert.matches('kong_enterprise_license_features{feature="ee_plugins"}', body, nil, true) 59 | assert.matches('kong_enterprise_license_features{feature="write_admin_api"}', body, nil, true) 60 | 61 | assert.matches('kong_enterprise_license_errors 0', body, nil, true) 62 | assert.matches('kong_nginx_metric_errors_total 0', body, nil, true) 63 | end) 64 | end) 65 | -------------------------------------------------------------------------------- /spec/fixtures/prometheus/custom_nginx.template: -------------------------------------------------------------------------------- 1 | # This is a custom nginx configuration template for Kong specs 2 | 3 | pid pids/nginx.pid; # mandatory even for custom config templates 4 | error_log ${{PROXY_ERROR_LOG}} ${{LOG_LEVEL}}; 5 | 6 
| # injected nginx_main_* directives 7 | > for _, el in ipairs(nginx_main_directives) do 8 | $(el.name) $(el.value); 9 | > end 10 | 11 | events { 12 | # injected nginx_events_* directives 13 | > for _, el in ipairs(nginx_events_directives) do 14 | $(el.name) $(el.value); 15 | > end 16 | } 17 | 18 | > if role == "control_plane" or #proxy_listeners > 0 or #admin_listeners > 0 or #status_listeners > 0 then 19 | http { 20 | charset UTF-8; 21 | server_tokens off; 22 | 23 | error_log ${{PROXY_ERROR_LOG}} ${{LOG_LEVEL}}; 24 | 25 | lua_package_path '${{LUA_PACKAGE_PATH}};;'; 26 | lua_package_cpath '${{LUA_PACKAGE_CPATH}};;'; 27 | lua_socket_pool_size ${{LUA_SOCKET_POOL_SIZE}}; 28 | lua_socket_log_errors off; 29 | lua_max_running_timers 4096; 30 | lua_max_pending_timers 16384; 31 | lua_ssl_verify_depth ${{LUA_SSL_VERIFY_DEPTH}}; 32 | > if lua_ssl_trusted_certificate_combined then 33 | lua_ssl_trusted_certificate '${{LUA_SSL_TRUSTED_CERTIFICATE_COMBINED}}'; 34 | > end 35 | 36 | lua_shared_dict kong 5m; 37 | lua_shared_dict kong_locks 8m; 38 | lua_shared_dict kong_healthchecks 5m; 39 | lua_shared_dict kong_process_events 5m; 40 | lua_shared_dict kong_cluster_events 5m; 41 | lua_shared_dict kong_rate_limiting_counters 12m; 42 | lua_shared_dict kong_core_db_cache ${{MEM_CACHE_SIZE}}; 43 | lua_shared_dict kong_core_db_cache_miss 12m; 44 | lua_shared_dict kong_db_cache ${{MEM_CACHE_SIZE}}; 45 | lua_shared_dict kong_db_cache_miss 12m; 46 | > if database == "off" then 47 | lua_shared_dict kong_core_db_cache_2 ${{MEM_CACHE_SIZE}}; 48 | lua_shared_dict kong_core_db_cache_miss_2 12m; 49 | lua_shared_dict kong_db_cache_2 ${{MEM_CACHE_SIZE}}; 50 | lua_shared_dict kong_db_cache_miss_2 12m; 51 | > end 52 | > if database == "cassandra" then 53 | lua_shared_dict kong_cassandra 5m; 54 | > end 55 | > if role == "control_plane" then 56 | lua_shared_dict kong_clustering 5m; 57 | > end 58 | lua_shared_dict kong_mock_upstream_loggers 10m; 59 | 60 | lua_shared_dict kong_vitals_counters 50m; 61 | 
lua_shared_dict kong_counters 50m; 62 | lua_shared_dict kong_vitals_lists 1m; 63 | lua_shared_dict kong_vitals 1m; 64 | 65 | underscores_in_headers on; 66 | > if ssl_ciphers then 67 | ssl_ciphers ${{SSL_CIPHERS}}; 68 | > end 69 | 70 | # injected nginx_http_* directives 71 | > for _, el in ipairs(nginx_http_directives) do 72 | $(el.name) $(el.value); 73 | > end 74 | 75 | init_by_lua_block { 76 | Kong = require 'kong' 77 | Kong.init() 78 | } 79 | 80 | init_worker_by_lua_block { 81 | Kong.init_worker() 82 | } 83 | 84 | > if (role == "traditional" or role == "data_plane") and #proxy_listeners > 0 then 85 | upstream kong_upstream { 86 | server 0.0.0.1; 87 | 88 | # injected nginx_upstream_* directives 89 | > for _, el in ipairs(nginx_upstream_directives) do 90 | $(el.name) $(el.value); 91 | > end 92 | 93 | balancer_by_lua_block { 94 | Kong.balancer() 95 | } 96 | } 97 | 98 | server { 99 | server_name kong; 100 | > for _, entry in ipairs(proxy_listeners) do 101 | listen $(entry.listener); 102 | > end 103 | 104 | error_page 400 404 408 411 412 413 414 417 494 /kong_error_handler; 105 | error_page 500 502 503 504 /kong_error_handler; 106 | 107 | access_log ${{PROXY_ACCESS_LOG}}; 108 | error_log ${{PROXY_ERROR_LOG}} ${{LOG_LEVEL}}; 109 | 110 | > if proxy_ssl_enabled then 111 | > for i = 1, #ssl_cert do 112 | ssl_certificate $(ssl_cert[i]); 113 | ssl_certificate_key $(ssl_cert_key[i]); 114 | > end 115 | ssl_session_cache shared:SSL:10m; 116 | ssl_certificate_by_lua_block { 117 | Kong.ssl_certificate() 118 | } 119 | > end 120 | 121 | # injected nginx_proxy_* directives 122 | > for _, el in ipairs(nginx_proxy_directives) do 123 | $(el.name) $(el.value); 124 | > end 125 | > for _, ip in ipairs(trusted_ips) do 126 | set_real_ip_from $(ip); 127 | > end 128 | 129 | rewrite_by_lua_block { 130 | Kong.rewrite() 131 | } 132 | 133 | access_by_lua_block { 134 | Kong.access() 135 | } 136 | 137 | header_filter_by_lua_block { 138 | Kong.header_filter() 139 | } 140 | 141 | 
body_filter_by_lua_block { 142 | Kong.body_filter() 143 | } 144 | 145 | log_by_lua_block { 146 | Kong.log() 147 | } 148 | 149 | location / { 150 | default_type ''; 151 | 152 | set $ctx_ref ''; 153 | set $upstream_te ''; 154 | set $upstream_host ''; 155 | set $upstream_upgrade ''; 156 | set $upstream_connection ''; 157 | set $upstream_scheme ''; 158 | set $upstream_uri ''; 159 | set $upstream_x_forwarded_for ''; 160 | set $upstream_x_forwarded_proto ''; 161 | set $upstream_x_forwarded_host ''; 162 | set $upstream_x_forwarded_port ''; 163 | set $upstream_x_forwarded_path ''; 164 | set $upstream_x_forwarded_prefix ''; 165 | set $kong_proxy_mode 'http'; 166 | 167 | proxy_http_version 1.1; 168 | proxy_buffering on; 169 | proxy_request_buffering on; 170 | 171 | proxy_set_header TE $upstream_te; 172 | proxy_set_header Host $upstream_host; 173 | proxy_set_header Upgrade $upstream_upgrade; 174 | proxy_set_header Connection $upstream_connection; 175 | proxy_set_header X-Forwarded-For $upstream_x_forwarded_for; 176 | proxy_set_header X-Forwarded-Proto $upstream_x_forwarded_proto; 177 | proxy_set_header X-Forwarded-Host $upstream_x_forwarded_host; 178 | proxy_set_header X-Forwarded-Port $upstream_x_forwarded_port; 179 | proxy_set_header X-Forwarded-Path $upstream_x_forwarded_path; 180 | proxy_set_header X-Forwarded-Prefix $upstream_x_forwarded_prefix; 181 | proxy_set_header X-Real-IP $remote_addr; 182 | proxy_pass_header Server; 183 | proxy_pass_header Date; 184 | proxy_ssl_name $upstream_host; 185 | proxy_ssl_server_name on; 186 | > if client_ssl then 187 | proxy_ssl_certificate ${{CLIENT_SSL_CERT}}; 188 | proxy_ssl_certificate_key ${{CLIENT_SSL_CERT_KEY}}; 189 | > end 190 | proxy_pass $upstream_scheme://kong_upstream$upstream_uri; 191 | } 192 | 193 | location @unbuffered { 194 | internal; 195 | default_type ''; 196 | set $kong_proxy_mode 'unbuffered'; 197 | 198 | proxy_http_version 1.1; 199 | proxy_buffering off; 200 | proxy_request_buffering off; 201 | 202 | 
proxy_set_header TE $upstream_te; 203 | proxy_set_header Host $upstream_host; 204 | proxy_set_header Upgrade $upstream_upgrade; 205 | proxy_set_header Connection $upstream_connection; 206 | proxy_set_header X-Forwarded-For $upstream_x_forwarded_for; 207 | proxy_set_header X-Forwarded-Proto $upstream_x_forwarded_proto; 208 | proxy_set_header X-Forwarded-Host $upstream_x_forwarded_host; 209 | proxy_set_header X-Forwarded-Port $upstream_x_forwarded_port; 210 | proxy_set_header X-Forwarded-Path $upstream_x_forwarded_path; 211 | proxy_set_header X-Forwarded-Prefix $upstream_x_forwarded_prefix; 212 | proxy_set_header X-Real-IP $remote_addr; 213 | proxy_pass_header Server; 214 | proxy_pass_header Date; 215 | proxy_ssl_name $upstream_host; 216 | proxy_ssl_server_name on; 217 | > if client_ssl then 218 | proxy_ssl_certificate ${{CLIENT_SSL_CERT}}; 219 | proxy_ssl_certificate_key ${{CLIENT_SSL_CERT_KEY}}; 220 | > end 221 | proxy_pass $upstream_scheme://kong_upstream$upstream_uri; 222 | } 223 | 224 | location @unbuffered_request { 225 | internal; 226 | default_type ''; 227 | set $kong_proxy_mode 'unbuffered'; 228 | 229 | proxy_http_version 1.1; 230 | proxy_buffering on; 231 | proxy_request_buffering off; 232 | 233 | proxy_set_header TE $upstream_te; 234 | proxy_set_header Host $upstream_host; 235 | proxy_set_header Upgrade $upstream_upgrade; 236 | proxy_set_header Connection $upstream_connection; 237 | proxy_set_header X-Forwarded-For $upstream_x_forwarded_for; 238 | proxy_set_header X-Forwarded-Proto $upstream_x_forwarded_proto; 239 | proxy_set_header X-Forwarded-Host $upstream_x_forwarded_host; 240 | proxy_set_header X-Forwarded-Port $upstream_x_forwarded_port; 241 | proxy_set_header X-Forwarded-Prefix $upstream_x_forwarded_prefix; 242 | proxy_set_header X-Real-IP $remote_addr; 243 | proxy_pass_header Server; 244 | proxy_pass_header Date; 245 | proxy_ssl_name $upstream_host; 246 | proxy_ssl_server_name on; 247 | > if client_ssl then 248 | proxy_ssl_certificate 
${{CLIENT_SSL_CERT}}; 249 | proxy_ssl_certificate_key ${{CLIENT_SSL_CERT_KEY}}; 250 | > end 251 | proxy_pass $upstream_scheme://kong_upstream$upstream_uri; 252 | } 253 | 254 | location @unbuffered_response { 255 | internal; 256 | default_type ''; 257 | set $kong_proxy_mode 'unbuffered'; 258 | 259 | proxy_http_version 1.1; 260 | proxy_buffering off; 261 | proxy_request_buffering on; 262 | 263 | proxy_set_header TE $upstream_te; 264 | proxy_set_header Host $upstream_host; 265 | proxy_set_header Upgrade $upstream_upgrade; 266 | proxy_set_header Connection $upstream_connection; 267 | proxy_set_header X-Forwarded-For $upstream_x_forwarded_for; 268 | proxy_set_header X-Forwarded-Proto $upstream_x_forwarded_proto; 269 | proxy_set_header X-Forwarded-Host $upstream_x_forwarded_host; 270 | proxy_set_header X-Forwarded-Port $upstream_x_forwarded_port; 271 | proxy_set_header X-Forwarded-Prefix $upstream_x_forwarded_prefix; 272 | proxy_set_header X-Real-IP $remote_addr; 273 | proxy_pass_header Server; 274 | proxy_pass_header Date; 275 | proxy_ssl_name $upstream_host; 276 | proxy_ssl_server_name on; 277 | > if client_ssl then 278 | proxy_ssl_certificate ${{CLIENT_SSL_CERT}}; 279 | proxy_ssl_certificate_key ${{CLIENT_SSL_CERT_KEY}}; 280 | > end 281 | proxy_pass $upstream_scheme://kong_upstream$upstream_uri; 282 | } 283 | 284 | location @grpc { 285 | internal; 286 | default_type ''; 287 | set $kong_proxy_mode 'grpc'; 288 | 289 | grpc_set_header TE $upstream_te; 290 | grpc_set_header X-Forwarded-For $upstream_x_forwarded_for; 291 | grpc_set_header X-Forwarded-Proto $upstream_x_forwarded_proto; 292 | grpc_set_header X-Forwarded-Host $upstream_x_forwarded_host; 293 | grpc_set_header X-Forwarded-Port $upstream_x_forwarded_port; 294 | grpc_set_header X-Forwarded-Path $upstream_x_forwarded_path; 295 | grpc_set_header X-Forwarded-Prefix $upstream_x_forwarded_prefix; 296 | grpc_set_header X-Real-IP $remote_addr; 297 | grpc_pass_header Server; 298 | grpc_pass_header Date; 299 | 
grpc_ssl_name $upstream_host; 300 | grpc_ssl_server_name on; 301 | > if client_ssl then 302 | grpc_ssl_certificate ${{CLIENT_SSL_CERT}}; 303 | grpc_ssl_certificate_key ${{CLIENT_SSL_CERT_KEY}}; 304 | > end 305 | grpc_pass $upstream_scheme://kong_upstream; 306 | } 307 | 308 | location = /kong_buffered_http { 309 | internal; 310 | default_type ''; 311 | set $kong_proxy_mode 'http'; 312 | 313 | rewrite_by_lua_block {;} 314 | access_by_lua_block {;} 315 | header_filter_by_lua_block {;} 316 | body_filter_by_lua_block {;} 317 | log_by_lua_block {;} 318 | 319 | proxy_http_version 1.1; 320 | proxy_set_header TE $upstream_te; 321 | proxy_set_header Host $upstream_host; 322 | proxy_set_header Upgrade $upstream_upgrade; 323 | proxy_set_header Connection $upstream_connection; 324 | proxy_set_header X-Forwarded-For $upstream_x_forwarded_for; 325 | proxy_set_header X-Forwarded-Proto $upstream_x_forwarded_proto; 326 | proxy_set_header X-Forwarded-Host $upstream_x_forwarded_host; 327 | proxy_set_header X-Forwarded-Port $upstream_x_forwarded_port; 328 | proxy_set_header X-Forwarded-Path $upstream_x_forwarded_path; 329 | proxy_set_header X-Forwarded-Prefix $upstream_x_forwarded_prefix; 330 | proxy_set_header X-Real-IP $remote_addr; 331 | proxy_pass_header Server; 332 | proxy_pass_header Date; 333 | proxy_ssl_name $upstream_host; 334 | proxy_ssl_server_name on; 335 | > if client_ssl then 336 | proxy_ssl_certificate ${{CLIENT_SSL_CERT}}; 337 | proxy_ssl_certificate_key ${{CLIENT_SSL_CERT_KEY}}; 338 | > end 339 | proxy_pass $upstream_scheme://kong_upstream$upstream_uri; 340 | } 341 | 342 | location = /kong_error_handler { 343 | internal; 344 | default_type ''; 345 | 346 | uninitialized_variable_warn off; 347 | 348 | rewrite_by_lua_block {;} 349 | access_by_lua_block {;} 350 | 351 | content_by_lua_block { 352 | Kong.handle_error() 353 | } 354 | } 355 | } 356 | > end -- (role == "traditional" or role == "data_plane") and #proxy_listeners > 0 357 | 358 | > if (role == "control_plane" or 
role == "traditional") and #admin_listeners > 0 then 359 | server { 360 | server_name kong_admin; 361 | > for _, entry in ipairs(admin_listeners) do 362 | listen $(entry.listener); 363 | > end 364 | 365 | access_log ${{ADMIN_ACCESS_LOG}}; 366 | error_log ${{ADMIN_ERROR_LOG}} ${{LOG_LEVEL}}; 367 | 368 | > if admin_ssl_enabled then 369 | > for i = 1, #admin_ssl_cert do 370 | ssl_certificate $(admin_ssl_cert[i]); 371 | ssl_certificate_key $(admin_ssl_cert_key[i]); 372 | > end 373 | ssl_session_cache shared:AdminSSL:10m; 374 | > end 375 | 376 | # injected nginx_admin_* directives 377 | > for _, el in ipairs(nginx_admin_directives) do 378 | $(el.name) $(el.value); 379 | > end 380 | 381 | location / { 382 | default_type application/json; 383 | content_by_lua_block { 384 | Kong.admin_content() 385 | } 386 | header_filter_by_lua_block { 387 | Kong.admin_header_filter() 388 | } 389 | } 390 | 391 | location /nginx_status { 392 | internal; 393 | access_log off; 394 | stub_status; 395 | } 396 | 397 | location /robots.txt { 398 | return 200 'User-agent: *\nDisallow: /'; 399 | } 400 | } 401 | > end -- (role == "control_plane" or role == "traditional") and #admin_listeners > 0 402 | 403 | > if #status_listeners > 0 then 404 | server { 405 | server_name kong_status; 406 | > for _, entry in ipairs(status_listeners) do 407 | listen $(entry.listener); 408 | > end 409 | 410 | access_log ${{STATUS_ACCESS_LOG}}; 411 | error_log ${{STATUS_ERROR_LOG}} ${{LOG_LEVEL}}; 412 | 413 | > if status_ssl_enabled then 414 | > for i = 1, #status_ssl_cert do 415 | ssl_certificate $(status_ssl_cert[i]); 416 | ssl_certificate_key $(status_ssl_cert_key[i]); 417 | > end 418 | ssl_session_cache shared:StatusSSL:1m; 419 | > end 420 | 421 | # injected nginx_status_* directives 422 | > for _, el in ipairs(nginx_status_directives) do 423 | $(el.name) $(el.value); 424 | > end 425 | 426 | location / { 427 | default_type application/json; 428 | content_by_lua_block { 429 | Kong.status_content() 430 | } 431 | 
header_filter_by_lua_block { 432 | Kong.status_header_filter() 433 | } 434 | } 435 | 436 | location /nginx_status { 437 | internal; 438 | access_log off; 439 | stub_status; 440 | } 441 | 442 | location /robots.txt { 443 | return 200 'User-agent: *\nDisallow: /'; 444 | } 445 | } 446 | > end 447 | 448 | > if role == "control_plane" then 449 | server { 450 | server_name kong_cluster_listener; 451 | > for _, entry in ipairs(cluster_listeners) do 452 | listen $(entry.listener) ssl; 453 | > end 454 | 455 | access_log ${{ADMIN_ACCESS_LOG}}; 456 | 457 | ssl_verify_client optional_no_ca; 458 | ssl_certificate ${{CLUSTER_CERT}}; 459 | ssl_certificate_key ${{CLUSTER_CERT_KEY}}; 460 | ssl_session_cache shared:ClusterSSL:10m; 461 | 462 | location = /v1/outlet { 463 | content_by_lua_block { 464 | Kong.serve_cluster_listener() 465 | } 466 | } 467 | } 468 | > end -- role == "control_plane" 469 | 470 | > if role ~= "data_plane" then 471 | server { 472 | server_name mock_upstream; 473 | 474 | listen 15555; 475 | listen 15556 ssl; 476 | 477 | > for i = 1, #ssl_cert do 478 | ssl_certificate $(ssl_cert[i]); 479 | ssl_certificate_key $(ssl_cert_key[i]); 480 | > end 481 | ssl_protocols TLSv1.1 TLSv1.2 TLSv1.3; 482 | 483 | set_real_ip_from 127.0.0.1; 484 | 485 | location / { 486 | content_by_lua_block { 487 | local mu = require "spec.fixtures.mock_upstream" 488 | ngx.status = 404 489 | return mu.send_default_json_response() 490 | } 491 | } 492 | 493 | location = / { 494 | content_by_lua_block { 495 | local mu = require "spec.fixtures.mock_upstream" 496 | return mu.send_default_json_response({ 497 | valid_routes = { 498 | ["/ws"] = "Websocket echo server", 499 | ["/get"] = "Accepts a GET request and returns it in JSON format", 500 | ["/xml"] = "Returns a simple XML document", 501 | ["/post"] = "Accepts a POST request and returns it in JSON format", 502 | ["/response-headers?:key=:val"] = "Returns given response headers", 503 | ["/cache/:n"] = "Sets a Cache-Control header for n seconds", 
504 | ["/anything"] = "Accepts any request and returns it in JSON format", 505 | ["/request"] = "Alias to /anything", 506 | ["/delay/:duration"] = "Delay the response for <duration> seconds", 507 | ["/basic-auth/:user/:pass"] = "Performs HTTP basic authentication with the given credentials", 508 | ["/status/:code"] = "Returns a response with the specified <code>", 509 | ["/stream/:num"] = "Stream chunks of JSON data via chunked Transfer Encoding", 510 | }, 511 | }) 512 | } 513 | } 514 | 515 | location = /ws { 516 | content_by_lua_block { 517 | local mu = require "spec.fixtures.mock_upstream" 518 | return mu.serve_web_sockets() 519 | } 520 | } 521 | 522 | location /get { 523 | access_by_lua_block { 524 | local mu = require "spec.fixtures.mock_upstream" 525 | return mu.filter_access_by_method("GET") 526 | } 527 | content_by_lua_block { 528 | local mu = require "spec.fixtures.mock_upstream" 529 | return mu.send_default_json_response() 530 | } 531 | } 532 | 533 | location /xml { 534 | content_by_lua_block { 535 | local mu = require "spec.fixtures.mock_upstream" 536 | local xml = [[ 537 | <?xml version="1.0" encoding="UTF-8"?> 538 | <note> 539 | Kong, Monolith destroyer.
540 | </note> 541 | ]] 542 | return mu.send_text_response(xml, "application/xml") 543 | } 544 | } 545 | 546 | location /post { 547 | access_by_lua_block { 548 | local mu = require "spec.fixtures.mock_upstream" 549 | return mu.filter_access_by_method("POST") 550 | } 551 | content_by_lua_block { 552 | local mu = require "spec.fixtures.mock_upstream" 553 | return mu.send_default_json_response() 554 | } 555 | } 556 | 557 | location = /response-headers { 558 | access_by_lua_block { 559 | local mu = require "spec.fixtures.mock_upstream" 560 | return mu.filter_access_by_method("GET") 561 | } 562 | content_by_lua_block { 563 | local mu = require "spec.fixtures.mock_upstream" 564 | return mu.send_default_json_response({}, ngx.req.get_uri_args()) 565 | } 566 | } 567 | 568 | location = /hop-by-hop { 569 | content_by_lua_block { 570 | local header = ngx.header 571 | header["Keep-Alive"] = "timeout=5, max=1000" 572 | header["Proxy"] = "Remove-Me" 573 | header["Proxy-Connection"] = "close" 574 | header["Proxy-Authenticate"] = "Basic" 575 | header["Proxy-Authorization"] = "Basic YWxhZGRpbjpvcGVuc2VzYW1l" 576 | header["Transfer-Encoding"] = "chunked" 577 | header["Content-Length"] = nil 578 | header["TE"] = "trailers, deflate;q=0.5" 579 | header["Trailer"] = "Expires" 580 | header["Upgrade"] = "example/1, foo/2" 581 | 582 | ngx.print("hello\r\n\r\nExpires: Wed, 21 Oct 2015 07:28:00 GMT\r\n\r\n") 583 | ngx.exit(200) 584 | } 585 | } 586 | 587 | location ~ "^/cache/(?<n>\d+)$" { 588 | content_by_lua_block { 589 | local mu = require "spec.fixtures.mock_upstream" 590 | return mu.send_default_json_response({}, { 591 | ["Cache-Control"] = "public, max-age=" ..
ngx.var.n, 592 | }) 593 | } 594 | } 595 | 596 | location ~ "^/basic-auth/(?<username>[a-zA-Z0-9_]+)/(?<password>.+)$" { 597 | access_by_lua_block { 598 | local mu = require "spec.fixtures.mock_upstream" 599 | return mu.filter_access_by_basic_auth(ngx.var.username, 600 | ngx.var.password) 601 | } 602 | content_by_lua_block { 603 | local mu = require "spec.fixtures.mock_upstream" 604 | return mu.send_default_json_response({ 605 | authenticated = true, 606 | user = ngx.var.username, 607 | }) 608 | } 609 | } 610 | 611 | location ~ "^/(request|anything)" { 612 | content_by_lua_block { 613 | local mu = require "spec.fixtures.mock_upstream" 614 | return mu.send_default_json_response() 615 | } 616 | } 617 | 618 | location ~ "^/delay/(?<delay_seconds>\d{1,3})$" { 619 | content_by_lua_block { 620 | local mu = require "spec.fixtures.mock_upstream" 621 | local delay_seconds = tonumber(ngx.var.delay_seconds) 622 | if not delay_seconds then 623 | return ngx.exit(ngx.HTTP_NOT_FOUND) 624 | end 625 | 626 | ngx.sleep(delay_seconds) 627 | 628 | return mu.send_default_json_response({ 629 | delay = delay_seconds, 630 | }) 631 | } 632 | } 633 | 634 | location ~ "^/status/(?<code>\d{3})$" { 635 | content_by_lua_block { 636 | local mu = require "spec.fixtures.mock_upstream" 637 | local code = tonumber(ngx.var.code) 638 | if not code then 639 | return ngx.exit(ngx.HTTP_NOT_FOUND) 640 | end 641 | ngx.status = code 642 | return mu.send_default_json_response({ 643 | code = code, 644 | }) 645 | } 646 | } 647 | 648 | location ~ "^/stream/(?<num>\d+)$" { 649 | content_by_lua_block { 650 | local mu = require "spec.fixtures.mock_upstream" 651 | local rep = tonumber(ngx.var.num) 652 | local res = require("cjson").encode(mu.get_default_json_response()) 653 | 654 | ngx.header["X-Powered-By"] = "mock_upstream" 655 | ngx.header["Content-Type"] = "application/json" 656 | 657 | for i = 1, rep do 658 | ngx.say(res) 659 | end 660 | } 661 | } 662 | 663 | location ~ "^/post_log/(?<logname>[a-z0-9_]+)$" { 664 | content_by_lua_block { 665 | local mu = require 
"spec.fixtures.mock_upstream" 666 | return mu.store_log(ngx.var.logname) 667 | } 668 | } 669 | 670 | location ~ "^/post_auth_log/(?<logname>[a-z0-9_]+)/(?<username>[a-zA-Z0-9_]+)/(?<password>.+)$" { 671 | access_by_lua_block { 672 | local mu = require "spec.fixtures.mock_upstream" 673 | return mu.filter_access_by_basic_auth(ngx.var.username, 674 | ngx.var.password) 675 | } 676 | content_by_lua_block { 677 | local mu = require "spec.fixtures.mock_upstream" 678 | return mu.store_log(ngx.var.logname) 679 | } 680 | } 681 | 682 | location ~ "^/read_log/(?<logname>[a-z0-9_]+)$" { 683 | content_by_lua_block { 684 | local mu = require "spec.fixtures.mock_upstream" 685 | return mu.retrieve_log(ngx.var.logname) 686 | } 687 | } 688 | 689 | location ~ "^/count_log/(?<logname>[a-z0-9_]+)$" { 690 | content_by_lua_block { 691 | local mu = require "spec.fixtures.mock_upstream" 692 | return mu.count_log(ngx.var.logname) 693 | } 694 | } 695 | 696 | location ~ "^/reset_log/(?<logname>[a-z0-9_]+)$" { 697 | content_by_lua_block { 698 | local mu = require "spec.fixtures.mock_upstream" 699 | return mu.reset_log(ngx.var.logname) 700 | } 701 | } 702 | 703 | location = /echo_sni { 704 | return 200 'SNI=$ssl_server_name\n'; 705 | } 706 | } 707 | > end -- role ~= "data_plane" 708 | 709 | include '*.http_mock'; 710 | } 711 | > end 712 | 713 | > if #stream_listeners > 0 then 714 | stream { 715 | log_format basic '$remote_addr [$time_local] ' 716 | '$protocol $status $bytes_sent $bytes_received ' 717 | '$session_time'; 718 | 719 | lua_package_path '${{LUA_PACKAGE_PATH}};;'; 720 | lua_package_cpath '${{LUA_PACKAGE_CPATH}};;'; 721 | lua_socket_pool_size ${{LUA_SOCKET_POOL_SIZE}}; 722 | lua_socket_log_errors off; 723 | lua_max_running_timers 4096; 724 | lua_max_pending_timers 16384; 725 | lua_ssl_verify_depth ${{LUA_SSL_VERIFY_DEPTH}}; 726 | > if lua_ssl_trusted_certificate_combined then 727 | lua_ssl_trusted_certificate '${{LUA_SSL_TRUSTED_CERTIFICATE_COMBINED}}'; 728 | > end 729 | 730 | lua_shared_dict stream_kong 5m; 731 | lua_shared_dict 
stream_kong_locks 8m; 732 | lua_shared_dict stream_kong_healthchecks 5m; 733 | lua_shared_dict stream_kong_process_events 5m; 734 | lua_shared_dict stream_kong_cluster_events 5m; 735 | lua_shared_dict stream_kong_rate_limiting_counters 12m; 736 | lua_shared_dict stream_kong_core_db_cache ${{MEM_CACHE_SIZE}}; 737 | lua_shared_dict stream_kong_core_db_cache_miss 12m; 738 | lua_shared_dict stream_kong_db_cache ${{MEM_CACHE_SIZE}}; 739 | lua_shared_dict stream_kong_db_cache_miss 12m; 740 | > if database == "off" then 741 | lua_shared_dict stream_kong_core_db_cache_2 ${{MEM_CACHE_SIZE}}; 742 | lua_shared_dict stream_kong_core_db_cache_miss_2 12m; 743 | lua_shared_dict stream_kong_db_cache_2 ${{MEM_CACHE_SIZE}}; 744 | lua_shared_dict stream_kong_db_cache_miss_2 12m; 745 | > end 746 | > if database == "cassandra" then 747 | lua_shared_dict stream_kong_cassandra 5m; 748 | > end 749 | 750 | lua_shared_dict stream_kong_vitals_counters 50m; 751 | lua_shared_dict stream_kong_counters 50m; 752 | lua_shared_dict stream_kong_vitals_lists 1m; 753 | lua_shared_dict stream_kong_vitals 1m; 754 | 755 | > if ssl_ciphers then 756 | ssl_ciphers ${{SSL_CIPHERS}}; 757 | > end 758 | 759 | # injected nginx_stream_* directives 760 | > for _, el in ipairs(nginx_stream_directives) do 761 | $(el.name) $(el.value); 762 | > end 763 | 764 | init_by_lua_block { 765 | -- shared dictionaries conflict between stream/http modules. use a prefix. 766 | local shared = ngx.shared 767 | ngx.shared = setmetatable({}, { 768 | __index = function(t, k) 769 | return shared["stream_" .. 
k] 770 | end, 771 | }) 772 | 773 | Kong = require 'kong' 774 | Kong.init() 775 | } 776 | 777 | init_worker_by_lua_block { 778 | Kong.init_worker() 779 | } 780 | 781 | upstream kong_upstream { 782 | server 0.0.0.1:1; 783 | balancer_by_lua_block { 784 | Kong.balancer() 785 | } 786 | 787 | # injected nginx_supstream_* directives 788 | > for _, el in ipairs(nginx_supstream_directives) do 789 | $(el.name) $(el.value); 790 | > end 791 | } 792 | 793 | > if #stream_listeners > 0 then 794 | server { 795 | > for _, entry in ipairs(stream_listeners) do 796 | listen $(entry.listener); 797 | > end 798 | 799 | access_log ${{PROXY_ACCESS_LOG}} basic; 800 | error_log ${{PROXY_ERROR_LOG}} ${{LOG_LEVEL}}; 801 | 802 | > for _, ip in ipairs(trusted_ips) do 803 | set_real_ip_from $(ip); 804 | > end 805 | # injected nginx_sproxy_* directives 806 | > for _, el in ipairs(nginx_sproxy_directives) do 807 | $(el.name) $(el.value); 808 | > end 809 | 810 | > if stream_proxy_ssl_enabled then 811 | > for i = 1, #ssl_cert do 812 | ssl_certificate $(ssl_cert[i]); 813 | ssl_certificate_key $(ssl_cert_key[i]); 814 | > end 815 | ssl_session_cache shared:StreamSSL:10m; 816 | ssl_certificate_by_lua_block { 817 | Kong.ssl_certificate() 818 | } 819 | > end 820 | preread_by_lua_block { 821 | Kong.preread() 822 | } 823 | 824 | proxy_ssl on; 825 | proxy_ssl_server_name on; 826 | > if client_ssl then 827 | proxy_ssl_certificate ${{CLIENT_SSL_CERT}}; 828 | proxy_ssl_certificate_key ${{CLIENT_SSL_CERT_KEY}}; 829 | > end 830 | proxy_pass kong_upstream; 831 | 832 | log_by_lua_block { 833 | Kong.log() 834 | } 835 | } 836 | 837 | > if database == "off" then 838 | server { 839 | listen unix:${{PREFIX}}/stream_config.sock; 840 | 841 | error_log ${{PROXY_ERROR_LOG}} ${{LOG_LEVEL}}; 842 | 843 | content_by_lua_block { 844 | Kong.stream_config_listener() 845 | } 846 | } 847 | > end -- database == "off" 848 | 849 | server { # ignore (and close }, to ignore content) 850 | listen unix:${{PREFIX}}/stream_rpc.sock udp; 851 | 
error_log ${{ADMIN_ERROR_LOG}} ${{LOG_LEVEL}}; 852 | content_by_lua_block { 853 | Kong.stream_api() 854 | } 855 | } 856 | 857 | > end -- #stream_listeners > 0 858 | 859 | server { 860 | listen 15557; 861 | listen 15558 ssl; 862 | listen 15557 udp; 863 | 864 | > for i = 1, #ssl_cert do 865 | ssl_certificate $(ssl_cert[i]); 866 | ssl_certificate_key $(ssl_cert_key[i]); 867 | > end 868 | ssl_protocols TLSv1.1 TLSv1.2 TLSv1.3; 869 | 870 | content_by_lua_block { 871 | local sock = assert(ngx.req.socket()) 872 | local data = sock:receive() -- read a line from downstream 873 | 874 | if ngx.var.protocol == "TCP" then 875 | ngx.say(data) 876 | 877 | else 878 | sock:send(data) -- echo whatever was sent 879 | end 880 | } 881 | } 882 | 883 | include '*.stream_mock'; 884 | } 885 | > end 886 | -------------------------------------------------------------------------------- /spec/fixtures/prometheus/metrics.conf: -------------------------------------------------------------------------------- 1 | server { 2 | server_name kong_prometheus_exporter; 3 | listen 0.0.0.0:9542; 4 | 5 | location / { 6 | default_type text/plain; 7 | content_by_lua_block { 8 | local serve = require "kong.plugins.prometheus.serve" 9 | serve.prometheus_server() 10 | } 11 | } 12 | 13 | location /nginx_status { 14 | internal; 15 | access_log off; 16 | stub_status; 17 | } 18 | } 19 | --------------------------------------------------------------------------------