├── .gitignore ├── .travis.yml ├── LICENSE ├── Makefile ├── README.md ├── beat ├── redisbeat.go └── redisbeat_test.go ├── config └── config.go ├── docs └── fields.asciidoc ├── etc ├── beat.yml ├── fields.yml └── redisbeat.template.json ├── main.go ├── main_test.go ├── redisbeat.yml ├── scripts ├── Makefile ├── generate_field_docs.py └── generate_template.py └── tests └── redisbeat.yml /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Created by https://www.gitignore.io/api/intellij,go 3 | 4 | ### Intellij ### 5 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm 6 | 7 | *.iml 8 | 9 | ## Directory-based project format: 10 | .idea/ 11 | # if you remove the above rule, at least ignore the following: 12 | 13 | # User-specific stuff: 14 | # .idea/workspace.xml 15 | # .idea/tasks.xml 16 | # .idea/dictionaries 17 | # .idea/shelf 18 | 19 | # Sensitive or high-churn files: 20 | # .idea/dataSources.ids 21 | # .idea/dataSources.xml 22 | # .idea/sqlDataSources.xml 23 | # .idea/dynamic.xml 24 | # .idea/uiDesigner.xml 25 | 26 | # Gradle: 27 | # .idea/gradle.xml 28 | # .idea/libraries 29 | 30 | # Mongo Explorer plugin: 31 | # .idea/mongoSettings.xml 32 | 33 | ## File-based project format: 34 | *.ipr 35 | *.iws 36 | 37 | ## Plugin-specific files: 38 | 39 | # IntelliJ 40 | /out/ 41 | 42 | # mpeltonen/sbt-idea plugin 43 | .idea_modules/ 44 | 45 | # JIRA plugin 46 | atlassian-ide-plugin.xml 47 | 48 | # Crashlytics plugin (for Android Studio and IntelliJ) 49 | com_crashlytics_export_strings.xml 50 | crashlytics.properties 51 | crashlytics-build.properties 52 | fabric.properties 53 | 54 | 55 | ### Go ### 56 | # Compiled Object files, Static and Dynamic libs (Shared Objects) 57 | *.o 58 | *.a 59 | *.so 60 | 61 | # Folders 62 | _obj 63 | _test 64 | 65 | # Architecture specific extensions/prefixes 66 | *.[568vq] 67 | [568vq].out 68 | 69 | *.cgo1.go 70 | *.cgo2.c 71 | 
_cgo_defun.c 72 | _cgo_gotypes.go 73 | _cgo_export.* 74 | 75 | _testmain.go 76 | 77 | *.exe 78 | *.test 79 | *.prof 80 | 81 | # build 82 | build 83 | redisbeat -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: false 2 | language: go 3 | 4 | go: 5 | - 1.7 6 | 7 | os: 8 | - linux 9 | - osx 10 | 11 | install: 12 | - go get github.com/elastic/beats/libbeat 13 | - go get github.com/garyburd/redigo/redis 14 | - go get github.com/stretchr/testify/assert 15 | 16 | script: make testsuite 17 | 18 | after_success: 19 | - test -f build/coverage/full.cov && bash <(curl -s https://codecov.io/bash) -f build/coverage/full.cov 20 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 
22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. 
You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. 
(Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2016 Chris Black 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | BEATNAME=redisbeat 2 | BEAT_DIR=github.com/chrsblck 3 | 4 | include scripts/Makefile -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Build Status](https://travis-ci.org/chrsblck/redisbeat.svg?branch=master)](https://travis-ci.org/chrsblck/redisbeat) 2 | [![codecov.io](https://codecov.io/github/chrsblck/redisbeat/coverage.svg?branch=master)](https://codecov.io/github/chrsblck/redisbeat?branch=master) 3 | 4 | # Redisbeat 5 | 6 | Redisbeat is the Beat used for Redis monitoring. It is a lightweight agent that reads status from the Redis `INFO` command periodically. 
7 | 8 | 9 | ## Elasticsearch template 10 | 11 | To apply Redisbeat template for Redis status: 12 | 13 | ``` 14 | curl -XPUT 'http://localhost:9200/_template/redisbeat' -d@etc/redisbeat.template.json 15 | ``` 16 | 17 | 18 | # Build, Test, Run 19 | 20 | ``` 21 | # Build 22 | export GO15VENDOREXPERIMENT=1 23 | GOPATH= make 24 | 25 | # Test 26 | GOPATH= make unit-tests 27 | 28 | # Run Test Suite 29 | GOPATH= make testsuite 30 | 31 | # Run 32 | ./redisbeat -c redisbeat.yml 33 | ``` 34 | 35 | 36 | ## Exported fields 37 | 38 | Redisbeat exports each `INFO` section. 39 | 40 | - `type: server` General information about the Redis server 41 | - `type: clients` Client connections section 42 | - `type: memory` Memory consumption related information 43 | - `type: persistence` RDB and AOF related information 44 | - `type: stats` General statistics 45 | - `type: replication` Master/slave replication information 46 | - `type: cpu` CPU consumption statistics 47 | - `type: commandstats` Redis command statistics 48 | - `type: cluster` Redis Cluster section 49 | - `type: keyspace` Database related statistics 50 | 51 | **Server stats:** 52 | ``` 53 | { 54 | "_index": "redisbeat-2016.01.27", 55 | "_type": "server", 56 | "_id": "AVKAgQBCRp3uTbpE00b9", 57 | "_version": 1, 58 | "_score": 1, 59 | "_source": { 60 | "@timestamp": "2016-01-27T00:34:42.952Z", 61 | "beat": { 62 | "hostname": "localhost", 63 | "name": "localhost" 64 | }, 65 | "count": 1, 66 | "stats": { 67 | "arch_bits": "64", 68 | "config_file": "/etc/redis/redis-config.conf", 69 | "gcc_version": "4.8.2", 70 | "hz": "10", 71 | "lru_clock": "11014306", 72 | "multiplexing_api": "epoll", 73 | "os": "Linux 3.16.0-37-generic x86_64", 74 | "process_id": "26866", 75 | "redis_build_id": "7b6a15f737658530", 76 | "redis_git_dirty": "0", 77 | "redis_git_sha1": "00000000", 78 | "redis_mode": "standalone", 79 | "redis_version": "3.0.0", 80 | "run_id": "973eb4c0772fe7846015e295c1ddaa0cd4d92b96", 81 | "tcp_port": "6379", 82 | "uptime_in_days": 
"6", 83 | "uptime_in_seconds": "519706" 84 | }, 85 | "type": "server" 86 | } 87 | } 88 | ``` 89 | **Clients stats:** 90 | ``` 91 | { 92 | "_index": "redisbeat-2016.01.27", 93 | "_type": "clients", 94 | "_id": "AVKAgXVxRp3uTbpE00cc", 95 | "_version": 1, 96 | "_score": 1, 97 | "_source": { 98 | "@timestamp": "2016-01-27T00:35:12.963Z", 99 | "beat": { 100 | "hostname": "localhost", 101 | "name": "localhost" 102 | }, 103 | "count": 1, 104 | "stats": { 105 | "blocked_clients": "0", 106 | "client_biggest_input_buf": "0", 107 | "client_longest_output_list": "0", 108 | "connected_clients": "3" 109 | }, 110 | "type": "clients" 111 | } 112 | } 113 | ``` 114 | **Memory stats:** 115 | ``` 116 | { 117 | "_index": "redisbeat-2016.01.27", 118 | "_type": "memory", 119 | "_id": "AVKAgZyARp3uTbpE00cn", 120 | "_version": 1, 121 | "_score": 1, 122 | "_source": { 123 | "@timestamp": "2016-01-27T00:35:22.971Z", 124 | "beat": { 125 | "hostname": "localhost", 126 | "name": "localhost" 127 | }, 128 | "count": 1, 129 | "stats": { 130 | "mem_allocator": "jemalloc-3.6.0", 131 | "mem_fragmentation_ratio": "0.98", 132 | "used_memory": "2839218984", 133 | "used_memory_human": "2.64G", 134 | "used_memory_lua": "35840", 135 | "used_memory_peak": "2839451672", 136 | "used_memory_peak_human": "2.64G", 137 | "used_memory_rss": "2791378944" 138 | }, 139 | "type": "memory" 140 | } 141 | } 142 | ``` 143 | **Persistence stats:** 144 | ``` 145 | { 146 | "_index": "redisbeat-2016.01.27", 147 | "_type": "persistence", 148 | "_id": "AVKAgQBCRp3uTbpE00cA", 149 | "_version": 1, 150 | "_score": 1, 151 | "_source": { 152 | "@timestamp": "2016-01-27T00:34:42.979Z", 153 | "beat": { 154 | "hostname": "localhost", 155 | "name": "localhost" 156 | }, 157 | "count": 1, 158 | "stats": { 159 | "aof_base_size": "2141338459", 160 | "aof_buffer_length": "0", 161 | "aof_current_rewrite_time_sec": "-1", 162 | "aof_current_size": "2148807529", 163 | "aof_delayed_fsync": "1", 164 | "aof_enabled": "1", 165 | 
"aof_last_bgrewrite_status": "ok", 166 | "aof_last_rewrite_time_sec": "-1", 167 | "aof_last_write_status": "ok", 168 | "aof_pending_bio_fsync": "0", 169 | "aof_pending_rewrite": "0", 170 | "aof_rewrite_buffer_length": "0", 171 | "aof_rewrite_in_progress": "0", 172 | "aof_rewrite_scheduled": "0", 173 | "loading": "0", 174 | "rdb_bgsave_in_progress": "0", 175 | "rdb_changes_since_last_save": "0", 176 | "rdb_current_bgsave_time_sec": "-1", 177 | "rdb_last_bgsave_status": "ok", 178 | "rdb_last_bgsave_time_sec": "36", 179 | "rdb_last_save_time": "1453843449" 180 | }, 181 | "type": "persistence" 182 | } 183 | } 184 | ``` 185 | **Redis-stats:** 186 | ``` 187 | { 188 | "_index": "redisbeat-2016.01.27", 189 | "_type": "stats", 190 | "_id": "AVKAgNvXRp3uTbpE00b3", 191 | "_version": 1, 192 | "_score": 1, 193 | "_source": { 194 | "@timestamp": "2016-01-27T00:34:32.993Z", 195 | "beat": { 196 | "hostname": "localhost", 197 | "name": "localhost" 198 | }, 199 | "count": 1, 200 | "stats": { 201 | "evicted_keys": "0", 202 | "expired_keys": "0", 203 | "instantaneous_input_kbps": "0.03", 204 | "instantaneous_ops_per_sec": "1", 205 | "instantaneous_output_kbps": "0.33", 206 | "keyspace_hits": "487338", 207 | "keyspace_misses": "10", 208 | "latest_fork_usec": "39656", 209 | "migrate_cached_sockets": "0", 210 | "pubsub_channels": "0", 211 | "pubsub_patterns": "0", 212 | "rejected_connections": "0", 213 | "sync_full": "0", 214 | "sync_partial_err": "0", 215 | "sync_partial_ok": "0", 216 | "total_commands_processed": "523535", 217 | "total_connections_received": "385", 218 | "total_net_input_bytes": "39050370", 219 | "total_net_output_bytes": "117696185" 220 | }, 221 | "type": "stats" 222 | } 223 | } 224 | ``` 225 | **Replication stats:** 226 | ``` 227 | { 228 | "_index": "redisbeat-2016.01.27", 229 | "_type": "replication", 230 | "_id": "AVKAhRIoRp3uTbpE00c0", 231 | "_version": 1, 232 | "_score": 1, 233 | "_source": { 234 | "@timestamp": "2016-01-27T00:39:09.700Z", 235 | "beat": { 236 | 
"hostname": "localhost", 237 | "name": "localhost" 238 | }, 239 | "count": 1, 240 | "stats": { 241 | "connected_slaves": "0", 242 | "master_repl_offset": "0", 243 | "repl_backlog_active": "0", 244 | "repl_backlog_first_byte_offset": "0", 245 | "repl_backlog_histlen": "0", 246 | "repl_backlog_size": "1048576", 247 | "role": "master" 248 | }, 249 | "type": "replication" 250 | } 251 | } 252 | ``` 253 | **Cpu stats:** 254 | ``` 255 | { 256 | "_index": "redisbeat-2016.01.27", 257 | "_type": "cpu", 258 | "_id": "AVKAgQBCRp3uTbpE00cD", 259 | "_version": 1, 260 | "_score": 1, 261 | "_source": { 262 | "@timestamp": "2016-01-27T00:34:43.008Z", 263 | "beat": { 264 | "hostname": "localhost", 265 | "name": "localhost" 266 | }, 267 | "count": 1, 268 | "stats": { 269 | "used_cpu_sys": "450.40", 270 | "used_cpu_sys_children": "35.24", 271 | "used_cpu_user": "185.76", 272 | "used_cpu_user_children": "273.30" 273 | }, 274 | "type": "cpu" 275 | } 276 | } 277 | ``` 278 | **Commandstats:** 279 | ``` 280 | { 281 | "_index": "redisbeat-2016.01.27", 282 | "_type": "commandstats", 283 | "_id": "AVKAgSdSRp3uTbpE00cO", 284 | "_version": 1, 285 | "_score": 1, 286 | "_source": { 287 | "@timestamp": "2016-01-27T00:34:53.027Z", 288 | "beat": { 289 | "hostname": "localhost", 290 | "name": "localhost" 291 | }, 292 | "count": 1, 293 | "stats": { 294 | "cmdstat_auth": "calls=337,usec=2031,usec_per_call=6.03", 295 | "cmdstat_dbsize": "calls=2,usec=8,usec_per_call=4.00", 296 | "cmdstat_flushdb": "calls=1,usec=8155,usec_per_call=8155.00", 297 | "cmdstat_get": "calls=162264,usec=294211,usec_per_call=1.81", 298 | "cmdstat_hexists": "calls=308,usec=14478,usec_per_call=47.01", 299 | "cmdstat_hget": "calls=324527,usec=700351,usec_per_call=2.16", 300 | "cmdstat_hgetall": "calls=249,usec=145421,usec_per_call=584.02", 301 | "cmdstat_hset": "calls=30060,usec=143089,usec_per_call=4.76", 302 | "cmdstat_info": "calls=3546,usec=159072,usec_per_call=44.86", 303 | "cmdstat_keys": 
"calls=688,usec=269581,usec_per_call=391.83", 304 | "cmdstat_ping": "calls=2,usec=10,usec_per_call=5.00", 305 | "cmdstat_select": "calls=270,usec=1408,usec_per_call=5.21", 306 | "cmdstat_set": "calls=1304,usec=4397,usec_per_call=3.37" 307 | }, 308 | "type": "commandstats" 309 | } 310 | } 311 | ``` 312 | **Cluster stats:** 313 | ``` 314 | { 315 | "_index": "redisbeat-2016.01.27", 316 | "_type": "cluster", 317 | "_id": "AVKAgZyARp3uTbpE00ct", 318 | "_version": 1, 319 | "_score": 1, 320 | "_source": { 321 | "@timestamp": "2016-01-27T00:35:23.029Z", 322 | "beat": { 323 | "hostname": "localhost", 324 | "name": "localhost" 325 | }, 326 | "count": 1, 327 | "stats": { 328 | "cluster_enabled": "0" 329 | }, 330 | "type": "cluster" 331 | } 332 | } 333 | ``` 334 | **Keyspace stats:** 335 | ``` 336 | { 337 | "_index": "redisbeat-2016.01.27", 338 | "_type": "keyspace", 339 | "_id": "AVKAgU5jRp3uTbpE00ca", 340 | "_version": 1, 341 | "_score": 1, 342 | "_source": { 343 | "@timestamp": "2016-01-27T00:35:03.030Z", 344 | "beat": { 345 | "hostname": "localhost", 346 | "name": "localhost" 347 | }, 348 | "count": 1, 349 | "stats": { 350 | "db0": "keys=716456,expires=0,avg_ttl=0", 351 | "db1": "keys=755,expires=0,avg_ttl=0", 352 | "db2": "keys=755,expires=0,avg_ttl=0" 353 | }, 354 | "type": "keyspace" 355 | } 356 | } 357 | ``` -------------------------------------------------------------------------------- /beat/redisbeat.go: -------------------------------------------------------------------------------- 1 | package beat 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "regexp" 7 | "strconv" 8 | "strings" 9 | "time" 10 | 11 | "github.com/chrsblck/redisbeat/config" 12 | "github.com/elastic/beats/libbeat/beat" 13 | "github.com/elastic/beats/libbeat/common" 14 | "github.com/elastic/beats/libbeat/logp" 15 | "github.com/elastic/beats/libbeat/publisher" 16 | "github.com/garyburd/redigo/redis" 17 | ) 18 | 19 | type Redisbeat struct { 20 | period time.Duration 21 | config config.Config 22 | events 
publisher.Client 23 | 24 | redisPool *redis.Pool 25 | done chan struct{} 26 | } 27 | 28 | func New(b *beat.Beat, cfg *common.Config) (beat.Beater, error) { 29 | defaultConfig := config.DefaultConfig 30 | err := cfg.Unpack(&defaultConfig) 31 | if err != nil { 32 | return nil, fmt.Errorf("Error reading configuration file: %v", err) 33 | } 34 | 35 | rb := &Redisbeat{ 36 | done: make(chan struct{}), 37 | config: defaultConfig, 38 | } 39 | 40 | logp.Debug("redisbeat", "Redisbeat configuration:") 41 | logp.Debug("redisbeat", "Period %v", rb.config.Period) 42 | logp.Debug("redisbeat", "Host %v", rb.config.Host) 43 | logp.Debug("redisbeat", "Port %v", rb.config.Port) 44 | logp.Debug("redisbeat", "Network %v", rb.config.Network) 45 | logp.Debug("redisbeat", "Max Connections %v", rb.config.MaxConn) 46 | logp.Debug("redisbeat", "Auth %t", rb.config.Auth.Required) 47 | logp.Debug("redisbeat", "Server statistics %t", rb.config.Stats.Server) 48 | logp.Debug("redisbeat", "Client statistics %t", rb.config.Stats.Clients) 49 | logp.Debug("redisbeat", "Memory statistics %t", rb.config.Stats.Memory) 50 | logp.Debug("redisbeat", "Persistence statistics %t", rb.config.Stats.Persistence) 51 | logp.Debug("redisbeat", "Stats statistics %t", rb.config.Stats.Stats) 52 | logp.Debug("redisbeat", "Replication statistics %t", rb.config.Stats.Replication) 53 | logp.Debug("redisbeat", "Cpu statistics %t", rb.config.Stats.Cpu) 54 | logp.Debug("redisbeat", "Command statistics %t", rb.config.Stats.Commandstats) 55 | logp.Debug("redisbeat", "Cluster statistics %t", rb.config.Stats.Cluster) 56 | logp.Debug("redisbeat", "Keyspace statistics %t", rb.config.Stats.Keyspace) 57 | 58 | return rb, nil 59 | } 60 | 61 | func (rb *Redisbeat) setup(b *beat.Beat) error { 62 | rb.events = b.Publisher.Connect() 63 | rb.done = make(chan struct{}) 64 | 65 | // Set up redis pool 66 | redisPool := redis.NewPool(func() (redis.Conn, error) { 67 | c, err := redis.Dial(rb.config.Network, 
rb.config.Host+":"+strconv.Itoa(rb.config.Port)) 68 | 69 | if err != nil { 70 | return nil, err 71 | } 72 | 73 | return c, err 74 | }, rb.config.MaxConn) 75 | 76 | rb.redisPool = redisPool 77 | 78 | if rb.config.Auth.Required { 79 | c := rb.redisPool.Get() 80 | defer c.Close() 81 | 82 | authed, err := c.Do("AUTH", rb.config.Auth.RequiredPass) 83 | if err != nil { 84 | return err 85 | } else { 86 | logp.Debug("redisbeat", "AUTH %v", authed) 87 | } 88 | } 89 | 90 | return nil 91 | } 92 | 93 | func (rb *Redisbeat) Run(b *beat.Beat) error { 94 | var err error 95 | 96 | rb.setup(b) 97 | 98 | ticker := time.NewTicker(rb.config.Period) 99 | defer ticker.Stop() 100 | 101 | for { 102 | select { 103 | case <-rb.done: 104 | return nil 105 | case <-ticker.C: 106 | } 107 | 108 | timerStart := time.Now() 109 | 110 | if rb.config.Stats.Server { 111 | err = rb.exportStats("server") 112 | if err != nil { 113 | logp.Err("Error reading server stats: %v", err) 114 | break 115 | } 116 | } 117 | if rb.config.Stats.Clients { 118 | err = rb.exportStats("clients") 119 | if err != nil { 120 | logp.Err("Error reading clients stats: %v", err) 121 | break 122 | } 123 | } 124 | if rb.config.Stats.Memory { 125 | err = rb.exportStats("memory") 126 | if err != nil { 127 | logp.Err("Error reading memory stats: %v", err) 128 | break 129 | } 130 | } 131 | if rb.config.Stats.Persistence { 132 | err = rb.exportStats("persistence") 133 | if err != nil { 134 | logp.Err("Error reading persistence stats: %v", err) 135 | break 136 | } 137 | } 138 | if rb.config.Stats.Stats { 139 | err = rb.exportStats("stats") 140 | if err != nil { 141 | logp.Err("Error reading stats stats: %v", err) 142 | break 143 | } 144 | } 145 | if rb.config.Stats.Replication { 146 | err = rb.exportStats("replication") 147 | if err != nil { 148 | logp.Err("Error reading replication stats: %v", err) 149 | break 150 | } 151 | } 152 | if rb.config.Stats.Cpu { 153 | err = rb.exportStats("cpu") 154 | if err != nil { 155 | logp.Err("Error 
reading cpu stats: %v", err) 156 | break 157 | } 158 | } 159 | if rb.config.Stats.Commandstats { 160 | err = rb.exportStats("commandstats") 161 | if err != nil { 162 | logp.Err("Error reading commandstats: %v", err) 163 | break 164 | } 165 | } 166 | if rb.config.Stats.Cluster { 167 | err = rb.exportStats("cluster") 168 | if err != nil { 169 | logp.Err("Error reading cluster stats: %v", err) 170 | break 171 | } 172 | } 173 | if rb.config.Stats.Keyspace { 174 | err = rb.exportStats("keyspace") 175 | if err != nil { 176 | logp.Err("Error reading keyspace stats: %v", err) 177 | break 178 | } 179 | } 180 | 181 | timerEnd := time.Now() 182 | duration := timerEnd.Sub(timerStart) 183 | if duration.Nanoseconds() > rb.config.Period.Nanoseconds() { // compare against the configured period; the rb.period field is never assigned and would always be zero 184 | logp.Warn("Ignoring tick(s) due to processing taking longer than one period") 185 | } 186 | } 187 | 188 | return err 189 | } 190 | 191 | func (rb *Redisbeat) Cleanup(b *beat.Beat) error { 192 | // NOTE(review): consider releasing the redis pool here, after the main loop exits; today it is closed in Stop().
193 | return nil 194 | } 195 | 196 | // Stop is triggered on exit, closing the done channel and redis pool 197 | func (rb *Redisbeat) Stop() { 198 | close(rb.done) 199 | rb.redisPool.Close() 200 | } 201 | 202 | func (rb *Redisbeat) exportStats(statType string) error { 203 | stats, err := rb.getInfoReply(statType) 204 | if err != nil { 205 | logp.Warn("Failed to fetch server stats: %v", err) 206 | return err 207 | } 208 | 209 | event := common.MapStr{ 210 | "@timestamp": common.Time(time.Now()), 211 | "type": statType, 212 | "count": 1, 213 | "stats": stats, 214 | } 215 | 216 | rb.events.PublishEvent(event) 217 | 218 | return nil 219 | } 220 | 221 | // getInfoReply sends INFO type command and returns the response as a map 222 | func (rb *Redisbeat) getInfoReply(infoType string) (map[string]string, error) { 223 | c := rb.redisPool.Get() 224 | defer c.Close() 225 | 226 | if rb.config.Auth.Required { 227 | authed, err := c.Do("AUTH", rb.config.Auth.RequiredPass) 228 | if err != nil { 229 | return nil, err 230 | } else { 231 | logp.Debug("redisbeat", "AUTH %v", authed) 232 | } 233 | } 234 | 235 | reply, err := redis.Bytes(c.Do("INFO", infoType)) 236 | 237 | if err != nil { 238 | return nil, err 239 | } else { 240 | s := string(reply[:]) 241 | return convertReplyToMap(s) 242 | } 243 | } 244 | 245 | // convertReplyToMap converts a bulk string reply from Redis to a map 246 | func convertReplyToMap(s string) (map[string]string, error) { 247 | var info map[string]string 248 | info = make(map[string]string) 249 | 250 | // Regex for INFO type property 251 | infoRegex := `^\s*#\s*\w+\s*$` 252 | r, err := regexp.Compile(infoRegex) 253 | if err != nil { 254 | return nil, errors.New("Regex failed to compile") 255 | } 256 | 257 | // http://redis.io/topics/protocol#bulk-string-reply 258 | a := strings.Split(s, "\r\n") 259 | 260 | for _, v := range a { 261 | if r.MatchString(v) || v == "" { 262 | logp.Debug("redisbeat", "Skipping reply string - \"%v\"", v) 263 | continue 264 | } 265 
| entry := strings.SplitN(v, ":", 2) // SplitN with n=2: INFO values may themselves contain ':' (e.g. Windows file paths), so only split on the first separator 266 | logp.Debug("redisbeat", "Entry: %#v\n", entry) 267 | if len(entry) == 2 { info[entry[0]] = entry[1] } // guard: a reply line without ':' would otherwise panic on entry[1] 268 | } 269 | return info, nil 270 | } 271 | -------------------------------------------------------------------------------- /beat/redisbeat_test.go: -------------------------------------------------------------------------------- 1 | package beat 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestConvertReplyToMap(t *testing.T) { 10 | testReplyString := "# Server\r\nredis_version:3.0.0\r\nredis_mode:standalone\r\nmultiplexing_api:epoll\r\n" 11 | replyMap, err := convertReplyToMap(testReplyString) 12 | assert.Nil(t, err, "Valid string reply should not throw an error") 13 | assert.Equal(t, "3.0.0", replyMap["redis_version"], "Redis version should be 3.0.0") 14 | assert.Equal(t, "standalone", replyMap["redis_mode"], "Redis mode should be standalone") 15 | assert.Equal(t, "epoll", replyMap["multiplexing_api"], "Redis multiplexing api should be epoll") 16 | } 17 | -------------------------------------------------------------------------------- /config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import "time" 4 | 5 | type Config struct { 6 | Period time.Duration 7 | Host string 8 | Port int 9 | Network string 10 | MaxConn int 11 | Auth AuthConfig 12 | Stats StatsConfig 13 | } 14 | 15 | type AuthConfig struct { 16 | Required bool `config:"required"` 17 | RequiredPass string `config:"requiredpass"` 18 | } 19 | 20 | type StatsConfig struct { 21 | Server bool `config:"server"` 22 | Clients bool `config:"clients"` 23 | Memory bool `config:"memory"` 24 | Persistence bool `config:"persistence"` 25 | Stats bool `config:"stats"` 26 | Replication bool `config:"replication"` 27 | Cpu bool `config:"cpu"` 28 | Commandstats bool `config:"commandstats"` 29 | Cluster bool `config:"cluster"` 30 | Keyspace bool `config:"keyspace"` 31 | } 32 | 33 | 
var DefaultConfig = Config{ 34 | Period: 10 * time.Second, 35 | Host: "localhost", 36 | Port: 6379, 37 | Network: "tcp", 38 | MaxConn: 10, 39 | Auth: AuthConfig{ 40 | Required: false, 41 | RequiredPass: "", 42 | }, 43 | Stats: StatsConfig{ 44 | Server: true, 45 | Clients: true, 46 | Memory: true, 47 | Persistence: true, 48 | Stats: true, 49 | Replication: true, 50 | Cpu: true, 51 | Commandstats: true, 52 | Cluster: true, 53 | Keyspace: true, 54 | }, 55 | } 56 | -------------------------------------------------------------------------------- /docs/fields.asciidoc: -------------------------------------------------------------------------------- 1 | 2 | //// 3 | This file is generated! See etc/fields.yml and scripts/generate_field_docs.py 4 | //// 5 | 6 | [[exported-fields]] 7 | == Exported Fields 8 | 9 | This document describes the fields that are exported by 10 | Redisbeat. They are grouped in the 11 | following categories: 12 | 13 | * <> 14 | * <> 15 | * <> 16 | * <> 17 | * <> 18 | * <> 19 | * <> 20 | * <> 21 | * <> 22 | * <> 23 | * <> 24 | 25 | [[exported-fields-env]] 26 | === Common Fields 27 | 28 | Contains common fields available in all event types. 29 | 30 | 31 | 32 | ==== @timestamp 33 | 34 | type: date 35 | 36 | example: 2015-01-24 14:06:05.071000 37 | 38 | format: YYYY-MM-DDTHH:MM:SS.milliZ 39 | 40 | required: True 41 | 42 | The timestamp of when the measurements were taken. The precision is in milliseconds. The timezone is UTC. 43 | 44 | 45 | ==== type 46 | 47 | required: True 48 | 49 | Set to "system" to indicate that the statistics are system-wide. 50 | 51 | 52 | ==== count 53 | 54 | type: int 55 | 56 | required: True 57 | 58 | The number of transactions that this event represents. This is generally the inverse of the sampling rate. For example, for a sample rate of 1/10, the count is 10. The count is used by the UIs to return estimated values. Reserved for future usage. 59 | 60 | 61 | ==== beat.name 62 | 63 | Name of the Beat sending the events. 
If the shipper name is set in the configuration file, then that value is used. If it is not set, the hostname is used. 64 | 65 | 66 | ==== beat.hostname 67 | 68 | The hostname as returned by the operating system on which the Beat is running. 69 | 70 | 71 | [[exported-fields-server]] 72 | === Redis Server Statistics Fields 73 | 74 | Contains server statistics. These statistics are returned from the redis command *INFO server* 75 | 76 | 77 | 78 | [[exported-fields-server]] 79 | === Redis Server Statistics Fields 80 | 81 | General information about the Redis server 82 | 83 | 84 | 85 | ==== stats 86 | 87 | type: object 88 | 89 | A *string => string* map of server stats 90 | 91 | 92 | [[exported-fields-clients]] 93 | === Redis Client Statistics Fields 94 | 95 | Contains client statistics. These statistics are returned from the redis command *INFO clients* 96 | 97 | 98 | 99 | [[exported-fields-clients]] 100 | === Redis Client Statistics Fields 101 | 102 | Client connections section 103 | 104 | 105 | 106 | ==== stats 107 | 108 | type: object 109 | 110 | A *string => string* map of client stats 111 | 112 | 113 | [[exported-fields-memory]] 114 | === Redis Memory Statistics Fields 115 | 116 | Contains server statistics. These statistics are returned from the redis command *INFO memory* 117 | 118 | 119 | 120 | [[exported-fields-memory]] 121 | === Redis Memory Statistics Fields 122 | 123 | Memory consumption related information 124 | 125 | 126 | 127 | ==== stats 128 | 129 | type: object 130 | 131 | A *string => string* map of memory stats 132 | 133 | 134 | [[exported-fields-persistence]] 135 | === Redis Persistence Statistics Fields 136 | 137 | Contains persistence statistics. 
These statistics are returned from the redis command *INFO persistence* 138 | 139 | 140 | 141 | [[exported-fields-persistence]] 142 | === Redis Persistence Statistics Fields 143 | 144 | RDB and AOF related information 145 | 146 | 147 | 148 | ==== stats 149 | 150 | type: object 151 | 152 | A *string => string* map of persistence stats 153 | 154 | 155 | [[exported-fields-stats]] 156 | === Redis Stats Statistics Fields 157 | 158 | Contains redis-stats statistics. These statistics are returned from the redis command *INFO stats* 159 | 160 | 161 | 162 | [[exported-fields-stats]] 163 | === Redis Stats Statistics Fields 164 | 165 | General statistics 166 | 167 | 168 | 169 | ==== stats 170 | 171 | type: object 172 | 173 | A *string => string* map of redis-stats stats 174 | 175 | 176 | [[exported-fields-replication]] 177 | === Redis Replication Statistics Fields 178 | 179 | Contains replication statistics. These statistics are returned from the redis command *INFO replication* 180 | 181 | 182 | 183 | [[exported-fields-replication]] 184 | === Redis Replication Statistics Fields 185 | 186 | Master/slave replication information 187 | 188 | 189 | 190 | ==== stats 191 | 192 | type: object 193 | 194 | A *string => string* map of replication stats 195 | 196 | 197 | [[exported-fields-cpu]] 198 | === Redis CPU Statistics Fields 199 | 200 | Contains cpu statistics. These statistics are returned from the redis command *INFO cpu* 201 | 202 | 203 | 204 | [[exported-fields-cpu]] 205 | === Redis CPU Statistics Fields 206 | 207 | CPU consumption statistics 208 | 209 | 210 | 211 | ==== stats 212 | 213 | type: object 214 | 215 | A *string => string* map of cpu stats 216 | 217 | 218 | [[exported-fields-commandstats]] 219 | === Redis CommandStats Statistics Fields 220 | 221 | Contains command-stats statistics. 
These statistics are returned from the redis command *INFO commandstats* 222 | 223 | 224 | 225 | [[exported-fields-commandstats]] 226 | === Redis CommandStats Statistics Fields 227 | 228 | Redis command statistics 229 | 230 | 231 | 232 | ==== stats 233 | 234 | type: object 235 | 236 | A *string => string* map of command-stats stats 237 | 238 | 239 | [[exported-fields-cluster]] 240 | === Redis Cluster Statistics Fields 241 | 242 | Contains cluster statistics. These statistics are returned from the redis command *INFO cluster* 243 | 244 | 245 | 246 | [[exported-fields-cluster]] 247 | === Redis Cluster Statistics Fields 248 | 249 | Redis Cluster section 250 | 251 | 252 | 253 | ==== stats 254 | 255 | type: object 256 | 257 | A *string => string* map of cluster stats 258 | 259 | 260 | [[exported-fields-keyspace]] 261 | === Redis Keyspace Statistics Fields 262 | 263 | Contains keyspace statistics. These statistics are returned from the redis command *INFO keyspace* 264 | 265 | 266 | 267 | [[exported-fields-keyspace]] 268 | === Redis Keyspace Statistics Fields 269 | 270 | Database related statistics 271 | 272 | 273 | 274 | ==== stats 275 | 276 | type: object 277 | 278 | A *string => string* map of keyspace stats 279 | 280 | -------------------------------------------------------------------------------- /etc/beat.yml: -------------------------------------------------------------------------------- 1 | ################### Redisbeat Configuration Example ######################### 2 | 3 | ############################# Input ############################################ 4 | input: 5 | # In seconds, defines how often to read server statistics 6 | period: 10 7 | 8 | # Host of redis server, default is localhost 9 | host: "localhost" 10 | 11 | # Port the redis server is listening on, default is 6379 12 | port: 6379 13 | 14 | # Network type, default is tcp 15 | network: "tcp" 16 | 17 | # Max connections for redis pool, default is 10 18 | maxconn: 10 19 | 20 | # Authentication 
config 21 | auth: 22 | # default is false 23 | required: false 24 | # default is empty string 25 | required_pass: "p@ssw0rd" 26 | 27 | # Statistics to collect (all enabled by default) 28 | stats: 29 | # server information 30 | server: true 31 | 32 | # clients information 33 | clients: true 34 | 35 | # memory information 36 | memory: true 37 | 38 | # persistence information 39 | persistence: true 40 | 41 | # stats information 42 | stats: true 43 | 44 | # replication information 45 | replication: true 46 | 47 | # cpu information 48 | cpu: true 49 | 50 | # commandstats information 51 | commandstats: true 52 | 53 | # cluster information 54 | cluster: true 55 | 56 | # keyspace information 57 | keyspace: true 58 | 59 | -------------------------------------------------------------------------------- /etc/fields.yml: -------------------------------------------------------------------------------- 1 | version: 1.0 2 | 3 | defaults: 4 | type: string 5 | required: false 6 | index: not_analyzed 7 | doc_values: true 8 | ignore_above: 1024 9 | 10 | env: 11 | type: group 12 | description: > 13 | Contains common fields available in all event types. 14 | fields: 15 | - name: "@timestamp" 16 | type: date 17 | required: true 18 | format: YYYY-MM-DDTHH:MM:SS.milliZ 19 | example: 2015-01-24T14:06:05.071Z 20 | description: > 21 | The timestamp of when the measurements were taken. The precision is in milliseconds. 22 | The timezone is UTC. 23 | 24 | - name: type 25 | description: > 26 | Set to "system" to indicate that the statistics are system-wide. 27 | required: true 28 | 29 | - name: count 30 | type: int 31 | description: > 32 | The number of transactions that this event represents. This 33 | is generally the inverse of the sampling rate. For example, for 34 | a sample rate of 1/10, the count is 10. The count is used by the 35 | UIs to return estimated values. Reserved for future usage. 
36 | required: true 37 | 38 | - name: beat.name 39 | description: > 40 | Name of the Beat sending the events. If the shipper name is set 41 | in the configuration file, then that value is used. If it is not set, 42 | the hostname is used. 43 | 44 | - name: beat.hostname 45 | description: > 46 | The hostname as returned by the operating system on which the Beat is 47 | running. 48 | 49 | server: 50 | type: group 51 | description: > 52 | Contains server statistics. These statistics are returned from the redis command *INFO server* 53 | fields: 54 | - name: server 55 | type: group 56 | description: > 57 | General information about the Redis server 58 | fields: 59 | - name: stats 60 | type: object 61 | description: > 62 | A *string => string* map of server stats 63 | 64 | clients: 65 | type: group 66 | description: > 67 | Contains client statistics. These statistics are returned from the redis command *INFO clients* 68 | fields: 69 | - name: clients 70 | type: group 71 | description: > 72 | Client connections section 73 | fields: 74 | - name: stats 75 | type: object 76 | description: > 77 | A *string => string* map of client stats 78 | 79 | memory: 80 | type: group 81 | description: > 82 | Contains server statistics. These statistics are returned from the redis command *INFO memory* 83 | fields: 84 | - name: memory 85 | type: group 86 | description: > 87 | Memory consumption related information 88 | fields: 89 | - name: stats 90 | type: object 91 | description: > 92 | A *string => string* map of memory stats 93 | 94 | persistence: 95 | type: group 96 | description: > 97 | Contains persistence statistics. 
These statistics are returned from the redis command *INFO persistence* 98 | fields: 99 | - name: persistence 100 | type: group 101 | description: > 102 | RDB and AOF related information 103 | fields: 104 | - name: stats 105 | type: object 106 | description: > 107 | A *string => string* map of persistence stats 108 | 109 | stats: 110 | type: group 111 | description: > 112 | Contains redis-stats statistics. These statistics are returned from the redis command *INFO stats* 113 | fields: 114 | - name: stats 115 | type: group 116 | description: > 117 | General statistics 118 | fields: 119 | - name: stats 120 | type: object 121 | description: > 122 | A *string => string* map of redis-stats stats 123 | 124 | replication: 125 | type: group 126 | description: > 127 | Contains replication statistics. These statistics are returned from the redis command *INFO replication* 128 | fields: 129 | - name: replication 130 | type: group 131 | description: > 132 | Master/slave replication information 133 | fields: 134 | - name: stats 135 | type: object 136 | description: > 137 | A *string => string* map of replication stats 138 | 139 | cpu: 140 | type: group 141 | description: > 142 | Contains cpu statistics. These statistics are returned from the redis command *INFO cpu* 143 | fields: 144 | - name: cpu 145 | type: group 146 | description: > 147 | CPU consumption statistics 148 | fields: 149 | - name: stats 150 | type: object 151 | description: > 152 | A *string => string* map of cpu stats 153 | 154 | commandstats: 155 | type: group 156 | description: > 157 | Contains command-stats statistics. 
These statistics are returned from the redis command *INFO commandstats* 158 | fields: 159 | - name: commandstats 160 | type: group 161 | description: > 162 | Redis command statistics 163 | fields: 164 | - name: stats 165 | type: object 166 | description: > 167 | A *string => string* map of command-stats stats 168 | 169 | cluster: 170 | type: group 171 | description: > 172 | Contains cluster statistics. These statistics are returned from the redis command *INFO cluster* 173 | fields: 174 | - name: cluster 175 | type: group 176 | description: > 177 | Redis Cluster section 178 | fields: 179 | - name: stats 180 | type: object 181 | description: > 182 | A *string => string* map of cluster stats 183 | 184 | keyspace: 185 | type: group 186 | description: > 187 | Contains keyspace statistics. These statistics are returned from the redis command *INFO keyspace* 188 | fields: 189 | - name: keyspace 190 | type: group 191 | description: > 192 | Database related statistics 193 | fields: 194 | - name: stats 195 | type: object 196 | description: > 197 | A *string => string* map of keyspace stats -------------------------------------------------------------------------------- /etc/redisbeat.template.json: -------------------------------------------------------------------------------- 1 | { 2 | "mappings": { 3 | "_default_": { 4 | "_all": { 5 | "enabled": true, 6 | "norms": { 7 | "enabled": false 8 | } 9 | }, 10 | "dynamic_templates": [ 11 | { 12 | "template1": { 13 | "mapping": { 14 | "doc_values": true, 15 | "ignore_above": 1024, 16 | "index": "not_analyzed", 17 | "type": "{dynamic_type}" 18 | }, 19 | "match": "*" 20 | } 21 | } 22 | ], 23 | "properties": { 24 | "@timestamp": { 25 | "type": "date" 26 | }, 27 | "clients": { 28 | "properties": {} 29 | }, 30 | "cluster": { 31 | "properties": {} 32 | }, 33 | "commandstats": { 34 | "properties": {} 35 | }, 36 | "cpu": { 37 | "properties": {} 38 | }, 39 | "keyspace": { 40 | "properties": {} 41 | }, 42 | "memory": { 43 | "properties": 
{} 44 | }, 45 | "persistence": { 46 | "properties": {} 47 | }, 48 | "replication": { 49 | "properties": {} 50 | }, 51 | "server": { 52 | "properties": {} 53 | }, 54 | "stats": { 55 | "properties": {} 56 | } 57 | } 58 | } 59 | }, 60 | "settings": { 61 | "index.refresh_interval": "5s" 62 | }, 63 | "template": "redisbeat-*" 64 | } 65 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | redisbeat "github.com/chrsblck/redisbeat/beat" 5 | 6 | "github.com/elastic/beats/libbeat/beat" 7 | ) 8 | 9 | var Name = "redisbeat" 10 | var Version = "0.0.1" 11 | 12 | func main() { 13 | beat.Run(Name, Version, redisbeat.New) 14 | } 15 | -------------------------------------------------------------------------------- /main_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | // Taken from - https://github.com/elastic/beats/blob/master/topbeat/main_test.go 4 | // This file is mandatory as otherwise the redisbeat.test binary is not generated correctly. 5 | 6 | import ( 7 | "flag" 8 | "testing" 9 | ) 10 | 11 | var systemTest *bool 12 | 13 | func init() { 14 | systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") 15 | } 16 | 17 | // Test started when the test binary is started. Only calls main. 
18 | func TestSystem(t *testing.T) { 19 | 20 | if *systemTest { 21 | main() 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /redisbeat.yml: -------------------------------------------------------------------------------- 1 | ################### Redisbeat Configuration Example ######################### 2 | 3 | ############################# Input ############################################ 4 | redisbeat: 5 | # In seconds, defines how often to read server statistics 6 | #period: 10 7 | 8 | # Host of redis server, default is localhost 9 | #host: "localhost" 10 | 11 | # Port the redis server is listening on, default is 6379 12 | #port: 6379 13 | 14 | # Network type, default is tcp 15 | #network: "tcp" 16 | 17 | # Max connections for redis pool, default is 10 18 | #maxconn: 10 19 | 20 | # Authentication config 21 | auth: 22 | # default is false 23 | #required: true 24 | # default is empty string 25 | #required_pass: "p@ssw0rd" 26 | 27 | # Statistics to collect (all enabled by default) 28 | stats: 29 | # server information 30 | #server: true 31 | 32 | # clients information 33 | #clients: true 34 | 35 | # memory information 36 | #memory: true 37 | 38 | # persistence information 39 | #persistence: true 40 | 41 | # stats information 42 | #stats: true 43 | 44 | # replication information 45 | #replication: true 46 | 47 | # cpu information 48 | #cpu: true 49 | 50 | # commandstats information 51 | #commandstats: true 52 | 53 | # cluster information 54 | #cluster: true 55 | 56 | # keyspace information 57 | #keyspace: true 58 | ############################################################################### 59 | ############################# Libbeat Config ################################## 60 | # Base config file used by all other beats for using libbeat features 61 | 62 | ############################# Output ########################################## 63 | 64 | # Configure what outputs to use when sending the data collected by 
the beat. 65 | # Multiple outputs may be used. 66 | output: 67 | 68 | ### Elasticsearch as output 69 | elasticsearch: 70 | # Array of hosts to connect to. 71 | # Scheme and port can be left out and will be set to the default (http and 9200) 72 | # In case you specify and additional path, the scheme is required: http://localhost:9200/path 73 | # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 74 | hosts: ["localhost:9200"] 75 | 76 | # Optional protocol and basic auth credentials. 77 | #protocol: "https" 78 | #username: "admin" 79 | #password: "s3cr3t" 80 | 81 | # Number of workers per Elasticsearch host. 82 | #worker: 1 83 | 84 | # Optional index name. The default is "redisbeat" and generates 85 | # [redisbeat-]YYYY.MM.DD keys. 86 | #index: "redisbeat" 87 | 88 | # Optional HTTP Path 89 | #path: "/elasticsearch" 90 | 91 | # Proxy server url 92 | #proxy_url: http://proxy:3128 93 | 94 | # The number of times a particular Elasticsearch index operation is attempted. If 95 | # the indexing operation doesn't succeed after this many retries, the events are 96 | # dropped. The default is 3. 97 | #max_retries: 3 98 | 99 | # The maximum number of events to bulk in a single Elasticsearch bulk API index request. 100 | # The default is 50. 101 | #bulk_max_size: 50 102 | 103 | # Configure http request timeout before failing an request to Elasticsearch. 104 | #timeout: 90 105 | 106 | # The number of seconds to wait for new events between two bulk API index requests. 107 | # If `bulk_max_size` is reached before this interval expires, addition bulk index 108 | # requests are made. 109 | #flush_interval: 1 110 | 111 | # Boolean that sets if the topology is kept in Elasticsearch. The default is 112 | # false. This option makes sense only for Packetbeat. 113 | #save_topology: false 114 | 115 | # The time to live in seconds for the topology information that is stored in 116 | # Elasticsearch. The default is 15 seconds. 
117 | #topology_expire: 15 118 | 119 | # tls configuration. By default is off. 120 | #tls: 121 | # List of root certificates for HTTPS server verifications 122 | #certificate_authorities: ["/etc/pki/root/ca.pem"] 123 | 124 | # Certificate for TLS client authentication 125 | #certificate: "/etc/pki/client/cert.pem" 126 | 127 | # Client Certificate Key 128 | #certificate_key: "/etc/pki/client/cert.key" 129 | 130 | # Controls whether the client verifies server certificates and host name. 131 | # If insecure is set to true, all server host names and certificates will be 132 | # accepted. In this mode TLS based connections are susceptible to 133 | # man-in-the-middle attacks. Use only for testing. 134 | #insecure: true 135 | 136 | # Configure cipher suites to be used for TLS connections 137 | #cipher_suites: [] 138 | 139 | # Configure curve types for ECDHE based cipher suites 140 | #curve_types: [] 141 | 142 | # Configure minimum TLS version allowed for connection to logstash 143 | #min_version: 1.0 144 | 145 | # Configure maximum TLS version allowed for connection to logstash 146 | #max_version: 1.2 147 | 148 | 149 | ### Logstash as output 150 | #logstash: 151 | # The Logstash hosts 152 | #hosts: ["localhost:5044"] 153 | 154 | # Number of workers per Logstash host. 155 | #worker: 1 156 | 157 | # Set gzip compression level. 158 | #compression_level: 3 159 | 160 | # Optional load balance the events between the Logstash hosts 161 | #loadbalance: true 162 | 163 | # Optional index name. The default index name depends on the each beat. 164 | # For Packetbeat, the default is set to packetbeat, for Topbeat 165 | # to topbeat and for Filebeat to filebeat. 166 | #index: redisbeat 167 | 168 | # Optional TLS. By default is off. 
169 | #tls: 170 | # List of root certificates for HTTPS server verifications 171 | #certificate_authorities: ["/etc/pki/root/ca.pem"] 172 | 173 | # Certificate for TLS client authentication 174 | #certificate: "/etc/pki/client/cert.pem" 175 | 176 | # Client Certificate Key 177 | #certificate_key: "/etc/pki/client/cert.key" 178 | 179 | # Controls whether the client verifies server certificates and host name. 180 | # If insecure is set to true, all server host names and certificates will be 181 | # accepted. In this mode TLS based connections are susceptible to 182 | # man-in-the-middle attacks. Use only for testing. 183 | #insecure: true 184 | 185 | # Configure cipher suites to be used for TLS connections 186 | #cipher_suites: [] 187 | 188 | # Configure curve types for ECDHE based cipher suites 189 | #curve_types: [] 190 | 191 | 192 | ### File as output 193 | #file: 194 | # Path to the directory where to save the generated files. The option is mandatory. 195 | #path: "/tmp/redisbeat" 196 | 197 | # Name of the generated files. The default is `redisbeat` and it generates files: `redisbeat`, `redisbeat.1`, `redisbeat.2`, etc. 198 | #filename: redisbeat 199 | 200 | # Maximum size in kilobytes of each file. When this size is reached, the files are 201 | # rotated. The default value is 10 MB. 202 | #rotate_every_kb: 10000 203 | 204 | # Maximum number of files under path. When this number of files is reached, the 205 | # oldest file is deleted and the rest are shifted from last to first. The default 206 | # is 7 files. 207 | #number_of_files: 7 208 | 209 | 210 | ### Console output 211 | # console: 212 | # Pretty print json event 213 | #pretty: false 214 | 215 | 216 | ############################# Shipper ######################################### 217 | 218 | shipper: 219 | # The name of the shipper that publishes the network data. It can be used to group 220 | # all the transactions sent by a single shipper in the web interface. 
221 | # If this options is not defined, the hostname is used. 222 | #name: 223 | 224 | # The tags of the shipper are included in their own field with each 225 | # transaction published. Tags make it easy to group servers by different 226 | # logical properties. 227 | #tags: ["service-X", "web-tier"] 228 | 229 | # Uncomment the following if you want to ignore transactions created 230 | # by the server on which the shipper is installed. This option is useful 231 | # to remove duplicates if shippers are installed on multiple servers. 232 | #ignore_outgoing: true 233 | 234 | # How often (in seconds) shippers are publishing their IPs to the topology map. 235 | # The default is 10 seconds. 236 | #refresh_topology_freq: 10 237 | 238 | # Expiration time (in seconds) of the IPs published by a shipper to the topology map. 239 | # All the IPs will be deleted afterwards. Note, that the value must be higher than 240 | # refresh_topology_freq. The default is 15 seconds. 241 | #topology_expire: 15 242 | 243 | # Internal queue size for single events in processing pipeline 244 | #queue_size: 1000 245 | 246 | # Configure local GeoIP database support. 247 | # If no paths are not configured geoip is disabled. 248 | #geoip: 249 | #paths: 250 | # - "/usr/share/GeoIP/GeoLiteCity.dat" 251 | # - "/usr/local/var/GeoIP/GeoLiteCity.dat" 252 | 253 | 254 | ############################# Logging ######################################### 255 | 256 | # There are three options for the log ouput: syslog, file, stderr. 257 | # Under Windows systems, the log files are per default sent to the file output, 258 | # under all other system per default to syslog. 259 | logging: 260 | 261 | # Send all logging output to syslog. On Windows default is false, otherwise 262 | # default is true. 263 | #to_syslog: true 264 | 265 | # Write all logging output to files. Beats automatically rotate files if rotateeverybytes 266 | # limit is reached. 
267 | #to_files: false 268 | 269 | # To enable logging to files, to_files option has to be set to true 270 | files: 271 | # The directory where the log files will written to. 272 | #path: /var/log/mybeat 273 | 274 | # The name of the files where the logs are written to. 275 | #name: mybeat 276 | 277 | # Configure log file size limit. If limit is reached, log file will be 278 | # automatically rotated 279 | rotateeverybytes: 10485760 # = 10MB 280 | 281 | # Number of rotated log files to keep. Oldest files will be deleted first. 282 | #keepfiles: 7 283 | 284 | # Enable debug output for selected components. To enable all selectors use ["*"] 285 | # Other available selectors are beat, publish, service 286 | # Multiple selectors can be chained. 287 | #selectors: [ ] 288 | 289 | # Sets log level. The default log level is error. 290 | # Available log levels are: critical, error, warning, info, debug 291 | #level: error 292 | 293 | 294 | -------------------------------------------------------------------------------- /scripts/Makefile: -------------------------------------------------------------------------------- 1 | 2 | ### VARIABLE SETUP ### 3 | 4 | ARCH?=$(shell uname -m) 5 | # Hidden directory to install dependencies for jenkins 6 | export PATH := ./bin:$(PATH) 7 | export GO15VENDOREXPERIMENT=1 8 | GOFILES = $(shell find . -type f -name '*.go') 9 | SHELL=/bin/bash 10 | ES_HOST?="elasticsearch-210" 11 | BEAT_DIR?=github.com/elastic/beats 12 | BUILD_DIR=build 13 | COVERAGE_DIR=${BUILD_DIR}/coverage 14 | TIMEOUT?= 90 15 | BEATNAME?=libbeat 16 | TEST_ENVIRONMENT?=false 17 | SYSTEM_TESTS?=false 18 | GOX_OS?=linux darwin windows solaris freebsd netbsd openbsd 19 | 20 | 21 | ### BUILDING ### 22 | 23 | # Builds beat 24 | .PHONY: build 25 | build: $(GOFILES) 26 | go build 27 | 28 | # Create test coverage binary 29 | .PHONY: libbeat.test 30 | libbeat.test: $(GOFILES) 31 | go test -c -covermode=atomic -coverpkg ./... 
32 | 33 | # Cross-compile beat for the OS'es specified in GOX_OS variable. 34 | # The binaries are placed in the build/bin directory. 35 | .PHONY: crosscompile 36 | crosscompile: $(GOFILES) 37 | go get github.com/mitchellh/gox 38 | mkdir -p ${BUILD_DIR}/bin 39 | gox -output="${BUILD_DIR}/bin/{{.Dir}}-{{.OS}}-{{.Arch}}" -os="${GOX_OS}" ${GOX_FLAGS} 40 | 41 | # Checks project and source code if everything is according to standard 42 | .PHONY: check 43 | check: 44 | gofmt -l . | read && echo "Code differs from gofmt's style" && exit 1 || true 45 | go vet ./... 46 | 47 | # Cleans up directory and source code with gofmt 48 | .PHONY: clean 49 | clean: 50 | go fmt ./... 51 | gofmt -w . 52 | -rm -r build 53 | -rm ${BEATNAME} ${BEATNAME}.test 54 | 55 | # Shortcut for continuous integration 56 | # This should always run before merging. 57 | .PHONY: ci 58 | ci: 59 | make 60 | make check 61 | make testsuite 62 | 63 | ### Testing ### 64 | # All tests are always run with coverage reporting enabled 65 | 66 | 67 | # Prepration for tests 68 | .PHONY: prepare-tests 69 | prepare-tests: 70 | mkdir -p ${COVERAGE_DIR} 71 | # coverage tools 72 | go get golang.org/x/tools/cmd/cover 73 | # gotestcover is needed to fetch coverage for multiple packages 74 | go get github.com/pierrre/gotestcover 75 | 76 | # Runs the unit tests 77 | # Race is not enabled for unit tests because tests run much slower. 78 | .PHONY: unit-tests 79 | unit-tests: prepare-tests 80 | #go test -short ./... 81 | $(GOPATH)/bin/gotestcover -coverprofile=${COVERAGE_DIR}/unit.cov -short -covermode=atomic ${BEAT_DIR}/${BEATNAME}/... 82 | 83 | # Run integration tests. Unit tests are run as part of the integration tests. It runs all tests with race detection enabled. 84 | .PHONY: integration-tests 85 | integration-tests: prepare-tests 86 | $(GOPATH)/bin/gotestcover -race -coverprofile=${COVERAGE_DIR}/integration.cov -covermode=atomic ${BEAT_DIR}/${BEATNAME}/... 87 | 88 | # Runs the integration inside a virtual environment. 
This can be run on any docker-machine (local, remote) 89 | .PHONY: integration-tests-environment 90 | integration-tests-environment: 91 | make prepare-tests 92 | make build-image 93 | NAME=$$(docker-compose run -d ${BEATNAME} make integration-tests | awk 'END{print}') || exit 1; \ 94 | echo "docker ${BEATNAME} test container: '$$NAME'"; \ 95 | docker attach $$NAME; CODE=$$?;\ 96 | mkdir -p ${COVERAGE_DIR}; \ 97 | docker cp $$NAME:/go/src/${BEAT_DIR}/${BEATNAME}/${COVERAGE_DIR}/integration.cov $(shell pwd)/${COVERAGE_DIR}/; \ 98 | docker rm $$NAME > /dev/null; \ 99 | exit $$CODE 100 | 101 | # Runs the system tests 102 | .PHONY: system-tests 103 | system-tests: libbeat.test prepare-tests system-tests-setup 104 | . build/system-tests/env/bin/activate; nosetests -w tests/system --process-timeout=$(TIMEOUT) --with-timer 105 | python ../libbeat/scripts/aggregate_coverage.py -o ${COVERAGE_DIR}/system.cov ./build/system-tests/run 106 | 107 | # Runs the system tests 108 | .PHONY: system-tests-setup 109 | system-tests-setup: tests/system/requirements.txt 110 | test -d env || virtualenv build/system-tests/env > /dev/null 111 | . build/system-tests/env/bin/activate && pip install -Ur tests/system/requirements.txt > /dev/null 112 | touch build/system-tests/env/bin/activate 113 | 114 | 115 | # Run benchmark tests 116 | .PHONY: benchmark-tests 117 | benchmark-tests: 118 | # No benchmark tests exist so far 119 | #go test -short -bench=. ./... 
120 | 121 | # Runs all tests and generates the coverage reports 122 | .PHONY: testsuite 123 | testsuite: 124 | # Setups environment if TEST_ENVIRONMENT is set to true 125 | if [ $(TEST_ENVIRONMENT) = true ]; then \ 126 | make integration-tests-environment; \ 127 | else \ 128 | make integration-tests; \ 129 | fi 130 | 131 | # Runs system tests if SYSTEM_TESTS is set to true 132 | if [ $(SYSTEM_TESTS) = true ]; then \ 133 | make system-tests; \ 134 | fi 135 | 136 | make benchmark-tests 137 | make coverage-report 138 | 139 | # Generates a coverage report from the existing coverage files 140 | # It assumes that some covrage reports already exists, otherwise it will fail 141 | .PHONY: coverage-report 142 | coverage-report: 143 | python ${GOPATH}/src/github.com/elastic/beats/dev-tools/aggregate_coverage.py -o ./${COVERAGE_DIR}/full.cov ./${COVERAGE_DIR} 144 | go tool cover -html=./${COVERAGE_DIR}/full.cov -o ${COVERAGE_DIR}/full.html 145 | 146 | # Update expects the most recent version of libbeat in the GOPATH 147 | .PHONY: update 148 | update: 149 | bash ${GOPATH}/src/github.com/elastic/beats/libbeat/scripts/update.sh ${BEATNAME} ${BEAT_DIR}/${BEATNAME} ${GOPATH}/src/github.com/elastic/beats/libbeat 150 | 151 | 152 | 153 | ### CONTAINER ENVIRONMENT #### 154 | 155 | # Builds the environment to test beat 156 | .PHONY: build-image 157 | build-image: write-environment 158 | docker-compose build 159 | 160 | # Runs the environment so the redis and elasticsearch can also be used for local development 161 | # To use it for running the test, set ES_HOST and REDIS_HOST environment variable to the ip of your docker-machine. 
#!/usr/bin/env python

"""
This script generates asciidoc documentation from the fields yml file.

Usage: python generate_field_docs.py file.yml file.asciidoc
"""

import sys

# (section key in fields.yml, human-readable section title).
# Order here is the order sections appear in the generated document.
SECTIONS = [
    ("env", "Common"),
    ("server", "Redis Server Statistics"),
    ("clients", "Redis Client Statistics"),
    ("memory", "Redis Memory Statistics"),
    ("persistence", "Redis Persistence Statistics"),
    ("stats", "Redis Stats Statistics"),
    ("replication", "Redis Replication Statistics"),
    ("cpu", "Redis CPU Statistics"),
    ("commandstats", "Redis CommandStats Statistics"),
    ("cluster", "Redis Cluster Statistics"),
    ("keyspace", "Redis Keyspace Statistics")]


def document_fields(output, section):
    """Write one section, and recursively any nested field groups, to output.

    Note: mutates nested group fields in place by adding an "anchor" key and
    replacing "name" with the human-readable title looked up in SECTIONS.
    """
    if "anchor" in section:
        output.write("[[exported-fields-{}]]\n".format(section["anchor"]))
    output.write("=== {} Fields\n\n".format(section["name"]))

    if "description" in section:
        output.write("{}\n\n".format(section["description"]))

    output.write("\n")
    for field in section["fields"]:

        if "type" in field and field["type"] == "group":
            for sec, name in SECTIONS:
                if sec == field["name"]:
                    field["anchor"] = field["name"]
                    field["name"] = name
                    break
            document_fields(output, field)
        else:
            document_field(output, field)


def document_field(output, field):
    """Write the asciidoc snippet documenting a single (non-group) field."""

    # Default the documented path to the plain field name.
    if "path" not in field:
        field["path"] = field["name"]

    output.write("==== {}\n\n".format(field["path"]))

    if "type" in field:
        output.write("type: {}\n\n".format(field["type"]))
    if "example" in field:
        output.write("example: {}\n\n".format(field["example"]))
    if "format" in field:
        output.write("format: {}\n\n".format(field["format"]))
    if "required" in field:
        output.write("required: {}\n\n".format(field["required"]))

    if "description" in field:
        output.write("{}\n\n".format(field["description"]))


def fields_to_asciidoc(input, output):
    """Read the fields YAML from input and write the asciidoc document to
    output. input and output are both file pointers.
    """
    # Imported lazily so the pure formatting helpers above can be used
    # (and tested) without PyYAML installed.
    import yaml

    output.write("""
////
This file is generated! See etc/fields.yml and scripts/generate_field_docs.py
////

[[exported-fields]]
== Exported Fields

This document describes the fields that are exported by
Redisbeat. They are grouped in the
following categories:

""")

    for doc, _ in SECTIONS:
        # Cross-reference pointing at the matching [[exported-fields-...]]
        # anchor emitted by document_fields (the placeholder was missing,
        # which produced a literal, broken "* <>" bullet).
        output.write("* <<exported-fields-{}>>\n".format(doc))
    output.write("\n")

    # safe_load is sufficient for the plain-data fields.yml and avoids
    # yaml.load's arbitrary Python object construction.
    docs = yaml.safe_load(input)

    for doc, name in SECTIONS:
        if doc in docs:
            section = docs[doc]
            if "type" in section:
                if section["type"] == "group":
                    section["name"] = name
                    section["anchor"] = doc
                    document_fields(output, section)


if __name__ == "__main__":
    if len(sys.argv) != 3:
        # Parenthesized form works under both Python 2 and Python 3.
        print("Usage: %s file.yml file.asciidoc" % sys.argv[0])
        sys.exit(1)

    input = open(sys.argv[1], 'r')
    output = open(sys.argv[2], 'w')

    try:
        fields_to_asciidoc(input, output)
    finally:
        input.close()
        output.close()
def fields_to_es_template(input, output):
    """
    Reads the YAML file from input and generates the JSON for
    the ES template in output. input and output are both file
    pointers.
    """
    # Imported lazily so the pure helpers below can be used (and tested)
    # without PyYAML installed.
    import yaml

    # safe_load is sufficient for the plain-data fields.yml and avoids
    # yaml.load's arbitrary Python object construction.
    docs = yaml.safe_load(input)

    # Per-field fallbacks (index, doc_values, ignore_above, type, ...).
    defaults = docs["defaults"]

    # Skeleton of the template; explicit field mappings are filled in below,
    # everything else is caught by the catch-all dynamic template.
    template = {
        "template": index,
        "settings": {
            "index.refresh_interval": "5s"
        },
        "mappings": {
            "_default_": {
                "_all": {
                    "enabled": True,
                    "norms": {
                        "enabled": False
                    }
                },
                "properties": {},
                "dynamic_templates": [{
                    "template1": {
                        "match": "*",
                        "mapping": {
                            "type": "{dynamic_type}",
                            "index": defaults["index"],
                            "doc_values": defaults["doc_values"],
                            "ignore_above": defaults["ignore_above"]
                        }
                    }
                }]
            }
        }
    }

    properties = {}
    for doc, section in docs.items():
        # "version" and "defaults" are metadata, not field sections.
        if doc not in ["version", "defaults"]:
            prop = fill_section_properties(section, defaults)
            properties.update(prop)

    template["mappings"]["_default_"]["properties"] = properties

    json.dump(template, output,
              indent=2, separators=(',', ': '),
              sort_keys=True)


def fill_section_properties(section, defaults):
    """
    Traverse the sections tree and fill in the properties
    map.
    """
    properties = {}

    for field in section["fields"]:
        properties.update(fill_field_properties(field, defaults))

    return properties


def fill_field_properties(field, defaults):
    """
    Add data about a particular field in the properties
    map.

    Note: mutates field in place by filling in missing keys from defaults.
    Fields matching none of the branches below (e.g. ignore_above != 0 and
    not analyzed) intentionally get no explicit mapping and are left to the
    dynamic template.
    """
    properties = {}

    for key in defaults.keys():
        if key not in field:
            field[key] = defaults[key]

    if field.get("index") == "analyzed":
        properties[field["name"]] = {
            "type": field["type"],
            "index": "analyzed",
            "norms": {
                "enabled": False
            }
        }

    elif field.get("type") == "geo_point":
        properties[field["name"]] = {
            "type": "geo_point"
        }

    elif field.get("type") == "date":
        properties[field["name"]] = {
            "type": "date"
        }
    elif field.get("type") == "float":
        properties[field["name"]] = {
            "type": "float",
            "doc_values": "true"
        }
    elif field.get("type") == "group":
        # Recurse into the group's own fields.
        properties[field.get("name")] = {"properties": {}}

        properties[field.get("name")]["properties"] = \
            fill_section_properties(field, defaults)

    elif field.get("ignore_above") == 0:
        properties[field["name"]] = {
            "type": field["type"],
            "index": field["index"],
            "doc_values": field["doc_values"]
        }
    return properties


if __name__ == "__main__":
    if len(sys.argv) != 3:
        # Parenthesized form works under both Python 2 and Python 3.
        print("Usage: %s fields.yml template.json" % sys.argv[0])
        sys.exit(1)

    input = open(sys.argv[1], 'r')
    output = open(sys.argv[2], 'w')

    try:
        fields_to_es_template(input, output)
    finally:
        input.close()
        output.close()
Port the redis server is listening on, default is 6379
  port: 9736

  # Network type, default is tcp
  network: "udp"

  # Max connections for redis pool, default is 10
  maxconn: 5

  # Authentication config
  auth:
    # default is false
    required: true
    # default is empty string
    requiredpass: "p@ssw0rd"

  # Statistics to collect (all enabled by default)
  stats:
    # server information
    server: true

    # clients information
    clients: false

    # memory information
    memory: false

    # persistence information
    persistence: false

    # stats information
    stats: false

    # replication information
    replication: false

    # cpu information
    cpu: false

    # commandstats information
    commandstats: false

    # cluster information
    cluster: false

    # keyspace information
    keyspace: false