├── .gitignore
├── Makefile
├── README.md
├── cmd
│   └── zabbix-agent-extension-elasticsearch
│       ├── .gometalinter.json
│       ├── Gopkg.lock
│       ├── Gopkg.toml
│       ├── args.go
│       ├── discovery.go
│       ├── elastic.go
│       ├── indices.go
│       ├── main.go
│       ├── metric.go
│       ├── thread_pool.go
│       └── tools.go
└── configs
    ├── template
    │   ├── custom_key_template.sh
    │   └── template_elasticsearch_service.xml
    └── zabbix_agentd.d
        └── zabbix-agent-extension-elasticsearch.conf
/.gitignore:
--------------------------------------------------------------------------------
1 | .gopath/
2 | .out/
3 | *.xz
4 | /src
5 | /pkg
6 | /zabbix-agent-extension-elasticsearch
7 | cmd/zabbix-agent-extension-elasticsearch/vendor/*
8 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: all clean-all build clean-deps deps ver make-gopath install remove
2 |
3 | DATE := $(shell git log -1 --format="%cd" --date=short | sed s/-//g)
4 | COUNT := $(shell git rev-list --count HEAD)
5 | COMMIT := $(shell git rev-parse --short HEAD)
6 | CWD := $(shell pwd)
7 |
8 | BINARYNAME := zabbix-agent-extension-elasticsearch
9 | CONFIG := configs/zabbix_agentd.d/zabbix-agent-extension-elasticsearch.conf
10 | VERSION := "${DATE}.${COUNT}_${COMMIT}"
11 |
12 | LDFLAGS := "-X main.version=${VERSION}"
13 |
14 |
15 | default: all
16 |
17 | all: clean-all make-gopath deps build
18 |
19 | ver:
20 | 	@echo ${VERSION}
21 |
22 | clean-all: clean-deps
23 | 	@echo Clean built binaries
24 | 	rm -rf .out/
25 | 	rm -rf .gopath/
26 | 	@echo Done
27 |
28 | build:
29 | 	@echo Build
30 | 	cd ${CWD}/.gopath/src/${BINARYNAME}; \
31 | 	GOPATH=${CWD}/.gopath \
32 | 	go build -v -o ${CWD}/.out/${BINARYNAME} -ldflags ${LDFLAGS} *.go
33 | 	@echo Done
34 |
35 | clean-deps:
36 | 	@echo Clean dependencies
37 | 	rm -rf ${CWD}/.gopath/src/${BINARYNAME}/vendor/
38 |
39 | deps:
40 | 	@echo Fetch dependencies
41 | 	cd ${CWD}/.gopath/src/${BINARYNAME}; \
42 | 	GOPATH=${CWD}/.gopath \
43 | 	dep ensure -v
44 |
45 | make-gopath:
46 | 	@echo Creating GOPATH
47 | 	mkdir -p .gopath/src
48 | 	ln -s ${CWD}/cmd/${BINARYNAME} ${CWD}/.gopath/src/${BINARYNAME}
49 |
50 | install:
51 | 	@echo Install
52 | 	cp .out/${BINARYNAME} /usr/bin/${BINARYNAME}
53 | 	cp configs/zabbix_agentd.d/zabbix-agent-extension-elasticsearch.conf \
54 | 		/etc/zabbix/zabbix_agentd.conf.d/zabbix-agent-extension-elasticsearch.conf
55 | 	@echo Done
56 |
57 | remove:
58 | 	@echo Remove
59 | 	rm -f /usr/bin/${BINARYNAME}
60 | 	rm -f /etc/zabbix/zabbix_agentd.conf.d/zabbix-agent-extension-elasticsearch.conf
61 | 	@echo Done
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # zabbix-agent-extension-elasticsearch
2 |
3 | zabbix-agent-extension-elasticsearch is an extension for monitoring Elasticsearch cluster and node health/status.
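It ships as a standalone binary that collects stats from the Elasticsearch API and pushes them to the Zabbix server as trapper items; the Zabbix agent invokes it through the bundled `UserParameter` configuration. It can also be run by hand. A minimal manual invocation, assuming the option defaults documented in the binary's usage text (the server name below is a placeholder):

```sh
# Collect cluster and node stats from the local Elasticsearch node
# (http://127.0.0.1:9200 by default) and push them to a Zabbix server.
zabbix-agent-extension-elasticsearch -z zabbix.example.com -p 10051
```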
4 |
5 | ### Supported features
6 |
7 | This extension obtains stats of two types:
8 |
9 | #### Node stat
10 | https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html
11 |
12 | - [ ] roles
13 | - [ ] attributes
14 | - [x] indices (partly)
15 | - [ ] os
16 | - [ ] processes
17 | - [x] jvm
18 | - [x] thread_pool
19 | - [ ] fs
20 | - [ ] transport
21 | - [ ] http
22 | - [ ] breakers
23 | - [ ] script
24 | - [ ] discovery
25 | - [ ] ingest
26 |
27 | #### Cluster health
28 | https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html
29 | - [x] cluster_name
30 | - [x] status
31 | - [x] timed_out
32 | - [x] number_of_nodes
33 | - [x] number_of_data_nodes
34 | - [x] total indices docs count
35 | - [x] total indices deleted docs count
36 | - [x] primary indices docs count
37 | - [x] primary indices deleted docs count
38 | - [x] total indices store size
39 | - [x] primary indices store size
40 | - [x] active_primary_shards
41 | - [x] active_shards
42 | - [x] relocating_shards
43 | - [x] initializing_shards
44 | - [x] unassigned_shards
45 | - [x] delayed_unassigned_shards
46 | - [x] number_of_pending_tasks
47 | - [x] number_of_in_flight_fetch
48 | - [x] task_max_waiting_in_queue_millis
49 | - [x] active_shards_percent_as_number
50 |
51 | ### Installation
52 |
53 | ##### Notice
54 |
55 | Before manual installation, check the `Include` option in your `zabbix-agent` configuration: it should be uncommented, and the include path should match the one used by this installation rule - https://github.com/zarplata/zabbix-agent-extension-elasticsearch/blob/master/Makefile#L54 - otherwise change it to your include path.
56 |
57 | After installation, restart your `zabbix-agent` manually so that it picks up the new `UserParameter` entries from the extension configuration.
58 |
59 | #### Manual build
60 |
61 | ```sh
62 | # Building
63 | git clone https://github.com/zarplata/zabbix-agent-extension-elasticsearch.git
64 | cd zabbix-agent-extension-elasticsearch
65 | make
66 |
67 | # Installing
68 | make install
69 |
70 | # By default, the binary installs into /usr/bin/ and the zabbix config into /etc/zabbix/zabbix_agentd.conf.d/,
71 | # but you may manually copy the binary to your executable path and the zabbix config to a specific include directory
72 | ```
73 |
74 |
75 | #### Arch Linux package
76 | ```sh
77 | # Building
78 | git clone https://github.com/zarplata/zabbix-agent-extension-elasticsearch.git
79 | cd zabbix-agent-extension-elasticsearch && git checkout pkgbuild
80 |
81 | ./build.sh
82 |
83 | # Installing
84 | pacman -U *.tar.xz
85 | ```
86 |
87 | ### Dependencies
88 |
89 | zabbix-agent-extension-elasticsearch requires [zabbix-agent](http://www.zabbix.com/download) v2.4+ to run.
90 |
91 | ### Zabbix configuration
92 | In order to start getting metrics, it is enough to import the template and attach it to the monitored node.
93 |
94 | `WARNING:` You must define a macro named `{$ZABBIX_SERVER_IP}`, in global or local (template) scope, containing the IP address of the zabbix server.
95 |
96 | On one node of the cluster, set the macro `{$GROUPNAME}` = `REAL_ZABBIX_GROUP`. This group must include all nodes of the cluster.
97 | Only this one node will trigger on cluster status (low-level discovery adds aggregate checks of cluster health).
98 |
99 | ### Customize key prefix
100 | You may need this if the keys in the template are already in use.
101 |
102 | If you need to change keys `elasticsearch.*` -> `YOUR_PREFIX_PART.elasticsearch.*`, run the script `custom_key_template.sh` with `YOUR_PREFIX_PART` and import the updated zabbix template `template_elasticsearch_service.xml`, as sketched below.
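Under the hood the renaming presumably amounts to prefixing every key inside the template XML; a minimal equivalent (an assumption about the script's behavior, not its verified contents) would be:

```sh
# Hypothetical equivalent of custom_key_template.sh: prepend a prefix
# to every elasticsearch.* key in the template.
sed -i 's/elasticsearch\./YOUR_PREFIX_PART.elasticsearch./g' template_elasticsearch_service.xml
```

The actual invocation is: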
103 |
104 | ```sh
105 | ./custom_key_template.sh YOUR_PREFIX_PART
106 | ```
107 |
108 | ### Elasticsearch API authentication (X-Pack security)
109 |
110 | This extension supports the basic authentication provided by X-Pack. For authentication in Elasticsearch you must set valid values in the template macros `{$ES_USER}` and `{$ES_PASSWORD}`.
111 |
112 | ### Customize Elasticsearch address
113 |
114 | You can customize your Elasticsearch listen address.
115 | Just change the `{$ES_ADDRESS}` macro in the template.
116 | Possible values are `(http|https)://host:port`.
117 |
118 | Note that if you choose `https` and have a self-signed certificate, you should also add the path to your CA in the `{$CA_PATH}` macro.
--------------------------------------------------------------------------------
/cmd/zabbix-agent-extension-elasticsearch/.gometalinter.json:
--------------------------------------------------------------------------------
1 | {
2 |   "Fast": true,
3 |   "Linters": {
4 |     "gas": {
5 |       "Command": "gas -fmt=csv",
6 |       "PartitionStrategy": "directories"
7 |     },
8 |     "vet": {
9 |       "Command": "go vet"
10 |     }
11 |   }
12 | }
13 |
--------------------------------------------------------------------------------
/cmd/zabbix-agent-extension-elasticsearch/Gopkg.lock:
--------------------------------------------------------------------------------
1 | # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
2 |
3 |
4 | [[projects]]
5 |   branch = "master"
6 |   digest = "1:72b78aac789a7b10282d8e71bb6618eaba311ff9da66bed25af1d76f181d3561"
7 |   name = "github.com/blacked/go-zabbix"
8 |   packages = ["."]
9 |   pruneopts = "UT"
10 |   revision = "3c6a95ec4fdc345b48c4e0e5f5c87d48d3fc40b5"
11 |
12 | [[projects]]
13 |   digest = "1:abaaa7489a2f0f3afb2adc8ea1a282a5bd52350b87b26da220c94fc778d6d63b"
14 |   name = "github.com/docopt/docopt-go"
15 |   packages = ["."]
16 |   pruneopts = "UT"
17 |   revision = "784ddc588536785e7299f7272f39101f7faccc3f"
18 |   version = "0.6.2"
19 |
20 | [[projects]]
21 |   branch = "master"
22 |   digest = "1:4fcc2642b79154894b404300e290f7967dcacd069b22b74867015b32da89aa42"
23 |   name = "github.com/reconquest/hierr-go"
24 |   packages = ["."]
25 |   pruneopts = "UT"
26 |   revision = "7d09c0176fd2bb7fd71a4349d1253eef9edb2c5c"
27 |
28 | [[projects]]
29 |   branch = "master"
30 |   digest = "1:ade8553e2161fce98433fe17b6bcbfadeaa727e8d0fb0a6542d8385911487be4"
31 |   name = "github.com/reconquest/karma-go"
32 |   packages = ["."]
33 |   pruneopts = "UT"
34 |   revision = "1dd2a756e5072411904cf1b01a678baed59092e4"
35 |
36 | [solve-meta]
37 |   analyzer-name = "dep"
38 |   analyzer-version = 1
39 |   input-imports = [
40 |     "github.com/blacked/go-zabbix",
41 |     "github.com/docopt/docopt-go",
42 |     "github.com/reconquest/hierr-go",
43 |     "github.com/reconquest/karma-go",
44 |   ]
45 |   solver-name = "gps-cdcl"
46 |   solver-version = 1
47 |
--------------------------------------------------------------------------------
/cmd/zabbix-agent-extension-elasticsearch/Gopkg.toml:
--------------------------------------------------------------------------------
1 | # Gopkg.toml example
2 | #
3 | # Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
4 | # for detailed Gopkg.toml documentation.
5 | # 6 | # required = ["github.com/user/thing/cmd/thing"] 7 | # ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] 8 | # 9 | # [[constraint]] 10 | # name = "github.com/user/project" 11 | # version = "1.0.0" 12 | # 13 | # [[constraint]] 14 | # name = "github.com/user/project2" 15 | # branch = "dev" 16 | # source = "github.com/myfork/project2" 17 | # 18 | # [[override]] 19 | # name = "github.com/x/y" 20 | # version = "2.4.0" 21 | # 22 | # [prune] 23 | # non-go = false 24 | # go-tests = true 25 | # unused-packages = true 26 | 27 | 28 | [[constraint]] 29 | branch = "master" 30 | name = "github.com/blacked/go-zabbix" 31 | 32 | [[constraint]] 33 | name = "github.com/docopt/docopt-go" 34 | version = "0.6.2" 35 | 36 | [[constraint]] 37 | branch = "master" 38 | name = "github.com/reconquest/hierr-go" 39 | 40 | [[constraint]] 41 | branch = "master" 42 | name = "github.com/reconquest/karma-go" 43 | 44 | [prune] 45 | go-tests = true 46 | unused-packages = true 47 | -------------------------------------------------------------------------------- /cmd/zabbix-agent-extension-elasticsearch/args.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "os" 4 | 5 | func obtainCAPath() string { 6 | caPath := os.Getenv("ZBX_ES_CA_PATH") 7 | 8 | if len(caPath) == 0 { 9 | return "None" 10 | } 11 | 12 | return caPath 13 | } 14 | 15 | func obtainESDSN() string { 16 | dsn := os.Getenv("ZBX_ES_DSN") 17 | 18 | if len(dsn) == 0 { 19 | return "http://127.0.0.1:9200" 20 | } 21 | 22 | return dsn 23 | } 24 | -------------------------------------------------------------------------------- /cmd/zabbix-agent-extension-elasticsearch/discovery.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | ) 7 | 8 | func discovery( 9 | nodesStats *ElasticNodesStats, 10 | aggGroup string, 11 | ) error { 12 | discoveryData := make(map[string][]map[string]string) 13 | 14 | var discoveredItems []map[string]string 15 | 16 | if aggGroup != "None" { 17 | aggregateItem := make(map[string]string) 18 | aggregateItem["{#GROUPNAME}"] = aggGroup 19 | discoveredItems = append(discoveredItems, aggregateItem) 20 | } 21 | 22 | for _, nodeStats := range nodesStats.Nodes { 23 | 24 | for collectorsName := range nodeStats.JVM.GC.Collectors { 25 | discoveredItem := make(map[string]string) 26 | discoveredItem["{#JVMGCCOLLECTORS}"] = collectorsName 27 | discoveredItems = append(discoveredItems, discoveredItem) 28 | } 29 | 30 | for bufferPoolsName := range nodeStats.JVM.BufferPools { 31 | discoveredItem := make(map[string]string) 32 | discoveredItem["{#JVMBUFFERSPOOLS}"] = bufferPoolsName 33 | discoveredItems = append(discoveredItems, discoveredItem) 34 | } 35 | 36 | for poolsName := range nodeStats.JVM.Mem.Pools { 37 | discoveredItem := make(map[string]string) 38 | discoveredItem["{#JVMMEMPOOLS}"] = poolsName 39 | discoveredItems = append(discoveredItems, discoveredItem) 40 | } 41 | 42 | for threadPoolName := range nodeStats.ThreadPools { 43 | discoveredItem := make(map[string]string) 44 | discoveredItem["{#THREADPOOLNAME}"] = threadPoolName 45 | discoveredItems = append(discoveredItems, discoveredItem) 46 | } 47 | 48 | } 49 | 50 | discoveryData["data"] = discoveredItems 51 | 52 | out, err := json.Marshal(discoveryData) 53 | if err != nil { 54 | return err 55 | } 56 | 57 | fmt.Printf("%s\n", out) 58 | return nil 59 | } 60 | func discoveryIndices( 61 | indicesStats 
*ElasticIndicesStats, 62 | ) error { 63 | discoveryData := make(map[string][]map[string]string) 64 | 65 | var discoveredItems []map[string]string 66 | 67 | for name, _ := range indicesStats.Indices { 68 | discoveredItem := make(map[string]string) 69 | discoveredItem["{#INDEX}"] = name 70 | discoveredItems = append(discoveredItems, discoveredItem) 71 | } 72 | 73 | discoveryData["data"] = discoveredItems 74 | 75 | out, err := json.Marshal(discoveryData) 76 | if err != nil { 77 | return err 78 | } 79 | 80 | fmt.Printf("%s\n", out) 81 | return nil 82 | } 83 | -------------------------------------------------------------------------------- /cmd/zabbix-agent-extension-elasticsearch/elastic.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "net/http" 7 | 8 | "github.com/reconquest/hierr-go" 9 | ) 10 | 11 | type ElasticClusterHealth struct { 12 | ClusterName string `json:"cluster_name"` 13 | Status string `json:"status"` 14 | TimedOut bool `json:"timed_out"` 15 | NumderOfNodes int64 `json:"number_of_nodes"` 16 | NumberOfDataNodes int64 `json:"number_of_data_nodes"` 17 | ActivePrimaryShards int64 `json:"active_primary_shards"` 18 | ActiveShards int64 `json:"active_shards"` 19 | RelocatingShards int64 `json:"relocating_shards"` 20 | InitializingShards int64 `json:"initializing_shards"` 21 | UnassignedShards int64 `json:"unassigned_shards"` 22 | DelayedUnassignedShards int64 `json:"delayed_unassigned_shards"` 23 | NumberOfPendingTasks int64 `json:"number_of_pending_tasks"` 24 | NumberOfInFlightFetch int64 `json:"number_of_in_flight_fetch"` 25 | TaskMaxWaitingInQueueMillis int64 `json:"task_max_waiting_in_queue_millis"` 26 | ActiveShardsPercent float64 `json:"active_shards_percent_as_number"` 27 | } 28 | 29 | type ElasticNodesStats struct { 30 | Nodes map[string]ElasticNodeStats `json:"nodes"` 31 | } 32 | 33 | type ElasticNodeStats struct { 34 | JVM ElasticNodeStatsJVM `json:"jvm"` 35 | ThreadPools map[string]NodeThreadPool `json:"thread_pool"` 36 | Indices NodeIndices `json:"indices"` 37 | Transport ElasticNodeStatsTransport `json:"transport"` 38 | Http ElasticNodeStatsHttp `json:"http"` 39 | } 40 | 41 | type ElasticNodeStatsJVM struct { 42 | Timestamp int64 `json:"timestamp"` 43 | UptimeInMillis int64 `json:"uptime_in_millis"` 44 | Mem ElasticNodeStatsJVMMem `json:"mem"` 45 | Threads ElasticNodeStatsJVMThreadsStats `json:"threads"` 46 | GC ElasticNodeStatsJVMGC `json:"gc"` 47 | BufferPools map[string]ElasticNodeStatsJVMBufferPoolsStats `json:"buffer_pools"` 48 | Classes ElasticNodeStatsJVMClassesStats `json:"classes"` 49 | } 50 | 51 | type ElasticNodeStatsJVMMem struct { 52 | HeapUsedInBytes int64 `json:"heap_used_in_bytes"` 53 | HeapUsedPercent int64 `json:"heap_used_percent"` 54 | HeapCommittedInBytes int64 `json:"heap_committed_in_bytes"` 55 | HeapMaxInBytes int64 `json:"heap_max_in_bytes"` 56 | NonHeapUsedInBytes int64 `json:"non_heap_used_in_bytes"` 57 | NonHeapCommittedInBytes int64 `json:"non_heap_committed_in_bytes"` 58 | Pools map[string]ElasticNodeStatsJVMMemPoolsStats `json:"pools"` 59 | } 60 | 61 | type ElasticNodeStatsJVMMemPoolsStats struct { 62 | UsedInBytes int64 `json:"used_in_bytes"` 63 | MaxInBytes int64 `json:"max_in_bytes"` 64 | PeakUsedInBytes int64 `json:"peak_used_in_bytes"` 65 | PeakMaxInBytes int64 `json:"peak_max_in_bytes"` 66 | } 67 | 68 | type ElasticNodeStatsJVMThreadsStats struct { 69 | Count int64 `json:"count"` 70 | PeakCount int64 `json:"peak_count"` 71 | } 72 | 73 
| type ElasticNodeStatsJVMGC struct { 74 | Collectors map[string]ElasticNodeStatsJVMGCCollectorsStats `json:"collectors"` 75 | } 76 | 77 | type ElasticNodeStatsJVMGCCollectorsStats struct { 78 | CollectionCount int64 `json:"collection_count"` 79 | CollectionTimeInMillis int64 `json:"collection_time_in_millis"` 80 | } 81 | 82 | type ElasticNodeStatsJVMBufferPoolsStats struct { 83 | Count int64 `json:"count"` 84 | UsedInBytes int64 `json:"used_in_bytes"` 85 | TotalCapacityInBytes int64 `json:"total_capacity_in_bytes"` 86 | } 87 | 88 | type ElasticNodeStatsJVMClassesStats struct { 89 | CurrentLoadedCount int64 `json:"current_loaded_count"` 90 | TotalLoadedCount int64 `json:"total_loaded_count"` 91 | TotalUnloadedCount int64 `json:"total_unloaded_count"` 92 | } 93 | 94 | type ElasticNodeStatsTransport struct { 95 | ServerOpen int64 `json:"server_open"` 96 | RxCount int64 `json:"rx_count"` 97 | RxSizeInBytes int64 `json:"rx_size_in_bytes"` 98 | TxCount int64 `json:"tx_count"` 99 | TxSizeInBytes int64 `json:"tx_size_in_bytes"` 100 | } 101 | 102 | type ElasticNodeStatsHttp struct { 103 | CurrentOpen int64 `json:"current_open"` 104 | TotalOpened int64 `json:"total_opened"` 105 | } 106 | 107 | type ElasticIndicesStats struct { 108 | Shards ElasticIndicesStatsShards `json:"_shards"` 109 | All ElasticIndicesStatsAll `json:"_all"` 110 | Indices map[string]ElasticIndicesStatsIndice `json:"indices"` 111 | } 112 | 113 | type ElasticIndicesStatsShards struct { 114 | Total int64 `json:"total"` 115 | Successful int64 `json:"successful"` 116 | Failed int64 `json:"failed"` 117 | } 118 | 119 | type ElasticIndicesStatsAll struct { 120 | Primaries ElasticIndicesStatsIndex `json:"primaries"` 121 | Total ElasticIndicesStatsIndex `json:"total"` 122 | } 123 | 124 | type ElasticIndicesStatsIndice struct { 125 | Primaries ElasticIndicesStatsIndex `json:"primaries"` 126 | Total ElasticIndicesStatsIndex `json:"total"` 127 | } 128 | 129 | type ElasticIndicesStatsIndex struct { 130 | Docs struct { 131 | Count int64 `json:"count"` 132 | Deleted int64 `json:"deleted"` 133 | } `json:"docs"` 134 | Store struct { 135 | SizeInBytes int64 `json:"size_in_bytes"` 136 | ThrottleTimeInMillis int64 `json:"throttle_time_in_millis"` 137 | } `json:"store"` 138 | } 139 | 140 | func getClusterHealth( 141 | elasticDSN string, 142 | elasticsearchAuthToken string, 143 | client *http.Client, 144 | ) (*ElasticClusterHealth, error) { 145 | 146 | var elasticClusterHealth ElasticClusterHealth 147 | 148 | clutserHealthURL := fmt.Sprintf("%s/_cluster/health", elasticDSN) 149 | request, err := http.NewRequest("GET", clutserHealthURL, nil) 150 | if err != nil { 151 | return nil, hierr.Errorf( 152 | err, 153 | "can`t create new HTTP request to %s", 154 | elasticDSN, 155 | ) 156 | } 157 | 158 | if elasticsearchAuthToken != noneValue { 159 | request.Header.Add("Authorization", "Basic "+elasticsearchAuthToken) 160 | } 161 | 162 | clusterHealthResponse, err := client.Do(request) 163 | if err != nil { 164 | return nil, hierr.Errorf( 165 | err.Error(), 166 | "can`t get cluster health from Elasticsearch %s", 167 | elasticDSN, 168 | ) 169 | } 170 | 171 | defer clusterHealthResponse.Body.Close() 172 | 173 | if clusterHealthResponse.StatusCode != http.StatusOK { 174 | return nil, fmt.Errorf( 175 | "can`t get cluster health, Elasticsearch cluster returned %d HTTP code, expected %d HTTP code", 176 | clusterHealthResponse.StatusCode, 177 | http.StatusOK, 178 | ) 179 | } 180 | 181 | err = json.NewDecoder(clusterHealthResponse.Body).Decode(&elasticClusterHealth) 182 
| if err != nil { 183 | return nil, hierr.Errorf( 184 | err.Error(), 185 | "can`t decode cluster health response from Elasticsearch %s", 186 | elasticDSN, 187 | ) 188 | } 189 | 190 | return &elasticClusterHealth, nil 191 | } 192 | 193 | func getNodeStats( 194 | elasticDSN string, 195 | elasticsearchAuthToken string, 196 | client *http.Client, 197 | ) (*ElasticNodesStats, error) { 198 | 199 | var elasticNodesStats ElasticNodesStats 200 | 201 | nodeStatsURL := fmt.Sprintf("%s/_nodes/_local/stats", elasticDSN) 202 | request, err := http.NewRequest("GET", nodeStatsURL, nil) 203 | if err != nil { 204 | return nil, hierr.Errorf( 205 | err, 206 | "can`t create new HTTP request to %s", 207 | elasticDSN, 208 | ) 209 | } 210 | 211 | if elasticsearchAuthToken != noneValue { 212 | request.Header.Add("Authorization", "Basic "+elasticsearchAuthToken) 213 | } 214 | 215 | nodeStatsResponse, err := client.Do(request) 216 | if err != nil { 217 | return nil, hierr.Errorf( 218 | err.Error(), 219 | "can`t get node stats from Elasticsearch %s", 220 | elasticDSN, 221 | ) 222 | } 223 | 224 | defer nodeStatsResponse.Body.Close() 225 | 226 | if nodeStatsResponse.StatusCode != http.StatusOK { 227 | return nil, fmt.Errorf( 228 | "can`t get node stats, Elasticsearch node returned %d HTTP code", 229 | nodeStatsResponse.StatusCode, 230 | ) 231 | } 232 | 233 | err = json.NewDecoder(nodeStatsResponse.Body).Decode(&elasticNodesStats) 234 | if err != nil { 235 | return nil, hierr.Errorf( 236 | err.Error(), 237 | "can`t decode node stats response from Elasticsearch %s", 238 | elasticDSN, 239 | ) 240 | } 241 | 242 | return &elasticNodesStats, nil 243 | } 244 | 245 | func getIndicesStats( 246 | elasticDSN string, 247 | elasticsearchAuthToken string, 248 | client *http.Client, 249 | ) (*ElasticIndicesStats, error) { 250 | 251 | var elasticIndicesStats ElasticIndicesStats 252 | 253 | indicesStatsURL := fmt.Sprintf("%s/_stats", elasticDSN) 254 | request, err := http.NewRequest("GET", indicesStatsURL, nil) 255 | if err != nil { 256 | return nil, hierr.Errorf( 257 | err, 258 | "can`t create new HTTP request to %s", 259 | elasticDSN, 260 | ) 261 | } 262 | 263 | if elasticsearchAuthToken != noneValue { 264 | request.Header.Add("Authorization", "Basic "+elasticsearchAuthToken) 265 | } 266 | 267 | indicesStatsResponse, err := client.Do(request) 268 | if err != nil { 269 | return nil, hierr.Errorf( 270 | err.Error(), 271 | "can`t get indices stats from Elasticsearch %s", 272 | elasticDSN, 273 | ) 274 | } 275 | 276 | defer indicesStatsResponse.Body.Close() 277 | 278 | if indicesStatsResponse.StatusCode != http.StatusOK { 279 | return nil, fmt.Errorf( 280 | "can`t get indices stats, Elasticsearch node returned %d HTTP code, expected %d HTTP code", 281 | indicesStatsResponse.StatusCode, 282 | http.StatusOK, 283 | ) 284 | } 285 | 286 | err = json.NewDecoder(indicesStatsResponse.Body).Decode(&elasticIndicesStats) 287 | if err != nil { 288 | return nil, hierr.Errorf( 289 | err.Error(), 290 | "can`t decode indices stats response from Elasticsearch %s", 291 | elasticDSN, 292 | ) 293 | } 294 | 295 | return &elasticIndicesStats, nil 296 | } 297 | -------------------------------------------------------------------------------- /cmd/zabbix-agent-extension-elasticsearch/indices.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "strconv" 5 | 6 | zsend "github.com/blacked/go-zabbix" 7 | ) 8 | 9 | //NodeIndices - indices stats 10 | type NodeIndices struct { 11 | Docs struct { 12 | 
Count int64 `json:"count"` 13 | Deleted int64 `json:"deleted"` 14 | } `json:"docs"` 15 | 16 | Store struct { 17 | SizeInBytes int64 `json:"size_in_bytes"` 18 | ThrottleTimeInMillis int64 `json:"throttle_time_in_millis"` 19 | } `json:"store"` 20 | 21 | Indexing IndicesIndexingStats `json:"indexing"` 22 | Get IndicesGetStats `json:"get"` 23 | Search IndicesSearchStats `json:"search"` 24 | Merges IndicesMergesStats `json:"merges"` 25 | QueryCache IndicesQueryCache `json:"query_cache"` 26 | } 27 | 28 | // IndicesIndexingStats - indices indexing stats 29 | type IndicesIndexingStats struct { 30 | IndexTotal int64 `json:"index_total"` 31 | IndexTimeInMillis int64 `json:"index_time_in_millis"` 32 | IndexCurrent int64 `json:"index_current"` 33 | IndexFailed int64 `json:"index_failed"` 34 | DeleteTotal int64 `json:"delete_total"` 35 | DeleteTimeInMillis int64 `json:"delete_time_in_millis"` 36 | DeleteCurrent int64 `json:"delete_current"` 37 | NoopUpdateTotal int64 `json:"noop_update_total"` 38 | IsThrottled bool `json:"is_throttled"` 39 | ThrottleTimeInMillis int64 `json:"throttle_time_in_millis"` 40 | } 41 | 42 | // IndicesGetStats - indices get stats 43 | type IndicesGetStats struct { 44 | Total int64 `json:"total"` 45 | TimeInMillis int64 `json:"time_in_millis"` 46 | ExistsTotal int64 `json:"exists_total"` 47 | ExistsTimeInMillis int64 `json:"exists_time_in_millis"` 48 | MissingTotal int64 `json:"missing_total"` 49 | MissingTimeInMillis int64 `json:"missing_time_in_millis"` 50 | Current int64 `json:"current"` 51 | } 52 | 53 | // IndicesSearchStats - indices search stats 54 | type IndicesSearchStats struct { 55 | OpenContexts int64 `json:"open_contexts"` 56 | QueryTotal int64 `json:"query_total"` 57 | QueryTimeInMillis int64 `json:"query_time_in_millis"` 58 | QueryCurrent int64 `json:"query_current"` 59 | FetchTotal int64 `json:"fetch_total"` 60 | FetchTimeInMillis int64 `json:"fetch_time_in_millis"` 61 | FetchCurrent int64 `json:"fetch_current"` 62 | ScrollTotal int64 `json:"scroll_total"` 63 | ScrollTimeInMillis int64 `json:"scroll_time_in_millis"` 64 | ScrollCurrent int64 `json:"scroll_current"` 65 | SuggestTotal int64 `json:"suggest_total"` 66 | SuggestTimeInMillis int64 `json:"suggest_time_in_millis"` 67 | SuggestCurrent int64 `json:"suggest_current"` 68 | } 69 | 70 | // IndicesMergesStats - indices merge stats 71 | type IndicesMergesStats struct { 72 | Current int64 `json:"current"` 73 | CurrentDocs int64 `json:"current_docs"` 74 | CurrentSizeInBytes int64 `json:"current_size_in_bytes"` 75 | Total int64 `json:"total"` 76 | TotalTimeInMillis int64 `json:"total_time_in_millis"` 77 | TotalDocs int64 `json:"total_docs"` 78 | TotalSizeInBytes int64 `json:"total_size_in_bytes"` 79 | TotalStoppedTimeInMillis int64 `json:"total_stopped_time_in_millis"` 80 | TotalThrottledTimeInMillis int64 `json:"total_throttled_time_in_millis"` 81 | TotalAutoThrottleInBytes int64 `json:"total_auto_throttle_in_bytes"` 82 | } 83 | 84 | type IndicesQueryCache struct { 85 | MemorySizeInBytes int64 `json:"memory_size_in_bytes"` 86 | TotalCount int64 `json:"total_count"` 87 | HitCount int64 `json:"hit_count"` 88 | MissCount int64 `json:"miss_count"` 89 | CacheSize int64 `json:"cache_size"` 90 | CacheCount int64 `json:"cache_count"` 91 | Evictions int64 `json:"evictions"` 92 | } 93 | 94 | func createNodeStatsIndices( 95 | hostname string, 96 | nodesStats *ElasticNodesStats, 97 | metrics []*zsend.Metric, 98 | prefix string, 99 | ) []*zsend.Metric { 100 | 101 | var nodeStats ElasticNodeStats 102 | 103 | for _, nodeStat := 
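// getNodeStats queries _nodes/_local/stats (see elastic.go), so the map
// below contains exactly one entry, the local node; grabbing the first
// value is therefore sufficient.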
range nodesStats.Nodes { 104 | nodeStats = nodeStat 105 | break 106 | } 107 | 108 | metrics = append( 109 | metrics, 110 | zsend.NewMetric( 111 | hostname, 112 | makePrefix( 113 | prefix, 114 | "node_stats.indices.docs.count", 115 | ), 116 | strconv.Itoa(int(nodeStats.Indices.Docs.Count)), 117 | ), 118 | ) 119 | 120 | metrics = append( 121 | metrics, 122 | zsend.NewMetric( 123 | hostname, 124 | makePrefix( 125 | prefix, 126 | "node_stats.indices.docs.deleted", 127 | ), 128 | strconv.Itoa(int(nodeStats.Indices.Docs.Deleted)), 129 | ), 130 | ) 131 | 132 | metrics = append( 133 | metrics, 134 | zsend.NewMetric( 135 | hostname, 136 | makePrefix( 137 | prefix, 138 | "node_stats.indices.store.size_in_bytes", 139 | ), 140 | strconv.Itoa(int(nodeStats.Indices.Store.SizeInBytes)), 141 | ), 142 | ) 143 | 144 | metrics = append( 145 | metrics, 146 | zsend.NewMetric( 147 | hostname, 148 | makePrefix( 149 | prefix, 150 | "node_stats.indices.store.throttle_time_in_millis", 151 | ), 152 | strconv.Itoa(int(nodeStats.Indices.Store.ThrottleTimeInMillis)), 153 | ), 154 | ) 155 | 156 | metrics = createNodeStatsIndicesIndexing( 157 | hostname, 158 | &nodeStats, 159 | metrics, 160 | prefix, 161 | ) 162 | 163 | metrics = createNodeStatsIndicesSearch( 164 | hostname, 165 | &nodeStats, 166 | metrics, 167 | prefix, 168 | ) 169 | 170 | metrics = createNodeStatsIndicesQueryCache( 171 | hostname, 172 | &nodeStats, 173 | metrics, 174 | prefix, 175 | ) 176 | 177 | metrics = createNodeStatsIndicesGet( 178 | hostname, 179 | &nodeStats, 180 | metrics, 181 | prefix, 182 | ) 183 | 184 | return metrics 185 | } 186 | 187 | func calculateOpLatency( 188 | totalSpentTimeInMillis int64, 189 | totalOperations int64, 190 | ) string { 191 | var operationTime float64 192 | 193 | if totalOperations == 0 { 194 | return strconv.FormatFloat( 195 | float64(totalOperations), 196 | 'f', 197 | 8, 198 | 64, 199 | ) 200 | } 201 | 202 | totalSpentTimeInSeconds := float64(totalSpentTimeInMillis) / 1000 203 | operationTime = totalSpentTimeInSeconds / float64(totalOperations) 204 | 205 | return strconv.FormatFloat(operationTime, 'f', 8, 64) 206 | } 207 | 208 | func createNodeStatsIndicesIndexing( 209 | hostname string, 210 | nodeStats *ElasticNodeStats, 211 | metrics []*zsend.Metric, 212 | prefix string, 213 | ) []*zsend.Metric { 214 | 215 | metrics = append( 216 | metrics, 217 | zsend.NewMetric( 218 | hostname, 219 | makePrefix( 220 | prefix, 221 | "node_stats.indices.indexing.index_total", 222 | ), 223 | strconv.Itoa(int(nodeStats.Indices.Indexing.IndexTotal)), 224 | ), 225 | ) 226 | 227 | metrics = append( 228 | metrics, 229 | zsend.NewMetric( 230 | hostname, 231 | makePrefix( 232 | prefix, 233 | "node_stats.indices.indexing.index_time_in_millis", 234 | ), 235 | strconv.Itoa(int(nodeStats.Indices.Indexing.IndexTimeInMillis)), 236 | ), 237 | ) 238 | 239 | metrics = append( 240 | metrics, 241 | zsend.NewMetric( 242 | hostname, 243 | makePrefix( 244 | prefix, 245 | "node_stats.indices.indexing.index_latency", 246 | ), 247 | calculateOpLatency( 248 | nodeStats.Indices.Indexing.IndexTimeInMillis, 249 | nodeStats.Indices.Indexing.IndexTotal, 250 | ), 251 | ), 252 | ) 253 | 254 | metrics = append( 255 | metrics, 256 | zsend.NewMetric( 257 | hostname, 258 | makePrefix( 259 | prefix, 260 | "node_stats.indices.indexing.index_current", 261 | ), 262 | strconv.Itoa(int(nodeStats.Indices.Indexing.IndexCurrent)), 263 | ), 264 | ) 265 | 266 | metrics = append( 267 | metrics, 268 | zsend.NewMetric( 269 | hostname, 270 | makePrefix( 271 | prefix, 272 | 
"node_stats.indices.indexing.index_failed", 273 | ), 274 | strconv.Itoa(int(nodeStats.Indices.Indexing.IndexFailed)), 275 | ), 276 | ) 277 | 278 | metrics = append( 279 | metrics, 280 | zsend.NewMetric( 281 | hostname, 282 | makePrefix( 283 | prefix, 284 | "node_stats.indices.indexing.delete_total", 285 | ), 286 | strconv.Itoa(int(nodeStats.Indices.Indexing.DeleteTotal)), 287 | ), 288 | ) 289 | 290 | metrics = append( 291 | metrics, 292 | zsend.NewMetric( 293 | hostname, 294 | makePrefix( 295 | prefix, 296 | "node_stats.indices.indexing.delete_time_in_millis", 297 | ), 298 | strconv.Itoa(int(nodeStats.Indices.Indexing.DeleteTimeInMillis)), 299 | ), 300 | ) 301 | 302 | metrics = append( 303 | metrics, 304 | zsend.NewMetric( 305 | hostname, 306 | makePrefix( 307 | prefix, 308 | "node_stats.indices.indexing.delete_latency", 309 | ), 310 | calculateOpLatency( 311 | nodeStats.Indices.Indexing.DeleteTimeInMillis, 312 | nodeStats.Indices.Indexing.DeleteTotal, 313 | ), 314 | ), 315 | ) 316 | 317 | metrics = append( 318 | metrics, 319 | zsend.NewMetric( 320 | hostname, 321 | makePrefix( 322 | prefix, 323 | "node_stats.indices.indexing.delete_current", 324 | ), 325 | strconv.Itoa(int(nodeStats.Indices.Indexing.DeleteCurrent)), 326 | ), 327 | ) 328 | 329 | metrics = append( 330 | metrics, 331 | zsend.NewMetric( 332 | hostname, 333 | makePrefix( 334 | prefix, 335 | "node_stats.indices.indexing.noop_update_total", 336 | ), 337 | strconv.Itoa(int(nodeStats.Indices.Indexing.NoopUpdateTotal)), 338 | ), 339 | ) 340 | 341 | metrics = append( 342 | metrics, 343 | zsend.NewMetric( 344 | hostname, 345 | makePrefix( 346 | prefix, 347 | "node_stats.indices.indexing.is_throttled", 348 | ), 349 | strconv.FormatBool(nodeStats.Indices.Indexing.IsThrottled), 350 | ), 351 | ) 352 | 353 | metrics = append( 354 | metrics, 355 | zsend.NewMetric( 356 | hostname, 357 | makePrefix( 358 | prefix, 359 | "node_stats.indices.indexing.throttle_time_in_millis", 360 | ), 361 | strconv.Itoa(int(nodeStats.Indices.Indexing.ThrottleTimeInMillis)), 362 | ), 363 | ) 364 | 365 | return metrics 366 | } 367 | 368 | func createNodeStatsIndicesSearch( 369 | hostname string, 370 | nodeStats *ElasticNodeStats, 371 | metrics []*zsend.Metric, 372 | prefix string, 373 | ) []*zsend.Metric { 374 | 375 | metrics = append( 376 | metrics, 377 | zsend.NewMetric( 378 | hostname, 379 | makePrefix( 380 | prefix, 381 | "node_stats.indices.search.open_contexts", 382 | ), 383 | strconv.Itoa(int(nodeStats.Indices.Search.OpenContexts)), 384 | ), 385 | ) 386 | 387 | metrics = append( 388 | metrics, 389 | zsend.NewMetric( 390 | hostname, 391 | makePrefix( 392 | prefix, 393 | "node_stats.indices.search.query_total", 394 | ), 395 | strconv.Itoa(int(nodeStats.Indices.Search.QueryTotal)), 396 | ), 397 | ) 398 | 399 | metrics = append( 400 | metrics, 401 | zsend.NewMetric( 402 | hostname, 403 | makePrefix( 404 | prefix, 405 | "node_stats.indices.search.query_time_in_millis", 406 | ), 407 | strconv.Itoa(int(nodeStats.Indices.Search.QueryTimeInMillis)), 408 | ), 409 | ) 410 | 411 | metrics = append( 412 | metrics, 413 | zsend.NewMetric( 414 | hostname, 415 | makePrefix( 416 | prefix, 417 | "node_stats.indices.search.query_latency", 418 | ), 419 | calculateOpLatency( 420 | nodeStats.Indices.Search.QueryTimeInMillis, 421 | nodeStats.Indices.Search.QueryTotal, 422 | ), 423 | ), 424 | ) 425 | 426 | metrics = append( 427 | metrics, 428 | zsend.NewMetric( 429 | hostname, 430 | makePrefix( 431 | prefix, 432 | "node_stats.indices.search.query_current", 433 | ), 434 | 
strconv.Itoa(int(nodeStats.Indices.Search.QueryCurrent)), 435 | ), 436 | ) 437 | 438 | metrics = append( 439 | metrics, 440 | zsend.NewMetric( 441 | hostname, 442 | makePrefix( 443 | prefix, 444 | "node_stats.indices.search.fetch_total", 445 | ), 446 | strconv.Itoa(int(nodeStats.Indices.Search.FetchTotal)), 447 | ), 448 | ) 449 | 450 | metrics = append( 451 | metrics, 452 | zsend.NewMetric( 453 | hostname, 454 | makePrefix( 455 | prefix, 456 | "node_stats.indices.search.fetch_time_in_millis", 457 | ), 458 | strconv.Itoa(int(nodeStats.Indices.Search.FetchTimeInMillis)), 459 | ), 460 | ) 461 | 462 | metrics = append( 463 | metrics, 464 | zsend.NewMetric( 465 | hostname, 466 | makePrefix( 467 | prefix, 468 | "node_stats.indices.search.fetch_latency", 469 | ), 470 | calculateOpLatency( 471 | nodeStats.Indices.Search.FetchTimeInMillis, 472 | nodeStats.Indices.Search.FetchTotal, 473 | ), 474 | ), 475 | ) 476 | 477 | metrics = append( 478 | metrics, 479 | zsend.NewMetric( 480 | hostname, 481 | makePrefix( 482 | prefix, 483 | "node_stats.indices.search.fetch_current", 484 | ), 485 | strconv.Itoa(int(nodeStats.Indices.Search.FetchCurrent)), 486 | ), 487 | ) 488 | 489 | metrics = append( 490 | metrics, 491 | zsend.NewMetric( 492 | hostname, 493 | makePrefix( 494 | prefix, 495 | "node_stats.indices.search.scroll_total", 496 | ), 497 | strconv.Itoa(int(nodeStats.Indices.Search.ScrollTotal)), 498 | ), 499 | ) 500 | 501 | metrics = append( 502 | metrics, 503 | zsend.NewMetric( 504 | hostname, 505 | makePrefix( 506 | prefix, 507 | "node_stats.indices.search.scroll_time_in_millis", 508 | ), 509 | strconv.Itoa(int(nodeStats.Indices.Search.ScrollTimeInMillis)), 510 | ), 511 | ) 512 | 513 | metrics = append( 514 | metrics, 515 | zsend.NewMetric( 516 | hostname, 517 | makePrefix( 518 | prefix, 519 | "node_stats.indices.search.scroll_latency", 520 | ), 521 | calculateOpLatency( 522 | nodeStats.Indices.Search.ScrollTimeInMillis, 523 | nodeStats.Indices.Search.ScrollTotal, 524 | ), 525 | ), 526 | ) 527 | 528 | metrics = append( 529 | metrics, 530 | zsend.NewMetric( 531 | hostname, 532 | makePrefix( 533 | prefix, 534 | "node_stats.indices.search.scroll_current", 535 | ), 536 | strconv.Itoa(int(nodeStats.Indices.Search.ScrollCurrent)), 537 | ), 538 | ) 539 | 540 | metrics = append( 541 | metrics, 542 | zsend.NewMetric( 543 | hostname, 544 | makePrefix( 545 | prefix, 546 | "node_stats.indices.search.suggest_total", 547 | ), 548 | strconv.Itoa(int(nodeStats.Indices.Search.SuggestTotal)), 549 | ), 550 | ) 551 | 552 | metrics = append( 553 | metrics, 554 | zsend.NewMetric( 555 | hostname, 556 | makePrefix( 557 | prefix, 558 | "node_stats.indices.search.suggest_time_in_millis", 559 | ), 560 | strconv.Itoa(int(nodeStats.Indices.Search.SuggestTimeInMillis)), 561 | ), 562 | ) 563 | 564 | metrics = append( 565 | metrics, 566 | zsend.NewMetric( 567 | hostname, 568 | makePrefix( 569 | prefix, 570 | "node_stats.indices.search.suggest_current", 571 | ), 572 | strconv.Itoa(int(nodeStats.Indices.Search.SuggestCurrent)), 573 | ), 574 | ) 575 | 576 | metrics = append( 577 | metrics, 578 | zsend.NewMetric( 579 | hostname, 580 | makePrefix( 581 | prefix, 582 | "node_stats.indices.search.suggest_latency", 583 | ), 584 | calculateOpLatency( 585 | nodeStats.Indices.Search.SuggestTimeInMillis, 586 | nodeStats.Indices.Search.SuggestTotal, 587 | ), 588 | ), 589 | ) 590 | 591 | return metrics 592 | } 593 | 594 | func createNodeStatsIndicesQueryCache( 595 | hostname string, 596 | nodeStats *ElasticNodeStats, 597 | metrics []*zsend.Metric, 
598 | 	prefix string,
599 | ) []*zsend.Metric {
600 |
601 | 	metrics = append(
602 | 		metrics,
603 | 		zsend.NewMetric(
604 | 			hostname,
605 | 			makePrefix(
606 | 				prefix,
607 | 				"node_stats.indices.query_cache.memory_size_in_bytes",
608 | 			),
609 | 			strconv.Itoa(int(nodeStats.Indices.QueryCache.MemorySizeInBytes)),
610 | 		),
611 | 	)
612 |
613 | 	metrics = append(
614 | 		metrics,
615 | 		zsend.NewMetric(
616 | 			hostname,
617 | 			makePrefix(
618 | 				prefix,
619 | 				"node_stats.indices.query_cache.total_count",
620 | 			),
621 | 			strconv.Itoa(int(nodeStats.Indices.QueryCache.TotalCount)),
622 | 		),
623 | 	)
624 |
625 | 	metrics = append(
626 | 		metrics,
627 | 		zsend.NewMetric(
628 | 			hostname,
629 | 			makePrefix(
630 | 				prefix,
631 | 				"node_stats.indices.query_cache.hit_count",
632 | 			),
633 | 			strconv.Itoa(int(nodeStats.Indices.QueryCache.HitCount)),
634 | 		),
635 | 	)
636 |
637 | 	metrics = append(
638 | 		metrics,
639 | 		zsend.NewMetric(
640 | 			hostname,
641 | 			makePrefix(
642 | 				prefix,
643 | 				"node_stats.indices.query_cache.miss_count",
644 | 			),
645 | 			strconv.Itoa(int(nodeStats.Indices.QueryCache.MissCount)),
646 | 		),
647 | 	)
648 |
649 | 	var queryCacheEfficiency float64 // stays 0 when the cache was never queried, avoiding NaN
650 | 	if total := nodeStats.Indices.QueryCache.TotalCount; total > 0 {
651 | 		queryCacheEfficiency = float64(nodeStats.Indices.QueryCache.HitCount) * float64(100) / float64(total) }
652 |
653 | 	metrics = append(
654 | 		metrics,
655 | 		zsend.NewMetric(
656 | 			hostname,
657 | 			makePrefix(
658 | 				prefix,
659 | 				"node_stats.indices.query_cache.efficiency",
660 | 			),
661 | 			strconv.FormatFloat(
662 | 				queryCacheEfficiency,
663 | 				'f',
664 | 				2,
665 | 				64,
666 | 			),
667 | 		),
668 | 	)
669 |
670 | 	metrics = append(
671 | 		metrics,
672 | 		zsend.NewMetric(
673 | 			hostname,
674 | 			makePrefix(
675 | 				prefix,
676 | 				"node_stats.indices.search.cache_size",
677 | 			),
678 | 			strconv.Itoa(int(nodeStats.Indices.QueryCache.CacheSize)),
679 | 		),
680 | 	)
681 |
682 | 	metrics = append(
683 | 		metrics,
684 | 		zsend.NewMetric(
685 | 			hostname,
686 | 			makePrefix(
687 | 				prefix,
688 | 				"node_stats.indices.query_cache.cache_count",
689 | 			),
690 | 			strconv.Itoa(int(nodeStats.Indices.QueryCache.CacheCount)),
691 | 		),
692 | 	)
693 |
694 | 	metrics = append(
695 | 		metrics,
696 | 		zsend.NewMetric(
697 | 			hostname,
698 | 			makePrefix(
699 | 				prefix,
700 | 				"node_stats.indices.query_cache.evictions",
701 | 			),
702 | 			strconv.Itoa(int(nodeStats.Indices.QueryCache.Evictions)),
703 | 		),
704 | 	)
705 |
706 | 	return metrics
707 | }
708 |
709 | func createNodeStatsIndicesGet(
710 | 	hostname string,
711 | 	nodeStats *ElasticNodeStats,
712 | 	metrics []*zsend.Metric,
713 | 	prefix string,
714 | ) []*zsend.Metric {
715 |
716 | 	metrics = append(
717 | 		metrics,
718 | 		zsend.NewMetric(
719 | 			hostname,
720 | 			makePrefix(
721 | 				prefix,
722 | 				"node_stats.indices.get.missing_total",
723 | 			),
724 | 			strconv.Itoa(int(nodeStats.Indices.Get.MissingTotal)),
725 | 		),
726 | 	)
727 |
728 | 	metrics = append(
729 | 		metrics,
730 | 		zsend.NewMetric(
731 | 			hostname,
732 | 			makePrefix(
733 | 				prefix,
734 | 				"node_stats.indices.get.missing_time_in_millis",
735 | 			),
736 | 			strconv.Itoa(int(nodeStats.Indices.Get.MissingTimeInMillis)),
737 | 		),
738 | 	)
739 |
740 | 	return metrics
741 | }
742 |
--------------------------------------------------------------------------------
/cmd/zabbix-agent-extension-elasticsearch/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | 	"encoding/base64"
5 | 	"fmt"
6 | 	"os"
7 | 	"strconv"
8 | 	"strings"
9 |
10 | 	zsend "github.com/blacked/go-zabbix"
11 | 	docopt "github.com/docopt/docopt-go"
12 | )
13 |
14 | const (
15 | 	noneValue = "None"
16 | )
17 |
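// The Makefile builds with LDFLAGS "-X main.version=${VERSION}", which
// overrides the version variable below at link time; the "[manual build]"
// default only appears when the binary is built without those flags.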
18 | var (
19 | 	version = "[manual build]"
20 | 	err     error
21 | )
22 |
23 | func main() {
24 | 	usage := `zabbix-agent-extension-elasticsearch
25 |
26 | Usage:
27 |   zabbix-agent-extension-elasticsearch [options]
28 |
29 | Options:
30 |   --type              Type of statistics: global (cluster and nodes)
31 |                       or indices [default: global].
32 |   -e --elasticsearch  DSN of Elasticsearch server
33 |                       [default: ` + obtainESDSN() + `].
34 |   -c --ca             Path to custom CA.
35 |                       [default: ` + obtainCAPath() + `].
36 |   --agg-group         Group name which will be used for aggregate
37 |                       item values [default: None].
38 |   -u --user           User for authenticating through the
39 |                       Elasticsearch API [default: None].
40 |   -x --password       Password for the user [default: None].
41 |
42 | Stats options:
43 |   -z --zabbix         Hostname or IP address of zabbix server
44 |                       [default: 127.0.0.1].
45 |   -p --port           Port of zabbix server [default: 10051].
46 |   --prefix            Add your own prefix part for keys
47 |                       [default: None_pfx].
48 |   --hostname          Override the hostname used to identify this host
49 |                       in zabbix server [default: None].
50 |
51 | Discovery options:
52 |   --discovery         Run low-level discovery to determine
53 |                       gc collectors, mem pools, buffer pools, etc.
54 |
55 | Misc options:
56 |   --version           Show version.
57 |   -h --help           Show this screen.
58 | `
59 |
60 | 	args, _ := docopt.Parse(usage, nil, true, version, false)
61 |
62 | 	elasticDSN := parseDSN(args["--elasticsearch"].(string))
63 | 	httpClient, err := makeHTTPClient(args["--ca"].(string))
64 | 	if err != nil {
65 | 		fmt.Println(err.Error())
66 | 		os.Exit(1)
67 | 	}
68 |
69 | 	aggGroup := args["--agg-group"].(string)
70 |
71 | 	zabbix := args["--zabbix"].(string)
72 | 	port, err := strconv.Atoi(args["--port"].(string))
73 | 	if err != nil {
74 | 		fmt.Println(err.Error())
75 | 		os.Exit(1)
76 | 	}
77 |
78 | 	prefix := args["--prefix"].(string)
79 | 	if prefix != "None_pfx" {
80 | 		prefix = strings.Join([]string{prefix, "elasticsearch"}, ".")
81 | 	} else {
82 | 		prefix = "elasticsearch"
83 | 	}
84 |
85 | 	elasticsearchAuthToken := noneValue
86 |
87 | 	elasticsearchUser := args["--user"].(string)
88 | 	elasticsearchPassword := args["--password"].(string)
89 |
90 | 	if elasticsearchUser != noneValue && elasticsearchPassword != noneValue {
91 | 		elasticsearchAuthToken = base64.StdEncoding.EncodeToString(
92 | 			[]byte(
93 | 				fmt.Sprintf(
94 | 					"%s:%s",
95 | 					elasticsearchUser,
96 | 					elasticsearchPassword,
97 | 				),
98 | 			),
99 | 		)
100 | 	}
101 |
102 | 	hostname := args["--hostname"].(string)
103 | 	if hostname == "None" {
104 | 		hostname, err = os.Hostname()
105 | 		if err != nil {
106 | 			fmt.Println(err.Error())
107 | 			os.Exit(1)
108 | 		}
109 | 	}
110 |
111 | 	var metrics []*zsend.Metric
112 |
113 | 	statsType := args["--type"].(string)
114 |
115 | 	switch statsType {
116 | 	case "indices":
117 | 		if aggGroup == "None" {
118 | 			fmt.Println("indices stats work only on the master node with --agg-group set")
119 | 			os.Exit(0)
120 | 		}
121 |
122 | 		indicesStats, err := getIndicesStats(
123 | 			elasticDSN,
124 | 			elasticsearchAuthToken,
125 | 			httpClient,
126 | 		)
127 | 		if err != nil {
128 | 			fmt.Println(err.Error())
129 | 			os.Exit(1)
130 | 		}
131 |
132 | 		if args["--discovery"].(bool) {
133 | 			err = discoveryIndices(indicesStats)
134 | 			if err != nil {
135 | 				fmt.Println(err.Error())
136 | 				os.Exit(1)
137 | 			}
138 | 			os.Exit(0)
139 | 		}
140 |
141 | 		metrics = createIndicesStats(
142 | 			hostname,
143 | 			indicesStats,
144 | 			metrics,
145 | 			prefix,
146 | 		)
147 |
148 | 	case "global":
149 | 		clusterHealth, err := getClusterHealth(
150 | 			elasticDSN,
151 | 			elasticsearchAuthToken,
152 | 			httpClient,
153 | 		)
154 | 		if err != nil {
155 |
fmt.Println(err.Error()) 156 | os.Exit(1) 157 | } 158 | 159 | nodesStats, err := getNodeStats( 160 | elasticDSN, 161 | elasticsearchAuthToken, 162 | httpClient, 163 | ) 164 | if err != nil { 165 | fmt.Println(err.Error()) 166 | os.Exit(1) 167 | } 168 | 169 | if args["--discovery"].(bool) { 170 | err = discovery(nodesStats, aggGroup) 171 | if err != nil { 172 | fmt.Println(err.Error()) 173 | os.Exit(1) 174 | } 175 | os.Exit(0) 176 | } 177 | 178 | metrics = createClusterHealthMetrics( 179 | hostname, 180 | clusterHealth, 181 | metrics, 182 | prefix, 183 | ) 184 | metrics = createNodeStatsJVMMetrics( 185 | hostname, 186 | nodesStats, 187 | metrics, 188 | prefix, 189 | ) 190 | 191 | metrics = createNodeStatsThreadPool( 192 | hostname, 193 | nodesStats, 194 | metrics, 195 | prefix, 196 | ) 197 | 198 | metrics = createNodeStatsIndices( 199 | hostname, 200 | nodesStats, 201 | metrics, 202 | prefix, 203 | ) 204 | 205 | metrics = createNodeStatsTransport( 206 | hostname, 207 | nodesStats, 208 | metrics, 209 | prefix, 210 | ) 211 | 212 | metrics = createNodeStatsHttp( 213 | hostname, 214 | nodesStats, 215 | metrics, 216 | prefix, 217 | ) 218 | 219 | default: 220 | fmt.Println("Unsupported type of stats.") 221 | os.Exit(0) 222 | } 223 | 224 | packet := zsend.NewPacket(metrics) 225 | sender := zsend.NewSender( 226 | zabbix, 227 | port, 228 | ) 229 | sender.Send(packet) 230 | 231 | fmt.Println("OK") 232 | } 233 | -------------------------------------------------------------------------------- /cmd/zabbix-agent-extension-elasticsearch/metric.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | 7 | zsend "github.com/blacked/go-zabbix" 8 | ) 9 | 10 | func makePrefix(prefix, key string) string { 11 | return fmt.Sprintf( 12 | "%s.%s", prefix, key, 13 | ) 14 | 15 | } 16 | 17 | func createClusterHealthMetrics( 18 | hostname string, 19 | clusterHealth *ElasticClusterHealth, 20 | metrics []*zsend.Metric, 21 | prefix string, 22 | ) []*zsend.Metric { 23 | 24 | healthToInt := make(map[string]int) 25 | healthToInt["green"] = 0 26 | healthToInt["yellow"] = 1 27 | healthToInt["red"] = 2 28 | 29 | metrics = append( 30 | metrics, 31 | zsend.NewMetric( 32 | hostname, 33 | makePrefix( 34 | prefix, 35 | "cluster_health.cluster_name", 36 | ), 37 | clusterHealth.ClusterName, 38 | ), 39 | ) 40 | metrics = append( 41 | metrics, 42 | zsend.NewMetric( 43 | hostname, 44 | makePrefix( 45 | prefix, 46 | "cluster_health.status_int", 47 | ), 48 | strconv.Itoa(int(healthToInt[clusterHealth.Status])), 49 | ), 50 | ) 51 | metrics = append( 52 | metrics, 53 | zsend.NewMetric( 54 | hostname, 55 | makePrefix( 56 | prefix, 57 | "cluster_health.status", 58 | ), 59 | clusterHealth.Status, 60 | ), 61 | ) 62 | 63 | metrics = append( 64 | metrics, 65 | zsend.NewMetric( 66 | hostname, 67 | makePrefix( 68 | prefix, 69 | "cluster_health.timed_out", 70 | ), 71 | strconv.FormatBool(clusterHealth.TimedOut), 72 | ), 73 | ) 74 | metrics = append( 75 | metrics, 76 | zsend.NewMetric( 77 | hostname, 78 | makePrefix( 79 | prefix, 80 | "cluster_health.number_of_nodes", 81 | ), 82 | strconv.Itoa(int(clusterHealth.NumderOfNodes)), 83 | ), 84 | ) 85 | metrics = append( 86 | metrics, 87 | zsend.NewMetric( 88 | hostname, 89 | makePrefix( 90 | prefix, 91 | "cluster_health.number_of_data_nodes", 92 | ), 93 | strconv.Itoa(int(clusterHealth.NumberOfDataNodes)), 94 | ), 95 | ) 96 | metrics = append( 97 | metrics, 98 | zsend.NewMetric( 99 | hostname, 100 | makePrefix( 101 | 
prefix, 102 | "cluster_health.active_primary_shards", 103 | ), 104 | strconv.Itoa(int(clusterHealth.ActivePrimaryShards)), 105 | ), 106 | ) 107 | metrics = append( 108 | metrics, 109 | zsend.NewMetric( 110 | hostname, 111 | makePrefix( 112 | prefix, 113 | "cluster_health.active_shards", 114 | ), 115 | strconv.Itoa(int(clusterHealth.ActiveShards)), 116 | ), 117 | ) 118 | metrics = append( 119 | metrics, 120 | zsend.NewMetric( 121 | hostname, 122 | makePrefix( 123 | prefix, 124 | "cluster_health.relocating_shards", 125 | ), 126 | strconv.Itoa(int(clusterHealth.RelocatingShards)), 127 | ), 128 | ) 129 | metrics = append( 130 | metrics, 131 | zsend.NewMetric( 132 | hostname, 133 | makePrefix( 134 | prefix, 135 | "cluster_health.initializing_shards", 136 | ), 137 | strconv.Itoa(int(clusterHealth.InitializingShards)), 138 | ), 139 | ) 140 | metrics = append( 141 | metrics, 142 | zsend.NewMetric( 143 | hostname, 144 | makePrefix( 145 | prefix, 146 | "cluster_health.unassigned_shards", 147 | ), 148 | strconv.Itoa(int(clusterHealth.UnassignedShards)), 149 | ), 150 | ) 151 | metrics = append( 152 | metrics, 153 | zsend.NewMetric( 154 | hostname, 155 | makePrefix( 156 | prefix, 157 | "cluster_health.delayed_unassigned_shards", 158 | ), 159 | strconv.Itoa(int(clusterHealth.DelayedUnassignedShards)), 160 | ), 161 | ) 162 | metrics = append( 163 | metrics, 164 | zsend.NewMetric( 165 | hostname, 166 | makePrefix( 167 | prefix, 168 | "cluster_health.number_of_pending_tasks", 169 | ), 170 | strconv.Itoa(int(clusterHealth.NumberOfPendingTasks)), 171 | ), 172 | ) 173 | metrics = append( 174 | metrics, 175 | zsend.NewMetric( 176 | hostname, 177 | makePrefix( 178 | prefix, 179 | "cluster_health.number_of_in_flight_fetch", 180 | ), 181 | strconv.Itoa(int(clusterHealth.NumberOfInFlightFetch)), 182 | ), 183 | ) 184 | metrics = append( 185 | metrics, 186 | zsend.NewMetric( 187 | hostname, 188 | makePrefix( 189 | prefix, 190 | "cluster_health.task_max_waiting_in_queue_millis", 191 | ), 192 | strconv.Itoa(int(clusterHealth.TaskMaxWaitingInQueueMillis)), 193 | ), 194 | ) 195 | metrics = append( 196 | metrics, 197 | zsend.NewMetric( 198 | hostname, 199 | makePrefix( 200 | prefix, 201 | "cluster_health.active_shards_percent", 202 | ), 203 | strconv.Itoa(int(clusterHealth.ActiveShardsPercent)), 204 | ), 205 | ) 206 | 207 | return metrics 208 | } 209 | 210 | func createNodeStatsTransport( 211 | hostname string, 212 | nodesStats *ElasticNodesStats, 213 | metrics []*zsend.Metric, 214 | prefix string, 215 | ) []*zsend.Metric { 216 | 217 | for _, nodeStats := range nodesStats.Nodes { 218 | metrics = append( 219 | metrics, 220 | zsend.NewMetric( 221 | hostname, 222 | makePrefix( 223 | prefix, 224 | "node_stats.transport.server_open", 225 | ), 226 | strconv.Itoa(int(nodeStats.Transport.ServerOpen)), 227 | ), 228 | ) 229 | metrics = append( 230 | metrics, 231 | zsend.NewMetric( 232 | hostname, 233 | makePrefix( 234 | prefix, 235 | "node_stats.transport.rx_count", 236 | ), 237 | strconv.Itoa(int(nodeStats.Transport.RxCount)), 238 | ), 239 | ) 240 | metrics = append( 241 | metrics, 242 | zsend.NewMetric( 243 | hostname, 244 | makePrefix( 245 | prefix, 246 | "node_stats.transport.rx_size_in_bytes", 247 | ), 248 | strconv.Itoa(int(nodeStats.Transport.RxSizeInBytes)), 249 | ), 250 | ) 251 | metrics = append( 252 | metrics, 253 | zsend.NewMetric( 254 | hostname, 255 | makePrefix( 256 | prefix, 257 | "node_stats.transport.tx_count", 258 | ), 259 | strconv.Itoa(int(nodeStats.Transport.TxCount)), 260 | ), 261 | ) 262 | metrics = append( 
263 | 		metrics,
264 | 		zsend.NewMetric(
265 | 			hostname,
266 | 			makePrefix(
267 | 				prefix,
268 | 				"node_stats.transport.tx_size_in_bytes",
269 | 			),
270 | 			strconv.Itoa(int(nodeStats.Transport.TxSizeInBytes)),
271 | 		),
272 | 		)
273 | 	}
274 |
275 | 	return metrics
276 | }
277 |
278 | func createNodeStatsHttp(
279 | 	hostname string,
280 | 	nodesStats *ElasticNodesStats,
281 | 	metrics []*zsend.Metric,
282 | 	prefix string,
283 | ) []*zsend.Metric {
284 |
285 | 	for _, nodeStats := range nodesStats.Nodes {
286 | 		metrics = append(
287 | 			metrics,
288 | 			zsend.NewMetric(
289 | 				hostname,
290 | 				makePrefix(
291 | 					prefix,
292 | 					"node_stats.http.current_open",
293 | 				),
294 | 				strconv.Itoa(int(nodeStats.Http.CurrentOpen)),
295 | 			),
296 | 		)
297 | 		metrics = append(
298 | 			metrics,
299 | 			zsend.NewMetric(
300 | 				hostname,
301 | 				makePrefix(
302 | 					prefix,
303 | 					"node_stats.http.total_opened",
304 | 				),
305 | 				strconv.Itoa(int(nodeStats.Http.TotalOpened)),
306 | 			),
307 | 		)
308 | 	}
309 |
310 | 	return metrics
311 | }
312 |
313 | func createNodeStatsJVMMetrics(
314 | 	hostname string,
315 | 	nodesStats *ElasticNodesStats,
316 | 	metrics []*zsend.Metric,
317 | 	prefix string,
318 | ) []*zsend.Metric {
319 |
320 | 	for _, nodeStats := range nodesStats.Nodes {
321 | 		metrics = append(
322 | 			metrics,
323 | 			zsend.NewMetric(
324 | 				hostname,
325 | 				makePrefix(
326 | 					prefix,
327 | 					"node_stats.jvm.timestamp",
328 | 				),
329 | 				strconv.Itoa(int(nodeStats.JVM.Timestamp)),
330 | 			),
331 | 		)
332 | 		metrics = append(
333 | 			metrics,
334 | 			zsend.NewMetric(
335 | 				hostname,
336 | 				makePrefix(
337 | 					prefix,
338 | 					"node_stats.jvm.uptime_in_millis",
339 | 				),
340 | 				strconv.Itoa(int(nodeStats.JVM.UptimeInMillis)),
341 | 			),
342 | 		)
343 |
344 | 		metrics = createNodeStatsJVMMemMetrics(hostname, metrics, &nodeStats, prefix)
345 | 		metrics = createNodeStatsJVMThreadsMetrics(hostname, metrics, &nodeStats, prefix)
346 |
347 | 		for collectorsName, nodeStatsJVMGCColletorsStats := range nodeStats.JVM.GC.Collectors {
348 | 			metrics = createNodeStatsJVMGCCollectorsMetrics(hostname, metrics, &nodeStatsJVMGCColletorsStats, collectorsName, prefix)
349 | 		}
350 |
351 | 		for bufferPoolsName, nodeStatsJVMBufferPoolsStats := range nodeStats.JVM.BufferPools {
352 | 			metrics = createNodeStatsJVMBufferPoolsMetrics(hostname, metrics, &nodeStatsJVMBufferPoolsStats, bufferPoolsName, prefix)
353 | 		}
354 | 		metrics = createNodeStatsJVMClassesMetrics(hostname, metrics, &nodeStats, prefix)
355 | 	}
356 |
357 | 	return metrics
358 | }
359 |
360 | func createNodeStatsJVMMemMetrics(
361 | 	hostname string,
362 | 	metrics []*zsend.Metric,
363 | 	nodeStats *ElasticNodeStats,
364 | 	prefix string,
365 | ) []*zsend.Metric {
366 |
367 | 	metrics = append(
368 | 		metrics,
369 | 		zsend.NewMetric(
370 | 			hostname,
371 | 			makePrefix(
372 | 				prefix,
373 | 				"node_stats.jvm.mem.heap_used_in_bytes",
374 | 			),
375 | 			strconv.Itoa(int(nodeStats.JVM.Mem.HeapUsedInBytes)),
376 | 		),
377 | 	)
378 | 	metrics = append(
379 | 		metrics,
380 | 		zsend.NewMetric(
381 | 			hostname,
382 | 			makePrefix(
383 | 				prefix,
384 | 				"node_stats.jvm.mem.heap_used_percent",
385 | 			),
386 | 			strconv.Itoa(int(nodeStats.JVM.Mem.HeapUsedPercent)),
387 | 		),
388 | 	)
389 | 	metrics = append(
390 | 		metrics,
391 | 		zsend.NewMetric(
392 | 			hostname,
393 | 			makePrefix(
394 | 				prefix,
395 | 				"node_stats.jvm.mem.heap_committed_in_bytes",
396 | 			),
397 | 			strconv.Itoa(int(nodeStats.JVM.Mem.HeapCommittedInBytes)),
398 | 		),
399 | 	)
400 | 	metrics = append(
401 | 		metrics,
402 | 		zsend.NewMetric(
403 | 			hostname,
404 | 			makePrefix(
405 | 				prefix,
406 | 				"node_stats.jvm.mem.heap_max_in_bytes",
407 |
), 408 | strconv.Itoa(int(nodeStats.JVM.Mem.HeapMaxInBytes)), 409 | ), 410 | ) 411 | metrics = append( 412 | metrics, 413 | zsend.NewMetric( 414 | hostname, 415 | makePrefix( 416 | prefix, 417 | "node_stats.jvm.mem.non_heap_used_in_bytes", 418 | ), 419 | strconv.Itoa(int(nodeStats.JVM.Mem.NonHeapUsedInBytes)), 420 | ), 421 | ) 422 | metrics = append( 423 | metrics, 424 | zsend.NewMetric( 425 | hostname, 426 | makePrefix( 427 | prefix, 428 | "node_stats.jvm.mem.non_heap_committed_in_bytes", 429 | ), 430 | strconv.Itoa(int(nodeStats.JVM.Mem.NonHeapCommittedInBytes)), 431 | ), 432 | ) 433 | 434 | for poolsName, nodeStatsJVMMemPoolsStats := range nodeStats.JVM.Mem.Pools { 435 | metrics = createNodeStatsJVMMemPoolsMetrics(hostname, metrics, &nodeStatsJVMMemPoolsStats, poolsName, prefix) 436 | } 437 | 438 | return metrics 439 | } 440 | 441 | func createNodeStatsJVMMemPoolsMetrics( 442 | hostname string, 443 | metrics []*zsend.Metric, 444 | nodeStatsJVMMemPoolsStats *ElasticNodeStatsJVMMemPoolsStats, 445 | poolsName string, 446 | prefix string, 447 | ) []*zsend.Metric { 448 | 449 | metrics = append( 450 | metrics, 451 | zsend.NewMetric( 452 | hostname, 453 | makePrefix( 454 | prefix, 455 | fmt.Sprintf("node_stats.jvm.mem.pools.used_in_bytes.[%s]", poolsName), 456 | ), 457 | strconv.Itoa(int(nodeStatsJVMMemPoolsStats.UsedInBytes)), 458 | ), 459 | ) 460 | metrics = append( 461 | metrics, 462 | zsend.NewMetric( 463 | hostname, 464 | makePrefix( 465 | prefix, 466 | fmt.Sprintf("node_stats.jvm.mem.pools.max_in_bytes.[%s]", poolsName), 467 | ), 468 | strconv.Itoa(int(nodeStatsJVMMemPoolsStats.MaxInBytes)), 469 | ), 470 | ) 471 | metrics = append( 472 | metrics, 473 | zsend.NewMetric( 474 | hostname, 475 | makePrefix( 476 | prefix, 477 | fmt.Sprintf("node_stats.jvm.mem.pools.peak_used_in_bytes.[%s]", poolsName), 478 | ), 479 | strconv.Itoa(int(nodeStatsJVMMemPoolsStats.PeakUsedInBytes)), 480 | ), 481 | ) 482 | metrics = append( 483 | metrics, 484 | zsend.NewMetric( 485 | hostname, 486 | makePrefix( 487 | prefix, 488 | fmt.Sprintf("node_stats.jvm.mem.pools.peak_max_in_bytes.[%s]", poolsName), 489 | ), 490 | strconv.Itoa(int(nodeStatsJVMMemPoolsStats.PeakMaxInBytes)), 491 | ), 492 | ) 493 | 494 | return metrics 495 | } 496 | 497 | func createNodeStatsJVMThreadsMetrics( 498 | hostname string, 499 | metrics []*zsend.Metric, 500 | nodeStats *ElasticNodeStats, 501 | prefix string, 502 | ) []*zsend.Metric { 503 | 504 | metrics = append( 505 | metrics, 506 | zsend.NewMetric( 507 | hostname, 508 | makePrefix( 509 | prefix, 510 | "node_stats.jvm.threads.count", 511 | ), 512 | strconv.Itoa(int(nodeStats.JVM.Threads.Count)), 513 | ), 514 | ) 515 | metrics = append( 516 | metrics, 517 | zsend.NewMetric( 518 | hostname, 519 | makePrefix( 520 | prefix, 521 | "node_stats.jvm.threads.peak_count", 522 | ), 523 | strconv.Itoa(int(nodeStats.JVM.Threads.PeakCount)), 524 | ), 525 | ) 526 | 527 | return metrics 528 | } 529 | 530 | func createNodeStatsJVMGCCollectorsMetrics( 531 | hostname string, 532 | metrics []*zsend.Metric, 533 | nodeStatsJVMGCColletorsStats *ElasticNodeStatsJVMGCCollectorsStats, 534 | collectorsName string, 535 | prefix string, 536 | ) []*zsend.Metric { 537 | 538 | metrics = append( 539 | metrics, 540 | zsend.NewMetric( 541 | hostname, 542 | makePrefix( 543 | prefix, 544 | fmt.Sprintf("node_stats.jvm.gc.collectors.collection_cout.[%s]", collectorsName), 545 | ), 546 | strconv.Itoa(int(nodeStatsJVMGCColletorsStats.CollectionCount)), 547 | ), 548 | ) 549 | metrics = append( 550 | metrics, 551 | 
552 | 			hostname,
553 | 			makePrefix(
554 | 				prefix,
555 | 				fmt.Sprintf("node_stats.jvm.gc.collectors.collection_time_in_millis.[%s]", collectorsName),
556 | 			),
557 | 			strconv.Itoa(int(nodeStatsJVMGCCollectorsStats.CollectionTimeInMillis)),
558 | 		),
559 | 	)
560 | 
561 | 	return metrics
562 | }
563 | 
564 | func createNodeStatsJVMBufferPoolsMetrics(
565 | 	hostname string,
566 | 	metrics []*zsend.Metric,
567 | 	nodeStatsJVMBufferPoolsStats *ElasticNodeStatsJVMBufferPoolsStats,
568 | 	bufferPoolsName string,
569 | 	prefix string,
570 | ) []*zsend.Metric {
571 | 
572 | 	metrics = append(
573 | 		metrics,
574 | 		zsend.NewMetric(
575 | 			hostname,
576 | 			makePrefix(
577 | 				prefix,
578 | 				fmt.Sprintf("node_stats.jvm.buffer_polls.count.[%s]", bufferPoolsName), // "buffer_polls" (sic): these keys must stay in sync with the item keys in template_elasticsearch_service.xml
579 | 			),
580 | 			strconv.Itoa(int(nodeStatsJVMBufferPoolsStats.Count)),
581 | 		),
582 | 	)
583 | 	metrics = append(
584 | 		metrics,
585 | 		zsend.NewMetric(
586 | 			hostname,
587 | 			makePrefix(
588 | 				prefix,
589 | 				fmt.Sprintf("node_stats.jvm.buffer_polls.used_in_bytes.[%s]", bufferPoolsName),
590 | 			),
591 | 			strconv.Itoa(int(nodeStatsJVMBufferPoolsStats.UsedInBytes)),
592 | 		),
593 | 	)
594 | 	metrics = append(
595 | 		metrics,
596 | 		zsend.NewMetric(
597 | 			hostname,
598 | 			makePrefix(
599 | 				prefix,
600 | 				fmt.Sprintf("node_stats.jvm.buffer_polls.total_capacity_in_bytes.[%s]", bufferPoolsName),
601 | 			),
602 | 			strconv.Itoa(int(nodeStatsJVMBufferPoolsStats.TotalCapacityInBytes)),
603 | 		),
604 | 	)
605 | 
606 | 	return metrics
607 | }
608 | 
609 | func createNodeStatsJVMClassesMetrics(
610 | 	hostname string,
611 | 	metrics []*zsend.Metric,
612 | 	nodeStats *ElasticNodeStats,
613 | 	prefix string,
614 | ) []*zsend.Metric {
615 | 
616 | 	metrics = append(
617 | 		metrics,
618 | 		zsend.NewMetric(
619 | 			hostname,
620 | 			makePrefix(
621 | 				prefix,
622 | 				"node_stats.jvm.classes.current_loaded_count",
623 | 			),
624 | 			strconv.Itoa(int(nodeStats.JVM.Classes.CurrentLoadedCount)),
625 | 		),
626 | 	)
627 | 	metrics = append(
628 | 		metrics,
629 | 		zsend.NewMetric(
630 | 			hostname,
631 | 			makePrefix(
632 | 				prefix,
633 | 				"node_stats.jvm.classes.total_loaded_count",
634 | 			),
635 | 			strconv.Itoa(int(nodeStats.JVM.Classes.TotalLoadedCount)),
636 | 		),
637 | 	)
638 | 	metrics = append(
639 | 		metrics,
640 | 		zsend.NewMetric(
641 | 			hostname,
642 | 			makePrefix(
643 | 				prefix,
644 | 				"node_stats.jvm.classes.total_unloaded_count",
645 | 			),
646 | 			strconv.Itoa(int(nodeStats.JVM.Classes.TotalUnloadedCount)),
647 | 		),
648 | 	)
649 | 
650 | 	return metrics
651 | }
652 | 
653 | func createIndicesStats(
654 | 	hostname string,
655 | 	indicesStats *ElasticIndicesStats,
656 | 	metrics []*zsend.Metric,
657 | 	prefix string,
658 | ) []*zsend.Metric {
659 | 
660 | 	for indexName, indexStats := range indicesStats.Indices {
661 | 
662 | 		metrics = append(
663 | 			metrics,
664 | 			zsend.NewMetric(
665 | 				hostname,
666 | 				makePrefix(
667 | 					prefix,
668 | 					fmt.Sprintf(
669 | 						"indices_stats.total.docs.count.[%s]",
670 | 						indexName,
671 | 					),
672 | 				),
673 | 				strconv.Itoa(int(indexStats.Total.Docs.Count)),
674 | 			),
675 | 		)
676 | 		metrics = append(
677 | 			metrics,
678 | 			zsend.NewMetric(
679 | 				hostname,
680 | 				makePrefix(
681 | 					prefix,
682 | 					fmt.Sprintf(
683 | 						"indices_stats.total.docs.deleted.[%s]",
684 | 						indexName,
685 | 					),
686 | 				),
687 | 				strconv.Itoa(int(indexStats.Total.Docs.Deleted)),
688 | 			),
689 | 		)
690 | 		metrics = append(
691 | 			metrics,
692 | 			zsend.NewMetric(
693 | 				hostname,
694 | 				makePrefix(
695 | 					prefix,
696 | 					fmt.Sprintf(
697 | 						"indices_stats.primaries.docs.count.[%s]",
698 | 						indexName,
699 | 					),
700 | 				),
701 | 				strconv.Itoa(int(indexStats.Primaries.Docs.Count)),
702 | 			),
703 | 		)
704 | 		metrics = append(
705 | 			metrics,
706 | 			zsend.NewMetric(
707 | 				hostname,
708 | 				makePrefix(
709 | 					prefix,
710 | 					fmt.Sprintf(
711 | 						"indices_stats.primaries.docs.deleted.[%s]",
712 | 						indexName,
713 | 					),
714 | 				),
715 | 				strconv.Itoa(int(indexStats.Primaries.Docs.Deleted)),
716 | 			),
717 | 		)
718 | 		metrics = append(
719 | 			metrics,
720 | 			zsend.NewMetric(
721 | 				hostname,
722 | 				makePrefix(
723 | 					prefix,
724 | 					fmt.Sprintf(
725 | 						"indices_stats.total.store.size.[%s]",
726 | 						indexName,
727 | 					),
728 | 				),
729 | 				strconv.Itoa(int(indexStats.Total.Store.SizeInBytes)),
730 | 			),
731 | 		)
732 | 		metrics = append(
733 | 			metrics,
734 | 			zsend.NewMetric(
735 | 				hostname,
736 | 				makePrefix(
737 | 					prefix,
738 | 					fmt.Sprintf(
739 | 						"indices_stats.primaries.store.size.[%s]",
740 | 						indexName,
741 | 					),
742 | 				),
743 | 				strconv.Itoa(int(indexStats.Primaries.Store.SizeInBytes)),
744 | 			),
745 | 		)
746 | 
747 | 	}
748 | 
749 | 	return metrics
750 | }
751 | 
--------------------------------------------------------------------------------
/cmd/zabbix-agent-extension-elasticsearch/thread_pool.go:
--------------------------------------------------------------------------------
1 | package main
2 | 
3 | import (
4 | 	"fmt"
5 | 	"strconv"
6 | 
7 | 	zsend "github.com/blacked/go-zabbix"
8 | )
9 | 
10 | type NodeThreadPool struct {
11 | 	Threads   int64 `json:"threads"`
12 | 	Queue     int64 `json:"queue"`
13 | 	Active    int64 `json:"active"`
14 | 	Rejected  int64 `json:"rejected"`
15 | 	Largest   int64 `json:"largest"`
16 | 	Completed int64 `json:"completed"`
17 | }
18 | 
19 | func createNodeStatsThreadPool(
20 | 	hostname string,
21 | 	nodesStats *ElasticNodesStats,
22 | 	metrics []*zsend.Metric,
23 | 	prefix string,
24 | ) []*zsend.Metric {
25 | 
26 | 	var nodeStats ElasticNodeStats
27 | 
28 | 	for _, nodeStat := range nodesStats.Nodes { // only the first node of the response is used; the agent is expected to query its local node
29 | 		nodeStats = nodeStat
30 | 		break
31 | 	}
32 | 
33 | 	for threadPoolName, threadPoolMetric := range nodeStats.ThreadPools {
34 | 		metrics = append(
35 | 			metrics,
36 | 			zsend.NewMetric(
37 | 				hostname,
38 | 				makePrefix(
39 | 					prefix,
40 | 					fmt.Sprintf(
41 | 						"node_stats.thread_pool.threads[%s]",
42 | 						threadPoolName,
43 | 					),
44 | 				),
45 | 				strconv.Itoa(int(threadPoolMetric.Threads)),
46 | 			),
47 | 		)
48 | 
49 | 		metrics = append(
50 | 			metrics,
51 | 			zsend.NewMetric(
52 | 				hostname,
53 | 				makePrefix(
54 | 					prefix,
55 | 					fmt.Sprintf(
56 | 						"node_stats.thread_pool.queue[%s]",
57 | 						threadPoolName,
58 | 					),
59 | 				),
60 | 				strconv.Itoa(int(threadPoolMetric.Queue)),
61 | 			),
62 | 		)
63 | 
64 | 		metrics = append(
65 | 			metrics,
66 | 			zsend.NewMetric(
67 | 				hostname,
68 | 				makePrefix(
69 | 					prefix,
70 | 					fmt.Sprintf(
71 | 						"node_stats.thread_pool.active[%s]",
72 | 						threadPoolName,
73 | 					),
74 | 				),
75 | 				strconv.Itoa(int(threadPoolMetric.Active)),
76 | 			),
77 | 		)
78 | 
79 | 		metrics = append(
80 | 			metrics,
81 | 			zsend.NewMetric(
82 | 				hostname,
83 | 				makePrefix(
84 | 					prefix,
85 | 					fmt.Sprintf(
86 | 						"node_stats.thread_pool.rejected[%s]",
87 | 						threadPoolName,
88 | 					),
89 | 				),
90 | 				strconv.Itoa(int(threadPoolMetric.Rejected)),
91 | 			),
92 | 		)
93 | 
94 | 		metrics = append(
95 | 			metrics,
96 | 			zsend.NewMetric(
97 | 				hostname,
98 | 				makePrefix(
99 | 					prefix,
100 | 					fmt.Sprintf(
101 | 						"node_stats.thread_pool.largest[%s]",
102 | 						threadPoolName,
103 | 					),
104 | 				),
105 | 				strconv.Itoa(int(threadPoolMetric.Largest)),
106 | 			),
107 | 		)
108 | 
109 | 		metrics = append(
110 | 			metrics,
111 | 			zsend.NewMetric(
112 | 				hostname,
113 | 				makePrefix(
114 | 					prefix,
115 | 					fmt.Sprintf(
116 | 						"node_stats.thread_pool.completed[%s]",
117 | 						threadPoolName,
118 | 					),
119 | 				),
120 | 				strconv.Itoa(int(threadPoolMetric.Completed)),
121 | 			),
122 | 		)
123 | 
124 | 	}
125 | 
126 | 	return metrics
127 | }
128 | 
--------------------------------------------------------------------------------
/cmd/zabbix-agent-extension-elasticsearch/tools.go:
--------------------------------------------------------------------------------
1 | package main
2 | 
3 | import (
4 | 	"crypto/tls"
5 | 	"crypto/x509"
6 | 	"fmt"
7 | 	"io/ioutil"
8 | 	"net/http"
9 | 	"strings"
10 | 
11 | 	karma "github.com/reconquest/karma-go"
12 | )
13 | 
14 | func parseDSN(rawDSN string) string {
15 | 	DSN := strings.TrimSpace(rawDSN)
16 | 
17 | 	if !strings.HasPrefix(DSN, "http://") &&
18 | 		!strings.HasPrefix(DSN, "https://") {
19 | 
20 | 		return fmt.Sprintf("http://%s", DSN)
21 | 	}
22 | 
23 | 	return DSN
24 | }
25 | 
26 | func makeHTTPClient(caPath string) (*http.Client, error) {
27 | 	destiny := karma.Describe(
28 | 		"method", "makeHTTPClient",
29 | 	)
30 | 
31 | 	if caPath == noneValue {
32 | 		return &http.Client{}, nil
33 | 	}
34 | 
35 | 	cert, err := ioutil.ReadFile(caPath)
36 | 	if err != nil {
37 | 		return nil, destiny.Describe(
38 | 			"CA path", caPath,
39 | 		).Describe(
40 | 			"error", err,
41 | 		).Reason(
42 | 			"can't read CA",
43 | 		)
44 | 	}
45 | 
46 | 	rootCAs, err := x509.SystemCertPool()
47 | 	if err != nil {
48 | 		return nil, destiny.Describe(
49 | 			"error", err,
50 | 		).Reason(
51 | 			"can't obtain root CA pool",
52 | 		)
53 | 	}
54 | 
55 | 	if rootCAs == nil {
56 | 		rootCAs = x509.NewCertPool()
57 | 	}
58 | 
59 | 	if hasAppended := rootCAs.AppendCertsFromPEM(cert); !hasAppended {
60 | 		return nil, destiny.Reason("CA cert hasn't been appended")
61 | 	}
62 | 
63 | 	tlsConfig := &tls.Config{
64 | 		RootCAs: rootCAs,
65 | 	}
66 | 
67 | 	return &http.Client{
68 | 		Transport: &http.Transport{
69 | 			TLSClientConfig: tlsConfig,
70 | 		},
71 | 	}, nil
72 | }
73 | 
--------------------------------------------------------------------------------
/configs/template/custom_key_template.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | 
3 | prefix="$1"
4 | 
5 | if [ -z "$prefix" ]; then
6 |     echo "Prefix is not defined."
7 |     exit 1
8 | fi
9 | 
10 | sed "s/elasticsearch\./$prefix.elasticsearch./g" -i template_elasticsearch_service.xml
11 | sed "s/None_pfx/$prefix/g" -i template_elasticsearch_service.xml
12 | 
13 | echo "Done."
14 | 
15 | exit 0
16 | 
--------------------------------------------------------------------------------
/configs/zabbix_agentd.d/zabbix-agent-extension-elasticsearch.conf:
--------------------------------------------------------------------------------
1 | UserParameter=elasticsearch.discovery[*], /usr/bin/zabbix-agent-extension-elasticsearch --discovery --elasticsearch $1 --agg-group $2 --user $3 --password $4 --ca $5 --hostname $6
2 | UserParameter=elasticsearch.stats[*], /usr/bin/zabbix-agent-extension-elasticsearch --zabbix $1 --elasticsearch $2 --prefix $3 --user $4 --password $5 --ca $6 --hostname $7
3 | #
4 | UserParameter=elasticsearch.indices.discovery[*], /usr/bin/zabbix-agent-extension-elasticsearch --type indices --discovery --elasticsearch $1 --agg-group $2 --user $3 --password $4 --ca $5 --hostname $6
5 | UserParameter=elasticsearch.indices.stats[*], /usr/bin/zabbix-agent-extension-elasticsearch --type indices --zabbix $1 --elasticsearch $2 --prefix $3 --agg-group $4 --user $5 --password $6 --ca $7 --hostname $8
6 | 
--------------------------------------------------------------------------------
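For orientation, every `create*` builder above is a pure helper: it takes the accumulated `[]*zsend.Metric` slice and returns it extended, so the calls chain and the result is shipped to the Zabbix trapper in one packet. Below is a minimal sketch of that wiring, not code from this extension's `main.go`: the hostname, prefix, and server address are illustrative placeholders, the node stats are assumed to have been fetched and decoded elsewhere (see `elastic.go`), and `NewPacket`/`NewSender`/`Send` are used as shown in the `blacked/go-zabbix` README.

```go
package main

import (
	zsend "github.com/blacked/go-zabbix"
)

// sendNodeStats sketches how the metric builders compose: each create*
// helper appends to the same slice, which is then packed and pushed to
// the Zabbix trapper. Error handling and flag parsing are omitted.
func sendNodeStats(nodesStats *ElasticNodesStats) {
	var metrics []*zsend.Metric

	hostname := "es-node-01" // placeholder: must match the monitored host name in Zabbix
	prefix := "None_pfx"     // placeholder: the real value comes from the --prefix flag

	metrics = createNodeStatsJVMMetrics(hostname, nodesStats, metrics, prefix)
	metrics = createNodeStatsHttp(hostname, nodesStats, metrics, prefix)
	metrics = createNodeStatsThreadPool(hostname, nodesStats, metrics, prefix)

	// Pack the metrics and push them; 10051 is the default trapper port.
	packet := zsend.NewPacket(metrics)
	sender := zsend.NewSender("zabbix.example.com", 10051) // placeholder server
	sender.Send(packet)
}
```

In the extension itself this wiring lives in `main.go`, driven by the flags referenced in the `UserParameter` lines above; on an agent host the pipeline is normally exercised through those keys (for example via `zabbix_get` against `elasticsearch.stats[...]`) rather than by calling the helpers directly.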