├── .env ├── dev.env └── prod.env ├── .github ├── FUNDING.yml └── workflows │ └── release.yml ├── LICENSE ├── Makefile ├── README.md ├── assets └── logo.png ├── cmd └── main.go ├── conf └── conf.yml ├── deploy ├── Dockerfile ├── docker-compose.yml └── entrypoint.sh ├── go.mod ├── go.sum └── internal ├── monitor ├── cpumonitor.go ├── diskmonitor.go ├── memorymonitor.go └── networkmonitor.go └── util ├── bytes.go ├── math.go ├── model.go ├── stringutil.go └── util_test.go /.env/dev.env: -------------------------------------------------------------------------------- 1 | HOST_CONF_PATH=../conf/conf.yml 2 | HOSTFS_PREFIX= 3 | -------------------------------------------------------------------------------- /.env/prod.env: -------------------------------------------------------------------------------- 1 | HOST_CONF_PATH=/mnt/user/appdata/unraid-simple-monitoring-api/conf.yml 2 | HOSTFS_PREFIX=/hostfs 3 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] 4 | patreon: # Replace with a single Patreon username 5 | open_collective: # Replace with a single Open Collective username 6 | ko_fi: # Replace with a single Ko-fi username 7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 9 | liberapay: # Replace with a single Liberapay username 10 | issuehunt: # Replace with a single IssueHunt username 11 | lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry 12 | polar: # Replace with a single Polar username 13 | buy_me_a_coffee: nebn 14 | thanks_dev: # Replace with a single thanks.dev username 15 | custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] 16 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Build and publish Docker image as GitHub package 2 | 3 | on: 4 | release: 5 | types: [released] 6 | 7 | env: 8 | REGISTRY: ghcr.io 9 | # github.repository as / 10 | 11 | jobs: 12 | test: 13 | runs-on: self-hosted 14 | steps: 15 | - name: Checkout branch 16 | uses: actions/checkout@v4 17 | 18 | - name: Set up Go 19 | uses: actions/setup-go@v5 20 | with: 21 | go-version: '1.22' 22 | 23 | - name: Test 24 | run: go test -v ./... 
25 | 26 | docker: 27 | needs: test 28 | runs-on: self-hosted 29 | steps: 30 | - name: Set lowercase image name 31 | run: echo "IMAGE_NAME=$(echo '${{ github.repository }}' | tr '[:upper:]' '[:lower:]')" >> $GITHUB_ENV 32 | 33 | - name: Set lowercase DockerHub user name 34 | run: echo "DH_USER=$(echo '${{ github.actor }}' | tr '[:upper:]' '[:lower:]')" >> $GITHUB_ENV 35 | 36 | # https://github.com/docker/login-action 37 | - name: Log into registry ${{ env.REGISTRY }} 38 | uses: docker/login-action@v3 39 | with: 40 | registry: ${{ env.REGISTRY }} 41 | username: ${{ github.actor }} 42 | password: ${{ secrets.GHCR_TOKEN }} 43 | 44 | - name: Log into DockerHub 45 | uses: docker/login-action@v3 46 | with: 47 | username: ${{ env.DH_USER }} 48 | password: ${{ secrets.DH_TOKEN }} 49 | 50 | 51 | - name: Build and push Docker image 52 | run: | 53 | docker compose --env-file .env/prod.env --file deploy/docker-compose.yml build --no-cache 54 | docker tag ${{ env.IMAGE_NAME }}:latest ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.ref_name }} 55 | docker tag ${{ env.IMAGE_NAME }}:latest ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest 56 | docker push ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.ref_name }} 57 | docker push ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest 58 | docker push ${{ env.IMAGE_NAME }}:latest 59 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 NebN 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | run: 2 | export CONF_PATH="conf/conf.yml" && \ 3 | go run ./cmd/main.go 4 | 5 | test: 6 | go test ./... 
7 | 8 | docker-run: 9 | sudo docker compose --env-file .env/dev.env --file deploy/docker-compose.yml up --build --force-recreate -d 10 | 11 | docker-build: 12 | sudo docker compose --env-file .env/dev.env --file deploy/docker-compose.yml build --no-cache 13 | 14 | docker-rm: 15 | sudo docker compose --env-file .env/dev.env --file deploy/docker-compose.yml rm -f 16 | 17 | docker-logs: 18 | sudo docker logs -f $(shell sudo docker ps | grep unraid-simple-monitoring-api | cut -d " " -f 1) 19 | 20 | docker-push-qa: test 21 | sudo docker compose --env-file .env/prod.env --file deploy/docker-compose.yml build --no-cache && \ 22 | sudo docker tag nebn/unraid-simple-monitoring-api:latest ghcr.io/nebn/unraid-simple-monitoring-api:qa && \ 23 | sudo docker push ghcr.io/nebn/unraid-simple-monitoring-api:qa 24 | 25 | docker-prune: 26 | sudo docker image prune -f && sudo docker container prune -f 27 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![GitHub Release](https://img.shields.io/github/v/release/nebn/unraid-simple-monitoring-api?display_name=tag&style=for-the-badge) 2 | ![GitHub commits since latest release](https://img.shields.io/github/commits-since/nebn/unraid-simple-monitoring-api/latest?style=for-the-badge) 3 | ![GitHub last commit](https://img.shields.io/github/last-commit/nebn/unraid-simple-monitoring-api?style=for-the-badge) 4 | 5 | # Unraid Simple Monitoring API 6 | Simple REST API to monitor basic metrics, currently supports: 7 | - Disk utilization and status 8 | - Network traffic 9 | - CPU load and temperature 10 | - Memory utilization 11 | 12 | Originally created for [Unraid](https://unraid.net/) for use with [Homepage](https://gethomepage.dev/widgets/services/customapi/). 13 | 14 | ## Table of Contents 15 | - [Utilization with Unraid](#unraid) 16 | - [Installation](#unraid-install) 17 | - [Configuration](#unraid-conf) 18 | - [Additional pools](#pools) 19 | - [CPU Temperature](#cpu-temp) 20 | - [Logging](#logging-level) 21 | - [CORS](#cors) 22 | - [ZFS](#unraid-zfs) 23 | - [Calling the API](#unraid-use) 24 | - [Integration with Homepage](#homepage) 25 | - [Configuration](#homepage-conf) 26 | - [Available Fields](#available-fields) 27 | - [How reliable are the measurements?](#caveat) 28 | - [Installing a QA build](#qa) 29 | 30 | ## Utilization with Unraid 31 | ### Installation 32 | Install from the Unraid community apps 33 | 34 | ### Configuration 35 | By default the application expects a configuration file in 36 | ``` 37 | /mnt/user/appdata/unraid-simple-monitoring-api/conf.yml 38 | ``` 39 | 40 | You can find an example file [here](https://github.com/NebN/unraid-simple-monitoring-api/blob/master/conf/conf.yml). It should look like this 41 | 42 | ```yaml 43 | networks: 44 | - eth0 45 | - anotherNetwork 46 | disks: 47 | cache: 48 | - /mnt/cache 49 | - /another/cache/mount 50 | array: 51 | - /mnt/disk1 52 | - /mnt/disk2 53 | ``` 54 | #### Additional pools 55 | You can add any number of custom disk pools. 56 | ```yaml 57 | disks: 58 | poolname: 59 | - /mnt/pooldisk1 60 | - /mnt/pooldisk2 61 | anotherpool: 62 | - /mnt/anotherdisk 63 | ``` 64 | 65 | #### CPU Temperature file 66 | You can specify which file to read to obtain the correct CPU temperature. 
67 | ```yaml
68 | cpuTemp: /path/to/temp/file
69 | ```
70 | To see where this information might be, you can try running the following command:
71 | ```bash
72 | for dir in /sys/class/hwmon/hwmon*; do
73 | echo "Directory: $dir"
74 | for file in $dir/temp*_input; do
75 | echo "Reading from: $file"
76 | cat $file
77 | done
78 | done
79 | ```
80 | If no file is specified in the configuration, **the software will attempt to figure it out by running a very quick stress test** (a few seconds) while monitoring plausible files. You can find the result of this search in the application's logs. This method is of questionable reliability; specifying which file should be read is the preferred option.
81 | 
82 | #### Logging level
83 | ```yaml
84 | loggingLevel: DEBUG
85 | ```
86 | Accepted values are `DEBUG`, `INFO`, `WARN`, and `ERROR`; it defaults to `INFO`.
87 | 
88 | #### CORS
89 | You can specify these CORS headers:
90 | - Access-Control-Allow-Origin
91 | - Access-Control-Allow-Methods
92 | - Access-Control-Allow-Headers
93 | 
94 | ```yaml
95 | cors:
96 | origin: "*"
97 | methods: "method, method"
98 | headers: "header-name, header-name"
99 | ```
100 | 
101 | 
102 | ### ZFS
103 | If any of the mount points listed in the configuration use ZFS, the application needs to run as privileged in order to obtain the correct utilization of ZFS datasets. The command `zfs list` is used to obtain this information, as conventional disk reading methods do not work for ZFS datasets.
104 | 
105 | If you are comfortable with running the container as privileged, follow these steps:
106 | - Unraid Docker Tab
107 | - `unraid-simple-monitoring-api` > Edit
108 | - Change `Privileged:` to `ON`
109 | - Apply
110 | 
111 | You can always decide to turn `Privileged:` back to `OFF`.
112 | > [!TIP]
113 | > If you are not using ZFS, there is no reason to run the container as privileged.
114 | 
115 | ### Calling the API
116 | Make a request to
117 | ```
118 | http://your-unraid-ip:24940
119 | ```
120 | 
121 | Click to view an example JSON response 122 | 123 | ```json 124 | { 125 | "array":[ 126 | { 127 | "mount":"/mnt/disk1", 128 | "total":3724, 129 | "used":1864, 130 | "free":1860, 131 | "used_percent":50.05, 132 | "free_percent":49.95, 133 | "temp":32, 134 | "disk_id":"WDC_WD40EFPX-1234567_WD-WXC12345678A", 135 | "is_spinning":true 136 | }, 137 | { 138 | "mount":"/mnt/disk2", 139 | "total":3724, 140 | "used":1366, 141 | "free":2358, 142 | "used_percent":36.68, 143 | "free_percent":63.32, 144 | "temp":34, 145 | "disk_id":"WDC_WD40EFPX-1234567_WD-WXC12345678B", 146 | "is_spinning":true 147 | }, 148 | { 149 | "mount":"/mnt/disk3", 150 | "total":931, 151 | "used":7, 152 | "free":924, 153 | "used_percent":0.75, 154 | "free_percent":99.25, 155 | "temp":0, 156 | "disk_id":"WDC_WD40EFPX-1234567_WD-WXC12345678C", 157 | "is_spinning":false 158 | } 159 | ], 160 | "cache":[ 161 | { 162 | "mount":"/mnt/cache", 163 | "total":465, 164 | "used":210, 165 | "free":255, 166 | "used_percent":45.16, 167 | "free_percent":54.84, 168 | "temp":37, 169 | "disk_id":"Samsung_SSD_870_EVO_1TB_S123456789", 170 | "is_spinning":true 171 | } 172 | ], 173 | "pools": [ 174 | { 175 | "name": "poolname", 176 | "total": { 177 | "mount": "/mnt/disk*", 178 | "total": 6517, 179 | "used": 5478, 180 | "free": 1039, 181 | "used_percent": 84.06, 182 | "free_percent": 15.94, 183 | "temp": 0, 184 | "disk_id": "WDC_WD40EFPX-1234567_WD-WXC12345678C WDC_WD40EFPX-1234567_WD-WXC12345678C", 185 | "is_spinning": false 186 | }, 187 | "disks": [ 188 | { 189 | "mount": "/mnt/disk1", 190 | "total": 3724, 191 | "used": 3262, 192 | "free": 462, 193 | "used_percent": 87.59, 194 | "free_percent": 12.41, 195 | "temp": 0, 196 | "disk_id": "WDC_WD40EFPX-1234567_WD-WXC12345678C", 197 | "is_spinning": false 198 | }, 199 | { 200 | "mount": "/mnt/disk5", 201 | "total": 2793, 202 | "used": 2216, 203 | "free": 577, 204 | "used_percent": 79.34, 205 | "free_percent": 20.66, 206 | "temp": 0, 207 | "disk_id": "WDC_WD40EFPX-1234567_WD-WXC12345678C", 208 | "is_spinning": false 209 | } 210 | ] 211 | }], 212 | "parity":[ 213 | { 214 | "name":"parity", 215 | "temp":31, 216 | "disk_id":"WDC_WD40EFPX-1234567_WD-WXC12345678D", 217 | "is_spinning":true 218 | }, 219 | { 220 | "name":"parity2", 221 | "temp":0, 222 | "disk_id":"", 223 | "is_spinning":false 224 | } 225 | ], 226 | "network":[ 227 | { 228 | "interface":"docker0", 229 | "rx_MiBs":0, 230 | "tx_MiBs":0, 231 | "rx_Mbps":0, 232 | "tx_Mbps":0 233 | }, 234 | { 235 | "interface":"eth0", 236 | "rx_MiBs":0.02, 237 | "tx_MiBs":5.22, 238 | "rx_Mbps":0.13, 239 | "tx_Mbps":43.8 240 | } 241 | ], 242 | "array_total":{ 243 | "mount":"/mnt/disk*", 244 | "total":13034, 245 | "used":3342, 246 | "free":9692, 247 | "used_percent":25.64, 248 | "free_percent":74.36, 249 | "disk_id":"WDC_WD40EFPX-1234567_WD-WXC12345678A WDC_WD40EFPX-1234567_WD-WXC12345678B WDC_WD40EFPX-1234567_WD-WXC12345678C" 250 | }, 251 | "cache_total":{ 252 | "mount":"/mnt/cache", 253 | "total":465, 254 | "used":210, 255 | "free":255, 256 | "used_percent":45.16, 257 | "free_percent":54.84, 258 | "disk_id":"Samsung_SSD_870_EVO_1TB_S123456789" 259 | }, 260 | "network_total":{ 261 | "interface":"docker0 eth0", 262 | "rx_MiBs":0.02, 263 | "tx_MiBs":5.22, 264 | "rx_Mbps":0.13, 265 | "tx_Mbps":43.8 266 | }, 267 | "cpu":{ 268 | "load_percent":10.6, 269 | "temp":41 270 | }, 271 | "cores": [ 272 | { 273 | "name": "cpu0", 274 | "load_percent": 6.55 275 | }, 276 | { 277 | "name": "cpu1", 278 | "load_percent": 8.55 279 | }, 280 | { 281 | "name": "cpu2", 282 | "load_percent": 
4.94 283 | }, 284 | { 285 | "name": "cpu3", 286 | "load_percent": 8.37 287 | } 288 | ], 289 | "memory":{ 290 | "total":15788, 291 | "used":1288, 292 | "free":14500, 293 | "used_percent":8.16, 294 | "free_percent":91.84 295 | }, 296 | "error":null 297 | } 298 | ``` 299 | 300 |
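If you want to consume the endpoint from your own tooling rather than through Homepage, a minimal Go client might look like the sketch below. It is only an illustration, not part of the project: the struct covers just the `array_total` fields shown in the example response above, and `your-unraid-ip` is a placeholder for your server's address.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Query the API; 24940 is the port the service listens on.
	resp, err := http.Get("http://your-unraid-ip:24940")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode only the fields we care about; unknown fields are ignored.
	var report struct {
		ArrayTotal struct {
			Total       uint64  `json:"total"`
			Used        uint64  `json:"used"`
			UsedPercent float64 `json:"used_percent"`
		} `json:"array_total"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&report); err != nil {
		panic(err)
	}

	fmt.Printf("array: %d/%d GiB used (%.2f%%)\n",
		report.ArrayTotal.Used, report.ArrayTotal.Total, report.ArrayTotal.UsedPercent)
}
```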
301 | 
302 | ## Integration with Homepage
303 | ![image](https://github.com/NebN/unraid-simple-monitoring-api/assets/57036949/0175ffbd-fe84-494c-a29f-264f09aae6f3)
304 | ### Homepage configuration
305 | Check out [Homepage's official custom API widget documentation](https://gethomepage.dev/widgets/services/customapi/).
306 | If you want your widget to look like the example above, showing cache and network data, your Homepage `services.yaml` should look like this:
307 | 
308 | ```yaml
309 | - Category:
310 | - Unraid:
311 | icon: unraid.png
312 | href: http://
313 | widget:
314 | type: customapi
315 | url: http://:24940
316 | method: GET # this doesn't matter
317 | mappings:
318 | - field:
319 | cache_total: free
320 | label: cache free
321 | format: number
322 | suffix: GiB
323 | - field:
324 | cache_total: free_percent
325 | label: percent
326 | format: percent
327 | - field:
328 | network_total: rx_MiBs
329 | label: rx
330 | format: float
331 | suffix: MiB/s
332 | - field:
333 | network_total: tx_MiBs
334 | label: tx
335 | format: float
336 | suffix: MiB/s
337 | ```
338 | 
339 | #### Available fields
340 | ##### Array Total
341 | ```yaml
342 | - field:
343 | array_total: free # or used, total, used_percent, free_percent, temp, mount, disk_id, is_spinning
344 | label: your label
345 | format: number # or percent
346 | suffix: GiB # or nothing in case of percentages, or whatever you prefer
347 | ```
348 | 
349 | 
350 | ##### Cache Total
351 | ```yaml
352 | - field:
353 | cache_total: free # or used, total, used_percent, free_percent, temp, mount, disk_id, is_spinning
354 | label: your label
355 | format: number # or percent
356 | suffix: GiB # or nothing in case of percentages, or whatever you prefer
357 | ```
358 | 
359 | 360 | ##### Specific Disk 361 | ```yaml 362 | - field: 363 | array: # or cache 364 | 0: free 365 | # '0' is the index of the disk, 0 = the first 366 | # 'free' is the field you wish to read 367 | label: your label 368 | format: number 369 | suffix: GiB 370 | ``` 371 |
372 | 373 | ##### Custom pool 374 | ```yaml 375 | - field: 376 | pools: 377 | 0: 378 | total: free 379 | # '0' is the index of the pool, 0 = the first 380 | # 'free' is the field you wish to read 381 | label: your label 382 | format: number 383 | suffix: GiB 384 | ``` 385 |
386 | 387 | ##### Specific disk in custom pool 388 | ```yaml 389 | - field: 390 | pools: 391 | 0: # '0' is the index of the pool, 0 = the first 392 | disks: # reading 'disks' list 393 | 0: free # '0' is the index of the disk in the list 394 | label: your label 395 | format: number 396 | suffix: GiB 397 | ``` 398 |
399 | 400 | ##### Parity 401 | ```yaml 402 | - field: 403 | parity: 404 | 0: temp 405 | # '0' is the index of the parity disk, 0 = the first 406 | # 'temp' is the field you wish to read 407 | label: your label 408 | format: number 409 | suffix: ° 410 | ``` 411 |
412 | 413 | ##### Network Total 414 | ```yaml 415 | - field: 416 | network_total: rx_MiBs # or tx_MiBs, rx_Mbps, tx_Mbps 417 | label: your label 418 | format: float # or 'number' to round to the nearest integer 419 | suffix: MiB/s # or Mbps, or whatever you prefer 420 | ``` 421 |
422 | 423 | ##### Specific Network 424 | ```yaml 425 | - field: 426 | network: 427 | 0: rx_MiBs 428 | # '0' is the index of the network, 0 = the first 429 | # 'rx_MiBs' is the field you wish to read 430 | label: your label 431 | format: float 432 | suffix: MiB/s 433 | ``` 434 |
435 | 436 | ##### CPU 437 | ```yaml 438 | - field: 439 | cpu: load_percent # or temp 440 | label: your label 441 | format: percent # or number 442 | ``` 443 | 444 |
445 | 446 | ##### Cores 447 | ```yaml 448 | - field: 449 | cores: 450 | 0: load_percent 451 | label: cpu0 452 | format: percent 453 | ``` 454 | 455 |
456 | 457 | ##### Memory 458 | ```yaml 459 | - field: 460 | memory: used_percent # or free_percent, total, used, free 461 | label: your label 462 | format: percent 463 | ``` 464 |
465 | 466 | > [!TIP] 467 | > If you wish to show more than the usual 4 allowed fields, there are two solutions: 468 | > - you can set the widget property `display: list` to have the fields displayed in a vertical list that can be arbitrarily long 469 | > ```yaml 470 | > widget: 471 | > type: customapi 472 | > url: http://:24940 473 | > display: list 474 | > mappings: 475 | > ... 476 | > ``` 477 | > ![image](https://github.com/NebN/unraid-simple-monitoring-api/assets/57036949/ed4b694c-ac76-4516-a722-573510e0271c) 478 | > 479 | >
480 | > 481 | > - instead of `widget` you can use `widgets` and specify a list of widgets, each one is able to display up to 4 fields 482 | > ```yaml 483 | > widgets: 484 | > - type: customapi 485 | > url: http://:24940 486 | > method: GET 487 | > mappings: 488 | > ... 489 | > 490 | > - type: customapi 491 | > url: http://:24940 492 | > method: GET 493 | > mappings: 494 | > ... 495 | > ``` 496 | > ![image](https://github.com/user-attachments/assets/f0ae80ab-2884-4bca-90cb-e52c94baa891) 497 | > 498 | >
499 | > 500 | > You can also combine the two: 501 | > ![image](https://github.com/user-attachments/assets/209db47f-9d96-47f2-9e1d-89fd74f0d93a) 502 | 503 | 504 | 505 | 506 | 507 | 508 | ## How reliable are the measurements? 509 | The goal of this API is to be simple, fast, and lightweight. 510 | For these reasons, the measurements provided are not as accurate as they could be. 511 | 512 | ### Disk 513 | Disk utilization is rounded down to the nearest GiB. 514 | 515 | ### Network and CPU 516 | Both Network and CPU usage need to be measured for some time interval. Typically, to get an accurate measurement, you would monitor these for a few seconds before providing a response. 517 | To avoid having to either: 518 | - wait for the measurement to be completed before responding 519 | - continuosly measure them to have a recent measurement ready to respond with 520 | 521 | A different approach has been taken: a snapshot of Network and CPU usage is taken every time the API is called, and the response is the average Network and CPU usage between the current and last API call. 522 | This ensures that the response is quick and reasonably accurate, without having the process continuously read Network and CPU data even when not required. 523 | 524 | ## Installing a QA build 525 | Everyone's Unraid setup is different, therefore, when implementing a new feature or fixing a bug specific to a certain setup, it might be necessary that the end user (you) install a testing deployment to verify that everything works as expected. 526 | 527 | To do so follow these steps: 528 | - Unraid Docker Tab 529 | - `unraid-simple-monitoring-api` > Stop 530 | - Add container 531 | - Template > `unraid-simple-monitoring-api` 532 | - Change the name to something else, e.g.: `unraid-simple-monitoring-api-QA` 533 | - Change `Repository:` to `ghcr.io/nebn/unraid-simple-monitoring-api:qa` (The actual tag might change, currently using `qa`) 534 | - Apply 535 | 536 | You should now have 2 installations on your Docker Tab, and can switch between them by stopping/starting them. 537 | 538 | > [!NOTE] 539 | > Avoid having both active at the same time, as they share the same port and would therefore be unable to start the HTTP service. 540 | 541 | > [!WARNING] 542 | > It is a good idea to switch back to the official build as soon as whatever fix you were testing is deployed to it. QA builds are unstable and are likely to not work correctly if you update them further. 
543 | -------------------------------------------------------------------------------- /assets/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NebN/unraid-simple-monitoring-api/75a7f486fa5114b918fcf78bf81086f363520b43/assets/logo.png -------------------------------------------------------------------------------- /cmd/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "log/slog" 7 | "net/http" 8 | "os" 9 | "reflect" 10 | "strings" 11 | 12 | "github.com/NebN/unraid-simple-monitoring-api/internal/monitor" 13 | "gopkg.in/yaml.v3" 14 | ) 15 | 16 | const PORT = "24940" 17 | 18 | func main() { 19 | slog.SetLogLoggerLevel(slog.LevelDebug) 20 | mux := http.NewServeMux() 21 | confPath := os.Getenv("CONF_PATH") 22 | conf, err := readConf(confPath) 23 | if err != nil { 24 | switch err := err.(type) { 25 | case *os.PathError: 26 | defaultInfo := "Default location is /mnt/user/appdata/unraid-simple-monitoring-api/conf.yml. " + 27 | "More info @ https://github.com/NebN/unraid-simple-monitoring-api" 28 | 29 | if strings.Contains(err.Error(), "is a directory") { 30 | slog.Error("Configuration file has been created as a directory. " + 31 | "Please delete it and create a configuration file in its place. " + defaultInfo) 32 | } else { 33 | slog.Error("Configuration file not found. Please create it. " + defaultInfo) 34 | } 35 | 36 | case *yaml.TypeError: 37 | slog.Error("Configuration file is malformed", "error", err.Error()) 38 | 39 | default: 40 | slog.Error("Unable to read configuration file", "error", err.Error(), "type", reflect.TypeOf(err)) 41 | } 42 | return 43 | } else { 44 | var loggingLevel slog.Level 45 | loggingLevel.UnmarshalText([]byte(conf.LoggingLevel)) 46 | slog.SetLogLoggerLevel(loggingLevel) 47 | slog.Info("Logging", slog.String("level", loggingLevel.Level().String())) 48 | slog.Debug("Configuration", "conf", conf) 49 | } 50 | 51 | rootHandler := NewHandler(conf) 52 | mux.Handle("/", &rootHandler) 53 | 54 | slog.Info(fmt.Sprintf("API running on port %s ...", PORT)) 55 | err = http.ListenAndServe(fmt.Sprintf(":%s", "24940"), mux) 56 | if err != nil { 57 | slog.Error("Cannot start API", slog.String("error", err.Error())) 58 | } 59 | } 60 | 61 | type Conf struct { 62 | Networks []string `yaml:"networks"` 63 | Disks map[string][]string `yaml:"disks"` 64 | LoggingLevel string `yaml:"loggingLevel"` 65 | CpuTemp *string `yaml:"cpuTemp"` 66 | Include []string `yaml:"include"` 67 | Exclude []string `yaml:"exclude"` 68 | Cors *Cors `yaml:"cors"` 69 | } 70 | 71 | type Cors struct { 72 | Origin string `yaml:"origin"` 73 | Methods string `yaml:"methods"` 74 | Headers string `yaml:"headers"` 75 | } 76 | 77 | func readConf(path string) (Conf, error) { 78 | conf := Conf{} 79 | content, err := os.ReadFile(path) 80 | if err != nil { 81 | return conf, err 82 | } 83 | 84 | err = yaml.Unmarshal(content, &conf) 85 | if err != nil { 86 | return conf, err 87 | } 88 | 89 | return conf, nil 90 | } 91 | 92 | type handler struct { 93 | NetworkMonitor monitor.NetworkMonitor 94 | DiskMonitor monitor.DiskMonitor 95 | CpuMonitor monitor.CpuMonitor 96 | MemoryMonitor monitor.MemoryMonitor 97 | Cors *Cors 98 | } 99 | 100 | func NewHandler(conf Conf) (handler handler) { 101 | handler.DiskMonitor = monitor.NewDiskMonitor(conf.Disks) 102 | handler.NetworkMonitor = monitor.NewNetworkMonitor(conf.Networks) 103 | handler.CpuMonitor = 
monitor.NewCpuMonitor(conf.CpuTemp) 104 | handler.Cors = conf.Cors 105 | return 106 | } 107 | 108 | func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { 109 | 110 | slog.Debug("Request received", slog.String("request", fmt.Sprintf("%+v\n", r))) 111 | 112 | diskUsage := h.DiskMonitor.ComputeDiskUsage() 113 | network := h.NetworkMonitor.ComputeNetworkRate() 114 | cacheTotal := monitor.AggregateDiskStatuses(diskUsage.Cache) 115 | arrayTotal := monitor.AggregateDiskStatuses(diskUsage.Array) 116 | networkTotal := monitor.AggregateNetworkRates(network) 117 | cpu, cores := h.CpuMonitor.ComputeCpuStatus() 118 | memory := h.MemoryMonitor.ComputeMemoryUsage() 119 | 120 | response := Report{ 121 | Cache: diskUsage.Cache, 122 | Array: diskUsage.Array, 123 | Pools: diskUsage.Pools, 124 | Parity: diskUsage.Party, 125 | Network: network, 126 | ArrayTotal: arrayTotal, 127 | CacheTotal: cacheTotal, 128 | NetworkTotal: networkTotal, 129 | Cpu: cpu, 130 | Cores: cores, 131 | Memory: memory, 132 | Error: nil, 133 | } 134 | 135 | w.Header().Set("Content-Type", "application/json") 136 | if h.Cors != nil { 137 | w.Header().Set("Access-Control-Allow-Origin", h.Cors.Origin) 138 | w.Header().Set("Access-Control-Allow-Methods", h.Cors.Methods) 139 | w.Header().Set("Access-Control-Allow-Headers", h.Cors.Headers) 140 | } 141 | responseJson, err := json.Marshal(response) 142 | if err != nil { 143 | slog.Error("Error trying to respond to API call", 144 | slog.String("error", err.Error()), 145 | slog.String("attempting to marshal", fmt.Sprintf("%+v\n", response))) 146 | errorResponse, _ := json.Marshal(newErrorReport(err.Error())) 147 | w.Write([]byte(errorResponse)) 148 | } else { 149 | slog.Debug("Responding to request", "response", responseJson) 150 | w.Write([]byte(responseJson)) 151 | } 152 | } 153 | 154 | type Report struct { 155 | Array []monitor.DiskStatus `json:"array"` 156 | Cache []monitor.DiskStatus `json:"cache"` 157 | Pools []monitor.PoolStatus `json:"pools"` 158 | Parity []monitor.ParityStatus `json:"parity"` 159 | Network []monitor.NetworkRate `json:"network"` 160 | ArrayTotal monitor.DiskStatus `json:"array_total"` 161 | CacheTotal monitor.DiskStatus `json:"cache_total"` 162 | NetworkTotal monitor.NetworkRate `json:"network_total"` 163 | Cpu monitor.CpuStatus `json:"cpu"` 164 | Cores []monitor.CoreStatus `json:"cores"` 165 | Memory monitor.MemoryStatus `json:"memory"` 166 | Error *string `json:"error"` 167 | } 168 | 169 | func newErrorReport(err string) (report Report) { 170 | 171 | report.Array = make([]monitor.DiskStatus, 0) 172 | report.Cache = make([]monitor.DiskStatus, 0) 173 | report.Network = make([]monitor.NetworkRate, 0) 174 | 175 | report.Error = &err 176 | 177 | return 178 | } 179 | -------------------------------------------------------------------------------- /conf/conf.yml: -------------------------------------------------------------------------------- 1 | networks: 2 | - eth0 3 | disks: 4 | cache: 5 | - /mnt/cache 6 | array: 7 | - /mnt/disk1 8 | - /mnt/disk2 9 | - /mnt/disk3 10 | - /mnt/disk4 11 | - /mnt/disk5 12 | - /mnt/disk6 13 | - /mnt/disk7 14 | - /mnt/disk8 15 | - /mnt/disk9 16 | - /mnt/disk10 -------------------------------------------------------------------------------- /deploy/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:alpine as build 2 | 3 | RUN apk update && apk upgrade 4 | 5 | WORKDIR /app 6 | 7 | COPY go.mod go.sum ./ 8 | RUN go mod download 9 | 10 | COPY cmd ./cmd 11 | COPY internal 
./internal 12 | 13 | RUN CGO_ENABLED=0 GOOS=linux go build -o /unraid-simple-monitoring-api ./cmd 14 | 15 | 16 | FROM alpine as run 17 | 18 | RUN apk update && apk upgrade && apk add --no-cache zfs 19 | 20 | COPY --from=build /unraid-simple-monitoring-api . 21 | 22 | COPY deploy/entrypoint.sh /entrypoint.sh 23 | RUN chmod +x /entrypoint.sh 24 | 25 | ARG HOSTFS_PREFIX 26 | 27 | ENV CONF_PATH=/app/conf.yml 28 | ENV HOSTFS_PREFIX=${HOSTFS_PREFIX} 29 | 30 | ENTRYPOINT [ "/entrypoint.sh" ] 31 | 32 | CMD ["/unraid-simple-monitoring-api"] 33 | -------------------------------------------------------------------------------- /deploy/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | 5 | api: 6 | build: 7 | context: .. 8 | args: 9 | HOSTFS_PREFIX: ${HOSTFS_PREFIX} 10 | dockerfile: deploy/Dockerfile 11 | no_cache: false 12 | # private repo 13 | #image: docker.local.nebn.dev/unraid-simple-monitoring-api:latest 14 | image: nebn/unraid-simple-monitoring-api:latest 15 | network_mode: host 16 | ports: 17 | - "24940:24940" 18 | volumes: 19 | - $HOST_CONF_PATH:/app/conf.yml 20 | restart: unless-stopped 21 | -------------------------------------------------------------------------------- /deploy/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # zfs list will not work if the container is being run as unprivileged 4 | if zfs list > /dev/null 2>&1; then 5 | export ZFS_OK="true" 6 | else 7 | export ZFS_OK="false" 8 | fi 9 | 10 | echo "ZFS_OK=${ZFS_OK}" >> /etc/environment 11 | 12 | # Source the /etc/environment 13 | . /etc/environment 14 | 15 | exec "$@" 16 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/NebN/unraid-simple-monitoring-api 2 | 3 | go 1.22.0 4 | 5 | require ( 6 | github.com/shirou/gopsutil v3.21.11+incompatible 7 | gopkg.in/yaml.v3 v3.0.1 8 | ) 9 | 10 | require ( 11 | github.com/go-ole/go-ole v1.2.6 // indirect 12 | github.com/yusufpapurcu/wmi v1.2.4 // indirect 13 | golang.org/x/sys v0.18.0 // indirect 14 | gopkg.in/ini.v1 v1.67.0 // indirect 15 | ) 16 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= 2 | github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= 3 | github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= 4 | github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= 5 | github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= 6 | github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= 7 | golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 8 | golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= 9 | golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 10 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= 11 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 12 | gopkg.in/ini.v1 v1.67.0 
h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= 13 | gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= 14 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 15 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 16 | -------------------------------------------------------------------------------- /internal/monitor/cpumonitor.go: -------------------------------------------------------------------------------- 1 | package monitor 2 | 3 | import ( 4 | "bufio" 5 | "context" 6 | "fmt" 7 | "log/slog" 8 | "os" 9 | "path/filepath" 10 | "runtime" 11 | "strconv" 12 | "strings" 13 | "sync" 14 | "time" 15 | 16 | "github.com/NebN/unraid-simple-monitoring-api/internal/util" 17 | ) 18 | 19 | type CoreStatus struct { 20 | Name string `json:"name"` 21 | LoadPercent float64 `json:"load_percent"` 22 | } 23 | 24 | type CpuStatus struct { 25 | LoadPercent float64 `json:"load_percent"` 26 | Temp int `json:"temp"` 27 | } 28 | 29 | type CpuSnapshot struct { 30 | name string 31 | idle uint64 32 | total uint64 33 | } 34 | 35 | type CpuMonitor struct { 36 | snapshot CpuSnapshot 37 | coresSnapshots []CpuSnapshot 38 | mu sync.Mutex 39 | cpuTempPath *string 40 | } 41 | 42 | func NewCpuMonitor(cpuTempPath *string) (cm CpuMonitor) { 43 | cm.cpuTempPath = cpuTempPath 44 | cm.snapshot, cm.coresSnapshots = newCpuSnapshot() 45 | cm.cpuTempPath = locateCpuTempFile(cpuTempPath) 46 | return 47 | } 48 | 49 | func (m *CpuMonitor) ComputeCpuStatus() (status CpuStatus, cores []CoreStatus) { 50 | m.mu.Lock() 51 | defer m.mu.Unlock() 52 | 53 | snapshot, coresSnapshots := newCpuSnapshot() 54 | oldSnapshot := m.snapshot 55 | oldCoreSnapshots := m.coresSnapshots 56 | 57 | status.LoadPercent = computeLoad(oldSnapshot, snapshot) 58 | status.Temp = m.temp() 59 | 60 | for i, coreSnapshot := range coresSnapshots { 61 | coreStatus := CoreStatus{ 62 | Name: coreSnapshot.name, 63 | LoadPercent: computeLoad(oldCoreSnapshots[i], coreSnapshot), 64 | } 65 | cores = append(cores, coreStatus) 66 | } 67 | 68 | m.snapshot = snapshot 69 | m.coresSnapshots = coresSnapshots 70 | 71 | slog.Debug("CPU status computed", "status", status) 72 | return 73 | } 74 | 75 | func computeLoad(a CpuSnapshot, b CpuSnapshot) float64 { 76 | deltaIdle := b.idle - a.idle 77 | deltaTotal := b.total - a.total 78 | 79 | slog.Debug("CPU snapshot delta", "idle", deltaIdle, "total", deltaTotal) 80 | loadPercent := 0.0 81 | if deltaTotal > 0 { 82 | loadPercent = (1 - (float64(deltaIdle) / float64(deltaTotal))) * 100 83 | } else { 84 | slog.Warn("CPU delta between snapshots' total values is 0, cpu load percent will be returned as 0") 85 | } 86 | 87 | return util.RoundTwoDecimals(loadPercent) 88 | } 89 | 90 | func newCpuSnapshot() (cpu CpuSnapshot, cores []CpuSnapshot) { 91 | stat, err := os.Open("/proc/stat") 92 | if err != nil { 93 | slog.Error("CPU Cannot read data", slog.String("error", err.Error())) 94 | } 95 | defer stat.Close() 96 | 97 | scanner := bufio.NewScanner(stat) 98 | 99 | for hasNext := scanner.Scan(); hasNext; hasNext = scanner.Scan() { 100 | line := scanner.Text() 101 | slog.Debug("CPU", "line", line) 102 | fields := strings.Fields(line) 103 | name := fields[0] 104 | if !strings.Contains(name, ("cpu")) { 105 | continue 106 | } 107 | 108 | if name == "cpu" { 109 | cpu = parseCpuStatLine(fields) 110 | cpu.name = name 111 | } else { 112 | core := parseCpuStatLine(fields) 113 | core.name = name 114 | cores = append(cores, core) 115 | } 116 | 117 | } 118 | 119 | return 120 | } 121 | 122 | 
func parseCpuStatLine(items []string) (snapshot CpuSnapshot) { 123 | 124 | var sum uint64 = 0 125 | for i, item := range items[1:] { 126 | parsed, err := strconv.ParseUint(item, 10, 64) 127 | if err != nil { 128 | slog.Error("CPU cannot parse cpu data from /proc/stat", 129 | slog.String("trying to parse", item), 130 | slog.String("error", err.Error())) 131 | } 132 | sum += parsed 133 | slog.Debug("CPU parsed", "value", parsed) 134 | if i == 3 { 135 | slog.Debug("CPU idle value found", "idle", parsed) 136 | snapshot.idle = parsed 137 | } 138 | } 139 | 140 | snapshot.total = sum 141 | slog.Debug("CPU", "snapshot", snapshot) 142 | 143 | return 144 | } 145 | 146 | func (monitor *CpuMonitor) temp() int { 147 | if monitor.cpuTempPath == nil { 148 | return 0 149 | } 150 | 151 | temp, err := readCpuTemp(*monitor.cpuTempPath) 152 | if err != nil { 153 | slog.Error("CPU error while reading temperature.", slog.String("error", err.Error())) 154 | return 0 155 | } 156 | 157 | return temp 158 | } 159 | 160 | func locateCpuTempFile(cpuTempPath *string) *string { 161 | if cpuTempPath != nil { 162 | return cpuTempPath 163 | } 164 | 165 | slog.Info("CPU temperature file not defined, attempting to locate it. " + 166 | "It can be specified in the configuration file. \"cpuTemp: /path/to/file\"") 167 | 168 | possiblePatterns := []string{ 169 | // "/sys/class/thermal/thermal_zone*/temp", unsure if checking this makes sense 170 | "/sys/class/hwmon/hwmon*/temp1_input", 171 | } 172 | 173 | type cpuFileGuess struct { 174 | path string 175 | initialTemp int 176 | finalTemp int 177 | delta int 178 | } 179 | 180 | var guesses = make([]cpuFileGuess, 0) 181 | 182 | for _, pattern := range possiblePatterns { 183 | files, err := filepath.Glob(pattern) 184 | if err != nil { 185 | slog.Error("CPU Unable to read files", slog.String("pattern", pattern), slog.String("error", err.Error())) 186 | } 187 | 188 | for _, file := range files { 189 | cpuTemp, err := readCpuTemp(file) 190 | if err != nil { 191 | slog.Warn(err.Error()) 192 | } else { 193 | guesses = append(guesses, cpuFileGuess{ 194 | path: file, 195 | initialTemp: cpuTemp, 196 | }) 197 | } 198 | } 199 | } 200 | 201 | stressCPU(5 * time.Second) 202 | 203 | for i, guess := range guesses { 204 | newTemp, err := readCpuTemp(guess.path) 205 | if err != nil { 206 | slog.Warn(err.Error()) 207 | } else { 208 | guessAtIndex := &guesses[i] 209 | guessAtIndex.finalTemp = newTemp 210 | guessAtIndex.delta = newTemp - guessAtIndex.initialTemp 211 | } 212 | } 213 | 214 | var bestGuess cpuFileGuess = cpuFileGuess{ 215 | delta: 0, 216 | } 217 | for _, guess := range guesses { 218 | if guess.delta > bestGuess.delta { 219 | bestGuess = guess 220 | } 221 | } 222 | 223 | if bestGuess.path != "" { 224 | slog.Info("Best guess for CPU temperature file", "path", bestGuess.path, 225 | "initial temp", bestGuess.initialTemp, 226 | "final temp", bestGuess.finalTemp) 227 | return &bestGuess.path 228 | } else { 229 | slog.Warn("Was unable to find a suitable CPU temperature file") 230 | if slog.Default().Enabled(context.Background(), slog.LevelDebug) { 231 | slog.Debug("Guesses:") 232 | for _, guess := range guesses { 233 | slog.Debug(fmt.Sprintf("%v", guess)) 234 | } 235 | } 236 | return nil 237 | } 238 | } 239 | 240 | func readCpuTemp(path string) (int, error) { 241 | stat, err := os.Open(path) 242 | if err != nil { 243 | return 0, err 244 | } 245 | defer stat.Close() 246 | 247 | scanner := bufio.NewScanner(stat) 248 | if !scanner.Scan() { 249 | return 0, fmt.Errorf("unable to read file for CPU 
temp. path=%s", path) 250 | } 251 | 252 | firstLine := scanner.Text() 253 | slog.Debug("CPU", "temp line", firstLine) 254 | parsed, err := strconv.Atoi(firstLine) 255 | if err != nil { 256 | return 0, fmt.Errorf("unable to parse CPU temp data. string=%s, error=%s", firstLine, err.Error()) 257 | } 258 | 259 | return parsed / 1000, nil 260 | } 261 | 262 | func stressCPU(duration time.Duration) { 263 | slog.Info("Running a very quick CPU stress test to attempt to locate the temperature file.", 264 | "duration", duration) 265 | 266 | stress := func(wg *sync.WaitGroup) { 267 | defer wg.Done() 268 | end := time.Now().Add(duration) 269 | for time.Now().Before(end) { 270 | for i := 0; i < 100000; i++ { 271 | _ = i * i 272 | } 273 | } 274 | } 275 | 276 | var wg sync.WaitGroup 277 | cpus := runtime.NumCPU() 278 | runtime.GOMAXPROCS(cpus) 279 | wg.Add(cpus) 280 | 281 | for range cpus { 282 | go stress(&wg) 283 | } 284 | 285 | wg.Wait() 286 | } 287 | -------------------------------------------------------------------------------- /internal/monitor/diskmonitor.go: -------------------------------------------------------------------------------- 1 | package monitor 2 | 3 | import ( 4 | "bufio" 5 | "os" 6 | "os/exec" 7 | "path/filepath" 8 | "sort" 9 | "strconv" 10 | "strings" 11 | "sync" 12 | 13 | "log/slog" 14 | 15 | "github.com/NebN/unraid-simple-monitoring-api/internal/util" 16 | "github.com/shirou/gopsutil/disk" 17 | "gopkg.in/ini.v1" 18 | ) 19 | 20 | const parityLabel = "parity" 21 | 22 | type Pool struct { 23 | Name string `yaml:"name"` 24 | Mounts []string `yaml:"mounts"` 25 | } 26 | 27 | type DiskStatus struct { 28 | Name string `json:"-"` 29 | Path string `json:"mount"` 30 | Total uint64 `json:"total"` 31 | Used uint64 `json:"used"` 32 | Free uint64 `json:"free"` 33 | UsedPercent float64 `json:"used_percent"` 34 | FreePercent float64 `json:"free_percent"` 35 | Temp uint64 `json:"temp"` 36 | Id string `json:"disk_id"` 37 | IsSpinning bool `json:"is_spinning"` 38 | } 39 | 40 | type ParityStatus struct { 41 | Name string `json:"name"` 42 | Temp uint64 `json:"temp"` 43 | Id string `json:"disk_id"` 44 | IsSpinning bool `json:"is_spinning"` 45 | } 46 | 47 | type PoolStatus struct { 48 | Name string `json:"name"` 49 | Total DiskStatus `json:"total"` 50 | Disks []DiskStatus `json:"disks"` 51 | } 52 | 53 | type DiskIni struct { 54 | Id string 55 | Temp uint64 56 | Spundown bool 57 | } 58 | 59 | type DiskMonitor struct { 60 | pools []Pool 61 | checkZfs bool 62 | } 63 | 64 | type ZfsDataset struct { 65 | Name string 66 | Used string 67 | Avail string 68 | Refer string 69 | Mountpoint string 70 | } 71 | 72 | func NewDiskMonitor(disks map[string][]string) (dm DiskMonitor) { 73 | pools := make([]Pool, 0, len(disks)) 74 | for name, mounts := range disks { 75 | pools = append(pools, Pool{ 76 | Name: name, 77 | Mounts: mounts, 78 | }) 79 | } 80 | dm.pools = pools 81 | 82 | checkZfsString := os.Getenv("ZFS_OK") 83 | checkZfsBool, err := strconv.ParseBool(checkZfsString) 84 | 85 | if err != nil { 86 | slog.Error("Disk unable to parse env variable as bool", 87 | slog.String("variable name", "ZFS_OK"), 88 | slog.String("variable value", checkZfsString)) 89 | } 90 | 91 | dm.checkZfs = checkZfsBool 92 | 93 | if checkZfsBool { 94 | slog.Info("Running in privileged mode. Will be able to check zfs datasets.") 95 | } else { 96 | slog.Info("Not running in privileged mode. 
Will not be able to check zfs datasets.") 97 | } 98 | return 99 | } 100 | 101 | type DiskUsage struct { 102 | Array []DiskStatus 103 | Cache []DiskStatus 104 | Party []ParityStatus 105 | Pools []PoolStatus 106 | } 107 | 108 | func (monitor *DiskMonitor) ComputeDiskUsage() DiskUsage { 109 | diskIniMap := readDiskIni() 110 | 111 | var wg sync.WaitGroup 112 | 113 | zfsDatasets := monitor.readZfsDatasets() 114 | 115 | computeGroup := func(paths []string) []DiskStatus { 116 | diskChan := make(chan util.IndexedValue[DiskStatus], len(paths)) 117 | 118 | for i, path := range paths { 119 | dataset, exists := zfsDatasets[path] 120 | if exists { 121 | diskChan <- util.IndexedValue[DiskStatus]{ 122 | Index: i, 123 | Value: zfsDatasetUsage(dataset), 124 | } 125 | } else { 126 | wg.Add(1) 127 | go diskUsage(i, path, &wg, diskChan) 128 | } 129 | } 130 | 131 | wg.Wait() 132 | close(diskChan) 133 | 134 | disks := make([]DiskStatus, len(paths)) 135 | for disk := range diskChan { 136 | diskIni := diskIniMap[disk.Value.Name] 137 | 138 | disk.Value.Id = diskIni.Id 139 | disk.Value.Temp = diskIni.Temp 140 | disk.Value.IsSpinning = !diskIni.Spundown 141 | 142 | disks[disk.Index] = disk.Value 143 | } 144 | 145 | return disks 146 | } 147 | 148 | poolsStatus := make([]PoolStatus, 0, len(monitor.pools)) 149 | var array PoolStatus 150 | var cache PoolStatus 151 | 152 | for _, pool := range monitor.pools { 153 | disks := computeGroup(pool.Mounts) 154 | total := AggregateDiskStatuses(disks) 155 | status := PoolStatus{ 156 | Name: pool.Name, 157 | Total: total, 158 | Disks: disks, 159 | } 160 | if pool.Name == "array" { 161 | array = status 162 | } else if pool.Name == "cache" { 163 | cache = status 164 | } else { 165 | poolsStatus = append(poolsStatus, status) 166 | } 167 | } 168 | 169 | parity := make([]ParityStatus, 0) 170 | for name, diskIni := range diskIniMap { 171 | if strings.Contains(name, parityLabel) { 172 | parity = append(parity, ParityStatus{ 173 | Name: name, 174 | Temp: diskIni.Temp, 175 | Id: diskIni.Id, 176 | IsSpinning: !diskIni.Spundown, 177 | }) 178 | } 179 | } 180 | 181 | sort.Slice(parity, func(i, j int) bool { 182 | return parity[i].Name < parity[j].Name 183 | }) 184 | return DiskUsage{ 185 | Array: array.Disks, 186 | Cache: cache.Disks, 187 | Pools: poolsStatus, 188 | Party: parity, 189 | } 190 | } 191 | 192 | func diskUsage(index int, path string, wg *sync.WaitGroup, diskChan chan util.IndexedValue[DiskStatus]) { 193 | defer wg.Done() 194 | 195 | var pathToQuery = path 196 | 197 | var hostFsPrefix, isSet = os.LookupEnv("HOSTFS_PREFIX") 198 | if isSet { 199 | slog.Debug("Disk host prefix is set", "value", hostFsPrefix) 200 | pathToQuery = filepath.Join(hostFsPrefix, path) 201 | } 202 | slog.Debug("Disk reading usage", "path", pathToQuery, "original_path", path) 203 | usage, err := disk.Usage(pathToQuery) 204 | 205 | if err != nil { 206 | slog.Error("Disk cannot read", slog.String("path", path), slog.String("error", err.Error())) 207 | diskChan <- util.IndexedValue[DiskStatus]{Index: index, Value: DiskStatus{Path: path}} 208 | } else { 209 | total := util.BytesToGibiBytes(usage.Total) 210 | free := util.BytesToGibiBytes(usage.Free) 211 | used := total - free 212 | 213 | freePercent := 0.0 214 | usedPercent := 0.0 215 | 216 | if total > 0 { 217 | freePercent = util.RoundTwoDecimals((float64(free) / float64(total)) * 100) 218 | usedPercent = util.RoundTwoDecimals(100 - freePercent) 219 | } else { 220 | slog.Warn("Disk total size is 0, free/used percent will be returned as 0", slog.String("disk", 
path)) 221 | } 222 | 223 | status := DiskStatus{ 224 | Name: filepath.Base(path), 225 | Path: path, 226 | Total: total, 227 | Free: free, 228 | Used: used, 229 | FreePercent: freePercent, 230 | UsedPercent: usedPercent, 231 | } 232 | 233 | slog.Debug("Disk status computed", "index", index, "status", status) 234 | 235 | diskChan <- util.IndexedValue[DiskStatus]{Index: index, Value: status} 236 | } 237 | } 238 | 239 | func zfsDatasetUsage(dataset ZfsDataset) DiskStatus { 240 | usedBytes, _ := util.ParseZfsSize(dataset.Used) 241 | used := util.BytesToGibiBytes(usedBytes) 242 | freeBytes, _ := util.ParseZfsSize(dataset.Avail) 243 | free := util.BytesToGibiBytes(freeBytes) 244 | total := used + free 245 | 246 | freePercent := 0.0 247 | usedPercent := 0.0 248 | 249 | if total > 0 { 250 | freePercent = util.RoundTwoDecimals((float64(free) / float64(total)) * 100) 251 | usedPercent = util.RoundTwoDecimals(100 - freePercent) 252 | } else { 253 | slog.Warn("Disk ZFS dataset total size is 0, free/used percent will be returned as 0", slog.String("dataset", dataset.Mountpoint)) 254 | } 255 | 256 | status := DiskStatus{ 257 | Name: filepath.Base(dataset.Mountpoint), 258 | Path: dataset.Mountpoint, 259 | Total: total, 260 | Free: free, 261 | Used: used, 262 | FreePercent: freePercent, 263 | UsedPercent: usedPercent, 264 | } 265 | 266 | slog.Debug("Disk ZFS dataset status computed", "status", status) 267 | 268 | return status 269 | } 270 | 271 | func (monitor *DiskMonitor) readZfsDatasets() map[string]ZfsDataset { 272 | 273 | zfsDatasets := make(map[string]ZfsDataset) 274 | 275 | if !monitor.checkZfs { 276 | slog.Debug("Disk ZFS dataset checking is disabled") 277 | return zfsDatasets 278 | } else { 279 | slog.Debug("Disk ZFS dataset checking is enabled") 280 | } 281 | 282 | cmd := exec.Command("zfs", "list") 283 | stdout, err := cmd.StdoutPipe() 284 | if err != nil { 285 | slog.Error("Disk error while preparing command 'zfs list'", slog.String("error", err.Error())) 286 | return zfsDatasets 287 | } 288 | 289 | if err := cmd.Start(); err != nil { 290 | slog.Error("Disk error running command 'zfs list'", slog.String("error", err.Error())) 291 | return zfsDatasets 292 | } 293 | 294 | outScanner := bufio.NewScanner(stdout) 295 | 296 | outScanner.Scan() // skip header 297 | for outScanner.Scan() { 298 | fields := strings.Fields(outScanner.Text()) 299 | ds := ZfsDataset{ 300 | Name: fields[0], 301 | Used: fields[1], 302 | Avail: fields[2], 303 | Refer: fields[3], 304 | Mountpoint: fields[4], 305 | } 306 | slog.Debug("Disk ZFS dataset found", "dataset", ds) 307 | zfsDatasets[ds.Mountpoint] = ds 308 | } 309 | cmd.Wait() 310 | 311 | return zfsDatasets 312 | } 313 | 314 | func readDiskIni() map[string]DiskIni { 315 | pathToQuery := "/var/local/emhttp/disks.ini" 316 | 317 | var hostFsPrefix, isSet = os.LookupEnv("HOSTFS_PREFIX") 318 | if isSet { 319 | slog.Debug("Disk host prefix is set", "value", hostFsPrefix) 320 | pathToQuery = filepath.Join(hostFsPrefix, pathToQuery) 321 | } 322 | 323 | diskIniMap := make(map[string]DiskIni) 324 | 325 | disks, err := ini.Load(pathToQuery) 326 | if err != nil { 327 | slog.Error("Disk unable to read disks.ini", "error", slog.String("error", err.Error())) 328 | } 329 | 330 | for _, section := range disks.Sections() { 331 | if section.Name() == "DEFAULT" { 332 | continue 333 | } 334 | 335 | sectionName := strings.Trim(section.Name(), "\"") 336 | 337 | slog.Debug("Disk reading section", "section", sectionName) 338 | 339 | idKey := "id" 340 | idString, err := section.GetKey(idKey) 341 
| if err != nil { 342 | slog.Error("Disk unable to read disks.ini section", "section", sectionName, "key", idKey, "error", slog.String("error", err.Error())) 343 | continue 344 | } 345 | 346 | var temp uint64 347 | tempKey := "temp" 348 | tempString, err := section.GetKey(tempKey) 349 | if err != nil { 350 | slog.Error("Disk unable to read disks.ini section", "section", sectionName, "key", tempKey, "error", slog.String("error", err.Error())) 351 | continue 352 | } 353 | 354 | if tempString.String() != "*" { 355 | temp, err = strconv.ParseUint(tempString.String(), 10, 16) 356 | if err != nil { 357 | slog.Error("Disk unable to parse disk temp", "string", tempString.String(), "error", slog.String("error", err.Error())) 358 | } 359 | } else { 360 | slog.Debug("Disk temp unavailable", "disk", sectionName) 361 | } 362 | 363 | spunDownKey := "spundown" 364 | spunDown, err := section.GetKey(spunDownKey) 365 | if err != nil { 366 | slog.Error("Disk unable to read disks.ini section", "section", sectionName, "key", spunDownKey, "error", slog.String("error", err.Error())) 367 | continue 368 | } 369 | 370 | diskIniMap[sectionName] = DiskIni{ 371 | Id: idString.String(), 372 | Temp: temp, 373 | Spundown: spunDown.String() == "1", 374 | } 375 | } 376 | 377 | return diskIniMap 378 | } 379 | 380 | func AggregateDiskStatuses(disks []DiskStatus) (status DiskStatus) { 381 | paths := make([]string, 0, len(disks)) 382 | temps := make([]float64, 0) 383 | ids := make([]string, 0, len(disks)) 384 | isSpinning := false 385 | 386 | for _, disk := range disks { 387 | paths = append(paths, disk.Path) 388 | ids = append(ids, disk.Id) 389 | isSpinning = isSpinning || disk.IsSpinning 390 | if disk.Temp > 0 { 391 | temps = append(temps, float64(disk.Temp)) 392 | } 393 | status.Total = status.Total + disk.Total 394 | status.Used = status.Used + disk.Used 395 | slog.Debug("Disk aggregating usage", "current_disk", disk, "running_total", status) 396 | } 397 | 398 | status.Free = status.Total - status.Used 399 | 400 | if status.Total > 0 { 401 | status.FreePercent = util.RoundTwoDecimals(float64(status.Free) / float64(status.Total) * 100) 402 | status.UsedPercent = util.RoundTwoDecimals(100 - status.FreePercent) 403 | } else { 404 | slog.Warn("Disk aggregation total space is 0, free/used percent will be returned as 0") 405 | } 406 | 407 | slog.Debug("Disk figuring out common base path", "paths", paths) 408 | status.Path = util.CommonBase(paths...) 
+ "*" 409 | slog.Debug("Disk common base path", "path", status.Path) 410 | 411 | status.Temp = uint64(util.Average(temps)) 412 | status.IsSpinning = isSpinning 413 | status.Id = strings.Join(ids, " ") 414 | 415 | return 416 | } 417 | -------------------------------------------------------------------------------- /internal/monitor/memorymonitor.go: -------------------------------------------------------------------------------- 1 | package monitor 2 | 3 | import ( 4 | "bufio" 5 | "log/slog" 6 | "os" 7 | "regexp" 8 | "strconv" 9 | 10 | "github.com/NebN/unraid-simple-monitoring-api/internal/util" 11 | ) 12 | 13 | var memTotalRegex = regexp.MustCompile(`MemTotal:\s+(\d+)`) 14 | var memAvailableRegex = regexp.MustCompile(`MemAvailable:\s+(\d+)`) 15 | 16 | type MemoryStatus struct { 17 | Total uint64 `json:"total"` 18 | Used uint64 `json:"used"` 19 | Free uint64 `json:"free"` 20 | UsedPercent float64 `json:"used_percent"` 21 | FreePercent float64 `json:"free_percent"` 22 | } 23 | 24 | type MemoryMonitor struct{} 25 | 26 | func NewMemoryMonitor() (mm MemoryMonitor) { 27 | return 28 | } 29 | 30 | func (monitor *MemoryMonitor) ComputeMemoryUsage() (status MemoryStatus) { 31 | 32 | meminfo, err := os.Open("/proc/meminfo") 33 | if err != nil { 34 | slog.Error("Memory cannot read data", slog.String("error", err.Error())) 35 | return 36 | } 37 | defer meminfo.Close() 38 | 39 | findGroup := func(r *regexp.Regexp, s string) (uint64, bool) { 40 | res := r.FindStringSubmatch(s) 41 | if len(res) > 1 { 42 | parsed, err := strconv.ParseUint(res[1], 10, 64) 43 | if err != nil { 44 | slog.Error("Memory cannot parse value from /proc/meminfo", 45 | slog.String("parsing", res[1]), 46 | slog.String("error", err.Error())) 47 | return 0, false 48 | } 49 | return parsed, true 50 | } 51 | 52 | return 0, false 53 | } 54 | 55 | scanner := bufio.NewScanner(meminfo) 56 | for scanner.Scan() { 57 | line := scanner.Text() 58 | 59 | slog.Debug("Memory", "line", line) 60 | if status.Total == 0 { 61 | memTotal, found := findGroup(memTotalRegex, line) 62 | if found { 63 | status.Total = util.KibiBytesToMebiBytes(memTotal) 64 | slog.Debug("Memory total parsed", "total_MiB", status.Total) 65 | } 66 | } 67 | 68 | if status.Free == 0 { 69 | memAvailable, found := findGroup(memAvailableRegex, line) 70 | if found { 71 | status.Free = util.KibiBytesToMebiBytes(memAvailable) 72 | slog.Debug("Memory free parsed", "free_MiB", status.Free) 73 | } 74 | } 75 | 76 | if status.Total != 0 && status.Free != 0 { 77 | break 78 | } 79 | } 80 | 81 | if status.Total == 0 { 82 | slog.Error("Memory unable to compute usage") 83 | return 84 | } 85 | 86 | status.Used = status.Total - status.Free 87 | if status.Total > 0 { 88 | status.FreePercent = util.RoundTwoDecimals((float64(status.Free) / float64(status.Total)) * 100) 89 | status.UsedPercent = util.RoundTwoDecimals(100 - status.FreePercent) 90 | } else { 91 | slog.Warn("Memory total is 0, free/used percent will be returned as 0") 92 | } 93 | 94 | return 95 | } 96 | -------------------------------------------------------------------------------- /internal/monitor/networkmonitor.go: -------------------------------------------------------------------------------- 1 | package monitor 2 | 3 | import ( 4 | "fmt" 5 | "log/slog" 6 | "os" 7 | "strconv" 8 | "strings" 9 | "sync" 10 | "time" 11 | 12 | "github.com/NebN/unraid-simple-monitoring-api/internal/util" 13 | ) 14 | 15 | type NetworkRate struct { 16 | Iname string `json:"interface"` 17 | RxMiBs float64 `json:"rx_MiBs"` 18 | TxMiBs float64 `json:"tx_MiBs"` 
19 | RxMbps float64 `json:"rx_Mbps"` 20 | TxMbps float64 `json:"tx_Mbps"` 21 | } 22 | 23 | type NetworkSnapshot struct { 24 | Iname string 25 | Rx uint64 26 | Tx uint64 27 | RxTs time.Time 28 | TxTs time.Time 29 | } 30 | 31 | type NetworkMonitor struct { 32 | snapshots []NetworkSnapshot 33 | mu sync.Mutex 34 | } 35 | 36 | func NewNetworkMonitor(inames []string) (monitor NetworkMonitor) { 37 | snapshots := make([]NetworkSnapshot, len(inames)) 38 | for i, iname := range inames { 39 | snapshots[i] = newNetworkSnapshot(iname) 40 | } 41 | monitor.snapshots = snapshots 42 | return 43 | } 44 | 45 | func (monitor *NetworkMonitor) ComputeNetworkRate() []NetworkRate { 46 | monitor.mu.Lock() 47 | defer monitor.mu.Unlock() 48 | 49 | var wg sync.WaitGroup 50 | snapshotChan := make(chan util.IndexedValue[NetworkSnapshot], len(monitor.snapshots)) 51 | rateChan := make(chan util.IndexedValue[NetworkRate], len(monitor.snapshots)) 52 | 53 | for i, snapshot := range monitor.snapshots { 54 | wg.Add(1) 55 | go networkRate(i, snapshot, &wg, snapshotChan, rateChan) 56 | } 57 | 58 | wg.Wait() 59 | close(snapshotChan) 60 | close(rateChan) 61 | 62 | rates := make([]NetworkRate, len(monitor.snapshots)) 63 | snapshots := make([]NetworkSnapshot, len(monitor.snapshots)) 64 | 65 | for snapshot := range snapshotChan { 66 | snapshots[snapshot.Index] = snapshot.Value 67 | } 68 | for rate := range rateChan { 69 | rates[rate.Index] = rate.Value 70 | } 71 | 72 | monitor.snapshots = snapshots 73 | 74 | return rates 75 | } 76 | 77 | func networkRate( 78 | index int, 79 | previousSnapshot NetworkSnapshot, 80 | wg *sync.WaitGroup, 81 | snapshotChan chan util.IndexedValue[NetworkSnapshot], 82 | rateChan chan util.IndexedValue[NetworkRate]) { 83 | 84 | defer wg.Done() 85 | 86 | snapshot := newNetworkSnapshot(previousSnapshot.Iname) 87 | 88 | ratePerSecond := func(t0Reading uint64, t1Reading uint64, t0 time.Time, t1 time.Time) (float64, float64) { 89 | readingDiff := (t1Reading - t0Reading) 90 | deltaTime := t1.Sub(t0).Seconds() 91 | if deltaTime > 0 { 92 | rate := float64(readingDiff) / deltaTime 93 | rateMebiBytes := util.BytesToMebiBytes(rate) 94 | rateMegaBits := util.BytesToBits(util.BytesToMegaBytes(rate)) 95 | slog.Debug("Network computing rate per second", 96 | "interface", snapshot.Iname, 97 | "t0_value", t0Reading, 98 | "t0", t0, 99 | "t1_value", t1Reading, 100 | "t1", t1) 101 | return rateMebiBytes, rateMegaBits 102 | } else { 103 | slog.Warn("Network delta time between snapshots is 0, rate will be returned as 0", slog.String("interface", previousSnapshot.Iname)) 104 | } 105 | return 0, 0 106 | } 107 | 108 | rxMiBs, rxMbps := ratePerSecond(previousSnapshot.Rx, snapshot.Rx, previousSnapshot.RxTs, snapshot.RxTs) 109 | txMiBs, txMbps := ratePerSecond(previousSnapshot.Tx, snapshot.Tx, previousSnapshot.TxTs, snapshot.TxTs) 110 | 111 | rate := NetworkRate{ 112 | RxMiBs: util.RoundTwoDecimals(rxMiBs), 113 | TxMiBs: util.RoundTwoDecimals(txMiBs), 114 | RxMbps: util.RoundTwoDecimals(rxMbps), 115 | TxMbps: util.RoundTwoDecimals(txMbps), 116 | Iname: previousSnapshot.Iname, 117 | } 118 | 119 | slog.Debug("Network", "rate", rate) 120 | snapshotChan <- util.IndexedValue[NetworkSnapshot]{Index: index, Value: snapshot} 121 | rateChan <- util.IndexedValue[NetworkRate]{Index: index, Value: rate} 122 | } 123 | 124 | func newNetworkSnapshot(iname string) (network NetworkSnapshot) { 125 | network.Iname = iname 126 | 127 | usageInBps := func(direction string, c chan uint64, ts chan time.Time) { 128 | 129 | defer close(c) 130 | defer close(ts) 
131 | 132 | now := time.Now() 133 | res, err := os.ReadFile(fmt.Sprintf("/sys/class/net/%s/statistics/%s_bytes", iname, direction)) 134 | if err != nil { 135 | slog.Error("Network cannot read data", "interface", iname, slog.String("error", err.Error())) 136 | return 137 | } 138 | 139 | stringBytes := strings.TrimSuffix(string(res), "\n") 140 | 141 | uint64Bytes, err := strconv.ParseUint(stringBytes, 10, 64) 142 | if err != nil { 143 | slog.Error("Network cannot parse data from /sys/class/net/", 144 | slog.String("trying to parse", stringBytes), 145 | slog.String("error", err.Error())) 146 | } 147 | 148 | c <- uint64Bytes 149 | ts <- now 150 | } 151 | 152 | rxChan := make(chan uint64) 153 | txChan := make(chan uint64) 154 | rxTsChan := make(chan time.Time) 155 | txTsChan := make(chan time.Time) 156 | 157 | go usageInBps("rx", rxChan, rxTsChan) 158 | go usageInBps("tx", txChan, txTsChan) 159 | 160 | rx, ok := <-rxChan 161 | if !ok { 162 | rx = 0 163 | } 164 | 165 | tx, ok := <-txChan 166 | if !ok { 167 | tx = 0 168 | } 169 | 170 | rxTs, ok := <-rxTsChan 171 | if !ok { 172 | rxTs = time.Now() 173 | } 174 | 175 | txTs, ok := <-txTsChan 176 | if !ok { 177 | txTs = time.Now() 178 | } 179 | 180 | network.Rx = rx 181 | network.Tx = tx 182 | network.RxTs = rxTs 183 | network.TxTs = txTs 184 | 185 | slog.Debug("Network", "snapshot", network) 186 | return 187 | } 188 | 189 | func AggregateNetworkRates(networks []NetworkRate) (status NetworkRate) { 190 | names := make([]string, 0, len(networks)) 191 | 192 | for _, network := range networks { 193 | names = append(names, network.Iname) 194 | status.RxMbps = status.RxMbps + network.RxMbps 195 | status.TxMbps = status.TxMbps + network.TxMbps 196 | status.RxMiBs = status.RxMiBs + network.RxMiBs 197 | status.TxMiBs = status.TxMiBs + network.TxMiBs 198 | slog.Debug("Network aggregation", "network", network, "running_total", status) 199 | } 200 | 201 | status.Iname = strings.Join(names, " ") 202 | 203 | return 204 | } 205 | -------------------------------------------------------------------------------- /internal/util/bytes.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "fmt" 5 | "log/slog" 6 | "math" 7 | "regexp" 8 | "strconv" 9 | ) 10 | 11 | var zfsSizeRegex = regexp.MustCompile(`(\d+(?:\.\d+)?)([BKMGTPE])`) // an integer or decimal number followed by a binary unit suffix, e.g. "93K", "5G", "12.5G" 12 | 13 | const BINARY_KILO = 1024 14 | 15 | var unitMultiplier = map[string]uint64{ 16 | "B": uint64(math.Pow(BINARY_KILO, 0)), 17 | "K": uint64(math.Pow(BINARY_KILO, 1)), 18 | "M": uint64(math.Pow(BINARY_KILO, 2)), 19 | "G": uint64(math.Pow(BINARY_KILO, 3)), 20 | "T": uint64(math.Pow(BINARY_KILO, 4)), 21 | "P": uint64(math.Pow(BINARY_KILO, 5)), 22 | "E": uint64(math.Pow(BINARY_KILO, 6)), // 1024^6 is an exbibyte; ZFS reports sizes of this magnitude with an "E" suffix 23 | } 24 | 25 | func BytesToMegaBytes(b float64) float64 { 26 | return b / 1_000_000 27 | } 28 | 29 | func BytesToMebiBytes(b float64) float64 { 30 | mantissa, exponent := math.Frexp(b) 31 | return math.Ldexp(mantissa, exponent-20) 32 | } 33 | 34 | func BytesToBits(b float64) float64 { 35 | mantissa, exponent := math.Frexp(b) 36 | return math.Ldexp(mantissa, exponent+3) 37 | } 38 | 39 | func BytesToGibiBytes(b uint64) uint64 { 40 | return b >> 30 41 | } 42 | 43 | func KibiBytesToMebiBytes(b uint64) uint64 { 44 | return b >> 10 45 | } 46 | 47 | func ParseZfsSize(size string) (uint64, error) { 48 | res := zfsSizeRegex.FindStringSubmatch(size) 49 | if len(res) > 1 { 50 | number := res[1] 51 | unit := res[2] 52 | parsed, err := strconv.ParseFloat(number, 64) 53 | 54 | if err != nil { 55 |
slog.Error("Unable to parse zfs size", slog.String("raw value", number)) 56 | return 0, err 57 | } 58 | 59 | return uint64(parsed * float64(unitMultiplier[unit])), nil 60 | } else { 61 | slog.Error("Unable to parse zfs size", slog.String("raw value", size)) 62 | return 0, fmt.Errorf("unable to match size from string %s", size) 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /internal/util/math.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import "math" 4 | 5 | func RoundTwoDecimals(n float64) float64 { 6 | return math.Round(n*100) / 100 7 | } 8 | 9 | func Average(xs []float64) float64 { 10 | if len(xs) == 0 { 11 | return 0 12 | } 13 | 14 | total := 0.0 15 | for _, v := range xs { 16 | total += v 17 | } 18 | return total / float64(len(xs)) 19 | } 20 | -------------------------------------------------------------------------------- /internal/util/model.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | type IndexedValue[T any] struct { 4 | Index int 5 | Value T 6 | } 7 | -------------------------------------------------------------------------------- /internal/util/stringutil.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | func CommonBase(strings ...string) string { 4 | if len(strings) == 0 { 5 | return "" 6 | } 7 | 8 | common, tail := strings[0], strings[1:] 9 | 10 | for _, s := range tail { 11 | common = commonBase(common, s) 12 | } 13 | 14 | return common 15 | } 16 | 17 | func commonBase(a string, b string) string { 18 | 19 | longer, shorter := longerAndShorter(a, b) 20 | breakIx := 0 21 | 22 | longerRunes := []rune(longer) 23 | for ix, char := range shorter { 24 | if longerRunes[ix] != char { 25 | break 26 | } 27 | breakIx = ix + 1 28 | } 29 | 30 | return string(longerRunes[:breakIx]) 31 | } 32 | 33 | func longerAndShorter(a string, b string) (string, string) { 34 | if len(a) > len(b) { 35 | return a, b 36 | } 37 | return b, a 38 | } 39 | -------------------------------------------------------------------------------- /internal/util/util_test.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "math" 5 | "testing" 6 | ) 7 | 8 | func TestBytesToMegaBytes(t *testing.T) { 9 | bytes := 12_000_000.0 10 | expected := 12.0 11 | 12 | res := BytesToMegaBytes(bytes) 13 | if !floatAreEqualEnough(res, expected) { 14 | t.Fatalf("expected: %f, got: %f", expected, res) 15 | } 16 | } 17 | 18 | func TestBytesToMebiBytes(t *testing.T) { 19 | bytes := 12_000_000.0 20 | expected := 11.444091797 21 | 22 | res := BytesToMebiBytes(bytes) 23 | if !floatAreEqualEnough(res, expected) { 24 | t.Fatalf("expected: %f, got: %f", expected, res) 25 | } 26 | } 27 | 28 | func TestBytesToBits(t *testing.T) { 29 | bytes := 12_000_000.0 30 | expected := 96000000.0 31 | 32 | res := BytesToBits(bytes) 33 | if !floatAreEqualEnough(res, expected) { 34 | t.Fatalf("expected: %f, got: %f", expected, res) 35 | } 36 | } 37 | 38 | func TestBytesToGibiBytes(t *testing.T) { 39 | // BytesToGibiBytes simply bitshifts, so it truncates instead of rounding 40 | // this is so far the expected behaviour 41 | // 6,442,000,000 is slightly less than 6GiB, we get 5 42 | // 6,444,000,000 is slightly more than 6GiB, we get 6 43 | 44 | var bytes uint64 = 644_2_000000 45 | var expected uint64 = 5 46 | 47 | var res = BytesToGibiBytes(bytes) 48 | if 
res != expected { 49 | t.Fatalf("expected: %d, got: %d", expected, res) 50 | } 51 | 52 | bytes = 644_4_000000 53 | expected = 6 54 | 55 | res = BytesToGibiBytes(bytes) 56 | if res != expected { 57 | t.Fatalf("expected: %d, got: %d", expected, res) 58 | } 59 | } 60 | 61 | func TestRoundTwoDecimals(t *testing.T) { 62 | number := 123.456 63 | expected := 123.46 64 | res := RoundTwoDecimals(number) 65 | if !floatAreEqualEnough(res, expected) { 66 | t.Fatalf("expected: %f, got: %f", expected, res) 67 | } 68 | 69 | number = 123.454 70 | expected = 123.45 71 | res = RoundTwoDecimals(number) 72 | if !floatAreEqualEnough(res, expected) { 73 | t.Fatalf("expected: %f, got: %f", expected, res) 74 | } 75 | 76 | number = 123.3 77 | expected = 123.30 78 | res = RoundTwoDecimals(number) 79 | if !floatAreEqualEnough(res, expected) { 80 | t.Fatalf("expected: %f, got: %f", expected, res) 81 | } 82 | } 83 | 84 | func TestParseZfsSize(t *testing.T) { 85 | str := "12.5G" 86 | var expected uint64 = 13421772800 87 | res, err := ParseZfsSize(str) 88 | if err != nil { 89 | t.Fatalf(err.Error()) 90 | } 91 | if res != expected { 92 | t.Fatalf("expected: %d, got: %d", expected, res) 93 | } 94 | 95 | str = "230.5T" 96 | expected = 253437430202368 97 | res, err = ParseZfsSize(str) 98 | if err != nil { 99 | t.Fatalf(err.Error()) 100 | } 101 | if res != expected { 102 | t.Fatalf("expected: %d, got: %d", expected, res) 103 | } 104 | 105 | str = "130.50M" 106 | expected = 136839168 107 | res, err = ParseZfsSize(str) 108 | if err != nil { 109 | t.Fatalf(err.Error()) 110 | } 111 | if res != expected { 112 | t.Fatalf("expected: %d, got: %d", expected, res) 113 | } 114 | 115 | str = "93K" 116 | expected = 95232 117 | res, err = ParseZfsSize(str) 118 | if err != nil { 119 | t.Fatalf(err.Error()) 120 | } 121 | if res != expected { 122 | t.Fatalf("expected: %d, got: %d", expected, res) 123 | } 124 | 125 | str = "100B" 126 | expected = 100 127 | res, err = ParseZfsSize(str) 128 | if err != nil { 129 | t.Fatalf(err.Error()) 130 | } 131 | if res != expected { 132 | t.Fatalf("expected: %d, got: %d", expected, res) 133 | } 134 | } 135 | 136 | func floatAreEqualEnough(a float64, b float64) bool { 137 | return math.Abs(a-b) < 1e-9 138 | } 139 | 140 | func TestCommonBase(t *testing.T) { 141 | a := "thisPartIsInCommonX" 142 | b := "thisPartIsInCommonY" 143 | expected := "thisPartIsInCommon" 144 | 145 | res := CommonBase(a, b) 146 | 147 | if res != expected { 148 | t.Fatalf("expected: %s, got: %s", expected, res) 149 | } 150 | } 151 | 152 | func TestCommonBaseEmpty(t *testing.T) { 153 | a := "not empty" 154 | b := "" 155 | c := "not quite empty" 156 | expected := "" 157 | 158 | res := CommonBase(a, b, c) 159 | 160 | if res != expected { 161 | t.Fatalf("expected: %s, got: %s", expected, res) 162 | } 163 | } 164 | 165 | func TestCommonBaseIdentical(t *testing.T) { 166 | a := "identical" 167 | b := "identical" 168 | expected := "identical" 169 | 170 | res := CommonBase(a, b) 171 | 172 | if res != expected { 173 | t.Fatalf("expected: %s, got: %s", expected, res) 174 | } 175 | } 176 | 177 | func TestCommonBaseMultiple(t *testing.T) { 178 | a := "/common/part/ends/here/abc" 179 | b := "/common/part/ends/here/bcd" 180 | c := "/common/part/ends/here/cde" 181 | 182 | expected := "/common/part/ends/here/" 183 | 184 | res := CommonBase(a, b, c) 185 | 186 | if res != expected { 187 | t.Fatalf("expected: %s, got: %s", expected, res) 188 | } 189 | } 190 | 191 | func TestCommonBaseSingle(t *testing.T) { 192 | a := "single" 193 | 194 | expected := "single" 
195 | 196 | res := CommonBase(a) 197 | 198 | if res != expected { 199 | t.Fatalf("expected: %s, got: %s", expected, res) 200 | } 201 | } 202 | 203 | func TestCommonBaseNothing(t *testing.T) { 204 | 205 | expected := "" 206 | 207 | res := CommonBase() 208 | 209 | if res != expected { 210 | t.Fatalf("expected: %s, got: %s", expected, res) 211 | } 212 | } 213 | --------------------------------------------------------------------------------
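Illustrative usage sketch (not a file in this repository): a minimal Go example test that could sit alongside the internal/util package, showing how the exported helpers above compose; the expected values follow directly from the implementations in bytes.go and stringutil.go.

package util

import "fmt"

// ExampleParseZfsSize parses a ZFS-style size string and feeds the result
// through the byte-conversion and string helpers defined in this package.
func ExampleParseZfsSize() {
    size, err := ParseZfsSize("12.5G") // 12.5 * 1024^3 bytes
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println(size)                                   // raw byte count
    fmt.Println(BytesToGibiBytes(size))                 // truncating right shift by 30 bits
    fmt.Println(CommonBase("/mnt/disk1", "/mnt/disk2")) // longest shared prefix
    // Output:
    // 13421772800
    // 12
    // /mnt/disk
}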