├── .env-example ├── .github └── workflows │ └── release.yml ├── .gitignore ├── .gitmodules ├── Cargo.toml ├── LICENSE ├── README.md ├── cardamon.bk.toml ├── cardamon.toml ├── examples ├── docker-unix │ ├── README.md │ ├── cardamon.unix.toml │ ├── db │ │ └── docker-compose.yml │ └── test │ │ └── docker-compose.yml └── tod │ ├── Dockerfile │ ├── cardamon.toml │ ├── docker-compose.yml │ ├── go.mod │ ├── go.sum │ ├── main.go │ └── testing │ ├── Cargo.toml │ └── src │ └── main.rs ├── fixtures ├── cardamon.missing_process.toml ├── cardamon.missing_scenario.toml ├── cardamon.multiple_iterations.toml ├── cardamon.multiple_scenarios.toml ├── cardamon.success.toml ├── cpus.sql ├── iterations.sql ├── metrics.sql ├── power_curves.sql └── runs.sql └── src ├── carbon_intensity.rs ├── config.rs ├── dao.rs ├── dao ├── cpu.rs ├── iteration.rs ├── metrics.rs ├── pagination.rs ├── run.rs └── scenario.rs ├── data.rs ├── data ├── dataset.rs └── dataset_builder.rs ├── entities ├── cpu.rs ├── iteration.rs ├── metrics.rs ├── mod.rs ├── power_curve.rs ├── prelude.rs └── run.rs ├── lib.rs ├── main.rs ├── metrics.rs ├── metrics_logger.rs ├── metrics_logger ├── bare_metal.rs └── docker.rs ├── migrations ├── m20240822_095823_create_run_table.rs ├── m20240822_095830_create_metrics_table.rs ├── m20240822_095838_create_iteration_table.rs ├── m20241109_180400_add_region_column.rs ├── m20241110_191154_add_ci_column.rs └── mod.rs ├── migrator.rs ├── models.rs ├── public └── .gitkeep ├── server.rs ├── server ├── errors.rs └── routes.rs └── templates ├── cardamon.unix.toml └── cardamon.win.toml /.env-example: -------------------------------------------------------------------------------- 1 | DATABASE_URL=sqlite://cardamon.db?mode=rwc 2 | DATABASE_NAME= 3 | SERVER_PORT=4001 4 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | permissions: 3 | "contents": "write" 4 | 5 | on: 6 | pull_request: 7 | push: 8 | tags: 9 | - "**[0-9]+.[0-9]+.[0-9]+*" 10 | 11 | jobs: 12 | checks: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@v4 16 | with: 17 | submodules: recursive 18 | 19 | - name: Setup Rust 20 | uses: actions-rust-lang/setup-rust-toolchain@v1 21 | 22 | - name: Check version numbers match 23 | run: | 24 | card_ver=v$(cargo metadata --format-version=1 --no-deps | jq '.packages[0].version' | tr -d '"') 25 | if [[ $card_ver != "${{ github.ref_name }}" ]]; then exit 1; fi 26 | 27 | build: 28 | needs: [checks] 29 | strategy: 30 | matrix: 31 | os: [ubuntu-latest, windows-latest, macos-latest] 32 | include: 33 | - os: ubuntu-latest 34 | target: x86_64-unknown-linux-gnu 35 | exe_ext: 36 | archive_ext: tgz 37 | archive_cmd: tar -czf cardamon-x86_64-unknown-linux-gnu.tgz -C artifact . 38 | - os: windows-latest 39 | target: x86_64-pc-windows-msvc 40 | exe_ext: .exe 41 | archive_ext: zip 42 | archive_cmd: Compress-Archive -Path artifact/* -Destination cardamon-x86_64-pc-windows-msvc.zip 43 | - os: macos-latest 44 | target: aarch64-apple-darwin 45 | exe_ext: 46 | archive_ext: tgz 47 | archive_cmd: tar -czf cardamon-aarch64-apple-darwin.tgz -C artifact . 
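      # NOTE: the archive filenames defined above are load-bearing: the `release` job at
      # the bottom of this workflow uploads `artifacts/cardamon-<target>.<archive_ext>`
      # by exact name, so any new matrix entry must keep the cardamon-<target>.<ext> scheme.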
48 | runs-on: ${{ matrix.os }} 49 | steps: 50 | - name: Checkout cardamon 51 | uses: actions/checkout@v4 52 | with: 53 | submodules: recursive 54 | 55 | - name: Setup NodeJS 56 | uses: actions/setup-node@v4 57 | with: 58 | node-version: "20.x" 59 | 60 | - name: Build UI 61 | run: cd ui && npm cache clean --force && npm install && npm run build:release 62 | 63 | - name: Setup Rust 64 | uses: actions-rust-lang/setup-rust-toolchain@v1 65 | 66 | - name: Build Cardamon 67 | run: cargo build --release 68 | 69 | - name: Build release artifact 70 | run: | 71 | mkdir artifact 72 | cp target/release/cardamon${{ matrix.exe_ext }} artifact/cardamon${{ matrix.exe_ext }} 73 | cp LICENSE artifact/ 74 | cp README.md artifact/ 75 | 76 | - name: Compress build artifact 77 | run: ${{ matrix.archive_cmd }} 78 | 79 | - name: Calculate SHA-256 checksum 80 | run: shasum -a 256 cardamon-${{ matrix.target }}.${{ matrix.archive_ext }} > cardamon-${{ matrix.target }}.${{ matrix.archive_ext }}.sha256 81 | 82 | - name: Copy artifacts to upload directory 83 | run: | 84 | mkdir uploads 85 | mv cardamon-${{ matrix.target }}.${{ matrix.archive_ext }} uploads/ 86 | mv cardamon-${{ matrix.target }}.${{ matrix.archive_ext }}.sha256 uploads/ 87 | 88 | - name: Upload artifact 89 | uses: actions/upload-artifact@v4 90 | with: 91 | name: cardamon-${{ matrix.os }} 92 | path: uploads/ 93 | 94 | publish_crate: 95 | needs: [build] 96 | runs-on: ubuntu-latest 97 | steps: 98 | - name: Checkout cardamon 99 | uses: actions/checkout@v4 100 | with: 101 | submodules: recursive 102 | 103 | - name: Setup NodeJS 104 | uses: actions/setup-node@v4 105 | with: 106 | node-version: "20.x" 107 | 108 | - name: Build UI 109 | run: cd ui && npm install && npm run build:release 110 | 111 | - name: Setup Rust 112 | uses: actions-rust-lang/setup-rust-toolchain@v1 113 | 114 | - name: Publish Cardamon 115 | run: cargo publish --no-verify --allow-dirty --token ${{ secrets.CRATES_IO_TOKEN }} 116 | 117 | release: 118 | needs: [publish_crate] 119 | runs-on: ubuntu-latest 120 | steps: 121 | - uses: actions/checkout@v4 122 | with: 123 | submodules: recursive 124 | - name: Download artifacts 125 | uses: actions/download-artifact@v4 126 | with: 127 | path: artifacts 128 | pattern: cardamon-* 129 | merge-multiple: true 130 | - name: Create release 131 | run: | 132 | gh release create ${{ github.ref_name }} --draft --title "Release ${{ github.ref_name }}" --notes "" \ 133 | artifacts/cardamon-x86_64-unknown-linux-gnu.tgz \ 134 | artifacts/cardamon-x86_64-unknown-linux-gnu.tgz.sha256 \ 135 | artifacts/cardamon-x86_64-pc-windows-msvc.zip \ 136 | artifacts/cardamon-x86_64-pc-windows-msvc.zip.sha256 \ 137 | artifacts/cardamon-aarch64-apple-darwin.tgz \ 138 | artifacts/cardamon-aarch64-apple-darwin.tgz.sha256 139 | env: 140 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} 141 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # File created using '.gitignore Generator' for Visual Studio Code: https://bit.ly/vscode-gig 2 | # Created by https://www.toptal.com/developers/gitignore/api/windows,visualstudiocode,rust-analyzer,rust,osx,macos,linux,dotenv,database,intellij+all 3 | # Edit at https://www.toptal.com/developers/gitignore?templates=windows,visualstudiocode,rust-analyzer,rust,osx,macos,linux,dotenv,database,intellij+all 4 | 5 | ### Database ### 6 | *.accdb 7 | *.db 8 | *.dbf 9 | *.mdb 10 | *.pdb 11 | *.sqlite3 12 | *.db-shm 13 | *.db-wal 14 | 15 | ### 
dotenv ### 16 | .env 17 | debug.log 18 | .stderr 19 | .stdout 20 | curl/ 21 | 22 | ### Intellij+all ### 23 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider 24 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 25 | 26 | # User-specific stuff 27 | .idea/**/workspace.xml 28 | .idea/**/tasks.xml 29 | .idea/**/usage.statistics.xml 30 | .idea/**/dictionaries 31 | .idea/**/shelf 32 | 33 | # AWS User-specific 34 | .idea/**/aws.xml 35 | 36 | # Generated files 37 | .idea/**/contentModel.xml 38 | 39 | # Sensitive or high-churn files 40 | .idea/**/dataSources/ 41 | .idea/**/dataSources.ids 42 | .idea/**/dataSources.local.xml 43 | .idea/**/sqlDataSources.xml 44 | .idea/**/dynamic.xml 45 | .idea/**/uiDesigner.xml 46 | .idea/**/dbnavigator.xml 47 | 48 | # Gradle 49 | .idea/**/gradle.xml 50 | .idea/**/libraries 51 | 52 | # Gradle and Maven with auto-import 53 | # When using Gradle or Maven with auto-import, you should exclude module files, 54 | # since they will be recreated, and may cause churn. Uncomment if using 55 | # auto-import. 56 | # .idea/artifacts 57 | # .idea/compiler.xml 58 | # .idea/jarRepositories.xml 59 | # .idea/modules.xml 60 | # .idea/*.iml 61 | # .idea/modules 62 | # *.iml 63 | # *.ipr 64 | 65 | # CMake 66 | cmake-build-*/ 67 | 68 | # Mongo Explorer plugin 69 | .idea/**/mongoSettings.xml 70 | 71 | # File-based project format 72 | *.iws 73 | 74 | # IntelliJ 75 | out/ 76 | 77 | # mpeltonen/sbt-idea plugin 78 | .idea_modules/ 79 | 80 | # JIRA plugin 81 | atlassian-ide-plugin.xml 82 | 83 | # Cursive Clojure plugin 84 | .idea/replstate.xml 85 | 86 | # SonarLint plugin 87 | .idea/sonarlint/ 88 | 89 | # Crashlytics plugin (for Android Studio and IntelliJ) 90 | com_crashlytics_export_strings.xml 91 | crashlytics.properties 92 | crashlytics-build.properties 93 | fabric.properties 94 | 95 | # Editor-based Rest Client 96 | .idea/httpRequests 97 | 98 | # Android studio 3.1+ serialized cache file 99 | .idea/caches/build_file_checksums.ser 100 | 101 | ### Intellij+all Patch ### 102 | # Ignore everything but code style settings and run configurations 103 | # that are supposed to be shared within teams. 
104 | 105 | .idea/* 106 | 107 | !.idea/codeStyles 108 | !.idea/runConfigurations 109 | 110 | ### Linux ### 111 | *~ 112 | 113 | # temporary files which can be created if a process still has a handle open of a deleted file 114 | .fuse_hidden* 115 | 116 | # KDE directory preferences 117 | .directory 118 | 119 | # Linux trash folder which might appear on any partition or disk 120 | .Trash-* 121 | 122 | # .nfs files are created when an open file is removed but is still being accessed 123 | .nfs* 124 | 125 | ### macOS ### 126 | # General 127 | .DS_Store 128 | .AppleDouble 129 | .LSOverride 130 | 131 | # Icon must end with two \r 132 | Icon 133 | 134 | 135 | # Thumbnails 136 | ._* 137 | 138 | # Files that might appear in the root of a volume 139 | .DocumentRevisions-V100 140 | .fseventsd 141 | .Spotlight-V100 142 | .TemporaryItems 143 | .Trashes 144 | .VolumeIcon.icns 145 | .com.apple.timemachine.donotpresent 146 | 147 | # Directories potentially created on remote AFP share 148 | .AppleDB 149 | .AppleDesktop 150 | Network Trash Folder 151 | Temporary Items 152 | .apdisk 153 | 154 | ### macOS Patch ### 155 | # iCloud generated files 156 | *.icloud 157 | 158 | ### OSX ### 159 | # General 160 | 161 | # Icon must end with two \r 162 | 163 | # Thumbnails 164 | 165 | # Files that might appear in the root of a volume 166 | 167 | # Directories potentially created on remote AFP share 168 | 169 | ### Rust ### 170 | # Generated by Cargo 171 | # will have compiled files and executables 172 | debug/ 173 | target/ 174 | 175 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 176 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 177 | Cargo.lock 178 | 179 | # These are backup files generated by rustfmt 180 | **/*.rs.bk 181 | 182 | # MSVC Windows builds of rustc generate these, which store debugging information 183 | 184 | ### rust-analyzer ### 185 | # Can be generated by other build systems other than cargo (ex: bazelbuild/rust_rules) 186 | rust-project.json 187 | 188 | 189 | ### VisualStudioCode ### 190 | .vscode/* 191 | !.vscode/settings.json 192 | !.vscode/tasks.json 193 | !.vscode/launch.json 194 | !.vscode/extensions.json 195 | !.vscode/*.code-snippets 196 | 197 | # Local History for Visual Studio Code 198 | .history/ 199 | 200 | # Built Visual Studio Code Extensions 201 | *.vsix 202 | 203 | ### VisualStudioCode Patch ### 204 | # Ignore all local history of files 205 | .history 206 | .ionide 207 | 208 | ### Windows ### 209 | # Windows thumbnail cache files 210 | Thumbs.db 211 | Thumbs.db:encryptable 212 | ehthumbs.db 213 | ehthumbs_vista.db 214 | 215 | # Dump file 216 | *.stackdump 217 | 218 | # Folder config file 219 | [Dd]esktop.ini 220 | 221 | # Recycle Bin used on file shares 222 | $RECYCLE.BIN/ 223 | 224 | # Windows Installer files 225 | *.cab 226 | *.msi 227 | *.msix 228 | *.msm 229 | *.msp 230 | 231 | # Windows shortcuts 232 | *.lnk 233 | 234 | # End of https://www.toptal.com/developers/gitignore/api/windows,visualstudiocode,rust-analyzer,rust,osx,macos,linux,dotenv,database,intellij+all 235 | 236 | # Custom rules (everything added below won't be overriden by 'Generate .gitignore File' if you use 'Update' option) 237 | 238 | scenarios 239 | src/public/* 240 | !src/public/.gitkeep 241 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "ui"] 2 | path = ui 3 | url = 
git@github.com:Root-Branch/cardamon-ui.git 4 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "cardamon" 3 | version = "0.2.0" 4 | edition = "2021" 5 | authors = [ 6 | "Oliver Winks ", 7 | "William Kimbell ", 8 | ] 9 | description = "Cardamon is a tool to help development teams measure the power consumption and carbon emissions of their software." 10 | license-file = "LICENSE" 11 | repository = "https://github.com/Root-Branch/cardamon" 12 | documentation = "https://docs.rs/cardamon" 13 | homepage = "https://github.com/Root-Branch/cardamon" 14 | keywords = ["green-software", "environmental", "sustainability"] 15 | categories = [ 16 | "command-line-utilities", 17 | "development-tools", 18 | "development-tools::profiling", 19 | ] 20 | readme = "README.md" 21 | include = ["src/**/*.rs", "src/public", "src/templates"] 22 | default-run = "cardamon" 23 | 24 | [package.metadata.binstall.overrides.x86_64-pc-windows-msvc] 25 | pkg-fmt = "zip" 26 | 27 | [[bin]] 28 | name = "cardamon" 29 | path = "src/main.rs" 30 | 31 | [[bin]] 32 | name = "migrator" 33 | path = "src/migrator.rs" 34 | 35 | [dependencies] 36 | anyhow = { version = "1.0.75", features = ["std"] } 37 | async-trait = "0.1.81" 38 | axum = { version = "0.7.1", features = ["json", "macros"] } 39 | bollard = "0.17.1" 40 | bytes = "1.6.0" 41 | chrono = { version = "0.4.31", features = ["serde"] } 42 | clap = { version = "4.4.10", features = ["derive"] } 43 | colored = "2.1.0" 44 | ctrlc = "3.4.5" 45 | dotenvy = "0.15.7" 46 | futures-util = "0.3.30" 47 | http = "1.1.0" 48 | itertools = "0.13.0" 49 | mime_guess = { version = "2.0.5" } 50 | num_cpus = "1.16.0" 51 | phf = { version = "0.11", features = ["macros"] } 52 | reqwest = { version = "0.12.7", features = ["json"] } 53 | rust-embed = "8.5.0" 54 | sea-orm = { version = "1.0.0", features = [ 55 | "sqlx-sqlite", 56 | "sqlx-postgres", 57 | "sqlx-mysql", 58 | "runtime-tokio-rustls", 59 | "macros", 60 | "tests-cfg", 61 | ] } 62 | sea-orm-macros = { version = "1.0.0" } 63 | serde = { version = "1.0.193", features = ["derive"] } 64 | serde_json = "1.0.117" 65 | shlex = "1.3.0" 66 | subprocess = "0.2.9" 67 | sysinfo = "0.31.2" 68 | tar = "0.4.41" 69 | term-table = "1.4.0" 70 | textplots = "0.8.6" 71 | # textplots = "0.8.6" 72 | # time = "0.3.36" 73 | tokio = { version = "1.37.0", features = ["full"] } 74 | tokio-util = "0.7.11" 75 | toml = "0.8.12" 76 | tower-http = { version = "0.5.2", features = ["cors", "fs"] } 77 | tracing = "0.1.40" 78 | tracing-subscriber = { version = "0.3.18", features = [ 79 | "registry", 80 | "env-filter", 81 | ] } 82 | 83 | [dependencies.sea-orm-migration] 84 | version = "1.0.0" # sea-orm-migration version 85 | features = [ 86 | # Enable following runtime and db backend features if you want to run migration via CLI 87 | "runtime-tokio-rustls", 88 | "sqlx-postgres", 89 | "sqlx-sqlite", 90 | "sqlx-mysql", 91 | ] 92 | 93 | [dev-dependencies] 94 | nanoid = "0.4.0" 95 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | # PolyForm Shield License 1.0.0 2 | 3 | 4 | 5 | ## Acceptance 6 | 7 | In order to get any license under these terms, you must agree 8 | to them as both strict obligations and conditions to all 9 | your licenses. 
10 | 11 | ## Copyright License 12 | 13 | The licensor grants you a copyright license for the 14 | software to do everything you might do with the software 15 | that would otherwise infringe the licensor's copyright 16 | in it for any permitted purpose. However, you may 17 | only distribute the software according to [Distribution 18 | License](#distribution-license) and make changes or new works 19 | based on the software according to [Changes and New Works 20 | License](#changes-and-new-works-license). 21 | 22 | ## Distribution License 23 | 24 | The licensor grants you an additional copyright license 25 | to distribute copies of the software. Your license 26 | to distribute covers distributing the software with 27 | changes and new works permitted by [Changes and New Works 28 | License](#changes-and-new-works-license). 29 | 30 | ## Notices 31 | 32 | You must ensure that anyone who gets a copy of any part of 33 | the software from you also gets a copy of these terms or the 34 | URL for them above, as well as copies of any plain-text lines 35 | beginning with `Required Notice:` that the licensor provided 36 | with the software. For example: 37 | 38 | > Required Notice: Copyright Yoyodyne, Inc. (http://example.com) 39 | 40 | ## Changes and New Works License 41 | 42 | The licensor grants you an additional copyright license to 43 | make changes and new works based on the software for any 44 | permitted purpose. 45 | 46 | ## Patent License 47 | 48 | The licensor grants you a patent license for the software that 49 | covers patent claims the licensor can license, or becomes able 50 | to license, that you would infringe by using the software. 51 | 52 | ## Noncompete 53 | 54 | Any purpose is a permitted purpose, except for providing any 55 | product that competes with the software or any product the 56 | licensor or any of its affiliates provides using the software. 57 | 58 | ## Competition 59 | 60 | Goods and services compete even when they provide functionality 61 | through different kinds of interfaces or for different technical 62 | platforms. Applications can compete with services, libraries 63 | with plugins, frameworks with development tools, and so on, 64 | even if they're written in different programming languages 65 | or for different computer architectures. Goods and services 66 | compete even when provided free of charge. If you market a 67 | product as a practical substitute for the software or another 68 | product, it definitely competes. 69 | 70 | ## New Products 71 | 72 | If you are using the software to provide a product that does 73 | not compete, but the licensor or any of its affiliates brings 74 | your product into competition by providing a new version of 75 | the software or another product using the software, you may 76 | continue using versions of the software available under these 77 | terms beforehand to provide your competing product, but not 78 | any later versions. 79 | 80 | ## Discontinued Products 81 | 82 | You may begin using the software to compete with a product 83 | or service that the licensor or any of its affiliates has 84 | stopped providing, unless the licensor includes a plain-text 85 | line beginning with `Licensor Line of Business:` with the 86 | software that mentions that line of business. 
For example: 87 | 88 | > Licensor Line of Business: YoyodyneCMS Content Management 89 | System (http://example.com/cms) 90 | 91 | ## Sales of Business 92 | 93 | If the licensor or any of its affiliates sells a line of 94 | business developing the software or using the software 95 | to provide a product, the buyer can also enforce 96 | [Noncompete](#noncompete) for that product. 97 | 98 | ## Fair Use 99 | 100 | You may have "fair use" rights for the software under the 101 | law. These terms do not limit them. 102 | 103 | ## No Other Rights 104 | 105 | These terms do not allow you to sublicense or transfer any of 106 | your licenses to anyone else, or prevent the licensor from 107 | granting licenses to anyone else. These terms do not imply 108 | any other licenses. 109 | 110 | ## Patent Defense 111 | 112 | If you make any written claim that the software infringes or 113 | contributes to infringement of any patent, your patent license 114 | for the software granted under these terms ends immediately. If 115 | your company makes such a claim, your patent license ends 116 | immediately for work on behalf of your company. 117 | 118 | ## Violations 119 | 120 | The first time you are notified in writing that you have 121 | violated any of these terms, or done anything with the software 122 | not covered by your licenses, your licenses can nonetheless 123 | continue if you come into full compliance with these terms, 124 | and take practical steps to correct past violations, within 125 | 32 days of receiving notice. Otherwise, all your licenses 126 | end immediately. 127 | 128 | ## No Liability 129 | 130 | ***As far as the law allows, the software comes as is, without 131 | any warranty or condition, and the licensor will not be liable 132 | to you for any damages arising out of these terms or the use 133 | or nature of the software, under any kind of legal claim.*** 134 | 135 | ## Definitions 136 | 137 | The **licensor** is the individual or entity offering these 138 | terms, and the **software** is the software the licensor makes 139 | available under these terms. 140 | 141 | A **product** can be a good or service, or a combination 142 | of them. 143 | 144 | **You** refers to the individual or entity agreeing to these 145 | terms. 146 | 147 | **Your company** is any legal entity, sole proprietorship, 148 | or other kind of organization that you work for, plus all 149 | its affiliates. 150 | 151 | **Affiliates** means the other organizations than an 152 | organization has control over, is under the control of, or is 153 | under common control with. 154 | 155 | **Control** means ownership of substantially all the assets of 156 | an entity, or the power to direct its management and policies 157 | by vote, contract, or otherwise. Control can be direct or 158 | indirect. 159 | 160 | **Your licenses** are all the licenses granted to you for the 161 | software under these terms. 162 | 163 | **Use** means anything you do with the software requiring one 164 | of your licenses. 165 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
2 | # Cardamon 3 | 🌱 *The Car(bon) da(shboard) and live mon(itor)* 4 | *Built with ❤️ by the Root & Branch team* 5 | 6 | Uh, it's cardamom ACKSUALLY! - we know, but cardamon is a better acronym. 7 | 8 |
9 | 10 | --- 11 | 12 | Cardamon is a tool to help development teams measure the power consumption and carbon emissions of their software. 13 | 14 | - [Introduction](#introduction) 15 | - [Installation](#installation) 16 | - [Quickstart](#quickstart) 17 | - [Environment Variables](#environment-variables) 18 | - [Configuration](#configuration) 19 | - [CLI](#cli) 20 | - [FAQ](#faq) 21 | - [License](#license) 22 | 23 | # Introduction 24 | 25 | Cardamon is built around the concept of observations and scenarios. 26 | 27 | A scenario encapsulates a usage behaviour that you want to measure (e.g. add items to basket). You can then run your code against these repeatable behaviours and see how your software's power consumption changes over time. You can view this in the cardamon-ui. Cardamon scenarios are compatible with the [ISO/IEC 21031 - Software Carbon Intensity (SCI) specification](https://www.iso.org/standard/86612.html). 28 | 29 | An observation is a measurement of one or more scenarios. 30 | 31 | # Installation 32 | 33 | The easiest way to install Cardamon is using our install script. 34 | 35 | **Linux & Mac** 36 | 37 | `curl -fsSL https://cardamon.io/install.sh | sh` 38 | 39 | **Windows** 40 | 41 | ``` 42 | Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser 43 | irm -Uri https://cardamon.io/install.ps1 | iex 44 | ``` 45 | 46 | **Cargo** 47 | 48 | Alternatively you can build Cardamon from source if you have `cargo` installed. 49 | 50 | `cargo install cardamon` 51 | 52 | # Quickstart 53 | 54 | `cardamon init` - creates a new cardamon configuration file. 55 | 56 | `cardamon run <observation name>` - runs the specified observation. 57 | 58 | `cardamon stats` - shows stats per scenario. 59 | 60 | # Environment Variables 61 | 62 | By default, Cardamon saves your data to a locally stored SQLite database. If you would like to store Cardamon data in another location, you can set the following environment variables. 63 | **DATABASE_URL** 64 | 65 | (omit the database name from the URL when using postgresql or mysql; use DATABASE_NAME instead) 66 | 67 | **DATABASE_NAME** 68 | 69 | (only required for postgresql and mysql) 70 |
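For example, a `.env` pointing Cardamon at a local PostgreSQL server might look like the sketch below (modelled on the repo's `.env-example`; the host, port, credentials and database name are placeholder assumptions):

```sh
# SQLite (the default) keeps the database name inside the URL:
# DATABASE_URL=sqlite://cardamon.db?mode=rwc

# PostgreSQL/MySQL: leave the database name out of the URL
# and supply it separately via DATABASE_NAME.
DATABASE_URL=postgres://user:password@localhost:5432
DATABASE_NAME=cardamon
```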
71 | # Configuration 72 | 73 | ### CPU 74 | 75 | This contains information about the CPU used to run your application. The options are as follows: 76 | 77 | ```toml 78 | [cpu] 79 | 80 | # The manufacturer's name for your processor 81 | name = "AMD Ryzen 7 PRO 6850U with Radeon Graphics" 82 | 83 | # The processor's average power consumption in watts 84 | avg_power = 11.223 85 | ``` 86 | 87 | ### Processes 88 | 89 | Processes are the things you would like cardamon to start/stop and measure during a run. Currently only executables and docker containers are supported, but podman and kubernetes are planned. You can specify as many processes as you like. Below is an example process: 90 | 91 | ```toml 92 | [[process]] 93 | 94 | # Must be unique 95 | name = "db" 96 | 97 | # The command to start this process 98 | up = "docker compose up -d" 99 | 100 | # (OPTIONAL) The command to stop this process. Cardamon will pass the PID of the process to this command. You can 101 | # use `{pid}` as a placeholder in the command e.g. `kill {pid}` 102 | down = "docker compose down" 103 | 104 | # The type of process which is being executed. Can be "docker" | "baremetal" 105 | process.type = "docker" 106 | 107 | # (OPTIONAL) Docker processes may initiate multiple containers from a single command, e.g. `docker compose up -d`. 108 | # This is the list of containers started by this process that you would like cardamon to measure 109 | process.containers = ["postgres"] 110 | 111 | # (OPTIONAL) Where to redirect this process's stdout and stderr. "null" ignores output, "parent" attaches the 112 | # process's output to the cardamon process, "file" writes stdout and stderr to a file of the same name as this 113 | # process e.g. db.stdout. Defaults to "file" 114 | redirect.to = "file" 115 | ``` 116 | 117 | ### Scenarios 118 | 119 | Scenarios are designed to put your application under some amount of load. They should represent some use case of your application. For example, if your application is a REST API, a scenario may simply be a list of curl commands performing some tasks, as in the sketch below. 120 |
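A minimal sketch of such a scenario script - the endpoint and payload here are hypothetical placeholders for your own application:

```bash
#!/usr/bin/env bash
# basket_10.sh - add ten items to the basket (hypothetical REST endpoint)
set -e

for i in $(seq 1 10); do
  curl -s -X POST "http://localhost:8080/basket" \
    -H "Content-Type: application/json" \
    -d "{\"item_id\": $i}" > /dev/null
done
```

A scenario pointing at a script like this would set `command = "bash basket_10.sh"`; the example below uses a simple `sleep` instead: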
121 | ```toml 122 | [[scenario]] 123 | 124 | # Must be unique 125 | name = "sleep" 126 | 127 | # (OPTIONAL) A short description of the scenario to remind you what it does 128 | desc = "Sleeps for 10 seconds, a real scenario would call your app" 129 | 130 | # The command to execute this scenario 131 | command = "sleep 10" 132 | 133 | # (OPTIONAL) The number of times cardamon should execute this scenario per run. It's better to run scenarios 134 | # multiple times and take an average. Defaults to 1 135 | iterations = 2 136 | 137 | # A list of the processes which need to be started before executing this scenario 138 | processes = ["test_proc"] 139 | ``` 140 | 141 | ### Observations 142 | 143 | An observation is how we take a 'measurement'. Observations can be run in two modes: as a live monitor, where you specify processes to measure, or as a scenario runner, where you specify scenarios to run. 144 | 145 | ```toml 146 | [[observation]] 147 | 148 | # Must be unique 149 | name = "my_observation" 150 | 151 | # A list of scenarios to execute whilst observing the application. Only required if no processes are defined 152 | scenarios = ["sleep"] 153 | 154 | # A list of processes to execute and observe. Running an observation with this property set runs Cardamon in 155 | # Live Monitor mode 156 | processes = ["test_proc"] 157 | ``` 158 | 159 | # CLI 160 | 161 | ### Init 162 | 163 | `cardamon init` 164 | 165 | Produces a new cardamon.toml file. 166 | 167 | ### Run 168 | 169 | `cardamon run <observation name>` 170 | 171 | Runs a single observation. 172 | 173 | **_Options_** 174 | 175 | - **name**: _The name of the observation you would like to run_ 176 | - **pids**: _A comma separated list of PIDs, started externally to cardamon, that you would like cardamon to measure_ 177 | - **containers**: _A comma separated list of container names, started externally to cardamon, that you would like cardamon to measure_ 178 | - **external_only**: _If set, cardamon will not try to start any processes and will instead only measure the PIDs and containers specified by the `pids` and `containers` options_ 179 | 180 | ### Stats 181 | 182 | `cardamon stats [scenario_name]` 183 | 184 | Shows the stats for previous runs of scenarios. 185 | 186 | **_Options_** 187 | 188 | - **scenario_name**: _An optional argument for the scenario you want to show stats for_ 189 | - **previous_runs**: _The number of previous runs to show_ 190 | 191 | ### Ui 192 | 193 | `cardamon ui [port]` 194 | 195 | Start the UI server. 196 | 197 | **_Options_** 198 | 199 | - **port**: _The port to listen on_ 200 | 201 | ## FAQ 202 | 203 | ### Can I use Cardamon on my own project or at my work? 204 | 205 | > Cardamon is released under the PolyForm Shield License 1.0. Anyone can use Cardamon in-house to build their own software, including commercially. 206 | > If you want to use Cardamon to offer a competing service to Root & Branch (e.g. instrument another company's software) then you will need permission - please get in touch. We have lots of green software industry friends who are able to use Cardamon. 207 | 208 | ### I'd like to use Cardamon to measure the power consumption of my software, but I don't know how 209 | 210 | > We're a friendly bunch! Feel free to create an issue on GitHub (make sure to give it the `help` label) and we will help in any way we can. Alternatively, email us at 211 | 212 | ### How can I contribute? 213 | 214 | > There are many ways you can contribute to the project. 215 | > 216 | > - Help us improve the documentation. 217 | > - Translate the docs into other languages. 218 | > - Create example projects to show others how to use Cardamon in their projects. 219 | > - Check out the issues board on GitHub; there are always features and fixes that need implementing. 220 | > - Spread the word! Tell others about the project and encourage them to use it. 221 | 222 | # License 223 | 224 | Cardamon is distributed under the terms of the PolyForm Shield License (Version 1.0). 225 | 226 | See [LICENSE](https://polyformproject.org/licenses/shield/1.0.0/) for details. 227 | 228 | _Copyright © 2023 Root & Branch ltd_ 229 | -------------------------------------------------------------------------------- /cardamon.bk.toml: -------------------------------------------------------------------------------- 1 | # CPU 2 | # ######################################## 3 | [cpu] 4 | name = "AMD Ryzen 7 PRO 6850U with Radeon Graphics" 5 | curve = [ 6 | 7.627190097500079, 7 | 0.07551567953624883, 8 | 20.45110313049153, 9 | -1.5261422759740344, 10 | ] 11 | 12 | # Processes 13 | # ######################################## 14 | [[process]] 15 | name = "rand-api" 16 | up = "./rand-api" 17 | down = "kill {pid}" 18 | redirect.to = "file" 19 | process.type = "baremetal" 20 | 21 | [[process]] 22 | name = "rand-api-docker" 23 | up = "docker run -d --name c1 -p 4244:4243 rand-api" 24 | down = "bash -c 'docker stop c1 && docker rm c1'" 25 | redirect.to = "file" 26 | process.type = "docker" 27 | process.containers = ["c1"] 28 | 29 | # Scenarios 30 | # ######################################## 31 | [[scenario]] 32 | name = "stress" 33 | desc = "" 34 | command = "sh scenario.sh" 35 | iterations = 2 36 | processes = ["rand-api", "rand-api-docker"] 37 | 38 | [[scenario]] 39 | name = "stress_metal" 40 | desc = "" 41 | command = "sh scenario.sh" 42 | iterations = 2 43 | processes = ["rand-api"] 44 | 45 | [[scenario]] 46 | name = "stress_docker" 47 | desc = "" 48 | command = "sh scenario.sh" 49 | iterations = 2 50 | processes = ["rand-api-docker"] 51 | 52 | # Observations 53 | # ######################################## 54 | [[observation]] 55 | name = "stress" 56 | scenarios = ["stress"] 57 | 58 | [[observation]] 59 | name = "stress_metal" 60 | scenarios = ["stress_metal"] 61 | 62 | [[observation]] 63 | name = "stress_docker" 64 | scenarios = ["stress_docker"] 65 | 66 | [[observation]] 67 | name = "live_monitor" 68 | processes = ["test_proc1", "test_proc2"] 69 | -------------------------------------------------------------------------------- /cardamon.toml: -------------------------------------------------------------------------------- 1 | [cpu] 2 | name = "AMD Ryzen 7 PRO 6850U with Radeon Graphics" 3 | curve = 
[7.627190097500079,0.07551567953624883,20.45110313049153,-1.5261422759740344] 4 | 5 | # Processes 6 | # --------- 7 | # This array of tables describes the components of your application that you 8 | # would like cardamon to measure. 9 | # 10 | # processes contain the following properties: 11 | # name: 12 | # type - string 13 | # desc - must be unique 14 | # required - true 15 | # 16 | # up: 17 | # type - string 18 | # desc - command to execute the process 19 | # required - true 20 | # 21 | # down: 22 | # type - string 23 | # desc - command to stop the process. In the case of bare-metal processes 24 | # cardamon will pass the PID of the process to this command. You can 25 | # use `{pid}` as a placeholder in the command e.g. `kill {pid}`. 26 | # default - empty string 27 | # required - false 28 | # 29 | # process.type: 30 | # type - "baremetal" | "docker" 31 | # desc - the type of process which is being executed 32 | # required - true 33 | # 34 | # process.containers: 35 | # type - string[] 36 | # desc - docker processes may initiate multiple containers from a single 37 | # command, e.g. `docker compose up -d`. This is the list of 38 | # containers started by this process you would like cardamon to 39 | # measure. 40 | # required - true (if `process.type` is "docker") 41 | # 42 | # redirect.to: 43 | # type - "null" | "parent" | "file" 44 | # desc - where to redirect this process's stdout and stderr. "null" ignores 45 | # output, "parent" attaches the process's output to cardamon, "file" 46 | # writes stdout and stderr to a file of the same name as this 47 | # process e.g. db.stdout. 48 | # default - "file" 49 | # required - false 50 | # 51 | # EXAMPLE 52 | # ------- 53 | # [[process]] 54 | # name = "db" 55 | # up = "docker compose up -d" 56 | # down = "docker compose down" 57 | # redirect.to = "file" 58 | # process.type = "docker" 59 | # process.containers = ["postgres"] 60 | 61 | [[process]] 62 | name = "test_proc" 63 | up = "bash -c \"while true; do shuf -i 0-1337 -n 1; done\"" 64 | down = "kill {pid}" 65 | redirect.to = "file" 66 | process.type = "baremetal" 67 | 68 | # Scenarios 69 | # --------- 70 | # This array of tables describes the scenarios that cardamon can run. They can 71 | # be any kind of executable and are designed to place your application under 72 | # consistent load each time they are run. Examples include bash scripts which 73 | # `curl` a REST endpoint or nodejs scripts using playwright.js to control a 74 | # webpage. 75 | # 76 | # scenarios contain the following properties: 77 | # name: 78 | # type - string 79 | # desc - must be unique 80 | # required - true 81 | # 82 | # desc: 83 | # type - string 84 | # desc - a short description of the scenario to remind you what it does 85 | # required - false 86 | # 87 | # command: 88 | # type - string 89 | # desc - the command to execute this scenario 90 | # required - true 91 | # 92 | # iterations: 93 | # type - integer 94 | # desc - the number of times cardamon should execute this scenario per run. 95 | # It's better to run scenarios multiple times and take an average. 96 | # default - 1 97 | # required - false 98 | # 99 | # processes: 100 | # type - string[] 101 | # desc - a list of the processes which need to be started before executing 102 | # this scenario.
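#              e.g. `processes = ["test_proc"]` (see the scenario below)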
103 | # required - true 104 | # 105 | [[scenario]] 106 | name = "sleep" 107 | desc = "Sleeps for 10 seconds, a real scenario would call your app" 108 | command = "sleep 10" 109 | iterations = 2 110 | processes = ["test_proc"] 111 | 112 | 113 | # Observations 114 | # --------------- 115 | # This array of tables allows you to group scenarios together to make it 116 | # easier to execute multiple scenarios in a single run. 117 | # 118 | # observations contain the following properties: 119 | # name: 120 | # type - string 121 | # desc - a unique name 122 | # required - true 123 | # 124 | # observe.scenarios: 125 | # type - string[] 126 | # desc - a list of scenarios to execute whilst observing the processes 127 | # required to run all scenarios 128 | # required - required if observe.processes is not defined 129 | # 130 | # observe.processes: 131 | # type - string[] 132 | # desc - a list of processes to execute and observe. Running an observation 133 | # with this property set runs Cardamon in Live mode. 134 | # required - required if observe.scenarios is not defined. 135 | # 136 | [[observation]] 137 | name = "test_obs" 138 | scenarios = ["sleep"] -------------------------------------------------------------------------------- /examples/docker-unix/README.md: -------------------------------------------------------------------------------- 1 | ### Warning 2 | 3 | Having two services in a single docker compose file and running them separately causes a network name conflict -------------------------------------------------------------------------------- /examples/docker-unix/cardamon.unix.toml: -------------------------------------------------------------------------------- 1 | 2 | 3 | [[processes]] 4 | name = "db" 5 | up = "bash -c '(cd examples/docker-unix/db && docker compose up -d db)'" 6 | down = "bash -c '(cd examples/docker-unix/db && docker compose down -v )'" 7 | redirect.to = "parent" 8 | process.type = "docker" 9 | process.containers = ["db"] 10 | [[processes]] 11 | name = "test" 12 | up = "bash -c '(cd examples/docker-unix/test && docker compose up -d test)'" 13 | down = "bash -c '(cd examples/docker-unix/test && docker compose down -v)'" 14 | redirect.to = "parent" 15 | process.type = "docker" 16 | process.containers = ["test"] 17 | 18 | [[scenarios]] 19 | name = "basket_10" 20 | desc = "Adds ten items to the basket" 21 | command = "sleep 15" 22 | iterations = 2 23 | processes = ["test", "db"] 24 | 25 | [[observations]] 26 | name = "obs_1" 27 | scenarios = ["basket_10"] 28 | -------------------------------------------------------------------------------- /examples/docker-unix/db/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | db: 3 | image: postgres:13 4 | environment: 5 | POSTGRES_PASSWORD: example 6 | container_name: db 7 | -------------------------------------------------------------------------------- /examples/docker-unix/test/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | test: 3 | image: progrium/stress 4 | command: --cpu 1 --timeout 300s 5 | deploy: 6 | resources: 7 | limits: 8 | cpus: '1' 9 | -------------------------------------------------------------------------------- /examples/tod/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.22.5-alpine 2 | 3 | WORKDIR /app 4 | 5 | COPY go.mod go.sum ./ 6 | RUN go mod download 7 | 8 | COPY . . 9 | 10 | RUN go build -o main .
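# NOTE: single-stage image kept deliberately simple for this example; a multi-stage
# build copying the compiled binary into a minimal runtime image would be smaller.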
11 | 12 | EXPOSE 8080 13 | 14 | CMD ["./main"] 15 | -------------------------------------------------------------------------------- /examples/tod/cardamon.toml: -------------------------------------------------------------------------------- 1 | [computer] 2 | cpu_name = "AMD Ryzen 7 7840U w/ Radeon 780M Graphics (16) @ 5.132GHz" 3 | cpu_avg_power= 28.0 4 | 5 | [[processes]] 6 | name = "backend" 7 | up = "docker compose up --build" 8 | down = "docker compose down" 9 | redirect.to = "parent" 10 | process.type = "docker" 11 | process.containers = ["redis","todo-app"] 12 | 13 | [[scenarios]] 14 | name = "rust_test" # Required 15 | desc = "Test each endpoint 100 times" # Optional 16 | command = "bash -c '(cd testing && cargo run)'" # Required - commands for running scenarios 17 | iterations = 1 # Optional - defaults to 1 18 | processes = ["backend"] # Required - prepend process name with `_` to ignore 19 | 20 | [[observations]] 21 | name = "todo" # Required 22 | scenarios = ["rust_test"] # Required 23 | -------------------------------------------------------------------------------- /examples/tod/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | redis: 3 | image: redis:latest 4 | container_name: my-redis 5 | ports: 6 | - "6379:6379" 7 | command: redis-server --appendonly no 8 | networks: 9 | - app-network 10 | 11 | todo-app: 12 | build: . 13 | container_name: todo-app 14 | ports: 15 | - "8080:8080" 16 | depends_on: 17 | - redis 18 | networks: 19 | - app-network 20 | 21 | networks: 22 | app-network: 23 | driver: bridge 24 | -------------------------------------------------------------------------------- /examples/tod/go.mod: -------------------------------------------------------------------------------- 1 | module tod 2 | 3 | go 1.22.5 4 | 5 | require ( 6 | github.com/go-redis/redis/v8 v8.11.5 7 | github.com/gorilla/mux v1.8.1 8 | ) 9 | 10 | require ( 11 | github.com/cespare/xxhash/v2 v2.1.2 // indirect 12 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect 13 | ) 14 | -------------------------------------------------------------------------------- /examples/tod/go.sum: -------------------------------------------------------------------------------- 1 | github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= 2 | github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 3 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= 4 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= 5 | github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= 6 | github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= 7 | github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= 8 | github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= 9 | github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= 10 | github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= 11 | github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= 12 | github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= 13 | github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= 14 | github.com/onsi/ginkgo v1.16.5/go.mod 
h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= 15 | github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= 16 | github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= 17 | golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0= 18 | golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= 19 | golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM= 20 | golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 21 | golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= 22 | golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 23 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= 24 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= 25 | gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= 26 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= 27 | -------------------------------------------------------------------------------- /examples/tod/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "log" 6 | "net/http" 7 | 8 | "github.com/go-redis/redis/v8" 9 | "github.com/gorilla/mux" 10 | ) 11 | 12 | var redisClient *redis.Client 13 | 14 | type Note struct { 15 | ID string `json:"id"` 16 | Text string `json:"text"` 17 | } 18 | 19 | func main() { 20 | // Initialize Redis client 21 | redisClient = redis.NewClient(&redis.Options{ 22 | Addr: "redis:6379", // Using the service name from docker-compose 23 | DB: 0, 24 | }) 25 | 26 | // Initialize router 27 | r := mux.NewRouter() 28 | 29 | // Define routes 30 | r.HandleFunc("/notes", getNotes).Methods("GET") 31 | r.HandleFunc("/notes/{id}", getNote).Methods("GET") 32 | r.HandleFunc("/notes", setNote).Methods("POST") 33 | r.HandleFunc("/notes/{id}", deleteNote).Methods("DELETE") 34 | 35 | // Start server 36 | log.Println("Server is running on port 8080") 37 | log.Fatal(http.ListenAndServe(":8080", r)) 38 | } 39 | 40 | func getNotes(w http.ResponseWriter, r *http.Request) { 41 | ctx := r.Context() 42 | keys, err := redisClient.Keys(ctx, "*").Result() 43 | if err != nil { 44 | http.Error(w, err.Error(), http.StatusInternalServerError) 45 | return 46 | } 47 | 48 | notes := []Note{} 49 | for _, key := range keys { 50 | val, err := redisClient.Get(ctx, key).Result() 51 | if err != nil { 52 | http.Error(w, err.Error(), http.StatusInternalServerError) 53 | return 54 | } 55 | notes = append(notes, Note{ID: key, Text: val}) 56 | } 57 | 58 | json.NewEncoder(w).Encode(notes) 59 | } 60 | 61 | func getNote(w http.ResponseWriter, r *http.Request) { 62 | ctx := r.Context() 63 | vars := mux.Vars(r) 64 | id := vars["id"] 65 | 66 | val, err := redisClient.Get(ctx, id).Result() 67 | if err == redis.Nil { 68 | http.Error(w, "Note not found", http.StatusNotFound) 69 | return 70 | } else if err != nil { 71 | http.Error(w, err.Error(), http.StatusInternalServerError) 72 | return 73 | } 74 | 75 | json.NewEncoder(w).Encode(Note{ID: id, Text: val}) 76 | } 77 | 78 | func setNote(w http.ResponseWriter, r *http.Request) { 79 | ctx := r.Context() 80 | var note Note 81 | err := json.NewDecoder(r.Body).Decode(&note) 82 | if err != nil { 83 | http.Error(w, 
err.Error(), http.StatusBadRequest) 84 | return 85 | } 86 | 87 | err = redisClient.Set(ctx, note.ID, note.Text, 0).Err() 88 | if err != nil { 89 | http.Error(w, err.Error(), http.StatusInternalServerError) 90 | return 91 | } 92 | 93 | w.WriteHeader(http.StatusCreated) 94 | json.NewEncoder(w).Encode(note) 95 | } 96 | 97 | func deleteNote(w http.ResponseWriter, r *http.Request) { 98 | ctx := r.Context() 99 | vars := mux.Vars(r) 100 | id := vars["id"] 101 | 102 | _, err := redisClient.Del(ctx, id).Result() 103 | if err != nil { 104 | http.Error(w, err.Error(), http.StatusInternalServerError) 105 | return 106 | } 107 | 108 | w.WriteHeader(http.StatusNoContent) 109 | } 110 | -------------------------------------------------------------------------------- /examples/tod/testing/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "testing" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | reqwest = {version = "0.12.5", features = ["json"] } 8 | serde_json = "1.0.124" 9 | tokio = { version = "1.37.0", features = ["full"] } 10 | 11 | -------------------------------------------------------------------------------- /examples/tod/testing/src/main.rs: -------------------------------------------------------------------------------- 1 | use core::time; 2 | use reqwest::Client; 3 | use serde_json::json; 4 | use std::{thread, time::Instant}; 5 | use tokio; 6 | 7 | const BASE_URL: &str = "http://localhost:8080"; 8 | const NUM_REQUESTS: usize = 1000; 9 | 10 | #[tokio::main] 11 | async fn main() -> Result<(), Box<dyn std::error::Error>> { 12 | let client = Client::new(); 13 | 14 | println!("Starting API endpoint tests..."); 15 | 16 | // Test POST /notes 17 | let post_start = Instant::now(); 18 | for i in 0..NUM_REQUESTS { 19 | let response = client 20 | .post(format!("{}/notes", BASE_URL)) 21 | .json(&json!({ 22 | "id": format!("test{}", i), 23 | "text": format!("Test note {}", i) 24 | })) 25 | .send() 26 | .await; 27 | match response { 28 | Ok(res) => { 29 | if res.status().is_success() { 30 | print!(".") 31 | } else { 32 | print!("x") 33 | } 34 | } 35 | Err(..) 
=> thread::sleep(time::Duration::from_secs(1)), 36 | } 37 | } 38 | println!("\nPOST /notes: {:?}", post_start.elapsed()); 39 | 40 | // Test GET /notes 41 | let get_all_start = Instant::now(); 42 | for _ in 0..NUM_REQUESTS { 43 | let response = client.get(format!("{}/notes", BASE_URL)).send().await?; 44 | 45 | if response.status().is_success() { 46 | print!("."); 47 | } else { 48 | print!("x"); 49 | } 50 | } 51 | println!("\nGET /notes: {:?}", get_all_start.elapsed()); 52 | 53 | // Test GET /notes/{id} 54 | let get_one_start = Instant::now(); 55 | for i in 0..NUM_REQUESTS { 56 | let response = client 57 | .get(format!("{}/notes/test{}", BASE_URL, i % 100)) 58 | .send() 59 | .await?; 60 | 61 | if response.status().is_success() { 62 | print!("."); 63 | } else { 64 | print!("x"); 65 | } 66 | } 67 | println!("\nGET /notes/{{id}}: {:?}", get_one_start.elapsed()); 68 | 69 | // Test DELETE /notes/{id} 70 | let delete_start = Instant::now(); 71 | for i in 0..NUM_REQUESTS { 72 | let response = client 73 | .delete(format!("{}/notes/test{}", BASE_URL, i % 100)) 74 | .send() 75 | .await?; 76 | 77 | if response.status().is_success() { 78 | print!("."); 79 | } else { 80 | print!("x"); 81 | } 82 | } 83 | println!("\nDELETE /notes/{{id}}: {:?}", delete_start.elapsed()); 84 | 85 | println!("API endpoint tests completed."); 86 | Ok(()) 87 | } 88 | -------------------------------------------------------------------------------- /fixtures/cardamon.missing_process.toml: -------------------------------------------------------------------------------- 1 | [cpu] 2 | name = "AMD Ryzen 7 Pro 6850U" 3 | tdp = 15 4 | 5 | [[process]] 6 | name = "db" 7 | up = "powershell sleep 5" # "docker compose up -d" 8 | process.type = "docker" 9 | process.containers = ["postgres"] 10 | 11 | [[process]] 12 | name = "server" 13 | up = "powershell sleep 5" # "yarn dev" 14 | process.type = "baremetal" 15 | 16 | [[scenario]] 17 | name = "basket_10" 18 | desc = "Adds ten items to the basket" 19 | command = "node ./scenarios/basket_10.js" 20 | iterations = 1 21 | processes = ["missing", "server"] 22 | 23 | [[observation]] 24 | name = "checkout" 25 | scenarios = ["basket_10"] 26 | 27 | [[observation]] 28 | name = "live_monitor" 29 | processes = ["missing", "server"] 30 | -------------------------------------------------------------------------------- /fixtures/cardamon.missing_scenario.toml: -------------------------------------------------------------------------------- 1 | [cpu] 2 | name = "AMD Ryzen 7 Pro 6850U" 3 | tdp = 15 4 | 5 | [[process]] 6 | name = "db" 7 | up = "powershell sleep 5" # "docker compose up -d" 8 | process.type = "docker" 9 | process.containers = ["postgres"] 10 | 11 | [[process]] 12 | name = "server" 13 | up = "powershell sleep 5" # "yarn dev" 14 | process.type = "baremetal" 15 | 16 | [[scenario]] 17 | name = "basket_10" 18 | desc = "Adds ten items to the basket" 19 | command = "node ./scenarios/basket_10.js" 20 | iterations = 1 21 | processes = ["db", "server"] 22 | 23 | [[observation]] 24 | name = "checkout" 25 | scenarios = ["missing"] 26 | -------------------------------------------------------------------------------- /fixtures/cardamon.multiple_iterations.toml: -------------------------------------------------------------------------------- 1 | [cpu] 2 | name = "AMD Ryzen 7 Pro 6850U" 3 | tdp = 15 4 | 5 | [[process]] 6 | name = "db" 7 | up = "powershell sleep 5" # "docker compose up -d" 8 | process.type = "docker" 9 | process.containers = ["postgres"] 10 | 11 | [[process]] 12 | name = "server" 13 | up = 
"powershell sleep 5" # "yarn dev" 14 | process.type = "baremetal" 15 | 16 | [[scenario]] 17 | name = "basket_10" 18 | desc = "Adds ten items to the basket" 19 | command = "node ./scenarios/basket_10.js" 20 | iterations = 2 21 | processes = ["db", "server"] 22 | 23 | [[observation]] 24 | name = "checkout" 25 | scenarios = ["basket_10"] 26 | -------------------------------------------------------------------------------- /fixtures/cardamon.multiple_scenarios.toml: -------------------------------------------------------------------------------- 1 | [cpu] 2 | name = "AMD Ryzen 7 Pro 6850U" 3 | tdp = 15 4 | 5 | [[process]] 6 | name = "db" 7 | up = "powershell sleep 5" # "docker compose up -d" 8 | process.type = "docker" 9 | process.containers = ["postgres"] 10 | 11 | [[process]] 12 | name = "server" 13 | up = "powershell sleep 5" # "yarn dev" 14 | process.type = "baremetal" 15 | 16 | [[process]] 17 | name = "mailgun" 18 | up = "powershell sleep 5" # docker compose -f docker-compose.mailgun.yml up -d 19 | process.type = "docker" 20 | process.containers = ["mailgun"] 21 | 22 | [[scenario]] 23 | name = "basket_10" 24 | desc = "Adds ten items to the basket" 25 | command = "node ./scenarios/basket_10.js" 26 | iterations = 1 27 | processes = ["db", "server"] 28 | 29 | [[scenario]] 30 | name = "user_signup" 31 | desc = "signs up 10 users" 32 | command = "node ./scenarios/user_signup.js" 33 | iterations = 1 34 | processes = ["db", "server", "mailgun"] 35 | 36 | [[observation]] 37 | name = "checkout" 38 | scenarios = ["basket_10", "user_signup"] 39 | 40 | [[observation]] 41 | name = "live_monitor" 42 | processes = ["db", "server", "mailgun"] 43 | -------------------------------------------------------------------------------- /fixtures/cardamon.success.toml: -------------------------------------------------------------------------------- 1 | [cpu] 2 | name = "AMD Ryzen 7 Pro 6850U" 3 | tdp = 15 4 | 5 | [[process]] 6 | name = "db" 7 | up = "powershell sleep 5" # "docker compose up -d" 8 | process.type = "docker" 9 | process.containers = ["postgres"] 10 | 11 | [[process]] 12 | name = "server" 13 | up = "powershell sleep 5" # "yarn dev" 14 | process.type = "baremetal" 15 | 16 | [[scenario]] 17 | name = "basket_10" 18 | desc = "Adds ten items to the basket" 19 | command = "node ./scenarios/basket_10.js" 20 | iterations = 1 21 | processes = ["db", "server"] 22 | 23 | [[observation]] 24 | name = "checkout" 25 | scenarios = ["basket_10"] 26 | -------------------------------------------------------------------------------- /fixtures/cpus.sql: -------------------------------------------------------------------------------- 1 | DELETE FROM cpu; 2 | 3 | INSERT INTO cpu (id, name, tdp, power_curve_id) VALUES 4 | (1, 'AMD Ryzen 7 PRO 6850U', null, 1), 5 | (2, 'AMD Ryzen 7 PRO 6850U', 15, null); 6 | 7 | -------------------------------------------------------------------------------- /fixtures/iterations.sql: -------------------------------------------------------------------------------- 1 | DELETE FROM iteration; 2 | 3 | INSERT INTO iteration ( 4 | run_id, scenario_name, count, start_time, stop_time 5 | ) 6 | VALUES 7 | (1, 'scenario_1', 1, 1717507590000, 1717507591000), 8 | (1, 'scenario_2', 1, 1717507592000, 1717507593000), 9 | (1, 'scenario_2', 2, 1717507594000, 1717507595000), 10 | (1, 'scenario_3', 1, 1717507596000, 1717507597000), 11 | (1, 'scenario_3', 2, 1717507598000, 1717507599000), 12 | (1, 'scenario_3', 3, 1717507600000, 1717507601000), 13 | 14 | (2, 'scenario_2', 1, 1717507690000, 1717507691000), 15 | 
(2, 'scenario_2', 2, 1717507692000, 1717507693000), 16 | (2, 'scenario_3', 1, 1717507694000, 1717507695000), 17 | (2, 'scenario_3', 2, 1717507696000, 1717507697000), 18 | (2, 'scenario_3', 3, 1717507698000, 1717507699000), 19 | 20 | (3, 'scenario_3', 1, 1717507790000, 1717507791000), 21 | (3, 'scenario_3', 2, 1717507792000, 1717507793000), 22 | (3, 'scenario_3', 3, 1717507794000, 1717507795000); 23 | -------------------------------------------------------------------------------- /fixtures/metrics.sql: -------------------------------------------------------------------------------- 1 | DELETE FROM metrics; 2 | 3 | INSERT INTO metrics ( 4 | run_id, 5 | process_id, 6 | process_name, 7 | cpu_usage, 8 | cpu_total_usage, 9 | cpu_core_count, 10 | time_stamp 11 | ) 12 | VALUES 13 | 14 | -- run_1, scenario_1, it 1 15 | (1, '1337', 'yarn', 10, 100, 4, 1717507590000), 16 | (1, '1337', 'yarn', 10, 100, 4, 1717507590200), 17 | (1, '1337', 'yarn', 10, 100, 4, 1717507590400), 18 | (1, '1337', 'yarn', 10, 100, 4, 1717507590600), 19 | (1, '1337', 'yarn', 10, 100, 4, 1717507590800), 20 | (1, '1338', 'docker', 20, 100, 4, 1717507590000), 21 | (1, '1338', 'docker', 20, 100, 4, 1717507590200), 22 | (1, '1338', 'docker', 20, 100, 4, 1717507590400), 23 | (1, '1338', 'docker', 20, 100, 4, 1717507590600), 24 | (1, '1338', 'docker', 20, 100, 4, 1717507590800), 25 | 26 | -- run_1, scenario_2, it 1 27 | (1, '1337', 'yarn', 10, 100, 4, 1717507592000), 28 | (1, '1337', 'yarn', 12, 100, 4, 1717507592200), 29 | (1, '1337', 'yarn', 14, 100, 4, 1717507592400), 30 | (1, '1337', 'yarn', 16, 100, 4, 1717507592600), 31 | (1, '1337', 'yarn', 18, 100, 4, 1717507592800), 32 | (1, '1338', 'docker', 20, 100, 4, 1717507592000), 33 | (1, '1338', 'docker', 20, 100, 4, 1717507592200), 34 | (1, '1338', 'docker', 20, 100, 4, 1717507592400), 35 | (1, '1338', 'docker', 20, 100, 4, 1717507592600), 36 | (1, '1338', 'docker', 20, 100, 4, 1717507592800), 37 | 38 | -- run_1, scenario_2, it 2 39 | (1, '1337', 'yarn', 11, 100, 4, 1717507594000), 40 | (1, '1337', 'yarn', 13, 100, 4, 1717507594200), 41 | (1, '1337', 'yarn', 15, 100, 4, 1717507594400), 42 | (1, '1337', 'yarn', 17, 100, 4, 1717507594600), 43 | (1, '1337', 'yarn', 19, 100, 4, 1717507594800), 44 | (1, '1338', 'docker', 25, 100, 4, 1717507594000), 45 | (1, '1338', 'docker', 25, 100, 4, 1717507594200), 46 | (1, '1338', 'docker', 25, 100, 4, 1717507594400), 47 | (1, '1338', 'docker', 25, 100, 4, 1717507594600), 48 | (1, '1338', 'docker', 25, 100, 4, 1717507594800), 49 | 50 | -- run_1, scenario_3, it 1 51 | (1, '1337', 'yarn', 65, 100, 4, 1717507596000), 52 | (1, '1337', 'yarn', 98, 100, 4, 1717507596200), 53 | (1, '1337', 'yarn', 87, 100, 4, 1717507596400), 54 | (1, '1337', 'yarn', 96, 100, 4, 1717507596600), 55 | (1, '1337', 'yarn', 68, 100, 4, 1717507596800), 56 | (1, '1338', 'docker', 65, 100, 4, 1717507596000), 57 | (1, '1338', 'docker', 98, 100, 4, 1717507596200), 58 | (1, '1338', 'docker', 87, 100, 4, 1717507596400), 59 | (1, '1338', 'docker', 96, 100, 4, 1717507596600), 60 | (1, '1338', 'docker', 68, 100, 4, 1717507596800), 61 | 62 | -- run_1, scenario_3, it 2 63 | (1, '1337', 'yarn', 65, 100, 4, 1717507598000), 64 | (1, '1337', 'yarn', 98, 100, 4, 1717507598200), 65 | (1, '1337', 'yarn', 87, 100, 4, 1717507598400), 66 | (1, '1337', 'yarn', 96, 100, 4, 1717507598600), 67 | (1, '1337', 'yarn', 68, 100, 4, 1717507598800), 68 | (1, '1338', 'docker', 65, 100, 4, 1717507598000), 69 | (1, '1338', 'docker', 98, 100, 4, 1717507598200), 70 | (1, '1338', 'docker', 87, 100, 4, 
1717507598400), 71 | (1, '1338', 'docker', 96, 100, 4, 1717507598600), 72 | (1, '1338', 'docker', 68, 100, 4, 1717507598800), 73 | 74 | -- run_1, scenario_3, it 3 75 | (1, '1337', 'yarn', 65, 100, 4, 1717507600000), 76 | (1, '1337', 'yarn', 98, 100, 4, 1717507600200), 77 | (1, '1337', 'yarn', 87, 100, 4, 1717507600400), 78 | (1, '1337', 'yarn', 96, 100, 4, 1717507600600), 79 | (1, '1337', 'yarn', 68, 100, 4, 1717507600800), 80 | (1, '1338', 'docker', 65, 100, 4, 1717507600000), 81 | (1, '1338', 'docker', 98, 100, 4, 1717507600200), 82 | (1, '1338', 'docker', 87, 100, 4, 1717507600400), 83 | (1, '1338', 'docker', 96, 100, 4, 1717507600600), 84 | (1, '1338', 'docker', 68, 100, 4, 1717507600800), 85 | 86 | 87 | 88 | -- run_2, scenario_2, it 1 89 | (2, '1337', 'yarn', 65, 100, 4, 1717507690000), 90 | (2, '1337', 'yarn', 98, 100, 4, 1717507690200), 91 | (2, '1337', 'yarn', 87, 100, 4, 1717507690400), 92 | (2, '1337', 'yarn', 96, 100, 4, 1717507690600), 93 | (2, '1337', 'yarn', 68, 100, 4, 1717507690800), 94 | (2, '1338', 'docker', 65, 100, 4, 1717507690000), 95 | (2, '1338', 'docker', 98, 100, 4, 1717507690200), 96 | (2, '1338', 'docker', 87, 100, 4, 1717507690400), 97 | (2, '1338', 'docker', 96, 100, 4, 1717507690600), 98 | (2, '1338', 'docker', 68, 100, 4, 1717507690800), 99 | 100 | -- run_2, scenario_2, it 2 101 | (2, '1337', 'yarn', 65, 100, 4, 1717507692000), 102 | (2, '1337', 'yarn', 98, 100, 4, 1717507692200), 103 | (2, '1337', 'yarn', 87, 100, 4, 1717507692400), 104 | (2, '1337', 'yarn', 96, 100, 4, 1717507692600), 105 | (2, '1337', 'yarn', 68, 100, 4, 1717507692800), 106 | (2, '1338', 'docker', 65, 100, 4, 1717507692000), 107 | (2, '1338', 'docker', 98, 100, 4, 1717507692200), 108 | (2, '1338', 'docker', 87, 100, 4, 1717507692400), 109 | (2, '1338', 'docker', 96, 100, 4, 1717507692600), 110 | (2, '1338', 'docker', 68, 100, 4, 1717507692800), 111 | 112 | -- run_2, scenario_3, it 1 113 | (2, '1337', 'yarn', 65, 100, 4, 1717507694000), 114 | (2, '1337', 'yarn', 98, 100, 4, 1717507694200), 115 | (2, '1337', 'yarn', 87, 100, 4, 1717507694400), 116 | (2, '1337', 'yarn', 96, 100, 4, 1717507694600), 117 | (2, '1337', 'yarn', 68, 100, 4, 1717507694800), 118 | (2, '1338', 'docker', 65, 100, 4, 1717507694000), 119 | (2, '1338', 'docker', 98, 100, 4, 1717507694200), 120 | (2, '1338', 'docker', 87, 100, 4, 1717507694400), 121 | (2, '1338', 'docker', 96, 100, 4, 1717507694600), 122 | (2, '1338', 'docker', 68, 100, 4, 1717507694800), 123 | 124 | -- run_2, scenario_3, it 2 125 | (2, '1337', 'yarn', 65, 100, 4, 1717507696000), 126 | (2, '1337', 'yarn', 98, 100, 4, 1717507696200), 127 | (2, '1337', 'yarn', 87, 100, 4, 1717507696400), 128 | (2, '1337', 'yarn', 96, 100, 4, 1717507696600), 129 | (2, '1337', 'yarn', 68, 100, 4, 1717507696800), 130 | (2, '1338', 'docker', 65, 100, 4, 1717507696000), 131 | (2, '1338', 'docker', 98, 100, 4, 1717507696200), 132 | (2, '1338', 'docker', 87, 100, 4, 1717507696400), 133 | (2, '1338', 'docker', 96, 100, 4, 1717507696600), 134 | (2, '1338', 'docker', 68, 100, 4, 1717507696800), 135 | 136 | -- run_2, scenario_3, it 3 137 | (2, '1337', 'yarn', 65, 100, 4, 1717507698000), 138 | (2, '1337', 'yarn', 98, 100, 4, 1717507698200), 139 | (2, '1337', 'yarn', 87, 100, 4, 1717507698400), 140 | (2, '1337', 'yarn', 96, 100, 4, 1717507698600), 141 | (2, '1337', 'yarn', 68, 100, 4, 1717507698800), 142 | (2, '1338', 'docker', 65, 100, 4, 1717507698000), 143 | (2, '1338', 'docker', 98, 100, 4, 1717507698200), 144 | (2, '1338', 'docker', 87, 100, 4, 1717507698400), 145 | (2, '1338', 
'docker', 96, 100, 4, 1717507698600), 146 | (2, '1338', 'docker', 68, 100, 4, 1717507698800), 147 | 148 | 149 | 150 | -- run_3, scenario_3, it 1 151 | (3, '1337', 'yarn', 65, 100, 4, 1717507790000), 152 | (3, '1337', 'yarn', 98, 100, 4, 1717507790200), 153 | (3, '1337', 'yarn', 87, 100, 4, 1717507790400), 154 | (3, '1337', 'yarn', 96, 100, 4, 1717507790600), 155 | (3, '1337', 'yarn', 68, 100, 4, 1717507790800), 156 | (3, '1338', 'docker', 65, 100, 4, 1717507790000), 157 | (3, '1338', 'docker', 98, 100, 4, 1717507790200), 158 | (3, '1338', 'docker', 87, 100, 4, 1717507790400), 159 | (3, '1338', 'docker', 96, 100, 4, 1717507790600), 160 | (3, '1338', 'docker', 68, 100, 4, 1717507790800), 161 | 162 | -- run_3, scenario_3, it 2 163 | (3, '1337', 'yarn', 65, 100, 4, 1717507792000), 164 | (3, '1337', 'yarn', 98, 100, 4, 1717507792200), 165 | (3, '1337', 'yarn', 87, 100, 4, 1717507792400), 166 | (3, '1337', 'yarn', 96, 100, 4, 1717507792600), 167 | (3, '1337', 'yarn', 68, 100, 4, 1717507792800), 168 | (3, '1338', 'docker', 65, 100, 4, 1717507792000), 169 | (3, '1338', 'docker', 98, 100, 4, 1717507792200), 170 | (3, '1338', 'docker', 87, 100, 4, 1717507792400), 171 | (3, '1338', 'docker', 96, 100, 4, 1717507792600), 172 | (3, '1338', 'docker', 68, 100, 4, 1717507792800), 173 | 174 | -- run_3, scenario_3, it 3 175 | (3, '1337', 'yarn', 65, 100, 4, 1717507794000), 176 | (3, '1337', 'yarn', 98, 100, 4, 1717507794200), 177 | (3, '1337', 'yarn', 87, 100, 4, 1717507794400), 178 | (3, '1337', 'yarn', 96, 100, 4, 1717507794600), 179 | (3, '1337', 'yarn', 68, 100, 4, 1717507794800), 180 | (3, '1338', 'docker', 65, 100, 4, 1717507794000), 181 | (3, '1338', 'docker', 98, 100, 4, 1717507794200), 182 | (3, '1338', 'docker', 87, 100, 4, 1717507794400), 183 | (3, '1338', 'docker', 96, 100, 4, 1717507794600), 184 | (3, '1338', 'docker', 68, 100, 4, 1717507794800); 185 | -------------------------------------------------------------------------------- /fixtures/power_curves.sql: -------------------------------------------------------------------------------- 1 | DELETE FROM power_curve; 2 | 3 | INSERT INTO power_curve (id, a, b, c, d) 4 | VALUES (1, 7.627190097500079, 0.07551567953624883, 20.45110313049153, -1.5261422759740344); 5 | -------------------------------------------------------------------------------- /fixtures/runs.sql: -------------------------------------------------------------------------------- 1 | DELETE FROM run; 2 | 3 | INSERT INTO run (id, is_live, cpu_id, region, start_time, stop_time) 4 | VALUES 5 | (1, false, 1, 'GB', 1717507590000, 1717507601000), 6 | (2, false, 1, 'GB', 1717507690000, 1717507699000), 7 | (3, false, 1, 'GB', 1717507790000, 1717507795000); 8 | -------------------------------------------------------------------------------- /src/carbon_intensity.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Context; 2 | use chrono::{DateTime, Datelike, Months, Utc}; 3 | use phf::phf_map; 4 | use serde_json::Value; 5 | 6 | pub const GLOBAL_CI: f64 = 0.494; // g/Wh 7 | 8 | static ISO_3166: phf::Map<&'static str, &'static str> = phf_map! 
{ 9 | "AF" => "AFG", "AX" => "ALA", "AL" => "ALB", "DZ" => "DZA", "AS" => "ASM", "AD" => "AND", 10 | "AO" => "AGO", "AI" => "AIA", "AQ" => "ATA", "AG" => "ATG", "AR" => "ARG", "AM" => "ARM", 11 | "AW" => "ABW", "AU" => "AUS", "AT" => "AUT", "AZ" => "AZE", "BS" => "BHS", "BH" => "BHR", 12 | "BD" => "BGD", "BB" => "BRB", "BY" => "BLR", "BE" => "BEL", "BZ" => "BLZ", "BJ" => "BEN", 13 | "BM" => "BMU", "BT" => "BTN", "BO" => "BOL", "BQ" => "BES", "BA" => "BIH", "BW" => "BWA", 14 | "BV" => "BVT", "BR" => "BRA", "IO" => "IOT", "BN" => "BRN", "BG" => "BGR", "BF" => "BFA", 15 | "BI" => "BDI", "CV" => "CPV", "KH" => "KHM", "CM" => "CMR", "CA" => "CAN", "KY" => "CYM", 16 | "CF" => "CAF", "TD" => "TCD", "CL" => "CHL", "CN" => "CHN", "CX" => "CXR", "CC" => "CCK", 17 | "CO" => "COL", "KM" => "COM", "CG" => "COG", "CD" => "COD", "CK" => "COK", "CR" => "CRI", 18 | "CI" => "CIV", "HR" => "HRV", "CU" => "CUB", "CW" => "CUW", "CY" => "CYP", "CZ" => "CZE", 19 | "DK" => "DNK", "DJ" => "DJI", "DM" => "DMA", "DO" => "DOM", "EC" => "ECU", "EG" => "EGY", 20 | "SV" => "SLV", "GQ" => "GNQ", "ER" => "ERI", "EE" => "EST", "SZ" => "SWZ", "ET" => "ETH", 21 | "FK" => "FLK", "FO" => "FRO", "FJ" => "FJI", "FI" => "FIN", "FR" => "FRA", "GF" => "GUF", 22 | "PF" => "PYF", "TF" => "ATF", "GA" => "GAB", "GM" => "GMB", "GE" => "GEO", "DE" => "DEU", 23 | "GH" => "GHA", "GI" => "GIB", "GR" => "GRC", "GL" => "GRL", "GD" => "GRD", "GP" => "GLP", 24 | "GU" => "GUM", "GT" => "GTM", "GG" => "GGY", "GN" => "GIN", "GW" => "GNB", "GY" => "GUY", 25 | "HT" => "HTI", "HM" => "HMD", "VA" => "VAT", "HN" => "HND", "HK" => "HKG", "HU" => "HUN", 26 | "IS" => "ISL", "IN" => "IND", "ID" => "IDN", "IR" => "IRN", "IQ" => "IRQ", "IE" => "IRL", 27 | "IM" => "IMN", "IL" => "ISR", "IT" => "ITA", "JM" => "JAM", "JP" => "JPN", "JE" => "JEY", 28 | "JO" => "JOR", "KZ" => "KAZ", "KE" => "KEN", "KI" => "KIR", "KP" => "PRK", "KR" => "KOR", 29 | "KW" => "KWT", "KG" => "KGZ", "LA" => "LAO", "LV" => "LVA", "LB" => "LBN", "LS" => "LSO", 30 | "LR" => "LBR", "LY" => "LBY", "LI" => "LIE", "LT" => "LTU", "LU" => "LUX", "MO" => "MAC", 31 | "MG" => "MDG", "MW" => "MWI", "MY" => "MYS", "MV" => "MDV", "ML" => "MLI", "MT" => "MLT", 32 | "MH" => "MHL", "MQ" => "MTQ", "MR" => "MRT", "MU" => "MUS", "YT" => "MYT", "MX" => "MEX", 33 | "FM" => "FSM", "MD" => "MDA", "MC" => "MCO", "MN" => "MNG", "ME" => "MNE", "MS" => "MSR", 34 | "MA" => "MAR", "MZ" => "MOZ", "MM" => "MMR", "NA" => "NAM", "NR" => "NRU", "NP" => "NPL", 35 | "NL" => "NLD", "NC" => "NCL", "NZ" => "NZL", "NI" => "NIC", "NE" => "NER", "NG" => "NGA", 36 | "NU" => "NIU", "NF" => "NFK", "MK" => "MKD", "MP" => "MNP", "NO" => "NOR", "OM" => "OMN", 37 | "PK" => "PAK", "PW" => "PLW", "PS" => "PSE", "PA" => "PAN", "PG" => "PNG", "PY" => "PRY", 38 | "PE" => "PER", "PH" => "PHL", "PN" => "PCN", "PL" => "POL", "PT" => "PRT", "PR" => "PRI", 39 | "QA" => "QAT", "RE" => "REU", "RO" => "ROU", "RU" => "RUS", "RW" => "RWA", "BL" => "BLM", 40 | "SH" => "SHN", "KN" => "KNA", "LC" => "LCA", "MF" => "MAF", "PM" => "SPM", "VC" => "VCT", 41 | "WS" => "WSM", "SM" => "SMR", "ST" => "STP", "SA" => "SAU", "SN" => "SEN", "RS" => "SRB", 42 | "SC" => "SYC", "SL" => "SLE", "SG" => "SGP", "SX" => "SXM", "SK" => "SVK", "SI" => "SVN", 43 | "SB" => "SLB", "SO" => "SOM", "ZA" => "ZAF", "GS" => "SGS", "SS" => "SSD", "ES" => "ESP", 44 | "LK" => "LKA", "SD" => "SDN", "SR" => "SUR", "SJ" => "SJM", "SE" => "SWE", "CH" => "CHE", 45 | "SY" => "SYR", "TW" => "TWN", "TJ" => "TJK", "TZ" => "TZA", "TH" => "THA", "TL" => "TLS", 46 | "TG" => "TGO", "TK" => "TKL", 
"TO" => "TON", "TT" => "TTO", "TN" => "TUN", "TR" => "TUR", 47 | "TM" => "TKM", "TC" => "TCA", "TV" => "TUV", "UG" => "UGA", "UA" => "UKR", "AE" => "ARE", 48 | "GB" => "GBR", "US" => "USA", "UM" => "UMI", "UY" => "URY", "UZ" => "UZB", "VU" => "VUT", 49 | "VE" => "VEN", "VN" => "VNM", "VG" => "VGB", "VI" => "VIR", "WF" => "WLF", "EH" => "ESH", 50 | "YE" => "YEM", "ZM" => "ZMB", "ZW" => "ZWE", 51 | }; 52 | 53 | const EMBER_API_BASE_URL: &str = "https://api.ember-energy.org/v1/carbon-intensity"; 54 | const EMBER_KEY: &str = "c5e07f2c-5d07-4b99-a78e-661097d874e6"; 55 | 56 | pub fn valid_region_code(code: &str) -> bool { 57 | ISO_3166.get_key(code).is_some() 58 | } 59 | 60 | fn try_parse_region(json_obj: Value) -> Option { 61 | json_obj.get("country")?.as_str().map(|str| str.to_string()) 62 | } 63 | 64 | pub async fn fetch_region_code() -> anyhow::Result { 65 | let client = reqwest::Client::new(); 66 | 67 | let resp = client 68 | .get("https://api.country.is/") 69 | .header("Content-Type", "application/json") 70 | .send() 71 | .await?; 72 | 73 | let json_obj = resp.json().await?; 74 | try_parse_region(json_obj).context("Error fetching region from IP") 75 | } 76 | 77 | fn try_parse_ci(json_obj: &Value) -> Option { 78 | json_obj 79 | .get("stats")? 80 | .get("query_value_range")? 81 | .get("emissions_intensity_gco2_per_kwh")? 82 | .get("max")? 83 | .as_f64() 84 | .map(|ci| ci / 1000.0) // g/kWh -> g/Wh 85 | } 86 | 87 | /// Attempts to fetch carbon intensity for the given region from Ember. 88 | pub async fn fetch_ci(code: &str, date: &DateTime) -> anyhow::Result { 89 | let code = ISO_3166.get(code).context("Incorrect ISO 3166 code")?; 90 | 91 | let client = reqwest::Client::new(); 92 | 93 | let end = date; 94 | let start = end 95 | .checked_sub_months(Months::new(1)) 96 | .context("Error parsing month")?; 97 | 98 | let start_date = format!("{}-{}", start.year(), start.month()); 99 | let end_date = format!("{}-{}", end.year(), end.month()); 100 | 101 | let url = format!( 102 | "{}/monthly?entity_code={}&start_date={}&end_date={}&api_key={}", 103 | EMBER_API_BASE_URL, code, start_date, end_date, EMBER_KEY 104 | ); 105 | 106 | let resp = client 107 | .get(url) 108 | .header("Content-Type", "application/json") 109 | .send() 110 | .await?; 111 | 112 | let json_obj = resp.json().await?; 113 | try_parse_ci(&json_obj).context("Error parsing carbon intensity") 114 | } 115 | 116 | #[cfg(test)] 117 | mod tests { 118 | use super::*; 119 | 120 | #[tokio::test] 121 | async fn can_fetch_region_ci() -> anyhow::Result<()> { 122 | let now = Utc::now(); 123 | let ci = fetch_ci("GB", &now).await?; 124 | assert!(ci > 0.0); 125 | Ok(()) 126 | } 127 | 128 | #[tokio::test] 129 | async fn incorrect_region_should_cause_error() -> anyhow::Result<()> { 130 | let now = Utc::now(); 131 | let ci = fetch_ci("ZZ", &now).await; 132 | assert!(ci.is_err()); 133 | Ok(()) 134 | } 135 | 136 | #[tokio::test] 137 | async fn can_fetch_ip() -> anyhow::Result<()> { 138 | let region = fetch_region_code().await?; 139 | assert!(!region.is_empty()); 140 | 141 | let now = Utc::now(); 142 | let ci = fetch_ci(®ion, &now).await?; 143 | assert!(ci > 0.0); 144 | 145 | Ok(()) 146 | } 147 | } 148 | -------------------------------------------------------------------------------- /src/config.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Context; 2 | use itertools::Itertools; 3 | use serde::{Deserialize, Serialize}; 4 | use std::{ 5 | collections::HashSet, 6 | fs::{self, File}, 7 | io::{Read, Write}, 
8 | };
9 | 
10 | #[cfg(not(windows))]
11 | static EXAMPLE_CONFIG: &str = include_str!("templates/cardamon.unix.toml");
12 | #[cfg(windows)]
13 | static EXAMPLE_CONFIG: &str = include_str!("templates/cardamon.win.toml");
14 | 
15 | #[cfg(not(windows))]
16 | static LINE_ENDING: &str = "\n";
17 | #[cfg(windows)]
18 | static LINE_ENDING: &str = "\r\n";
19 | 
20 | // ******** ******** ********
21 | // **    CONFIGURATION     **
22 | // ******** ******** ********
23 | #[derive(Debug, Deserialize, Serialize)]
24 | pub struct Config {
25 |     pub cpu: Cpu,
26 |     #[serde(rename(serialize = "process", deserialize = "process"))]
27 |     pub processes: Vec<Process>,
28 |     #[serde(rename(serialize = "scenario", deserialize = "scenario"))]
29 |     pub scenarios: Vec<Scenario>,
30 |     #[serde(rename(serialize = "observation", deserialize = "observation"))]
31 |     pub observations: Vec<Observation>,
32 | }
33 | impl Config {
34 |     pub fn write_example_to_file(
35 |         cpu_name: &str,
36 |         cpu_power: Power,
37 |         path: &std::path::Path,
38 |     ) -> anyhow::Result<File> {
39 |         // split the example config into lines
40 |         let mut lines = EXAMPLE_CONFIG.lines().map(|s| s.to_string()).collect_vec();
41 | 
42 |         // prepend a [cpu] section containing the CPU name and its power characteristics
43 |         let mut new_conf_lines = match cpu_power {
44 |             Power::Tdp(tdp) => vec![
45 |                 "[cpu]".to_string(),
46 |                 format!("name = \"{}\"", cpu_name),
47 |                 format!("tdp = {}", tdp),
48 |                 "".to_string(),
49 |             ],
50 | 
51 |             Power::Curve(a, b, c, d) => vec![
52 |                 "[cpu]".to_string(),
53 |                 format!("name = \"{}\"", cpu_name),
54 |                 format!("curve = [{},{},{},{}]", a, b, c, d),
55 |                 "".to_string(),
56 |             ],
57 |         };
58 | 
59 |         new_conf_lines.append(&mut lines);
60 |         let conf_str = new_conf_lines.join(LINE_ENDING);
61 | 
62 |         // write to file
63 |         let mut file = File::create_new(path)?;
64 |         File::write_all(&mut file, conf_str.as_bytes())?;
65 |         Ok(file)
66 |     }
67 | 
68 |     pub fn try_from_path(path: &std::path::Path) -> anyhow::Result<Config> {
69 |         let mut config_str = String::new();
70 |         fs::File::open(path)?.read_to_string(&mut config_str)?;
71 |         Config::try_from_str(&config_str)
72 |     }
73 | 
74 |     pub fn try_from_str(conf_str: &str) -> anyhow::Result<Config> {
75 |         toml::from_str::<Config>(conf_str).map_err(|e| anyhow::anyhow!("TOML parsing error: {}", e))
76 |     }
77 | 
78 |     fn find_observation(&self, obs_name: &str) -> Option<&Observation> {
79 |         self.observations.iter().find(|obs| match obs {
80 |             Observation::LiveMonitor { name, processes: _ } => name == obs_name,
81 |             Observation::ScenarioRunner { name, scenarios: _ } => name == obs_name,
82 |         })
83 |     }
84 | 
85 |     pub fn find_scenario(&self, scenario_name: &str) -> anyhow::Result<&Scenario> {
86 |         self.scenarios
87 |             .iter()
88 |             .find(|scenario| scenario.name == scenario_name)
89 |             .context(format!(
90 |                 "Unable to find scenario with name {}",
91 |                 scenario_name
92 |             ))
93 |     }
94 | 
95 |     pub fn find_scenarios(&self, scenario_names: &[&String]) -> anyhow::Result<Vec<&Scenario>> {
96 |         let mut scenarios = vec![];
97 |         for scenario_name in scenario_names {
98 |             let scenario = self.find_scenario(scenario_name)?;
99 |             scenarios.push(scenario);
100 |         }
101 |         Ok(scenarios)
102 |     }
103 | 
104 |     /// Finds a process in the config with the given name.
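    /// Matching is by exact, case-sensitive name.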
    ///
    /// # Arguments
    /// * proc_name - the name of the process to find
    ///
    /// # Returns
    /// The process if it can be found, an error otherwise
    fn find_process(&self, proc_name: &str) -> anyhow::Result<&Process> {
        self.processes
            .iter()
            .find(|proc| proc.name == proc_name)
            .context(format!("Unable to find process with name {}", proc_name))
    }

    fn find_processes(&self, proc_names: &[&String]) -> anyhow::Result<Vec<&Process>> {
        let mut processes = vec![];
        for proc_name in proc_names {
            let proc = self.find_process(proc_name)?;
            processes.push(proc);
        }
        Ok(processes)
    }

    pub fn create_execution_plan(
        &self,
        cpu: Cpu,
        obs_name: &str,
        external_only: bool,
    ) -> anyhow::Result<ExecutionPlan> {
        let obs = self.find_observation(obs_name).context(format!(
            "Couldn't find an observation with name {}",
            obs_name
        ))?;

        let mut processes_to_execute = vec![];

        let exec_plan = match &obs {
            Observation::ScenarioRunner { name: _, scenarios } => {
                let scenario_names = scenarios.iter().collect_vec();
                let scenarios = self.find_scenarios(&scenario_names)?;

                // find the union of processes across all the scenarios
                if !external_only {
                    let mut proc_set: HashSet<String> = HashSet::new();
                    for scenario_name in scenario_names {
                        let scenario = self.find_scenario(scenario_name).context(format!(
                            "Unable to find scenario with name {}",
                            scenario_name
                        ))?;
                        for proc_name in &scenario.processes {
                            proc_set.insert(proc_name.clone());
                        }
                    }

                    let proc_names = proc_set.iter().collect_vec();
                    processes_to_execute = self.find_processes(&proc_names)?;
                }
                ExecutionPlan::new(
                    cpu,
                    processes_to_execute,
                    ExecutionMode::Observation(scenarios),
                )
            }

            Observation::LiveMonitor { name: _, processes } => {
                if !external_only {
                    let proc_names = processes.iter().collect_vec();
                    processes_to_execute = self.find_processes(&proc_names)?;
                }
                ExecutionPlan::new(cpu, processes_to_execute, ExecutionMode::Live)
            }
        };

        Ok(exec_plan)
    }
}

#[derive(Debug, Deserialize, PartialEq, Serialize, Clone)]
#[serde(rename_all = "lowercase")]
pub enum Power {
    Curve(f64, f64, f64, f64),
    Tdp(f64),
}

#[derive(Debug, Deserialize, PartialEq, Serialize, Clone)]
pub struct Cpu {
    pub name: String,
    #[serde(flatten)]
    pub power: Power,
}

#[derive(Debug, Deserialize, PartialEq, Clone, Copy, Serialize)]
#[serde(tag = "to", rename_all = "lowercase")]
pub enum Redirect {
    Null,
    Parent,
    File,
}

#[derive(Debug, Deserialize, PartialEq, Serialize)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum ProcessType {
    BareMetal,
    Docker { containers: Vec<String> },
}

#[derive(Debug, Deserialize, PartialEq, Serialize)]
pub struct Process {
    pub name: String,
    pub up: String,
    pub down: Option<String>,
    pub redirect: Option<Redirect>,
    #[serde(rename = "process")]
    pub process_type: ProcessType,
}

#[derive(Debug, Deserialize, PartialEq, Serialize)]
pub struct Scenario {
    pub name: String,
    pub desc: String,
    pub command: String,
    pub iterations: i32,
    pub processes: Vec<String>,
}

#[derive(Debug, Deserialize, PartialEq, Serialize)]
#[serde(untagged)]
pub enum Observation {
    LiveMonitor {
        name: String,
        processes: Vec<String>,
    },
    ScenarioRunner {
        name: String,
        scenarios: Vec<String>,
    },
}

// #[derive(Debug, Deserialize, Serialize)]
// pub struct Observation {
//     pub name: String,
//     #[serde(rename = "observe")]
//     pub observation_mode: ObservationMode,
// }

// ******** ******** ********
// **    EXECUTION PLAN    **
// ******** ******** ********
#[derive(Debug, Clone)]
pub enum ProcessToObserve {
    ExternalPid(u32),
    ExternalContainers(Vec<String>),

    /// ManagedPid represents a baremetal process started by Cardamon
    ManagedPid {
        process_name: String,
        pid: u32,
        down: Option<String>,
    },

    /// ManagedContainers represents docker processes started by Cardamon
    ManagedContainers {
        process_name: String,
        container_names: Vec<String>,
        down: Option<String>,
    },
}

#[derive(Debug)]
pub enum ExecutionMode<'a> {
    Live,
    Observation(Vec<&'a Scenario>),
    Trigger,
}

#[derive(Debug)]
pub struct ExecutionPlan<'a> {
    pub cpu: Cpu,
    pub external_processes_to_observe: Option<Vec<ProcessToObserve>>,
    pub processes_to_execute: Vec<&'a Process>,
    pub execution_mode: ExecutionMode<'a>,
}
impl<'a> ExecutionPlan<'a> {
    pub fn new(
        cpu: Cpu,
        processes_to_execute: Vec<&'a Process>,
        execution_mode: ExecutionMode<'a>,
    ) -> Self {
        ExecutionPlan {
            cpu,
            external_processes_to_observe: None,
            processes_to_execute,
            execution_mode,
        }
    }

    /// Adds a process that has not been started by Cardamon to this execution plan for observation.
    ///
    /// # Arguments
    /// * process_to_observe - A process which has been started externally to Cardamon.
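    ///
    /// # Example
    ///
    /// A sketch only; the PID and container name below are placeholders rather
    /// than values taken from a real run.
    ///
    /// ```ignore
    /// let mut plan = ExecutionPlan::new(cpu, vec![], ExecutionMode::Live);
    /// plan.observe_external_process(ProcessToObserve::ExternalPid(1234));
    /// plan.observe_external_process(ProcessToObserve::ExternalContainers(vec![
    ///     "postgres".to_string(),
    /// ]));
    /// ```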
304 | pub fn observe_external_process(&mut self, process_to_observe: ProcessToObserve) { 305 | match &mut self.external_processes_to_observe { 306 | None => self.external_processes_to_observe = Some(vec![process_to_observe]), 307 | Some(vec) => vec.push(process_to_observe), 308 | }; 309 | } 310 | } 311 | 312 | #[cfg(test)] 313 | mod tests { 314 | use itertools::Itertools; 315 | 316 | use super::*; 317 | use std::path::Path; 318 | 319 | #[test] 320 | fn can_load_config_file() -> anyhow::Result<()> { 321 | Config::try_from_path(Path::new("./fixtures/cardamon.success.toml"))?; 322 | Ok(()) 323 | } 324 | 325 | #[test] 326 | fn can_find_observation_by_name() -> anyhow::Result<()> { 327 | let cfg = Config::try_from_path(Path::new("./fixtures/cardamon.success.toml"))?; 328 | let observation = cfg.find_observation("checkout"); 329 | assert!(observation.is_some()); 330 | 331 | let observation = cfg.find_observation("nope"); 332 | assert!(observation.is_none()); 333 | 334 | Ok(()) 335 | } 336 | 337 | #[test] 338 | fn can_find_scenario_by_name() -> anyhow::Result<()> { 339 | let cfg = Config::try_from_path(Path::new("./fixtures/cardamon.multiple_scenarios.toml"))?; 340 | let scenario = cfg.find_scenario("user_signup"); 341 | assert!(scenario.is_ok()); 342 | 343 | let scenario = cfg.find_scenario("nope"); 344 | assert!(scenario.is_err()); 345 | 346 | Ok(()) 347 | } 348 | 349 | #[test] 350 | fn can_find_process_by_name() -> anyhow::Result<()> { 351 | let cfg = Config::try_from_path(Path::new("./fixtures/cardamon.success.toml"))?; 352 | let process = cfg.find_process("server"); 353 | assert!(process.is_ok()); 354 | 355 | let process = cfg.find_process("nope"); 356 | assert!(process.is_err()); 357 | 358 | Ok(()) 359 | } 360 | 361 | // #[test] 362 | // fn collecting_processes_works() -> anyhow::Result<()> { 363 | // let cfg = Config::try_from_path(Path::new("./fixtures/cardamon.multiple_scenarios.toml"))?; 364 | // 365 | // let obs_name = "test_app"; 366 | // let obs = cfg.find_observation(obs_name).context("")?; 367 | // 368 | // let process_names = cfg 369 | // .collect_processes(obs.)? 
    //             .into_iter()
    //             .map(|proc_to_exec| match proc_to_exec.process.process_type {
    //                 ProcessType::BareMetal => proc_to_exec.process.name.as_str(),
    //                 ProcessType::Docker { containers: _ } => proc_to_exec.process.name.as_str(),
    //             })
    //             .sorted()
    //             .collect::<Vec<_>>();
    //
    //     assert_eq!(process_names, ["db", "mailgun", "server"]);
    //
    //     Ok(())
    // }

    // #[test]
    // fn multiple_iterations_should_create_more_scenarios_to_execute() -> anyhow::Result<()> {
    //     let cfg = Config::try_from_path(Path::new("./fixtures/cardamon.multiple_iterations.toml"))?;
    //     let scenario = cfg
    //         .find_scenario("basket_10")
    //         .expect("scenario 'basket_10' should exist!");
    //     let scenarios_to_execute = vec![ScenarioToExecute::new(scenario)];
    //     assert_eq!(scenarios_to_execute.len(), 2);
    //     Ok(())
    // }

    #[test]
    fn can_create_exec_plan_for_observation() -> anyhow::Result<()> {
        let cfg = Config::try_from_path(Path::new("./fixtures/cardamon.multiple_scenarios.toml"))?;

        let cpu = Cpu {
            name: "AMD Ryzen 7 6850U".to_string(),
            power: Power::Tdp(11.2),
        };

        let exec_plan = cfg.create_execution_plan(cpu, "checkout", false)?;
        match exec_plan.execution_mode {
            ExecutionMode::Observation(scenarios) => {
                let scenario_names = scenarios
                    .iter()
                    .map(|s| s.name.as_str())
                    .sorted()
                    .collect_vec();

                let process_names: Vec<&str> = exec_plan
                    .processes_to_execute
                    .into_iter()
                    .map(|proc| match proc.process_type {
                        ProcessType::Docker { containers: _ } => proc.name.as_str(),
                        ProcessType::BareMetal => proc.name.as_str(),
                    })
                    .sorted()
                    .collect();

                assert_eq!(scenario_names, ["basket_10", "user_signup"]);
                assert_eq!(process_names, ["db", "mailgun", "server"]);
            }

            _ => panic!("oops! was expecting ExecutionMode::Observation"),
        }

        Ok(())
    }

    #[test]
    fn can_create_exec_plan_for_monitor() -> anyhow::Result<()> {
        let cfg = Config::try_from_path(Path::new("./fixtures/cardamon.multiple_scenarios.toml"))?;

        let cpu = Cpu {
            name: "AMD Ryzen 7 6850U".to_string(),
            power: Power::Tdp(11.2),
        };

        let exec_plan = cfg.create_execution_plan(cpu, "live_monitor", false)?;
        match exec_plan.execution_mode {
            ExecutionMode::Live => {
                let process_names: Vec<&str> = exec_plan
                    .processes_to_execute
                    .into_iter()
                    .map(|proc| proc.name.as_str())
                    .sorted()
                    .collect();

                assert_eq!(process_names, ["db", "mailgun", "server"]);
            }

            _ => panic!("oops! was expecting ExecutionMode::Live"),
        }
        Ok(())
    }
}

--------------------------------------------------------------------------------
/src/dao.rs:
--------------------------------------------------------------------------------
1 | pub mod cpu;
2 | pub mod iteration;
3 | pub mod metrics;
4 | pub mod pagination;
5 | pub mod run;
6 | pub mod scenario;
7 | 
--------------------------------------------------------------------------------
/src/dao/cpu.rs:
--------------------------------------------------------------------------------
1 | use crate::entities::cpu;
2 | use anyhow::{self, Context};
3 | use sea_orm::*;
4 | 
5 | pub async fn fetch_by_name(name: &str, db: &DatabaseConnection) -> anyhow::Result<cpu::Model> {
6 |     cpu::Entity::find()
7 |         .filter(cpu::Column::Name.eq(name))
8 |         .one(db)
9 |         .await?
10 |         .context(format!("Error fetching CPU with name {}", name))
11 | }
12 | 
--------------------------------------------------------------------------------
/src/dao/iteration.rs:
--------------------------------------------------------------------------------
1 | use super::pagination::Page;
2 | use crate::{
3 |     dao::pagination::Pages,
4 |     entities::iteration::{self, Entity as Iteration},
5 | };
6 | use anyhow::{self, Context};
7 | use sea_orm::*;
8 | use sea_query::{Alias, Query};
9 | use tracing::trace;
10 | 
11 | #[derive(DerivePartialModel, FromQueryResult)]
12 | #[sea_orm(entity = "Iteration")]
13 | pub struct RunId {
14 |     pub run_id: i32,
15 | }
16 | 
17 | // VERIFIED (NoPage)
18 | pub async fn fetch_runs_all(
19 |     scenarios: &Vec<String>,
20 |     page: Option<Page>,
21 |     db: &DatabaseConnection,
22 | ) -> anyhow::Result<(Vec<iteration::Model>, Pages)> {
23 |     if scenarios.is_empty() {
24 |         return Err(anyhow::anyhow!("Cannot get runs for no scenarios!"));
25 |     }
26 |     trace!("page = {:?}", page);
27 | 
28 |     match page {
29 |         Some(Page { size, num }) => {
30 |             if scenarios.len() > 1 {
31 |                 return Err(anyhow::anyhow!(
32 |                     "Unable to paginate over runs if multiple scenarios are selected!"
33 |                 ));
34 |             }
35 | 
36 |             // get count without pagination
37 |             let count_query = iteration::Entity::find()
38 |                 .select_only()
39 |                 .select_column(iteration::Column::RunId)
40 |                 .distinct()
41 |                 .order_by(iteration::Column::StartTime, Order::Desc);
42 |             let count = count_query.count(db).await?;
43 |             let page_count = (count as f64 / size as f64).ceil() as u64;
44 |             trace!("count = {}", count);
45 | 
46 |             // get data
47 |             let sub_query = Query::select()
48 |                 .column(iteration::Column::RunId)
49 |                 .distinct()
50 |                 .from(iteration::Entity)
51 |                 .order_by(iteration::Column::StartTime, Order::Desc)
52 |                 .limit(size)
53 |                 .offset(size * num)
54 |                 .to_owned();
55 |             let query = iteration::Entity::find()
56 |                 .filter(iteration::Column::ScenarioName.is_in(scenarios))
57 |                 .filter(iteration::Column::RunId.in_subquery(sub_query))
58 |                 .order_by_desc(iteration::Column::StartTime);
59 | 
60 |             // println!("\n [QUERY] {:?}", query.build(DatabaseBackend::Sqlite).sql);
61 | 
62 |             let res = query.all(db).await?;
63 | 
64 |             Ok((res, Pages::Required(page_count)))
65 |         }
66 | 
67 |         None => {
68 |             let sub_query = Query::select()
69 |                 .column(iteration::Column::RunId)
70 |                 .distinct()
71 |                 .from(iteration::Entity)
72 |                 .order_by(iteration::Column::StartTime, Order::Desc)
73 |                 .to_owned();
74 |             let query = iteration::Entity::find()
75 |                 .filter(iteration::Column::ScenarioName.is_in(scenarios))
76 |                 .filter(iteration::Column::RunId.in_subquery(sub_query))
77 |                 .order_by_desc(iteration::Column::StartTime);
78 | 
79 |             // println!("\n [QUERY] {:?}", query.build(DatabaseBackend::Sqlite).sql);
80 | 
81 |             let res = query.all(db).await?;
82 |             Ok((res, Pages::NotRequired))
83 |         }
84 |     }
85 | }
86 | 
87 | // VERIFIED (NoPage)
88 | /// Return all iterations for the given scenarios in the given date range. Page the results.
89 | pub async fn fetch_runs_in_range(
90 |     scenarios: &Vec<String>,
91 |     from: i64,
92 |     to: i64,
93 |     page: Option<Page>,
94 |     db: &DatabaseConnection,
95 | ) -> anyhow::Result<(Vec<iteration::Model>, Pages)> {
96 |     if scenarios.is_empty() {
97 |         return Err(anyhow::anyhow!("Cannot get runs for no scenarios!"));
98 |     }
99 |     trace!("page = {:?}", page);
100 | 
101 |     match page {
102 |         Some(Page { size, num }) => {
103 |             if scenarios.len() > 1 {
104 |                 return Err(anyhow::anyhow!(
105 |                     "Unable to paginate over runs if multiple scenarios are selected!"
106 |                 ));
107 |             }
108 | 
109 |             // get count (using the same bounds as the data sub-query below)
110 |             let count_query = iteration::Entity::find()
111 |                 .select_only()
112 |                 .select_column(iteration::Column::RunId)
113 |                 .distinct()
114 |                 .filter(iteration::Column::StopTime.gte(from))
115 |                 .filter(iteration::Column::StartTime.lte(to))
116 |                 .order_by(iteration::Column::StartTime, Order::Desc);
117 |             let count = count_query.count(db).await?;
118 |             let page_count = (count as f64 / size as f64).ceil() as u64;
119 | 
120 |             // get data
121 |             let sub_query = Query::select()
122 |                 .column(iteration::Column::RunId)
123 |                 .distinct()
124 |                 .from(iteration::Entity)
125 |                 .cond_where(iteration::Column::StopTime.gte(from))
126 |                 .and_where(iteration::Column::StartTime.lte(to))
127 |                 .order_by(iteration::Column::StartTime, Order::Desc)
128 |                 .limit(size)
129 |                 .offset(size * num)
130 |                 .to_owned();
131 |             let query = iteration::Entity::find()
132 |                 .filter(iteration::Column::ScenarioName.is_in(scenarios))
133 |                 .filter(iteration::Column::RunId.in_subquery(sub_query))
134 |                 .order_by_desc(iteration::Column::StartTime);
135 | 
136 |             let res = query.all(db).await?;
137 | 
138 |             Ok((res, Pages::Required(page_count)))
139 |         }
140 | 
141 |         None => {
142 |             let sub_query = Query::select()
143 |                 .column(iteration::Column::RunId)
144 |                 .distinct()
145 |                 .from(iteration::Entity)
146 |                 .cond_where(iteration::Column::StopTime.gte(from))
147 |                 .and_where(iteration::Column::StartTime.lte(to))
148 |                 .order_by(iteration::Column::StartTime, Order::Desc)
149 |                 .to_owned();
150 |             let query = iteration::Entity::find()
151 |                 .filter(iteration::Column::ScenarioName.is_in(scenarios))
152 |                 .filter(iteration::Column::RunId.in_subquery(sub_query))
153 |                 .order_by_desc(iteration::Column::StartTime);
154 | 
155 |             // println!("\n [QUERY] {}", query.build(DatabaseBackend::Sqlite).sql);
156 | 
157 |             let res = query.all(db).await?;
158 |             Ok((res, Pages::NotRequired))
159 |         }
160 |     }
161 | }
162 | 
163 | // VERIFIED (NoPage)
164 | pub async fn fetch_runs_last_n(
165 |     scenarios: &Vec<String>,
166 |     last_n: u64,
167 |     page: Option<Page>,
168 |     db: &DatabaseConnection,
169 | ) -> anyhow::Result<(Vec<iteration::Model>, Pages)> {
170 |     if scenarios.is_empty() {
171 |         return Err(anyhow::anyhow!("Cannot get runs for no scenarios!"));
172 |     }
173 | 
174 |     match page {
175 |         Some(Page { size, num }) => {
176 |             if scenarios.len() > 1 {
177 |                 return Err(anyhow::anyhow!(
178 |                     "Unable to paginate over runs if multiple scenarios are selected!"
179 |                 ));
180 |             }
181 | 
182 |             // get count
183 |             let count_query = iteration::Entity::find()
184 |                 .select_only()
185 |                 .select_column(iteration::Column::RunId)
186 |                 .distinct()
187 |                 .order_by(iteration::Column::StartTime, Order::Desc)
188 |                 .limit(last_n);
189 |             let count = count_query.count(db).await?;
190 |             let page_count = (count as f64 / size as f64).ceil() as u64;
191 | 
192 |             // get data
193 |             let sub_sub_query = Query::select()
194 |                 .column(iteration::Column::RunId)
195 |                 .distinct()
196 |                 .from(iteration::Entity)
197 |                 .order_by(iteration::Column::StartTime, Order::Desc)
198 |                 .limit(last_n)
199 |                 .to_owned();
200 |             let sub_query = Query::select()
201 |                 .from_subquery(sub_sub_query, Alias::new("A"))
202 |                 .limit(size)
203 |                 .offset(size * num)
204 |                 .to_owned();
205 |             let query = iteration::Entity::find()
206 |                 .filter(iteration::Column::ScenarioName.is_in(scenarios))
207 |                 .filter(iteration::Column::RunId.in_subquery(sub_query))
208 |                 .order_by_desc(iteration::Column::StartTime);
209 | 
210 |             let res = query.all(db).await?;
211 | 
212 |             Ok((res, Pages::Required(page_count)))
213 | 
214 |             // // SELECT *
215 |             // // FROM iteration
216 |             // // WHERE scenario_name IN ?1 AND run_id IN (
217 |             // //     SELECT run_id
218 |             // //     FROM iteration
219 |             // //     WHERE scenario_name IN ?1
220 |             // //     GROUP BY run_id
221 |             // //     ORDER BY start_time DESC
222 |             // //     LIMIT ?2
223 |             // // )
224 |             // let scenario = scenarios.first().unwrap();
225 |             //
226 |             // let sub_query = Query::select()
227 |             //     .expr(Expr::col(iteration::Column::RunId))
228 |             //     .from(iteration::Entity)
229 |             //     .cond_where(iteration::Column::ScenarioName.eq(scenario))
230 |             //     .group_by_col(iteration::Column::RunId)
231 |             //     .order_by(iteration::Column::StartTime, Order::Desc)
232 |             //     .limit(last_n)
233 |             //     .to_owned();
234 |             //
235 |             // let query = iteration::Entity::find().filter(
236 |             //     Condition::all()
237 |             //         .add(iteration::Column::ScenarioName.eq(scenario))
238 |             //         .add(iteration::Column::RunId.in_subquery(sub_query)),
239 |             // );
240 |             //
241 |             // let count = query.clone().count(db).await?;
242 |             //
243 |             // let res = query.paginate(db, page.size).fetch_page(page.num).await?;
244 |             //
245 |             // Ok((res, Pages::Required(page_count)))
246 |         }
247 | 
248 |         None => {
249 |             let mut res = vec![];
250 |             for scenario in scenarios {
251 |                 let sub_query = Query::select()
252 |                     .column(iteration::Column::RunId)
253 |                     .distinct()
254 |                     .from(iteration::Entity)
255 |                     .cond_where(iteration::Column::ScenarioName.eq(scenario))
256 |                     .order_by(iteration::Column::StartTime, Order::Desc)
257 |                     .limit(last_n)
258 |                     .to_owned();
259 |                 let query = iteration::Entity::find()
260 |                     .filter(iteration::Column::ScenarioName.eq(scenario))
261 |                     .filter(iteration::Column::RunId.in_subquery(sub_query))
262 |                     .order_by_desc(iteration::Column::StartTime);
263 | 
264 |                 let mut iterations = query.all(db).await?;
265 |                 res.append(&mut iterations);
266 |             }
267 | 
268 |             Ok((res, Pages::NotRequired))
269 |         }
270 |     }
271 | }
272 | 
273 | pub async fn fetch_live(run_id: i32, db: &DatabaseConnection) -> anyhow::Result<iteration::Model> {
274 |     iteration::Entity::find()
275 |         .filter(iteration::Column::RunId.eq(run_id))
276 |         .one(db)
277 |         .await?
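        // .one() yields an Option, so .context() turns a missing row into an error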
278 |         .context(format!("Unable to find live iteration for run {}", run_id))
279 | }
280 | 
281 | #[cfg(test)]
282 | mod tests {
283 |     use crate::{dao, db_connect, db_migrate, tests::setup_fixtures};
284 | 
285 |     #[tokio::test]
286 |     async fn fetch_iterations_of_last_n_runs_for_schema() -> anyhow::Result<()> {
287 |         let db = db_connect("sqlite::memory:", None).await?;
288 |         db_migrate(&db).await?;
289 |         setup_fixtures(
290 |             &[
291 |                 "./fixtures/power_curves.sql",
292 |                 "./fixtures/cpus.sql",
293 |                 "./fixtures/runs.sql",
294 |                 "./fixtures/iterations.sql",
295 |             ],
296 |             &db,
297 |         )
298 |         .await?;
299 | 
300 |         // fetch the latest scenario_1 run
301 |         let (scenario_iterations, _) =
302 |             dao::iteration::fetch_runs_last_n(&vec!["scenario_1".to_string()], 1, None, &db)
303 |                 .await?;
304 | 
305 |         let run_ids = scenario_iterations
306 |             .iter()
307 |             .map(|run| run.run_id)
308 |             .collect::<Vec<_>>();
309 |         assert_eq!(run_ids, vec![1]);
310 | 
311 |         let iterations = scenario_iterations
312 |             .iter()
313 |             .map(|run| run.count)
314 |             .collect::<Vec<_>>();
315 |         assert_eq!(iterations, vec![1]);
316 | 
317 |         // fetch the last 2 scenario_3 runs
318 |         let (scenario_iterations, _) =
319 |             dao::iteration::fetch_runs_last_n(&vec!["scenario_3".to_string()], 2, None, &db)
320 |                 .await?;
321 | 
322 |         let run_ids = scenario_iterations
323 |             .iter()
324 |             .map(|run| run.run_id)
325 |             .collect::<Vec<_>>();
326 |         assert_eq!(run_ids, vec![3, 3, 3, 2, 2, 2]);
327 | 
328 |         let iterations = scenario_iterations
329 |             .iter()
330 |             .map(|run| run.count)
331 |             .collect::<Vec<_>>();
332 |         assert_eq!(iterations, vec![3, 2, 1, 3, 2, 1]);
333 | 
334 |         Ok(())
335 |     }
336 | }
337 | 
--------------------------------------------------------------------------------
/src/dao/metrics.rs:
--------------------------------------------------------------------------------
1 | use crate::entities::metrics;
2 | use anyhow::{self, Context};
3 | use sea_orm::*;
4 | 
5 | pub async fn fetch_within(
6 |     run: i32,
7 |     from: i64,
8 |     to: i64,
9 |     db: &DatabaseConnection,
10 | ) -> anyhow::Result<Vec<metrics::Model>> {
11 |     let query = metrics::Entity::find().filter(
12 |         Condition::all()
13 |             .add(metrics::Column::RunId.eq(run))
14 |             .add(metrics::Column::TimeStamp.gte(from))
15 |             .add(metrics::Column::TimeStamp.lte(to)),
16 |     );
17 | 
18 |     query.all(db).await.context(format!(
19 |         "Error fetching metrics gathered between: {} and {}",
20 |         from, to
21 |     ))
22 | }
23 | 
24 | #[cfg(test)]
25 | mod tests {
26 |     use crate::{dao, db_connect, db_migrate, tests::setup_fixtures};
27 |     use itertools::Itertools;
28 | 
29 |     #[tokio::test]
30 |     async fn fetch_metrics_within() -> anyhow::Result<()> {
31 |         let db = db_connect("sqlite::memory:", None).await?;
32 |         db_migrate(&db).await?;
33 |         setup_fixtures(
34 |             &[
35 |                 "./fixtures/power_curves.sql",
36 |                 "./fixtures/cpus.sql",
37 |                 "./fixtures/runs.sql",
38 |                 "./fixtures/metrics.sql",
39 |             ],
40 |             &db,
41 |         )
42 |         .await?;
43 | 
44 |         let metrics = dao::metrics::fetch_within(1, 1717507600000, 1717507600200, &db).await?;
45 | 
46 |         assert_eq!(metrics.len(), 4);
47 | 
48 |         let process_names: Vec<&str> = metrics
49 |             .iter()
50 |             .map(|metric| metric.process_name.as_str())
51 |             .unique()
52 |             .collect();
53 | 
54 |         assert_eq!(process_names, vec!["yarn", "docker"]);
55 | 
56 |         Ok(())
57 |     }
58 | }
59 | 
--------------------------------------------------------------------------------
/src/dao/pagination.rs:
--------------------------------------------------------------------------------
1 | #[derive(Debug)]
2 | pub struct Page {
3 |     pub size: u64,
4 |     pub num: u64,
5 | }
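// Example (sketch): Page::new(10, 2) describes the third page (num is
// zero-based) of ten results, so offset() is 20. This mirrors the
// `size * num` offsets used by the DAO sub-queries.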
6 | impl Page {
7 |     pub fn new(size: u64, num: u64) -> Self {
8 |         Self { size, num }
9 |     }
10 | 
11 |     pub fn offset(&self) -> u64 {
12 |         self.size * self.num
13 |     }
14 | }
15 | 
16 | #[derive(Debug)]
17 | pub enum Pages {
18 |     NotRequired,
19 |     Required(u64),
20 | }
21 | 
--------------------------------------------------------------------------------
/src/dao/run.rs:
--------------------------------------------------------------------------------
1 | use crate::entities::run;
2 | use anyhow::{self, Context};
3 | use sea_orm::*;
4 | 
5 | pub async fn fetch(id: i32, db: &DatabaseConnection) -> anyhow::Result<run::Model> {
6 |     run::Entity::find()
7 |         .filter(run::Column::Id.eq(id))
8 |         .one(db)
9 |         .await?
10 |         .context(format!("Error fetching run with id {}", id))
11 | }
12 | 
--------------------------------------------------------------------------------
/src/dao/scenario.rs:
--------------------------------------------------------------------------------
1 | use super::pagination::{Page, Pages};
2 | use crate::entities::iteration::{self, Entity as Iteration};
3 | use anyhow::{self, Context};
4 | use sea_orm::*;
5 | use tracing::trace;
6 | 
7 | #[derive(DerivePartialModel, FromQueryResult, Debug)]
8 | #[sea_orm(entity = "Iteration")]
9 | pub struct ScenarioName {
10 |     pub scenario_name: String,
11 | }
12 | 
13 | pub async fn fetch(name: &String, db: &DatabaseConnection) -> anyhow::Result<Option<ScenarioName>> {
14 |     iteration::Entity::find()
15 |         .select_only()
16 |         .select_column(iteration::Column::ScenarioName)
17 |         .distinct()
18 |         .filter(iteration::Column::ScenarioName.eq(name))
19 |         .into_partial_model::<ScenarioName>()
20 |         .one(db)
21 |         .await
22 |         .map_err(anyhow::Error::from)
23 | }
24 | 
25 | pub async fn fetch_all(
26 |     page: &Option<Page>,
27 |     db: &DatabaseConnection,
28 | ) -> anyhow::Result<(Vec<ScenarioName>, Pages)> {
29 |     trace!("page = {:?}", page);
30 |     let query = iteration::Entity::find()
31 |         .select_only()
32 |         .select_column(iteration::Column::ScenarioName)
33 |         .distinct()
34 |         .order_by_desc(iteration::Column::StartTime);
35 | 
36 |     match page {
37 |         Some(page) => {
38 |             let count = query.clone().count(db).await?;
39 |             let page_count = (count as f64 / page.size as f64).ceil() as u64;
40 | 
41 |             let res = query
42 |                 .into_partial_model()
43 |                 .paginate(db, page.size)
44 |                 .fetch_page(page.num)
45 |                 .await?;
46 | 
47 |             Ok((res, Pages::Required(page_count)))
48 |         }
49 | 
50 |         None => {
51 |             let res = query.into_partial_model().all(db).await?;
52 |             Ok((res, Pages::NotRequired))
53 |         }
54 |     }
55 | }
56 | 
57 | pub async fn fetch_in_run(
58 |     run: &str,
59 |     page: &Option<Page>,
60 |     db: &DatabaseConnection,
61 | ) -> anyhow::Result<(Vec<ScenarioName>, Pages)> {
62 |     trace!("page = {:?}", page);
63 |     let query = iteration::Entity::find()
64 |         .select_only()
65 |         .select_column(iteration::Column::ScenarioName)
66 |         .distinct()
67 |         .filter(iteration::Column::RunId.eq(run))
68 |         .order_by_desc(iteration::Column::StartTime);
69 | 
70 |     match page {
71 |         Some(page) => {
72 |             let count = query.clone().count(db).await.context(format!(
73 |                 "Error counting all scenarios executed in run {}",
74 |                 run
75 |             ))?;
76 |             let page_count = (count as f64 / page.size as f64).ceil() as u64;
77 | 
78 |             let res = query
79 |                 .into_partial_model()
80 |                 .paginate(db, page.size)
81 |                 .fetch_page(page.num)
82 |                 .await?;
83 | 
84 |             Ok((res, Pages::Required(page_count)))
85 |         }
86 | 
87 |         None => {
88 |             let res = query.into_partial_model().all(db).await?;
89 |             Ok((res, Pages::NotRequired))
90 |         }
91 |     }
92 | }
93 | 
94 | pub async fn fetch_in_range(
95 |     from: i64,
96 |     to: i64,
97 |     page: &Option<Page>,
98 |     db: &DatabaseConnection,
99 | ) -> anyhow::Result<(Vec<ScenarioName>, Pages)> {
100 |     trace!("page = {:?}", page);
101 |     let query = iteration::Entity::find()
102 |         .select_only()
103 |         .select_column(iteration::Column::ScenarioName)
104 |         .distinct()
105 |         .filter(
106 |             Condition::all()
107 |                 .add(iteration::Column::StopTime.gt(from))
108 |                 .add(iteration::Column::StartTime.lt(to)),
109 |         )
110 |         .order_by_desc(iteration::Column::StartTime);
111 | 
112 |     match page {
113 |         Some(page) => {
114 |             let count = query.clone().count(db).await.context(format!(
115 |                 "Error counting scenarios in run between: from {}, to {}",
116 |                 from, to
117 |             ))?;
118 |             let page_count = (count as f64 / page.size as f64).ceil() as u64;
119 | 
120 |             let res = query
121 |                 .into_partial_model()
122 |                 .paginate(db, page.size)
123 |                 .fetch_page(page.num)
124 |                 .await?;
125 | 
126 |             Ok((res, Pages::Required(page_count)))
127 |         }
128 | 
129 |         None => {
130 |             let res = query.into_partial_model().all(db).await?;
131 |             Ok((res, Pages::NotRequired))
132 |         }
133 |     }
134 | }
135 | 
136 | // VERIFIED
137 | pub async fn fetch_by_query(
138 |     name: &str,
139 |     page: &Option<Page>,
140 |     db: &DatabaseConnection,
141 | ) -> anyhow::Result<(Vec<ScenarioName>, Pages)> {
142 |     trace!("page = {:?}", page);
143 |     let query = iteration::Entity::find()
144 |         .select_only()
145 |         .select_column(iteration::Column::ScenarioName)
146 |         .distinct()
147 |         .filter(iteration::Column::ScenarioName.like(format!("%{}%", name)))
148 |         .order_by_desc(iteration::Column::StartTime);
149 | 
150 |     // println!("\n [QUERY] {:?}", query.build(DatabaseBackend::Sqlite).sql);
151 | 
152 |     match page {
153 |         Some(page) => {
154 |             let count = query.clone().count(db).await?;
155 |             let page_count = (count as f64 / page.size as f64).ceil() as u64;
156 | 
157 |             let res = query
158 |                 .into_partial_model()
159 |                 .paginate(db, page.size)
160 |                 .fetch_page(page.num)
161 |                 .await?;
162 | 
163 |             Ok((res, Pages::Required(page_count)))
164 |         }
165 | 
166 |         None => {
167 |             let res = query.into_partial_model().all(db).await?;
168 |             Ok((res, Pages::NotRequired))
169 |         }
170 |     }
171 | }
172 | 
--------------------------------------------------------------------------------
/src/data.rs:
--------------------------------------------------------------------------------
1 | pub mod dataset;
2 | pub mod dataset_builder;
3 | 
4 | use serde::Serialize;
5 | 
6 | #[derive(Debug, Serialize, Clone)]
7 | pub struct Data {
8 |     pub pow: f64,
9 |     pub co2: f64,
10 | }
11 | impl Default for Data {
12 |     fn default() -> Self {
13 |         Data {
14 |             pow: 0_f64,
15 |             co2: 0_f64,
16 |         }
17 |     }
18 | }
19 | impl std::ops::Add<&Data> for Data {
20 |     type Output = Data;
21 | 
22 |     fn add(self, rhs: &Data) -> Data {
23 |         Data {
24 |             pow: self.pow + rhs.pow,
25 |             co2: self.co2 + rhs.co2,
26 |         }
27 |     }
28 | }
29 | impl std::ops::Add for Data {
30 |     type Output = Data;
31 | 
32 |     fn add(self, rhs: Data) -> Data {
33 |         Data {
34 |             pow: self.pow + rhs.pow,
35 |             co2: self.co2 + rhs.co2,
36 |         }
37 |     }
38 | }
39 | impl Data {
40 |     pub fn sum(data: &[&Data]) -> Self {
41 |         data.iter().fold(Data::default(), |acc, item| acc + *item)
42 |     }
43 | 
44 |     pub fn mean(data: &[&Data]) -> Self {
45 |         let len = data.len() as f64;
46 |         let mut data = data.iter().fold(Data::default(), |acc, item| acc + *item);
47 | 
48 |         data.pow /= len;
49 |         data.co2 /= len;
50 | 
51 |         data
52 |     }
53 | }
54 | 
55 | #[derive(Debug, Serialize, Clone)]
56 | pub struct ProcessMetrics {
57 |     pub proc_id: String,
58 |     pub timestamp: i64,
59 |     pub cpu_usage: f64,
60 | }
61 | 
62 | #[derive(Debug, Serialize)]
63 | pub struct ProcessData {
64 |     pub process_id: String,
65 |     pub data: Data,
66 |     pub pow_perc: f64,
67 |     pub iteration_metrics: Vec<Vec<ProcessMetrics>>,
68 | }
69 | 
70 | #[derive(Debug, Serialize)]
71 | pub struct RunData {
72 |     pub run_id: i32,
73 |     pub region: Option<String>,
74 |     pub ci: f64,
75 |     pub start_time: i64,
76 |     pub stop_time: i64,
77 |     pub data: Data,
78 |     pub process_data: Vec<ProcessData>,
79 | }
80 | impl RunData {
81 |     pub fn duration(&self) -> f64 {
82 |         (self.stop_time - self.start_time) as f64 / 1000.0
83 |     }
84 | }
85 | 
86 | #[derive(Debug, Serialize)]
87 | pub struct ScenarioData {
88 |     pub scenario_name: String,
89 |     pub data: Data,
90 |     pub run_data: Vec<RunData>,
91 |     pub trend: f64,
92 | }
93 | 
--------------------------------------------------------------------------------
/src/data/dataset.rs:
--------------------------------------------------------------------------------
1 | use crate::{
2 |     config::Power,
3 |     dao::{self, pagination::Pages},
4 |     data::Data,
5 |     entities::{self, iteration::Model as Iteration, metrics::Model as Metrics},
6 | };
7 | use anyhow::Context;
8 | use itertools::Itertools;
9 | use sea_orm::{DatabaseConnection, ModelTrait};
10 | use std::collections::HashMap;
11 | 
12 | use super::{ProcessData, ProcessMetrics, RunData, ScenarioData};
13 | 
14 | pub enum AggregationMethod {
15 |     MostRecent,
16 |     Average,
17 |     Sum,
18 | }
19 | 
20 | pub enum LiveDataFilter {
21 |     IncludeLive,
22 |     ExcludeLive,
23 |     OnlyLive,
24 | }
25 | 
26 | /// Associates a single ScenarioIteration with all the metrics captured for it.
27 | #[derive(Debug)]
28 | pub struct IterationMetrics {
29 |     iteration: Iteration,
30 |     metrics: Vec<Metrics>,
31 | }
32 | impl IterationMetrics {
33 |     pub fn new(iteration: Iteration, metrics: Vec<Metrics>) -> Self {
34 |         Self { iteration, metrics }
35 |     }
36 | 
37 |     pub fn iteration(&self) -> &Iteration {
38 |         &self.iteration
39 |     }
40 | 
41 |     pub fn metrics(&self) -> &[Metrics] {
42 |         &self.metrics
43 |     }
44 | 
45 |     pub fn by_process(&self) -> HashMap<String, Vec<&Metrics>> {
46 |         let mut metrics_by_process: HashMap<String, Vec<&Metrics>> = HashMap::new();
47 |         for metric in self.metrics.iter() {
48 |             let proc_name = metric.process_name.clone();
49 |             metrics_by_process
50 |                 .entry(proc_name)
51 |                 .and_modify(|v| v.push(metric))
52 |                 .or_insert(vec![metric]); // if entry doesn't exist then create a new vec
53 |         }
54 | 
55 |         metrics_by_process
56 |     }
57 | }
58 | 
59 | /// Data in cardamon is organised as a table. Each row is a scenario and each column is a run
60 | /// of that scenario.
61 | ///
62 | /// Example: Dataset containing the most recent 3 runs of 3 different scenarios.
63 | /// ============================================
64 | /// ||   scenarios  || run_1 | run_2 | run_3 ||
65 | /// ||--------------||--------------------------||
66 | /// || add_10_items ||       |       |       ||
67 | /// || add_10_users ||       |       |       ||
68 | /// || checkout     ||       |       |       ||
69 | /// ============================================
70 | ///
71 | /// Example: Dataset containing the 2nd page of runs for the `add_10_items` scenario.
72 | /// ================================================================================
73 | /// ||   scenarios  || run_1 | run_2 | run_3 | run_4 | run_5 | run_6 ||
74 | /// ||--------------||--------|--------|--------|-----------|-----------|-----------||
75 | /// ||              ||        |        |        | ********************************* ||
76 | /// || add_10_items ||        |        |        | *     |           |             * ||
77 | /// ||              ||        |        |        | ********************************* ||
78 | /// ================================================================================
79 | ///
80 | #[derive(Debug)]
81 | pub struct Dataset {
82 |     data: Vec<IterationMetrics>,
83 |     pub total_scenarios: Pages,
84 |     pub total_runs: Pages,
85 | }
86 | impl<'a> Dataset {
87 |     pub fn new(data: Vec<IterationMetrics>, total_scenarios: Pages, total_runs: Pages) -> Self {
88 |         Self {
89 |             data,
90 |             total_scenarios,
91 |             total_runs,
92 |         }
93 |     }
94 | 
95 |     pub fn data(&'a self) -> &'a [IterationMetrics] {
96 |         &self.data
97 |     }
98 | 
99 |     pub fn is_empty(&'a self) -> bool {
100 |         self.data.is_empty()
101 |     }
102 | 
103 |     pub fn by_scenario(&'a self, live_data_filter: LiveDataFilter) -> Vec<ScenarioDataset<'a>> {
104 |         // get all the scenarios in the dataset
105 |         let unique_scenario_names = self
106 |             .data
107 |             .iter()
108 |             // .sorted_by(|a, b| b.iteration.start_time.cmp(&a.iteration.start_time))
109 |             .map(|x| &x.iteration.scenario_name)
110 |             .unique();
111 | 
112 |         let scenario_names = match live_data_filter {
113 |             LiveDataFilter::IncludeLive => unique_scenario_names.collect_vec(),
114 |             LiveDataFilter::ExcludeLive => unique_scenario_names
115 |                 .filter(|name| !name.starts_with("live"))
116 |                 .collect_vec(),
117 |             LiveDataFilter::OnlyLive => unique_scenario_names
118 |                 .filter(|name| name.starts_with("live"))
119 |                 .collect_vec(),
120 |         };
121 | 
122 |         scenario_names
123 |             .into_iter()
124 |             .map(|scenario_name| {
125 |                 let data = self
126 |                     .data
127 |                     .iter()
128 |                     .filter(|x| &x.iteration.scenario_name == scenario_name)
129 |                     .collect::<Vec<_>>();
130 | 
131 |                 ScenarioDataset {
132 |                     scenario_name,
133 |                     data,
134 |                 }
135 |             })
136 |             .collect::<Vec<_>>()
137 |     }
138 | }
139 | 
140 | /// Dataset containing data associated with a single scenario but potentially containing data
141 | /// taken from multiple cardamon runs.
142 | ///
143 | /// Guaranteed to contain only data associated with a single scenario.
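///
/// # Example (sketch)
///
/// ```ignore
/// for scenario_dataset in dataset.by_scenario(LiveDataFilter::ExcludeLive) {
///     println!("scenario: {}", scenario_dataset.scenario_name());
/// }
/// ```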
144 | #[derive(Debug)]
145 | pub struct ScenarioDataset<'a> {
146 |     scenario_name: &'a str,
147 |     data: Vec<&'a IterationMetrics>,
148 | }
149 | impl<'a> ScenarioDataset<'a> {
150 |     pub fn scenario_name(&'a self) -> &'a str {
151 |         self.scenario_name
152 |     }
153 | 
154 |     pub fn data(&'a self) -> &'a [&'a IterationMetrics] {
155 |         &self.data
156 |     }
157 | 
158 |     pub fn by_run(&'a self) -> Vec<ScenarioRunDataset<'a>> {
159 |         let runs = self
160 |             .data
161 |             .iter()
162 |             .sorted_by(|a, b| b.iteration.start_time.cmp(&a.iteration.start_time))
163 |             .map(|x| &x.iteration.run_id)
164 |             .unique()
165 |             .collect::<Vec<_>>();
166 | 
167 |         runs.into_iter()
168 |             .map(|run_id| {
169 |                 let data = self
170 |                     .data
171 |                     .iter()
172 |                     .filter(|x| &x.iteration.run_id == run_id)
173 |                     .cloned()
174 |                     .collect::<Vec<_>>();
175 | 
176 |                 ScenarioRunDataset {
177 |                     scenario_name: self.scenario_name,
178 |                     run_id: *run_id,
179 |                     data,
180 |                 }
181 |             })
182 |             .collect::<Vec<_>>()
183 |     }
184 | 
185 |     pub async fn apply_model(
186 |         &'a self,
187 |         db: &DatabaseConnection,
188 |         model: &impl Fn(&Vec<&Metrics>, &Power, f64) -> Data,
189 |         aggregation_method: AggregationMethod,
190 |     ) -> anyhow::Result<ScenarioData> {
191 |         let mut all_run_data = vec![];
192 |         for scenario_run_dataset in self.by_run() {
193 |             let run_data = scenario_run_dataset.apply_model(db, model).await?;
194 |             all_run_data.push(run_data);
195 |         }
196 | 
197 |         // use the aggregation method to calculate the data for this scenario
198 |         let data = match aggregation_method {
199 |             AggregationMethod::MostRecent => all_run_data.first().context("no data!")?.data.clone(),
200 | 
201 |             AggregationMethod::Average => Data::mean(
202 |                 &all_run_data
203 |                     .iter()
204 |                     .map(|run_data| &run_data.data)
205 |                     .collect_vec(),
206 |             ),
207 | 
208 |             AggregationMethod::Sum => Data::sum(
209 |                 &all_run_data
210 |                     .iter()
211 |                     .map(|run_data| &run_data.data)
212 |                     .collect_vec(),
213 |             ),
214 |         };
215 | 
216 |         // calculate trend (iterating from 1 avoids an integer underflow when there are no runs)
217 |         let mut delta_sum = 0_f64;
218 |         let mut delta_sum_abs = 0_f64;
219 |         for i in 1..all_run_data.len() {
220 |             let delta = all_run_data[i].data.pow - all_run_data[i - 1].data.pow;
221 |             delta_sum += delta;
222 |             delta_sum_abs += delta.abs();
223 |         }
224 | 
225 |         Ok(ScenarioData {
226 |             scenario_name: self.scenario_name.to_string(),
227 |             data,
228 |             run_data: all_run_data,
229 |             trend: if delta_sum_abs != 0_f64 {
230 |                 delta_sum / delta_sum_abs
231 |             } else {
232 |                 0_f64
233 |             },
234 |         })
235 |     }
236 | }
237 | 
238 | /// Dataset containing data associated with a single scenario in a single cardamon run but
239 | /// potentially containing data taken from multiple scenario iterations.
240 | ///
241 | /// Guaranteed to contain only data associated with a single scenario and cardamon run.
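///
/// # Example (sketch)
///
/// ```ignore
/// for scenario_run_dataset in scenario_dataset.by_run() {
///     println!("run: {}", scenario_run_dataset.run_id());
/// }
/// ```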
242 | #[derive(Debug)]
243 | pub struct ScenarioRunDataset<'a> {
244 |     scenario_name: &'a str,
245 |     run_id: i32,
246 |     data: Vec<&'a IterationMetrics>,
247 | }
248 | impl<'a> ScenarioRunDataset<'a> {
249 |     pub fn scenario_name(&'a self) -> &'a str {
250 |         self.scenario_name
251 |     }
252 | 
253 |     pub fn run_id(&'a self) -> i32 {
254 |         self.run_id
255 |     }
256 | 
257 |     pub fn data(&'a self) -> &'a [&'a IterationMetrics] {
258 |         &self.data
259 |     }
260 | 
261 |     pub fn by_iteration(&'a self) -> ScenarioRunIterationDataset<'a> {
262 |         &self.data
263 |     }
264 | 
265 |     pub async fn apply_model(
266 |         &'a self,
267 |         db: &DatabaseConnection,
268 |         model: &impl Fn(&Vec<&Metrics>, &Power, f64) -> Data,
269 |     ) -> anyhow::Result<RunData> {
270 |         let run = dao::run::fetch(self.run_id, db).await?;
271 |         let cpu = run
272 |             .find_related(entities::cpu::Entity)
273 |             .one(db)
274 |             .await?
275 |             .context("Run is missing CPU!")?;
276 |         let power = cpu
277 |             .find_related(entities::power_curve::Entity)
278 |             .one(db)
279 |             .await?
280 |             .map(|power| {
281 |                 Power::Curve(
282 |                     power.a as f64,
283 |                     power.b as f64,
284 |                     power.c as f64,
285 |                     power.d as f64,
286 |                 )
287 |             })
288 |             .or(cpu.tdp.map(|tdp| Power::Tdp(tdp as f64)))
289 |             .context("Run is missing CPU or CPU is missing power")?;
290 | 
291 |         let start_time = run.start_time;
292 |         let stop_time = run.stop_time;
293 | 
294 |         // build up process map
295 |         // proc_id | data & metrics per iteration for proc per iteration
296 |         // =======================================
297 |         // proc_id -> [<(data, metrics)>, <(data, metrics)>] <- 2 iterations
298 |         // proc_id -> [<(data, metrics)>, <(data, metrics)>] <- 2 iterations
299 |         let mut proc_iteration_data_map: HashMap<String, (Vec<Data>, Vec<Vec<ProcessMetrics>>)> =
300 |             HashMap::new();
301 |         for scenario_run_iteration_dataset in self.by_iteration() {
302 |             for (proc_id, metrics) in scenario_run_iteration_dataset.by_process() {
303 |                 // run the RAB model to get power and co2 emissions
304 |                 let cardamon_data = model(&metrics, &power, run.carbon_intensity);
305 | 
306 |                 // convert the metrics database model into metrics data
307 |                 let proc_metrics = metrics
308 |                     .iter()
309 |                     .map(|metrics| ProcessMetrics {
310 |                         proc_id: proc_id.clone(),
311 |                         timestamp: metrics.time_stamp,
312 |                         cpu_usage: metrics.cpu_usage,
313 |                     })
314 |                     .collect_vec();
315 | 
316 |                 // if the key already exists in the map then append cardamon_data to the end of the
317 |                 // iteration data vector for that key, else create a new vector for that key.
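                // (a get_mut/insert pair is used here rather than the HashMap entry
                // API; the behaviour is the same, at the cost of a second lookup)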
                let data_vec = match proc_iteration_data_map.get_mut(&proc_id) {
                    Some((proc_data, iteration_metrics)) => {
                        let mut data = vec![];
                        data.append(proc_data);
                        data.push(cardamon_data);

                        let mut metrics = vec![];
                        metrics.append(iteration_metrics);
                        metrics.push(proc_metrics);

                        (data, metrics)
                    }

                    None => (vec![cardamon_data], vec![proc_metrics]),
                };
                proc_iteration_data_map.insert(proc_id.to_string(), data_vec);
            }
        }

        // average data for each process across all iterations
        let proc_data_map: HashMap<String, (Data, Vec<Vec<ProcessMetrics>>)> =
            proc_iteration_data_map
                .into_iter()
                .map(|(k, (data, metrics))| {
                    (
                        k.to_string(),
                        (Data::mean(&data.iter().collect_vec()), metrics),
                    )
                })
                .collect();

        // calculate total run data (pow + co2)
        let total_run_data = Data::sum(&proc_data_map.values().map(|(data, _)| data).collect_vec());

        // convert proc_data_map to vector of ProcessData
        let process_data = proc_data_map
            .into_iter()
            .map(|(process_id, (data, iteration_metrics))| ProcessData {
                process_id,
                pow_perc: data.pow / total_run_data.pow,
                data,
                iteration_metrics,
            })
            .collect_vec();

        Ok(RunData {
            run_id: self.run_id,
            region: run.region,
            ci: run.carbon_intensity,
            start_time,
            stop_time,
            data: total_run_data,
            process_data,
        })
    }
}

type ScenarioRunIterationDataset<'a> = &'a [&'a IterationMetrics];

#[cfg(test)]
mod tests {
    use itertools::Itertools;

    use crate::{
        data::{dataset::LiveDataFilter, dataset_builder::DatasetBuilder},
        db_connect, db_migrate,
        tests::setup_fixtures,
    };

    #[tokio::test]
    async fn dataset_builder_should_build_a_correct_dataset() -> anyhow::Result<()> {
        let db = db_connect("sqlite::memory:", None).await?;
        db_migrate(&db).await?;
        setup_fixtures(
            &[
                "./fixtures/power_curves.sql",
                "./fixtures/cpus.sql",
                "./fixtures/runs.sql",
                "./fixtures/iterations.sql",
                "./fixtures/metrics.sql",
            ],
            &db,
        )
        .await?;

        let dataset = DatasetBuilder::new()
            .scenarios_all()
            .all()
            .last_n_runs(3)
            .all()
            .build(&db)
            .await?;

        assert_eq!(dataset.data.len(), 14);

        Ok(())
    }

    #[tokio::test]
    async fn dataset_can_be_broken_down_to_scenario_datasets() -> anyhow::Result<()> {
        let db = db_connect("sqlite::memory:", None).await?;
        db_migrate(&db).await?;
        setup_fixtures(
            &[
                "./fixtures/power_curves.sql",
                "./fixtures/cpus.sql",
                "./fixtures/runs.sql",
                "./fixtures/iterations.sql",
                "./fixtures/metrics.sql",
            ],
            &db,
        )
        .await?;

        let dataset = DatasetBuilder::new()
            .scenarios_all()
            .all()
            .last_n_runs(3)
            .all()
            .build(&db)
            .await?;

        let scenario_datasets = dataset.by_scenario(LiveDataFilter::ExcludeLive);
        assert_eq!(scenario_datasets.len(), 3);

        // make sure the scenario names are correct
        let scenario_names = scenario_datasets
            .iter()
            .map(|ds| ds.scenario_name)
            .collect::<Vec<_>>();
        assert_eq!(
            vec!["scenario_3", "scenario_2", "scenario_1"],
            scenario_names
        );

        // make sure the data in the datasets are correct
        for scenario_dataset in scenario_datasets {
            match scenario_dataset.scenario_name {
                "scenario_1" => {
                    assert_eq!(scenario_dataset.data.len(), 1);
                    assert!(
                        scenario_dataset
                            .data
                            .iter()
                            .flat_map(|x| &x.metrics)
                            .collect_vec()
                            .len()
                            == 10
                    );
                }

                "scenario_2" => {
                    assert_eq!(scenario_dataset.data.len(), 4);
                    assert!(
                        scenario_dataset
                            .data
                            .iter()
                            .flat_map(|x| &x.metrics)
                            .collect_vec()
                            .len()
                            == 40
                    );
                }

                "scenario_3" => {
                    assert_eq!(scenario_dataset.data.len(), 9);
                    assert!(
                        scenario_dataset
                            .data
                            .iter()
                            .flat_map(|x| &x.metrics)
                            .collect_vec()
                            .len()
                            == 90
                    );
                }

                _ => panic!("Unknown scenario in dataset"),
            }
        }

        Ok(())
    }

    #[tokio::test]
    async fn scenario_dataset_can_be_broken_down_to_scenario_run_datasets() -> anyhow::Result<()> {
        let db = db_connect("sqlite::memory:", None).await?;
        db_migrate(&db).await?;
        setup_fixtures(
            &[
                "./fixtures/power_curves.sql",
                "./fixtures/cpus.sql",
                "./fixtures/runs.sql",
                "./fixtures/iterations.sql",
                "./fixtures/metrics.sql",
            ],
            &db,
        )
        .await?;

        let dataset = DatasetBuilder::new()
            .scenarios_all()
            .all()
            .last_n_runs(3)
            .all()
            .build(&db)
            .await?;

        for scenario_dataset in dataset.by_scenario(LiveDataFilter::ExcludeLive) {
            let scenario_run_datasets = scenario_dataset.by_run();

            match scenario_dataset.scenario_name {
                "scenario_1" => {
                    assert_eq!(scenario_run_datasets.len(), 1);
                    let run_ids = scenario_run_datasets
                        .iter()
                        .map(|ds| ds.run_id)
                        .collect::<Vec<_>>();
                    assert_eq!(vec![1], run_ids);
                }

                "scenario_2" => {
                    assert_eq!(scenario_run_datasets.len(), 2);
                    let run_ids = scenario_run_datasets
                        .iter()
                        .map(|ds| ds.run_id)
                        .collect::<Vec<_>>();
                    assert_eq!(vec![2, 1], run_ids);
                }

                "scenario_3" => {
                    assert_eq!(scenario_run_datasets.len(), 3);
                    let run_ids = scenario_run_datasets
                        .iter()
                        .map(|ds| ds.run_id)
                        .collect::<Vec<_>>();
                    assert_eq!(vec![3, 2, 1], run_ids);
                }

                _ => panic!("unknown scenario in dataset!"),
            }
        }

        Ok(())
    }

    #[tokio::test]
    async fn scenario_run_dataset_can_be_broken_down_to_scenario_run_iteration_datasets(
    ) -> anyhow::Result<()> {
        let db = db_connect("sqlite::memory:", None).await?;
        db_migrate(&db).await?;
        setup_fixtures(
            &[
                "./fixtures/power_curves.sql",
                "./fixtures/cpus.sql",
                "./fixtures/runs.sql",
                "./fixtures/iterations.sql",
                "./fixtures/metrics.sql",
            ],
            &db,
        )
        .await?;

        let dataset = DatasetBuilder::new()
            .scenarios_all()
            .all()
            .last_n_runs(3)
            .all()
            .build(&db)
            .await?;

        for scenario_dataset in dataset.by_scenario(LiveDataFilter::ExcludeLive) {
            for scenario_run_dataset in scenario_dataset.by_run() {
                let scenario_run_iteration_datasets = scenario_run_dataset.by_iteration();

                match scenario_dataset.scenario_name {
                    "scenario_1" => {
                        assert_eq!(scenario_run_iteration_datasets.len(), 1);
                        let it_ids = scenario_run_iteration_datasets
                            .iter()
                            .map(|ds| ds.iteration.count)
                            .collect::<Vec<_>>();
                        assert_eq!(vec![1], it_ids);
                    }

                    "scenario_2" => {
                        assert_eq!(scenario_run_iteration_datasets.len(), 2);
                        let it_ids = scenario_run_iteration_datasets
                            .iter()
                            .map(|ds| ds.iteration.count)
                            .collect::<Vec<_>>();
                        assert_eq!(vec![2, 1], it_ids);
                    }

                    "scenario_3" => {
                        assert_eq!(scenario_run_iteration_datasets.len(), 3);
                        let it_ids = scenario_run_iteration_datasets
                            .iter()
                            .map(|ds| ds.iteration.count)
                            .collect::<Vec<_>>();
                        assert_eq!(vec![3, 2, 1], it_ids);
                    }

                    _ => panic!("unknown scenario in dataset!"),
                }
            }
        }

        Ok(())
    }
}
--------------------------------------------------------------------------------
/src/entities/cpu.rs:
--------------------------------------------------------------------------------
//! `SeaORM` Entity, @generated by sea-orm-codegen 1.1.0

use sea_orm::entity::prelude::*;

#[derive(Clone, Debug, PartialEq, DeriveEntityModel)]
#[sea_orm(table_name = "cpu")]
pub struct Model {
    #[sea_orm(primary_key)]
    pub id: i32,
    pub name: String,
    #[sea_orm(column_type = "Double", nullable)]
    pub tdp: Option<f64>,
    pub power_curve_id: Option<i32>,
}

#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
    #[sea_orm(
        belongs_to = "super::power_curve::Entity",
        from = "Column::PowerCurveId",
        to = "super::power_curve::Column::Id",
        on_update = "NoAction",
        on_delete = "NoAction"
    )]
    PowerCurve,
    #[sea_orm(has_many = "super::run::Entity")]
    Run,
}

impl Related<super::power_curve::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::PowerCurve.def()
    }
}

impl Related<super::run::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::Run.def()
    }
}

impl ActiveModelBehavior for ActiveModel {}
--------------------------------------------------------------------------------
/src/entities/iteration.rs:
--------------------------------------------------------------------------------
//! `SeaORM` Entity, @generated by sea-orm-codegen 1.1.0

use sea_orm::entity::prelude::*;

#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)]
#[sea_orm(table_name = "iteration")]
pub struct Model {
    #[sea_orm(primary_key)]
    pub id: i32,
    pub run_id: i32,
    pub scenario_name: String,
    pub count: i32,
    pub start_time: i64,
    pub stop_time: i64,
}

#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
    #[sea_orm(
        belongs_to = "super::run::Entity",
        from = "Column::RunId",
        to = "super::run::Column::Id",
        on_update = "NoAction",
        on_delete = "NoAction"
    )]
    Run,
}

impl Related<super::run::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::Run.def()
    }
}

impl ActiveModelBehavior for ActiveModel {}
--------------------------------------------------------------------------------
/src/entities/metrics.rs:
--------------------------------------------------------------------------------
//! `SeaORM` Entity, @generated by sea-orm-codegen 1.1.0

use sea_orm::entity::prelude::*;

#[derive(Clone, Debug, PartialEq, DeriveEntityModel)]
#[sea_orm(table_name = "metrics")]
pub struct Model {
    #[sea_orm(primary_key)]
    pub id: i32,
    pub run_id: i32,
    pub process_id: String,
    pub process_name: String,
    #[sea_orm(column_type = "Double")]
    pub cpu_usage: f64,
    #[sea_orm(column_type = "Double")]
    pub cpu_total_usage: f64,
    pub cpu_core_count: i32,
    pub time_stamp: i64,
}

#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
    #[sea_orm(
        belongs_to = "super::run::Entity",
        from = "Column::RunId",
        to = "super::run::Column::Id",
        on_update = "NoAction",
        on_delete = "NoAction"
    )]
    Run,
}

impl Related<super::run::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::Run.def()
    }
}

impl ActiveModelBehavior for ActiveModel {}
--------------------------------------------------------------------------------
/src/entities/mod.rs:
--------------------------------------------------------------------------------
//! `SeaORM` Entity, @generated by sea-orm-codegen 1.1.0

pub mod prelude;

pub mod cpu;
pub mod iteration;
pub mod metrics;
pub mod power_curve;
pub mod run;
--------------------------------------------------------------------------------
/src/entities/power_curve.rs:
--------------------------------------------------------------------------------
//! `SeaORM` Entity, @generated by sea-orm-codegen 1.1.0

use sea_orm::entity::prelude::*;

#[derive(Clone, Debug, PartialEq, DeriveEntityModel)]
#[sea_orm(table_name = "power_curve")]
pub struct Model {
    #[sea_orm(primary_key)]
    pub id: i32,
    #[sea_orm(column_type = "Double")]
    pub a: f64,
    #[sea_orm(column_type = "Double")]
    pub b: f64,
    #[sea_orm(column_type = "Double")]
    pub c: f64,
    #[sea_orm(column_type = "Double")]
    pub d: f64,
}

#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
    #[sea_orm(has_many = "super::cpu::Entity")]
    Cpu,
}

impl Related<super::cpu::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::Cpu.def()
    }
}

impl ActiveModelBehavior for ActiveModel {}
--------------------------------------------------------------------------------
/src/entities/prelude.rs:
--------------------------------------------------------------------------------
//! `SeaORM` Entity, @generated by sea-orm-codegen 1.1.0

pub use super::cpu::Entity as Cpu;
pub use super::iteration::Entity as Iteration;
pub use super::metrics::Entity as Metrics;
pub use super::power_curve::Entity as PowerCurve;
pub use super::run::Entity as Run;
--------------------------------------------------------------------------------
/src/entities/run.rs:
--------------------------------------------------------------------------------
//! `SeaORM` Entity, @generated by sea-orm-codegen 1.1.0

use sea_orm::entity::prelude::*;

#[derive(Clone, Debug, PartialEq, DeriveEntityModel)]
#[sea_orm(table_name = "run")]
pub struct Model {
    #[sea_orm(primary_key)]
    pub id: i32,
    pub is_live: bool,
    pub cpu_id: i32,
    pub start_time: i64,
    pub stop_time: i64,
    pub region: Option<String>,
    #[sea_orm(column_type = "Double")]
    pub carbon_intensity: f64,
}

#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
    #[sea_orm(
        belongs_to = "super::cpu::Entity",
        from = "Column::CpuId",
        to = "super::cpu::Column::Id",
        on_update = "NoAction",
        on_delete = "NoAction"
    )]
    Cpu,
    #[sea_orm(has_many = "super::iteration::Entity")]
    Iteration,
    #[sea_orm(has_many = "super::metrics::Entity")]
    Metrics,
}

impl Related<super::cpu::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::Cpu.def()
    }
}

impl Related<super::iteration::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::Iteration.def()
    }
}

impl Related<super::metrics::Entity> for Entity {
    fn to() -> RelationDef {
        Relation::Metrics.def()
    }
}

impl ActiveModelBehavior for ActiveModel {}
--------------------------------------------------------------------------------
/src/main.rs:
--------------------------------------------------------------------------------
use anyhow::Context;
use cardamon::{
    carbon_intensity::{fetch_ci, fetch_region_code, valid_region_code, GLOBAL_CI},
    cleanup_stdout_stderr,
    config::{self, Config, ExecutionPlan, ProcessToObserve},
    data::{dataset::LiveDataFilter, dataset_builder::DatasetBuilder, Data},
    db_connect, db_migrate, init_config,
    models::rab_model,
    run, server,
};
use chrono::{TimeZone, Utc};
use clap::{Parser, Subcommand};
use colored::Colorize;
use dotenvy::dotenv;
use itertools::Itertools;
use std::{env, path::Path};
use term_table::{row, row::Row, rows, table_cell::*, Table, TableStyle};
use tracing_subscriber::EnvFilter;
// use textplots::{AxisBuilder, Chart, Plot, Shape, TickDisplay, TickDisplayBuilder};

#[derive(Parser, Debug)]
#[command(author = "Oliver Winks (@ohuu), William Kimbell (@seal)", version, about, long_about = None)]
pub struct Cli {
    #[arg(short, long)]
    pub file: Option<String>,

    #[command(subcommand)]
    pub command: Commands,
}

#[derive(Subcommand, Debug)]
pub enum Commands {
    #[command(about = "Runs a single observation")]
    Run {
        #[arg(help = "Please provide an observation name")]
        name: String,

        #[arg(value_name = "REGION", short, long)]
        region: Option<String>,

        #[arg(value_name = "EXTERNAL PIDs", short, long, value_delimiter = ',')]
        pids: Option<Vec<String>>,

        #[arg(
            value_name = "EXTERNAL CONTAINER NAMES",
            short,
            long,
            value_delimiter = ','
        )]
        containers: Option<Vec<String>>,

        #[arg(long)]
        external_only: bool,
    },

    Stats {
        #[arg(
            help = "Please provide a scenario name ('live_' for live monitor data)"
        )]
        scenario_name: Option<String>,

        #[arg(value_name = "NUMBER OF PREVIOUS", short = 'n')]
        previous_runs: Option<u64>,
    },

    #[command(about = "Start the Cardamon UI server")]
    Ui {
        #[arg(short, long)]
        port: Option<u16>,
    },

    #[command(about = "Wizard for creating a cardamon.toml file")]
    Init,
}

fn load_config(file: &Option<String>) -> anyhow::Result<Config> {
    // Initialize config if it exists
    match file {
        Some(path) => {
            println!("> using config {}", path.green());
            config::Config::try_from_path(Path::new(path))
        }
        None => {
            println!("> using config {}", "./cardamon.toml".green());
            config::Config::try_from_path(Path::new("./cardamon.toml"))
        }
    }
}

fn add_external_processes(
    pids: Option<Vec<String>>,
    containers: Option<Vec<String>>,
    exec_plan: &mut ExecutionPlan,
) -> anyhow::Result<()> {
    // add external processes to observe.
    for pid in pids.unwrap_or_default() {
        let pid = pid.parse::<u32>()?;
        println!("> including external process {}", pid.to_string().green());
        exec_plan.observe_external_process(ProcessToObserve::ExternalPid(pid));
    }
    if let Some(container_names) = containers {
        exec_plan.observe_external_process(ProcessToObserve::ExternalContainers(container_names));
    }

    Ok(())
}

async fn get_or_validate_region_code(region_code: Option<String>) -> Option<String> {
    match region_code {
        None => {
            print!("> fetching region from IP address");
            match fetch_region_code().await {
                Err(err) => {
                    println!("\t{}", "✗".red());
                    println!("\t{}", format!("- {}", err).bright_black());
                    None
                }

                Ok(code) => {
                    println!("\t{}", "✓".green());
                    println!(
                        "\t{}",
                        format!("- using region code {}", code).bright_black()
                    );
                    Some(code)
                }
            }
        }

        Some(code) => {
            print!("> validating region code");
            if valid_region_code(&code) {
                println!("\t{}", "✓".green());
                Some(code)
            } else {
                println!("\t{}", "✗".red());
                None
            }
        }
    }
}

async fn get_carbon_intensity(region_code: &Option<String>) -> f64 {
    let now = Utc::now();
    match region_code {
        Some(code) => {
            print!("> fetching carbon intensity for {}", code);
            match fetch_ci(code, &now).await {
                Ok(ci) => {
                    println!("\t{}", "✓".green());
                    println!(
                        "\t{}",
                        format!("- using {:.3} gWh CO2eq", ci).bright_black()
                    );
                    ci
                }

                Err(_) => {
                    println!("\t{}", "✗".red());
                    println!("\t{}", "- using global avg 0.494 gWh CO2eq".bright_black());
                    GLOBAL_CI
                }
            }
        }

        None => {
            print!(
                "> using global avg carbon intensity {} gWh CO2eq",
                "0.494".green()
            );
            GLOBAL_CI
        }
    }
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // read .env file if it exists
    dotenv().ok();

    // Parse clap args
    let args = Cli::parse();

    let log_filter = env::var("LOG_FILTER").unwrap_or("warn".to_string());

    // Set up tracing subscriber
    let subscriber = tracing_subscriber::fmt()
        .with_env_filter(EnvFilter::new(log_filter))
        .with_target(false)
        // .compact()
        .pretty()
        .finish();
    tracing::subscriber::set_global_default(subscriber)?;

    // connect to the database and run migrations
    let database_url =
        env::var("DATABASE_URL").unwrap_or("sqlite://cardamon.db?mode=rwc".to_string());
    let database_name = env::var("DATABASE_NAME").unwrap_or("".to_string());
    let db_conn = db_connect(&database_url, Some(&database_name)).await?;
    db_migrate(&db_conn).await?;

    match args.command {
        Commands::Init => {
            init_config().await;
        }

        Commands::Run {
            name,
            region,
            pids,
            containers,
            external_only,
        } => {
            let config = load_config(&args.file)
                .context("Error loading configuration, please run `cardamon init`")?;

            // get the carbon intensity
            let region_code = get_or_validate_region_code(region).await;
            let ci = get_carbon_intensity(&region_code).await;

            // create an execution plan
            let cpu = config.cpu.clone();
            let mut execution_plan = config.create_execution_plan(cpu, &name, external_only)?;

            // add external processes to observe.
            add_external_processes(pids, containers, &mut execution_plan)?;

            // Cleanup previous runs stdout and stderr
            cleanup_stdout_stderr()?;

            // run it!
            let observation_dataset_rows = run(execution_plan, &region_code, ci, &db_conn).await?;
            let observation_dataset = observation_dataset_rows
                .last_n_runs(5)
                .all()
                .build(&db_conn)
                .await?;

            println!("\n{}", " Summary ".reversed().green());
            for scenario_dataset in observation_dataset
                .by_scenario(LiveDataFilter::ExcludeLive)
                .iter()
            {
                let run_datasets = scenario_dataset.by_run();

                // execute model for current run
                let (head, tail) = run_datasets
                    .split_first()
                    .expect("Dataset does not include recent run.");
                let run_data = head.apply_model(&db_conn, &rab_model).await?;

                // execute model for previous runs and calculate trend
                let mut tail_data = vec![];
                for run_dataset in tail {
                    let run_data = run_dataset.apply_model(&db_conn, &rab_model).await?;
                    tail_data.push(run_data.data);
                }
                let tail_data = Data::mean(&tail_data.iter().collect_vec());
                let trend = run_data.data.pow - tail_data.pow;
                let trend_str = match trend.is_nan() {
                    true => "--".bright_black(),
                    false => {
                        if trend > 0.0 {
                            format!("↑ {:.3}Wh", trend.abs()).red()
                        } else {
                            format!("↓ {:.3}Wh", trend.abs()).green()
                        }
                    }
                };

                println!("{}:", scenario_dataset.scenario_name().to_string().green());

                let table = Table::builder()
                    .rows(rows![
                        row![
                            TableCell::builder("Region").build(),
                            TableCell::builder("Duration (s)".bold()).build(),
                            TableCell::builder("Power (Wh)".bold()).build(),
                            TableCell::builder("CI (gWh)".bold()).build(),
                            TableCell::builder("CO2 (g)".bold()).build(),
                            TableCell::builder(format!("Trend (over {} runs)", tail.len()).bold())
                                .build()
                        ],
                        row![
                            TableCell::new(format!(
                                "{}",
                                run_data.region.clone().unwrap_or_default()
                            )),
                            TableCell::new(format!("{:.3}s", run_data.duration())),
                            TableCell::new(format!("{:.3}Wh", run_data.data.pow)),
                            TableCell::new(format!("{:.3}gWh", run_data.ci)),
                            TableCell::new(format!("{:.3}g", run_data.data.co2)),
                            TableCell::new(trend_str)
                        ]
                    ])
                    .style(TableStyle::rounded())
                    .build();

                println!("{}", table.render())
            }
        }

        Commands::Stats {
            scenario_name,
            previous_runs,
        } => {
            // build dataset
            let dataset_builder = DatasetBuilder::new();
            let dataset_rows = match scenario_name {
                Some(scenario_name) => dataset_builder.scenario(&scenario_name).all(),
                None => dataset_builder.scenarios_all().all(),
            };
            let dataset_cols = match previous_runs {
                Some(n) => dataset_rows.last_n_runs(n).all(),
                None => dataset_rows.runs_all().all(),
            };
            let dataset = dataset_cols.build(&db_conn).await?;

            println!("\n{}", " Cardamon Stats \n".reversed().green());
            if dataset.is_empty() {
                println!("\nno data found!");
            }

            for scenario_dataset in dataset.by_scenario(LiveDataFilter::IncludeLive) {
                println!(
                    "Scenario {}:",
                    scenario_dataset.scenario_name().to_string().green()
                );

                let mut table = Table::builder()
                    .rows(rows![row![
                        TableCell::builder("Datetime (Utc)".bold()).build(),
                        TableCell::builder("Region".bold()).build(),
                        TableCell::builder("Duration (s)".bold()).build(),
                        TableCell::builder("Power (Wh)".bold()).build(),
                        TableCell::builder("CI (gWh)".bold()).build(),
                        TableCell::builder("CO2 (g)".bold()).build()
                    ]])
                    .style(TableStyle::rounded())
                    .build();

                // let mut points: Vec<(f32, f32)> = vec![];
                // let mut run = 0.0;
                for run_dataset in scenario_dataset.by_run() {
                    let run_data = run_dataset.apply_model(&db_conn, &rab_model).await?;
                    let run_region = run_data.region;
                    let run_ci = run_data.ci;
                    let run_start_time = Utc.timestamp_opt(run_data.start_time / 1000, 0).unwrap();
                    let run_duration = (run_data.stop_time - run_data.start_time) as f64 / 1000.0;
                    let _per_min_factor = 60.0 / run_duration;

                    table.add_row(row![
                        TableCell::new(run_start_time.format("%d/%m/%y %H:%M")),
                        TableCell::new(run_region.unwrap_or_default()),
                        TableCell::new(format!("{:.3}s", run_duration)),
                        TableCell::new(format!("{:.4}Wh", run_data.data.pow)),
                        TableCell::new(format!("{:.4}gWh", run_ci)),
                        TableCell::new(format!("{:.4}g", run_data.data.co2)),
                    ]);
                    // points.push((run, run_data.data.pow as f32));
                    // run += 1.0;
                }
                println!("{}", table.render());

                // let x_max = points.len() as f32;
                // let y_data = points.iter().map(|(_, y)| *y);
                // let y_min = y_data.clone().reduce(f32::min).unwrap_or(0.0);
                // let y_max = y_data.clone().reduce(f32::max).unwrap_or(0.0);
                //
                // Chart::new_with_y_range(128, 64, 0.0, x_max, y_min, y_max)
                //     .x_axis_style(textplots::LineStyle::Solid)
                //     .y_tick_display(TickDisplay::Sparse)
                //     .lineplot(&Shape::Lines(&points))
                //     .nice();
            }
        }

        Commands::Ui { port } => {
            let port = port.unwrap_or(1337);
            server::start(port, &db_conn).await?
        }
    }

    Ok(())
}
--------------------------------------------------------------------------------
/src/metrics.rs:
--------------------------------------------------------------------------------
use crate::entities::metrics;
use anyhow::anyhow;
use sea_orm::*;

#[derive(Debug)]
pub struct MetricsLog {
    log: Vec<CpuMetrics>,
    err: Vec<anyhow::Error>,
}
impl MetricsLog {
    pub fn new() -> Self {
        Self {
            log: vec![],
            err: vec![],
        }
    }

    pub fn push_metrics(&mut self, metrics: CpuMetrics) {
        self.log.push(metrics);
    }

    pub fn push_error(&mut self, err: anyhow::Error) {
        self.err.push(err);
    }

    pub fn get_metrics(&self) -> &Vec<CpuMetrics> {
        &self.log
    }

    pub fn get_errors(&self) -> &Vec<anyhow::Error> {
        &self.err
    }

    pub fn has_errors(&self) -> bool {
        !self.err.is_empty()
    }

    pub fn clear(&mut self) {
        self.log.clear();
    }

    pub async fn save(&self, run_id: i32, db: &DatabaseConnection) -> anyhow::Result<()> {
        // if metrics log contains errors then display them to the user and don't save anything
        if self.has_errors() {
            // log all the errors
            for err in &self.err {
                tracing::error!("{}", err);
            }
            return Err(anyhow!("Metric log contained errors, please see logs."));
        }

        for metrics in &self.log {
            metrics.into_active_model(run_id).save(db).await?;
        }

        Ok(())
    }
}
impl Default for MetricsLog {
    fn default() -> Self {
        Self::new()
    }
}

#[derive(Debug)]
pub struct CpuMetrics {
    pub process_id: String,
    pub process_name: String,
    pub cpu_usage: f64,
    pub core_count: i32,
    pub timestamp: i64,
}
impl CpuMetrics {
    pub fn into_active_model(&self, run_id: i32) -> metrics::ActiveModel {
        metrics::ActiveModel {
            id: ActiveValue::NotSet,
            run_id: ActiveValue::Set(run_id),
            process_id: ActiveValue::Set(self.process_id.clone()),
            process_name: ActiveValue::Set(self.process_name.clone()),
            cpu_usage: ActiveValue::Set(self.cpu_usage),
            cpu_total_usage: ActiveValue::Set(0_f64),
            cpu_core_count: ActiveValue::Set(self.core_count),
            time_stamp: ActiveValue::Set(self.timestamp),
        }
    }
}
--------------------------------------------------------------------------------
/src/metrics_logger.rs:
--------------------------------------------------------------------------------
pub mod bare_metal;
pub mod docker;

use crate::{metrics::MetricsLog, ProcessToObserve};
use std::sync::{Arc, Mutex};
use tokio::task::JoinSet;
use tokio_util::sync::CancellationToken;

pub struct StopHandle {
    pub token: CancellationToken,
    pub join_set: JoinSet<()>,
    pub shared_metrics_log: Arc<Mutex<MetricsLog>>,
}
impl StopHandle {
    fn new(
        token: CancellationToken,
        join_set: JoinSet<()>,
        shared_metrics_log: Arc<Mutex<MetricsLog>>,
    ) -> Self {
        Self {
            token,
            join_set,
            shared_metrics_log,
        }
    }

    pub async fn stop(mut self) -> anyhow::Result<MetricsLog> {
        // cancel loggers
        self.token.cancel();
        loop {
            if self.join_set.join_next().await.is_none() {
                break;
            }
        }

        // take ownership of metrics log
        let metrics_log = Arc::try_unwrap(self.shared_metrics_log)
            .expect("Mutex guarding metrics_log shouldn't have multiple owners!")
            .into_inner()
            .expect("Should be able to take ownership of metrics_log");

        // return error if metrics log contains any errors
        if metrics_log.has_errors() {
            return Err(anyhow::anyhow!(
                "Metrics log contains errors, please check trace"
            ));
        }

        Ok(metrics_log)
    }
}

/// Logs a single scenario run
///
/// # Arguments
///
/// * `processes` - The processes you wish to observe during the scenario run
///
/// # Returns
///
/// A `Result` containing the metrics log for the given scenario or an `Error` if either
/// the scenario failed to complete successfully or any of the loggers contained errors.
pub fn start_logging(processes_to_observe: Vec<ProcessToObserve>) -> anyhow::Result<StopHandle> {
    let metrics_log = MetricsLog::new();
    let metrics_log_mutex = Mutex::new(metrics_log);
    let shared_metrics_log = Arc::new(metrics_log_mutex);

    // split processes into bare metal & docker processes
    let mut a: Vec<ProcessToObserve> = vec![];
    let mut b: Vec<ProcessToObserve> = vec![];
    for proc in processes_to_observe {
        match proc {
            p @ ProcessToObserve::ExternalPid(_) => a.push(p.clone()),
            p @ ProcessToObserve::ExternalContainers(_) => b.push(p.clone()),

            p @ ProcessToObserve::ManagedPid {
                process_name: _,
                pid: _,
                down: _,
            } => a.push(p.clone()),
            p @ ProcessToObserve::ManagedContainers {
                process_name: _,
                container_names: _,
                down: _,
            } => b.push(p.clone()),
        }
    }

    // create a new cancellation token
    let token = CancellationToken::new();

    // start threads to collect metrics
    let mut join_set = JoinSet::new();
    if !a.is_empty() {
        let token = token.clone();
        let shared_metrics_log = shared_metrics_log.clone();
        tracing::debug!("Spawning bare metal thread");
        join_set.spawn(async move {
            tracing::info!("Logging PIDs: {:?}", a);
            tokio::select! {
                _ = token.cancelled() => {}
                _ = bare_metal::keep_logging(
                    a,
                    shared_metrics_log,
                ) => {}
            }
        });
    }

    if !b.is_empty() {
        let token = token.clone();
        let shared_metrics_log = shared_metrics_log.clone();

        join_set.spawn(async move {
            tracing::info!("Logging containers: {:?}", b);
            tokio::select! {
                _ = token.cancelled() => {}
                _ = docker::keep_logging(
                    b,
                    shared_metrics_log,
                ) => {}
            }
        });
    }

    Ok(StopHandle::new(token, join_set, shared_metrics_log))
}
--------------------------------------------------------------------------------
/src/metrics_logger/bare_metal.rs:
--------------------------------------------------------------------------------
use crate::{
    config::ProcessToObserve,
    metrics::{CpuMetrics, MetricsLog},
};
use chrono::Utc;
use std::{
    ops::Deref,
    sync::{Arc, Mutex},
};
use sysinfo::{Pid, System};
use tokio::time::Duration;
use tracing::trace;

/// Enters an infinite loop logging metrics for each process to the metrics log. This function is
/// intended to be called from `metrics_logger::log_scenario` or `metrics_logger::log_live`
///
/// **WARNING**
///
/// This function should only be called from within a task that can execute it on another thread
/// otherwise it will block the main thread completely.
///
/// # Arguments
///
/// * `pids` - The process ids to observe
/// * `metrics_log` - A log of all observed metrics. Another thread should periodically save and
///   flush this shared log.
///
/// # Returns
///
/// This function does not return, it requires that its thread is cancelled.
pub async fn keep_logging(
    processes_to_observe: Vec<ProcessToObserve>,
    metrics_log: Arc<Mutex<MetricsLog>>,
) -> anyhow::Result<()> {
    let mut system = System::new_all();

    loop {
        tokio::time::sleep(Duration::from_millis(1000)).await;
        system.refresh_all();
        for process_to_observe in processes_to_observe.iter() {
            match process_to_observe {
                ProcessToObserve::ExternalPid(pid) => {
                    let metrics = get_metrics(&mut system, *pid).await?;
                    update_metrics_log(metrics, &metrics_log);
                }

                ProcessToObserve::ManagedPid {
                    process_name,
                    pid,
                    down: _,
                } => {
                    let mut metrics = get_metrics(&mut system, *pid).await?;
                    metrics.process_name = process_name.clone();
                    update_metrics_log(metrics, &metrics_log);
                }

                _ => panic!(),
            }
        }
    }
}

fn update_metrics_log(metrics: CpuMetrics, metrics_log: &Arc<Mutex<MetricsLog>>) {
    metrics_log
        .lock()
        .expect("Should be able to acquire lock on metrics log")
        .push_metrics(metrics);
}

async fn get_metrics(system: &mut System, pid: u32) -> anyhow::Result<CpuMetrics> {
    if let Some(process) = system.process(Pid::from_u32(pid)) {
        let core_count = num_cpus::get_physical() as i32;

        // Cores can be 0, or system can be wrong, therefore divide here
        let cpu_usage = process.cpu_usage() as f64 / 100.0;
        let timestamp = Utc::now().timestamp_millis();
        // Updated: .name() just gives the short version ("bash" etc);
        // .exe() gives the proper path
        let process_name: String = process
            .exe()
            .map(|path| path.to_string_lossy().into_owned())
            .unwrap_or_else(|| {
                let process_name = process.name().to_os_string();
                let name_str = process_name.to_string_lossy();
                name_str.deref().to_string()
            });

        trace!("[PID {}] cpu_usage: {:?}", process.pid(), cpu_usage);
        let metrics = CpuMetrics {
            process_id: format!("{pid}"),
            process_name,
            cpu_usage,
            core_count,
            timestamp,
        };

        Ok(metrics)
    } else {
        Err(anyhow::anyhow!(format!("process with id {pid} not found")))
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use anyhow::Context;
    use subprocess::Exec;
    use tokio::time::{sleep, Duration};

    #[tokio::test]
    #[cfg(target_family = "windows")]
    async fn metrics_can_be_gathered_using_process_id() -> anyhow::Result<()> {
        // spawn a test process
        let mut proc = Exec::cmd("powershell")
            .arg("-Command")
            .arg(r#"while($true) {get-random | out-null}"#)
            .detached()
            .popen()
            .context("Failed to spawn detached process")?;
        let pid = proc.pid().context("Process should have a pid")?;

        // create a new sysinfo system
        let mut system = System::new_all();

        // gather metrics for a little while
        let mut metrics_log = vec![];
        let iterations = 50;
        for _ in 0..iterations {
            let metrics = get_metrics(&mut system, pid).await?;
            metrics_log.push(metrics);
            sleep(Duration::from_millis(200)).await;
        }
        proc.kill().context("Failed to kill process")?;

        // metrics log should have `iterations` entries
        assert_eq!(metrics_log.len(), iterations);

        // metrics should contain non-zero cpu_usage
        let cpu_usage = metrics_log.iter().fold(0_f64, |acc, metrics| {
            acc + metrics.cpu_usage / metrics.core_count as f64
        }) / iterations as f64;
        println!("{cpu_usage}");
        assert!(cpu_usage > 0_f64);

        Ok(())
    }

    #[tokio::test]
    #[cfg(target_family = "windows")]
    async fn should_return_err_if_wrong_pid() {
        // create a new sysinfo System
        let mut system = System::new_all();

        // find a process id that doesn't exist
        system.refresh_all();

        let mut rand_pid = 1337;
        loop {
            if !system.processes().contains_key(&Pid::from_u32(rand_pid)) {
                break;
            } else {
                rand_pid += 1;
            }
        }

        // attempt to gather metrics
        let res = get_metrics(&mut system, rand_pid).await;
        assert!(res.is_err());
    }

    #[tokio::test]
    #[cfg(target_family = "unix")]
    async fn metrics_can_be_gathered_using_process_id() -> anyhow::Result<()> {
        // spawn a test process

        use subprocess::NullFile;
        let mut proc = Exec::cmd("bash")
            .arg("-c")
            .arg("while true; do shuf -i 0-1337 -n 1; done")
            .detached()
            .stdout(NullFile)
            .popen()
            .context("Failed to spawn detached process")?;
        let pid = proc.pid().context("Process should have a pid")?;

        // create a new sysinfo system
        let mut system = System::new_all();
        system.refresh_all();

        // gather metrics for a little while
        let mut metrics_log = vec![];
        let iterations = 50;
        for _ in 0..iterations {
            let metrics = get_metrics(&mut system, pid).await?;
            metrics_log.push(metrics);
            sleep(Duration::from_millis(200)).await;
        }
        proc.kill().context("Failed to kill process")?;

        // metrics log should have `iterations` entries
        assert_eq!(metrics_log.len(), iterations);

        // metrics should contain non-zero cpu_usage
        let cpu_usage = metrics_log.iter().fold(0_f64, |acc, metrics| {
            acc + metrics.cpu_usage / metrics.core_count as f64
        }) / iterations as f64;
        assert!(cpu_usage > 0_f64);

        Ok(())
    }
}
--------------------------------------------------------------------------------
/src/metrics_logger/docker.rs:
--------------------------------------------------------------------------------
use crate::config::ProcessToObserve;
use crate::metrics::{CpuMetrics, MetricsLog};
use bollard::container::{ListContainersOptions, Stats, StatsOptions};
use bollard::Docker;
use chrono::Utc;
use futures_util::stream::StreamExt;
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use tracing::{debug, error, warn};

/// Enters an infinite loop logging metrics for each process to the metrics log. This function is
/// intended to be called from `metrics_logger::log_scenario` or `metrics_logger::log_live`
///
/// **WARNING**
///
/// This function should only be called from within a task that can execute it on another thread
/// otherwise it will block the main thread completely.
///
/// # Arguments
///
/// * `container_names` - The names of the containers to observe
/// * `metrics_log` - A log of all observed metrics. Another thread should periodically save and
///   flush this shared log.
///
/// # Returns
///
/// This function does not return, it requires that its thread is cancelled.
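///
/// A hedged usage sketch (added for illustration, not part of the original docs); it assumes the
/// caller owns a cancellation token and a shared `MetricsLog`, mirroring how
/// `metrics_logger::start_logging` and the tests below drive this function:
///
/// ```ignore
/// let shared_log = Arc::new(Mutex::new(MetricsLog::new()));
/// let token = CancellationToken::new();
/// tokio::spawn({
///     let (log, token) = (shared_log.clone(), token.clone());
///     async move {
///         // the loop only ends when the token is cancelled
///         tokio::select! {
///             _ = token.cancelled() => {}
///             _ = keep_logging(procs_to_observe, log) => {}
///         }
///     }
/// });
/// ```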
pub async fn keep_logging(
    procs_to_observe: Vec<ProcessToObserve>,
    metrics_log: Arc<Mutex<MetricsLog>>,
) {
    // This connects with system defaults, socket for unix, http for windows
    let docker = match Docker::connect_with_defaults() {
        Ok(docker) => {
            debug!("Successfully connected to Docker");
            docker
        }
        Err(e) => {
            error!("Failed to connect to Docker: {}", e);
            return;
        }
    };

    let mut container_names = vec![];
    for proc_to_observe in procs_to_observe.into_iter() {
        match proc_to_observe {
            ProcessToObserve::ManagedContainers {
                process_name: _,
                container_names: names,
                down: _,
            } => {
                container_names.append(&mut names.clone());
            }

            ProcessToObserve::ExternalContainers(names) => {
                container_names.append(&mut names.clone())
            }

            _ => panic!("wat!"),
        }
    }

    // Only running containers, we re-try in a second if the container is not running yet
    let mut filter = HashMap::new();
    filter.insert(String::from("status"), vec![String::from("running")]);
    filter.insert(String::from("name"), container_names.clone());
    debug!("Listing containers with filter: {:?}", filter);

    let container_list = docker
        .list_containers(Some(ListContainersOptions {
            all: true,
            filters: filter,
            ..Default::default()
        }))
        .await;

    let containers = match container_list {
        Ok(containers) => {
            debug!(
                "Successfully listed containers. Count: {}",
                containers.len()
            );
            containers
        }
        Err(e) => {
            error!("Failed to list containers: {}", e);
            return;
            // tokio::time::sleep(std::time::Duration::from_secs(1)).await;
            // continue;
        }
    };

    // Wait 1s and re-try, this is not an error, containers take a while to spin up
    if containers.is_empty() {
        warn!("No running containers");
        return;
        // tokio::time::sleep(std::time::Duration::from_secs(1)).await;
        // continue;
    }

    loop {
        for container in &containers {
            if let Some(container_id) = container.id.as_ref() {
                let container_name_with_slash = container
                    .names
                    .clone()
                    .and_then(|names| names.first().cloned())
                    .unwrap_or_else(|| "unknown".to_string());
                // Container name "test" appears as "/test" here, so remove the leading slash
                let container_name = &container_name_with_slash[1..container_name_with_slash.len()];

                let docker_stats = docker
                    .stats(
                        container_id,
                        Some(StatsOptions {
                            stream: false,
                            ..Default::default()
                        }),
                    )
                    .next()
                    .await;

                match docker_stats {
                    Some(Ok(stats)) => {
                        let cpu_metrics =
                            calculate_cpu_metrics(container_id, container_name.to_string(), &stats);
                        debug!(
                            "Pushing metrics to metrics log from container name(s) {:?}",
                            container.names
                        );
                        metrics_log.lock().unwrap().push_metrics(cpu_metrics);
                        debug!("Logged metrics for container {}", container_id);
                    }
                    Some(Err(e)) => {
                        error!("Error getting stats for container {}: {}", container_id, e);
                        metrics_log.lock().unwrap().push_error(anyhow::anyhow!(
                            "Error getting stats for container {}: {}",
                            container_id,
                            e
                        ));
                    }
                    None => {
                        error!("No stats received for container {}", container_id);
                    }
                }
            }
        }
    }
}

fn calculate_cpu_metrics(container_id: &str, container_name: String, stats: &Stats) -> CpuMetrics {
    let core_count = stats.cpu_stats.online_cpus.unwrap_or(0);
    let cpu_delta =
        stats.cpu_stats.cpu_usage.total_usage - stats.precpu_stats.cpu_usage.total_usage;
    let system_delta = stats.cpu_stats.system_cpu_usage.unwrap_or(0)
        - stats.precpu_stats.system_cpu_usage.unwrap_or(0);
    let cpu_usage = if system_delta > 0 {
        (cpu_delta as f64 / system_delta as f64) * core_count as f64
    } else {
        0.0
    };
    debug!(
        "Calculated CPU metrics for container {} ({}), cpu percentage: {}",
        container_id, container_name, cpu_usage
    );
    CpuMetrics {
        process_id: container_id.to_string(),
        process_name: container_name,
        cpu_usage,
        core_count: core_count as i32,
        timestamp: Utc::now().timestamp_millis(),
    }
}

pub async fn get_container_status(container_name: &str) -> anyhow::Result<String> {
    let docker = Docker::connect_with_defaults().map_err(|e| {
        error!("Failed to connect to Docker: {}", e);
        anyhow::anyhow!("Failed to connect to Docker: {}", e)
    })?;

    debug!("Successfully connected to Docker");

    let mut filter = HashMap::new();
    filter.insert(String::from("name"), vec![container_name.to_string()]);

    debug!("Listing containers with filter: {:?}", filter);

    let containers = docker
        .list_containers(Some(ListContainersOptions {
            all: true,
            filters: filter,
            ..Default::default()
        }))
        .await
        .map_err(|e| {
            error!("Failed to list containers: {}", e);
            anyhow::anyhow!("Failed to list containers: {}", e)
        })?;

    debug!(
        "Successfully listed containers. Count: {}",
        containers.len()
    );

    if containers.is_empty() {
        return Ok(String::from("not_found"));
    }

    let container = &containers[0];
    let status = container.state.as_deref().unwrap_or("unknown").to_string();
    debug!("Container '{}' status: {}", container_name, status);

    Ok(status)
}

#[cfg(test)]
mod tests {
    use crate::{
        config::ProcessToObserve,
        metrics::{CpuMetrics, MetricsLog},
        metrics_logger::{
            docker::{get_container_status, keep_logging},
            StopHandle,
        },
    };
    use bollard::{
        container::{Config, CreateContainerOptions, RemoveContainerOptions},
        image::{BuildImageOptions, RemoveImageOptions},
        Docker,
    };
    use bytes::Bytes;
    use chrono::Utc;
    use core::time;
    use futures_util::StreamExt;
    use nanoid::nanoid;
    use std::{
        io::Cursor,
        sync::{Arc, Mutex},
    };
    use tar::{Builder, Header};
    use tokio::{task::JoinSet, time::sleep};
    use tokio_util::sync::CancellationToken;

    async fn create_and_start_container(docker: &Docker) -> (String, String, String) {
        // returns (container_id, container_name, image_id)
        // Smallest image I can create that doesn't exit (4.2mb), alpine is 7 ish
        let dockerfile = r#"
            FROM busybox
            CMD ["sleep", "infinity"]
        "#;

        // Bollard has 2 options for creating an image:
        //   1 - Dockerfile from a *remote* url
        //   2 - Dockerfile from a *tar file*
        // We'll create an in-memory tar file and use this.
        // We want the bytes of the tar file for building.
        let tar_bytes = {
            // Create a buffer to hold tar archive data
            let mut tar_buffer = Vec::new();
            // Use a nested block as we want to explicitly end the borrow of tar_buffer by
            // tar_builder
            {
                // Create a builder that'll write to our buffer
                let mut tar_builder = Builder::new(&mut tar_buffer);
                // Gnu format header, set path of file, size & permissions
                let mut header = Header::new_gnu();
                header.set_path("Dockerfile").unwrap();
                header.set_size(dockerfile.len() as u64);
                header.set_mode(0o644);
                header.set_cksum();
                // Append to builder
                tar_builder
                    .append(&header, Cursor::new(dockerfile))
                    .unwrap();
                // Write to tar_buffer
                tar_builder.finish().unwrap();
            }
            // return bytes (wanted by bollard::build_image)
            Bytes::from(tar_buffer)
        };
        // nanoid generates IDs with random characters from A-Za-z plus _ and -;
        // [2..] removes _ and - as these are invalid
        let image_id = nanoid!(10, &nanoid::alphabet::SAFE[2..]).to_lowercase();
        let image_id_latest = format!("{}:latest", image_id);
        // Build the image
        let options = BuildImageOptions {
            dockerfile: "Dockerfile",
            t: &image_id_latest,
            ..Default::default()
        };
        // build image
        let mut build_stream = docker.build_image(options, None, Some(tar_bytes));
        // Docker streams the build process of making an image, meaning you can stop half-way if
        // something is wrong / you want a timeout for example.
        // In this case we want to continue until there's no more
        while let Some(output) = build_stream.next().await {
            output.unwrap();
        }
        // Create and start the container
        let container_name = format!(
            "cardamon-test-container-{}",
            nanoid!(10, &nanoid::alphabet::SAFE[2..]).to_lowercase()
        );
        let container = docker
            .create_container(
                Some(CreateContainerOptions {
                    name: container_name.as_str(),
                    ..Default::default()
                }),
                Config {
                    image: Some(image_id_latest),
                    ..Default::default()
                },
            )
            .await
            .unwrap();

        docker
            .start_container::<String>(&container.id, None)
            .await
            .unwrap();

        (container.id, container_name, image_id)
    }

    async fn cleanup_container(docker: &Docker, container_id: &str, image_id: &str) {
        // CLEANUP
        // We could "stop" the container then "remove" it, but remove + force does this for us
        // (plus it sets the "grace" period docker has to 0, immediately stopping it)
        docker
            .remove_container(
                container_id,
                Some(RemoveContainerOptions {
                    force: true,
                    v: true,
                    link: false,
                }),
            )
            .await
            .unwrap();

        docker
            .remove_image(
                image_id,
                Some(RemoveImageOptions {
                    force: true,
                    noprune: false,
                }),
                None,
            )
            .await
            .unwrap();
    }

    #[test]
    fn test_metrics_log() {
        let mut log = MetricsLog::new();

        let metrics = CpuMetrics {
            process_id: "123".to_string(),
            process_name: "test".to_string(),
            cpu_usage: 50.0,
            core_count: 4,
            timestamp: Utc::now().timestamp_millis(),
        };

        log.push_metrics(metrics);
        assert_eq!(log.get_metrics().len(), 1);

        log.push_error(anyhow::anyhow!("Error here"));
        assert!(log.has_errors());
        assert_eq!(log.get_errors().len(), 1);
    }

    #[tokio::test]
    async fn test_container_status() {
        // Test container status with a tiny container.
        // Connect with system defaults (socket on unix, http on windows)
        let docker = Docker::connect_with_local_defaults().unwrap();
        let (container_id, container_name, image_id) = create_and_start_container(&docker).await;

        // Test get_container_status
        let status = get_container_status(&container_name).await.unwrap();
        assert_eq!(status, "running", "Container should be in 'running' state");
        cleanup_container(&docker, &container_id, &image_id).await;
    }

    #[tokio::test]
    async fn test_keep_logging() {
        // pub async fn keep_logging(container_names: Vec<String>, metrics_log: Arc<Mutex<MetricsLog>>) {
        // Create a metrics log
        let metrics_log = MetricsLog::new();

        // Wrap it in a mutex (enabling lock + unlock, avoiding race conditions)
        let metrics_log_mutex = Mutex::new(metrics_log);

        // Wrap in an Arc (smart pointer enabling shared ownership across threads)
        let shared_metrics_log = Arc::new(metrics_log_mutex);

        // Connect to docker
        let docker = Docker::connect_with_local_defaults().unwrap();

        // Create empty container
        let (container_id, container_name, image_id) = create_and_start_container(&docker).await;

        // Token to "cancel" keep logging
        let token = CancellationToken::new();

        // Allows for joining of multiple tasks; used because we have both bare-metal and docker.
        // This joinset will have 1 item, so normally you wouldn't use one in this case.
        // But this is a test so :shrug:
        let mut join_set = JoinSet::new();

        // Clone these values before moving them into the spawned task
        let task_token = token.clone();
        let task_metrics_log = shared_metrics_log.clone();
        let task_container_name = container_name.clone();

        let proc_to_observe = ProcessToObserve::ManagedContainers {
            process_name: "".to_string(),
            container_names: vec![task_container_name],
            down: Some("".to_string()),
        };

        // Spawn task (async)
        join_set.spawn(async move {
            tokio::select! {
                _ = task_token.cancelled() => {}
                _ = keep_logging(vec![proc_to_observe], task_metrics_log) => {}
            }
        });

        // Create stop handle (used to extract metrics log and cancel)
        let stop_handle = StopHandle::new(token, join_set, shared_metrics_log);

        // Wait for period of time (to get logs)
        sleep(time::Duration::new(2, 0)).await;

        // Stop logging and get metrics_logs from keep_logging()
        let metrics_log = stop_handle.stop().await.unwrap();

        // Should have no errors & some metrics
        assert!(!metrics_log.has_errors());
        assert!(!metrics_log.get_metrics().is_empty());
        assert_eq!(
            container_name,
            metrics_log.get_metrics().first().unwrap().process_name
        );

        // Cleanup
        cleanup_container(&docker, &container_id, &image_id).await;
    }
}
--------------------------------------------------------------------------------
/src/migrations/m20240822_095823_create_run_table.rs:
--------------------------------------------------------------------------------
use sea_orm_migration::prelude::*;

#[derive(DeriveMigrationName)]
pub struct Migration;

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .create_table(
                Table::create()
                    .table(PowerCurve::Table)
                    .if_not_exists()
                    .col(
                        ColumnDef::new(PowerCurve::Id)
                            .integer()
                            .auto_increment()
                            .not_null()
                            .primary_key(),
                    )
                    .col(ColumnDef::new(PowerCurve::A).double().not_null())
                    .col(ColumnDef::new(PowerCurve::B).double().not_null())
                    .col(ColumnDef::new(PowerCurve::C).double().not_null())
                    .col(ColumnDef::new(PowerCurve::D).double().not_null())
                    .to_owned(),
            )
            .await?;

        manager
            .create_table(
                Table::create()
                    .table(Cpu::Table)
                    .if_not_exists()
                    .col(
                        ColumnDef::new(Cpu::Id)
                            .integer()
                            .auto_increment()
                            .not_null()
                            .primary_key(),
                    )
                    .col(ColumnDef::new(Cpu::Name).string().not_null())
                    .col(ColumnDef::new(Cpu::Tdp).double())
                    .col(ColumnDef::new(Cpu::PowerCurveId).integer())
                    .foreign_key(
                        ForeignKey::create()
                            .from(Cpu::Table, Cpu::PowerCurveId)
                            .to(PowerCurve::Table, PowerCurve::Id),
                    )
                    .to_owned(),
            )
            .await?;

        manager
            .create_table(
                Table::create()
                    .table(Run::Table)
                    .if_not_exists()
                    .col(
                        ColumnDef::new(Run::Id)
                            .integer()
                            .auto_increment()
                            .not_null()
                            .primary_key(),
                    )
                    .col(
                        ColumnDef::new(Run::IsLive)
                            .boolean()
                            .not_null()
                            .default(false),
                    )
                    .col(ColumnDef::new(Run::CpuId).integer().not_null())
                    .col(ColumnDef::new(Run::StartTime).big_integer().not_null())
                    .col(ColumnDef::new(Run::StopTime).big_integer().not_null())
                    .foreign_key(
                        ForeignKey::create()
                            .from(Run::Table, Run::CpuId)
                            .to(Cpu::Table, Cpu::Id),
                    )
                    .to_owned(),
            )
            .await
    }

    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .drop_table(Table::drop().table(Run::Table).to_owned())
            .await
    }
}

#[derive(DeriveIden)]
pub enum PowerCurve {
    Table,
    Id,
    A,
    B,
    C,
    D,
}

#[derive(DeriveIden)]
pub enum Cpu {
    Table,
    Id,
    Name,
    Tdp,
    PowerCurveId,
}

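// Note (added, not in the original source): `DeriveIden` renders each variant as a snake_case
// SQL identifier, so a hedged sketch of the expected mapping is:
//
//   Run::Table     -> "run"
//   Run::IsLive    -> "is_live"
//   Run::StartTime -> "start_time"
//
// which matches the `table_name = "run"` used by the entity models above.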
--------------------------------------------------------------------------------
/src/migrations/m20240822_095830_create_metrics_table.rs:
--------------------------------------------------------------------------------
use super::m20240822_095823_create_run_table::Run;
use sea_orm_migration::prelude::*;

#[derive(DeriveMigrationName)]
pub struct Migration;

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .create_table(
                Table::create()
                    .table(Metrics::Table)
                    .if_not_exists()
                    .col(
                        ColumnDef::new(Metrics::Id)
                            .integer()
                            .auto_increment()
                            .not_null()
                            .primary_key(),
                    )
                    .col(ColumnDef::new(Metrics::RunId).integer().not_null())
                    .col(ColumnDef::new(Metrics::ProcessId).string().not_null())
                    .col(ColumnDef::new(Metrics::ProcessName).string().not_null())
                    .col(ColumnDef::new(Metrics::CpuUsage).double().not_null())
                    .col(ColumnDef::new(Metrics::CpuTotalUsage).double().not_null())
                    .col(ColumnDef::new(Metrics::CpuCoreCount).integer().not_null())
                    .col(ColumnDef::new(Metrics::TimeStamp).big_integer().not_null())
                    .foreign_key(
                        ForeignKey::create()
                            .from(Metrics::Table, Metrics::RunId)
                            .to(Run::Table, Run::Id),
                    )
                    // unique constraint
                    .index(
                        Index::create()
                            .col(Metrics::RunId)
                            .col(Metrics::ProcessId)
                            .col(Metrics::TimeStamp)
                            .unique(),
                    )
                    .to_owned(),
            )
            .await
    }

    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .drop_table(Table::drop().table(Metrics::Table).to_owned())
            .await
    }
}

#[derive(DeriveIden)]
enum Metrics {
    Table,
    Id,
    RunId,
    ProcessId,
    ProcessName,
    CpuUsage,
    CpuTotalUsage,
    CpuCoreCount,
    TimeStamp,
}
--------------------------------------------------------------------------------
/src/migrations/m20240822_095838_create_iteration_table.rs:
--------------------------------------------------------------------------------
use super::m20240822_095823_create_run_table::Run;
use sea_orm_migration::prelude::*;

#[derive(DeriveMigrationName)]
pub struct Migration;

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .create_table(
                Table::create()
                    .table(Iteration::Table)
                    .if_not_exists()
                    .col(
                        ColumnDef::new(Iteration::Id)
                            .integer()
                            .auto_increment()
                            .not_null()
                            .primary_key(),
                    )
                    .col(ColumnDef::new(Iteration::RunId).integer().not_null())
                    .col(ColumnDef::new(Iteration::ScenarioName).string().not_null())
                    .col(ColumnDef::new(Iteration::Count).integer().not_null())
                    .col(
                        ColumnDef::new(Iteration::StartTime)
                            .big_integer()
                            .not_null(),
                    )
                    .col(ColumnDef::new(Iteration::StopTime).big_integer().not_null())
                    .foreign_key(
                        ForeignKey::create()
                            .from(Iteration::Table, Iteration::RunId)
                            .to(Run::Table, Run::Id),
                    )
                    // unique constraint
                    .index(
                        Index::create()
                            .name("unique trio")
                            .col(Iteration::RunId)
                            .col(Iteration::ScenarioName)
                            .col(Iteration::Count)
                            .unique(),
                    )
                    .to_owned(),
            )
            .await
    }

    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .drop_table(Table::drop().table(Iteration::Table).to_owned())
            .await
    }
}

#[derive(DeriveIden)]
enum Iteration {
    Table,
    Id,
    RunId,
    ScenarioName,
    Count,
    StartTime,
    StopTime,
}
--------------------------------------------------------------------------------
/src/migrations/m20241109_180400_add_region_column.rs:
--------------------------------------------------------------------------------
use sea_orm_migration::prelude::*;

#[derive(DeriveMigrationName)]
pub struct Migration;

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .alter_table(
                Table::alter()
                    .table(Run::Table)
                    .add_column_if_not_exists(ColumnDef::new(Run::Region).string())
                    .to_owned(),
            )
            .await
    }

    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .alter_table(
                Table::alter()
                    .table(Run::Table)
                    .drop_column(Alias::new("region"))
                    .to_owned(),
            )
            .await
    }
}

#[derive(DeriveIden)]
pub enum Run {
    Table,
    Region,
    CarbonIntensity,
}
--------------------------------------------------------------------------------
/src/migrations/m20241110_191154_add_ci_column.rs:
--------------------------------------------------------------------------------
use sea_orm_migration::prelude::*;

#[derive(DeriveMigrationName)]
pub struct Migration;

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .alter_table(
                Table::alter()
                    .table(Run::Table)
                    .add_column(
                        ColumnDef::new(Run::CarbonIntensity)
                            .double()
                            .not_null()
                            .default(0.494),
                    )
                    .to_owned(),
            )
            .await
    }

    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager
            .alter_table(
                Table::alter()
                    .table(Run::Table)
                    .drop_column(Alias::new("carbon_intensity"))
                    .to_owned(),
            )
            .await
    }
}

#[derive(DeriveIden)]
enum Run {
    Table,
    CarbonIntensity,
}
--------------------------------------------------------------------------------
/src/migrations/mod.rs:
--------------------------------------------------------------------------------
pub mod m20240822_095823_create_run_table;
pub mod m20240822_095830_create_metrics_table;
pub mod m20240822_095838_create_iteration_table;
pub mod m20241109_180400_add_region_column;
pub mod m20241110_191154_add_ci_column;

pub use sea_orm_migration::prelude::*;

pub struct Migrator;

#[async_trait::async_trait]
impl MigratorTrait for Migrator {
    fn migrations() -> Vec<Box<dyn MigrationTrait>> {
        vec![
            Box::new(m20240822_095823_create_run_table::Migration),
            Box::new(m20240822_095830_create_metrics_table::Migration),
            Box::new(m20240822_095838_create_iteration_table::Migration),
            Box::new(m20241109_180400_add_region_column::Migration),
            Box::new(m20241110_191154_add_ci_column::Migration),
        ]
    }
}
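
The Migrator above can also be driven without the CLI wrapper that follows; a minimal sketch using sea-orm-migration's MigratorTrait (the db_migrate helper used by the tests later in this dump presumably wraps something similar):

use cardamon::migrations::Migrator;
use sea_orm::{Database, DbErr};
use sea_orm_migration::MigratorTrait;

// Connect to an in-memory SQLite database and apply all pending migrations.
// Passing None applies every migration rather than a fixed number of steps.
async fn migrate_in_memory() -> Result<(), DbErr> {
    let db = Database::connect("sqlite::memory:").await?;
    Migrator::up(&db, None).await
}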
--------------------------------------------------------------------------------
/src/migrator.rs:
--------------------------------------------------------------------------------
use cardamon::migrations;
use sea_orm_migration::prelude::*;

#[tokio::main]
async fn main() {
    cli::run_cli(migrations::Migrator).await;
}
--------------------------------------------------------------------------------
/src/models.rs:
--------------------------------------------------------------------------------
use std::{future::Future, pin::Pin};

use crate::{config::Power, data::Data, entities::metrics::Model as Metrics};
use itertools::Itertools;

pub type BoxFuture<T> = Pin<Box<dyn Future<Output = T> + Send>>;

fn boa_model(a: f64, b: f64, c: f64, d: f64) -> impl Fn(f64) -> f64 {
    move |workload| a * (b * (workload + c)).ln() + d
}

pub fn rab_model(metrics: &Vec<&Metrics>, power: &Power, ci_g_wh: f64) -> Data {
    let data = metrics
        .iter()
        .sorted_by(|a, b| b.time_stamp.cmp(&a.time_stamp))
        .tuple_windows()
        .map(|(x, y)| {
            match *power {
                Power::Curve(a, b, c, d) => {
                    let cpu_util = 0.5 * (x.cpu_usage + y.cpu_usage) * 100.0;
                    let delta_t_h = (x.time_stamp - y.time_stamp) as f64 / 3_600_000.0;

                    // boa_model(a, b, c, d)(cpu_util * delta_t_h)
                    boa_model(a, b, c, d)(cpu_util) * delta_t_h
                }

                Power::Tdp(tdp) => {
                    let delta_t_h = (x.time_stamp - y.time_stamp) as f64 / 3_600_000.0;

                    // taking the midpoint of the two datapoints and dividing by 50 because we're
                    // assuming tdp is at 50% utilization
                    (0.5 * (x.cpu_usage + y.cpu_usage)) / 50.0 * tdp * delta_t_h
                }
            }
        })
        .collect_vec();

    let pow_w = data.iter().fold(0_f64, |acc, x| acc + x);
    let co2_g_wh = pow_w * ci_g_wh;

    Data {
        pow: pow_w,
        co2: co2_g_wh,
    }
}
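
To make the curve branch concrete, a worked example with made-up coefficients (the numbers here are illustrative only and come from no fixture):

// Hypothetical coefficients, for illustration only.
let (a, b, c, d) = (10.0_f64, 1.0, 5.0, 2.0);
let power_w = |u: f64| a * (b * (u + c)).ln() + d; // same shape as boa_model
let cpu_util = 0.5 * (0.40 + 0.60) * 100.0; // midpoint of two samples, as a percentage
let delta_t_h = 1_000.0 / 3_600_000.0;      // samples 1000 ms apart, in hours
let energy_wh = power_w(cpu_util) * delta_t_h;
assert!((energy_wh - 0.0117).abs() < 1e-3); // ~42 W for ~1 s comes to ~0.0117 Wh

Summing these per-window energies and multiplying by the carbon intensity in g/Wh gives the co2 figure carried in Data; note that the accumulated value is an energy in watt-hours, despite the _w suffix on the local variable.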
--------------------------------------------------------------------------------
/src/public/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Root-Branch/cardamon-core/2d822c91d319621fcabcb8bbe1ea57516f393aef/src/public/.gitkeep
--------------------------------------------------------------------------------
/src/server.rs:
--------------------------------------------------------------------------------
mod errors;
mod routes;

use anyhow::Context;
use axum::response::{IntoResponse, Response};
use axum::{http::header, routing::get, Router};
use colored::Colorize;
use http::{StatusCode, Uri};
use rust_embed::Embed;
use sea_orm::DatabaseConnection;

#[derive(Embed, Clone)]
#[folder = "src/public"]
struct Asset;

pub struct StaticFile<T>(pub T);

impl<T> IntoResponse for StaticFile<T>
where
    T: Into<String>,
{
    fn into_response(self) -> Response {
        let path = self.0.into();

        match Asset::get(path.as_str()) {
            Some(content) => {
                let mime = mime_guess::from_path(path).first_or_octet_stream();
                ([(header::CONTENT_TYPE, mime.as_ref())], content.data).into_response()
            }
            None => (StatusCode::NOT_FOUND, "404 Not Found").into_response(),
        }
    }
}

// Serve the SPA's index.html for any request that doesn't match another route.
async fn spa_fallback() -> impl IntoResponse {
    static_handler("/index.html".parse::<Uri>().unwrap()).await
}

// We use a wildcard matcher ("/assets/*file") to match against everything
// within our defined assets directory. This is the embedded directory on the
// Asset struct above, where folder = "src/public".
async fn static_handler(uri: Uri) -> impl IntoResponse {
    let path = uri.path().trim_start_matches('/').to_string();
    StaticFile(path)
}

// Kept separate from start() for integration tests
async fn create_app(db: &DatabaseConnection) -> Router {
    // Middleware later
    /*
    let protected = Router::new()
        .route("/user", get(routes::user::get_user))
        .layer(middleware::from_fn_with_state(pool.clone(), api_key_auth));
    */

    Router::new()
        .route("/api/scenarios", get(routes::get_scenarios))
        .route("/api/runs/:scenario_name", get(routes::get_runs))
        .route("/assets/*file", get(static_handler))
        .fallback(spa_fallback)
        .with_state(db.clone())
}

pub async fn start(port: u32, db: &DatabaseConnection) -> anyhow::Result<()> {
    let app = create_app(db).await;

    let listener = tokio::net::TcpListener::bind(format!("0.0.0.0:{}", port))
        .await
        .unwrap();

    println!("\n{}", " Cardamon UI ".reversed().green());
    println!("> Server started: visit http://localhost:{}", port);
    axum::serve(listener, app).await.context("Error serving UI")
}
--------------------------------------------------------------------------------
/src/server/errors.rs:
--------------------------------------------------------------------------------
use axum::{http::StatusCode, response::IntoResponse};
use std::fmt;

#[derive(Debug)]
// TODO: Split server error into different types
pub struct ServerError(pub anyhow::Error);

impl fmt::Display for ServerError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}
impl IntoResponse for ServerError {
    fn into_response(self) -> axum::response::Response {
        (
            StatusCode::INTERNAL_SERVER_ERROR,
            format!("Something went wrong!\n{}", self.0),
        )
            .into_response()
    }
}

impl From<anyhow::Error> for ServerError {
    fn from(error: anyhow::Error) -> Self {
        ServerError(error)
    }
}
--------------------------------------------------------------------------------
/src/server/routes.rs:
--------------------------------------------------------------------------------
use crate::{
    dao::pagination::Pages,
    data::{
        dataset::{AggregationMethod, Dataset, LiveDataFilter},
        dataset_builder::DatasetBuilder,
        ProcessMetrics, ScenarioData,
    },
    models::rab_model,
    server::errors::ServerError,
};
use anyhow::Context;
use axum::{
    extract::{Path, Query, State},
    Json,
};
use chrono::Utc;
use itertools::Itertools;
use sea_orm::DatabaseConnection;
use serde::{Deserialize, Serialize};
use tracing::{instrument, trace};

#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct Pagination {
    pub current_page: u64,
    pub per_page: u64,
    pub total_pages: u64,
}

#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ScenariosParams {
    pub from_date: Option<i64>,
    pub to_date: Option<i64>,
    pub search_query: Option<String>,
    pub last_n: Option<u64>,
    pub page: Option<u64>,
    pub limit: Option<u64>,
}

#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct ScenarioResponse {
    pub scenario_name: String,
    pub last_run: i64,
    pub pow: f64,
    pub co2: f64,
    pub sparkline: Vec<f64>,
    pub trend: f64,
}

#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct ScenariosResponse {
    pub scenarios: Vec<ScenarioResponse>,
    pub pagination: Pagination,
}

#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RunsParams {
    pub page: Option<u64>,
    pub limit: Option<u64>,
}

#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct ProcessResponse {
    pub process_name: String,
    pub pow_contrib_perc: f64,
    pub iteration_metrics: Vec<Vec<ProcessMetrics>>,
}

#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct RunResponse {
    pub region: Option<String>,
    pub start_time: i64,
    pub duration: f64,
    pub pow: f64,
    pub co2: f64,
    pub ci: f64,
    pub processes: Vec<ProcessResponse>,
}

#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct RunsResponse {
    pub runs: Vec<RunResponse>,
    pub pagination: Pagination,
}

pub async fn build_scenario_data(
    dataset: &Dataset,
    db: &DatabaseConnection,
) -> anyhow::Result<Vec<ScenarioData>> {
    let mut scenario_data = vec![];
    for scenario_dataset in dataset.by_scenario(LiveDataFilter::IncludeLive) {
        let data = scenario_dataset
            .apply_model(db, &rab_model, AggregationMethod::MostRecent)
            .await?;
        scenario_data.push(data);
    }

    Ok(scenario_data)
}
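
The two handlers below share a convention worth noting: `page` in the query string is 1-indexed, while the DatasetBuilder expects 0-indexed pages, hence the saturating_sub(1) before building. The builder chain itself narrows the data in stages, sketched here with the same method names used in this file (the intermediate builder types live in data/dataset_builder.rs):

let dataset = DatasetBuilder::new()
    .scenarios_in_range(begin, end) // choose which scenarios to include
    .page(limit, page)              // paginate the scenario list (0-indexed page)
    .last_n_runs(last_n)            // keep only the most recent runs per scenario
    .all()                          // no further filtering on those runs
    .build(&db)                     // execute the underlying queries
    .await?;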
#[instrument(name = "Get list of scenarios")]
pub async fn get_scenarios(
    State(db): State<DatabaseConnection>,
    Query(params): Query<ScenariosParams>,
) -> Result<Json<ScenariosResponse>, ServerError> {
    let begin = params.from_date.unwrap_or(0);
    let end = params
        .to_date
        .unwrap_or_else(|| Utc::now().timestamp_millis());
    let last_n = params.last_n.unwrap_or(5);
    let page = params.page.unwrap_or(1);
    let page = page.saturating_sub(1); // the API is 1-indexed, the DB is 0-indexed
    let limit = params.limit.unwrap_or(5);

    trace!(
        "Fetching scenarios:\nbegin: {}, end: {}\nlast_n: {}\npage: {}, page_size: {}",
        begin,
        end,
        last_n,
        page,
        limit
    );

    let dataset = match &params.search_query {
        Some(query) => {
            DatasetBuilder::new()
                .scenarios_by_name(query)
                .page(limit, page)
                .last_n_runs(last_n)
                .all()
                .build(&db)
                .await?
        }
        None => {
            DatasetBuilder::new()
                .scenarios_in_range(begin, end)
                .page(limit, page)
                .last_n_runs(last_n)
                .all()
                .build(&db)
                .await?
        }
    };

    let scenario_data = build_scenario_data(&dataset, &db).await?;
    let total_pages = match dataset.total_scenarios {
        Pages::NotRequired => 0,
        Pages::Required(pages) => pages,
    };

    let mut scenarios = vec![];
    for scenario_data in scenario_data {
        let scenario_name = scenario_data.scenario_name;
        let last_run = scenario_data
            .run_data
            .first()
            .context("scenario has no run data")?
            .start_time;
        let pow = scenario_data.data.pow;
        let co2 = scenario_data.data.co2;
        let sparkline = scenario_data
            .run_data
            .iter()
            .map(|run_data| run_data.data.pow)
            .collect_vec();
        let trend = scenario_data.trend;

        scenarios.push(ScenarioResponse {
            scenario_name,
            last_run,
            pow,
            co2,
            sparkline,
            trend,
        });
    }

    Ok(Json(ScenariosResponse {
        scenarios,
        pagination: Pagination {
            current_page: page + 1,
            per_page: limit,
            total_pages,
        },
    }))
}

pub async fn get_runs(
    State(db): State<DatabaseConnection>,
    Path(scenario_name): Path<String>,
    Query(params): Query<RunsParams>,
) -> Result<Json<RunsResponse>, ServerError> {
    let page = params.page.unwrap_or(1);
    let page = page.saturating_sub(1); // the API is 1-indexed, the DB is 0-indexed
    let limit = params.limit.unwrap_or(5);

    trace!(
        "Fetching runs:\nscenario: {}\npage: {}, page_size: {}",
        scenario_name,
        page,
        limit
    );

    let dataset = DatasetBuilder::new()
        .scenario(&scenario_name)
        .all()
        .runs_all()
        .page(limit, page)?
        .build(&db)
        .await?;
    let total_pages = match dataset.total_runs {
        Pages::NotRequired => 0,
        Pages::Required(pages) => pages,
    };

    let mut runs = vec![];
    for scenario_dataset in &dataset.by_scenario(LiveDataFilter::IncludeLive) {
        for run_dataset in scenario_dataset.by_run() {
            let model_data = run_dataset.apply_model(&db, &rab_model).await?;
            let processes = model_data
                .process_data
                .iter()
                .map(|data| ProcessResponse {
                    process_name: data.process_id.clone(),
                    pow_contrib_perc: data.pow_perc,
                    iteration_metrics: data.iteration_metrics.clone(),
                })
                .collect_vec();

            // let json_str = serde_json::to_string_pretty(&processes);
            // println!("processes json\n{:?}", json_str);

            runs.push(RunResponse {
                region: model_data.region.clone(),
                start_time: model_data.start_time,
                duration: model_data.duration(),
                pow: model_data.data.pow,
                co2: model_data.data.co2,
                ci: model_data.ci,
                processes,
            });
        }
    }

    Ok(Json(RunsResponse {
        runs,
        pagination: Pagination {
            current_page: page + 1,
            per_page: limit,
            total_pages,
        },
    }))
}

#[cfg(test)]
mod tests {
    use crate::{
        data::dataset_builder::DatasetBuilder, db_connect, db_migrate,
        server::routes::build_scenario_data, tests::setup_fixtures,
    };

    #[tokio::test]
    async fn building_data_response_for_ui_should_work() -> anyhow::Result<()> {
        let db = db_connect("sqlite::memory:", None).await?;
        db_migrate(&db).await?;
        setup_fixtures(
            &[
                "./fixtures/power_curves.sql",
                "./fixtures/cpus.sql",
                "./fixtures/runs.sql",
                "./fixtures/iterations.sql",
                "./fixtures/metrics.sql",
            ],
            &db,
        )
        .await?;

        let dataset = DatasetBuilder::new()
            .scenarios_all()
            .all()
            .last_n_runs(3)
            .all()
            .build(&db)
            .await?;

        let _res = build_scenario_data(&dataset, &db).await?;

        // uncomment to see the generated json response
        // let json_str = serde_json::to_string_pretty(&_res)?;
        // println!("{}", json_str);

        Ok(())
    }
}
--------------------------------------------------------------------------------
/src/templates/cardamon.unix.toml:
--------------------------------------------------------------------------------
# Processes
# ---------
# This array of tables describes the components of your application that you
# would like cardamon to measure.
#
# processes contain the following properties:
#   name:
#     type - string
#     desc - must be unique
#     required - true
#
#   up:
#     type - string
#     desc - command to execute the process
#     required - true
#
#   down:
#     type - string
#     desc - command to stop the process. In the case of bare-metal processes
#            cardamon will pass the PID of the process to this command. You can
#            use `{pid}` as a placeholder in the command e.g. `kill {pid}`.
#     default: empty string
#     required - false
#
#   process.type:
#     type - "baremetal" | "docker"
#     desc - the type of process which is being executed
#     required - true
#
#   process.containers:
#     type - string[]
#     desc - docker processes may initiate multiple containers from a single
#            command, e.g. `docker compose up -d`. This is the list of
#            containers started by this process that you would like cardamon
#            to measure.
#     required - true (if `process.type` is "docker")
#
#   redirect.to:
#     type - "null" | "parent" | "file"
#     desc - where to redirect this process's stdout and stderr. "null" ignores
#            output, "parent" attaches the process's output to cardamon, "file"
#            writes stdout and stderr to a file of the same name as this
#            process e.g. db.stdout.
#     default: "file"
#     required - false
#
# EXAMPLE
# -------
# [[process]]
# name = "db"
# up = "docker compose up -d"
# down = "docker compose down"
# redirect.to = "file"
# process.type = "docker"
# process.containers = ["postgres"]

[[process]]
name = "test_proc"
up = "bash -c \"while true; do shuf -i 0-1337 -n 1; done\""
down = "kill {pid}"
redirect.to = "file"
process.type = "baremetal"

# Scenarios
# ---------
# This array of tables describes the scenarios that cardamon can run. They can
# be any kind of executable and are designed to place your application under
# consistent load each time they are run. Examples include bash scripts which
# `curl` a REST endpoint or nodejs scripts using playwright.js to control a
# webpage.
#
# scenarios contain the following properties:
#   name:
#     type - string
#     desc - must be unique
#     required - true
#
#   desc:
#     type - string
#     desc - a short description of the scenario to remind you what it does
#     required - false
#
#   command:
#     type - string
#     desc - the command to execute this scenario
#     required - true
#
#   iterations:
#     type - integer
#     desc - the number of times cardamon should execute this scenario per run.
#            It's better to run scenarios multiple times and take an average.
#     default - 1
#     required - false
#
#   processes:
#     type - string[]
#     desc - a list of the processes which need to be started before executing
#            this scenario.
#     required - true
#
[[scenario]]
name = "sleep"
desc = "Sleeps for 10 seconds, a real scenario would call your app"
command = "sleep 10"
iterations = 2
processes = ["test_proc"]


# Observations
# ---------------
# This array of tables allows you to group scenarios together to make it
# easier to execute multiple scenarios in a single run.
#
# observations contain the following properties:
#   name:
#     type - string
#     desc - a unique name
#     required - true
#
#   observe.scenarios:
#     type - string[]
#     desc - a list of scenarios to execute whilst observing the processes
#            required to run all scenarios
#     required - required if observe.processes is not defined
#
#   observe.processes:
#     type - string[]
#     desc - a list of processes to execute and observe. Running an observation
#            with this property set runs Cardamon in Live mode.
#     required - required if observe.scenarios is not defined.
#
[[observation]]
name = "test_obs"
scenarios = ["sleep"]
--------------------------------------------------------------------------------
/src/templates/cardamon.win.toml:
--------------------------------------------------------------------------------
# Processes
# ---------
# This array of tables describes the components of your application that you
# would like cardamon to measure.
#
# processes contain the following properties:
#   name:
#     type - string
#     desc - must be unique
#     required - true
#
#   up:
#     type - string
#     desc - command to execute the process
#     required - true
#
#   down:
#     type - string
#     desc - command to stop the process. In the case of bare-metal processes
#            cardamon will pass the PID of the process to this command. You can
#            use `{pid}` as a placeholder in the command e.g. `kill {pid}`.
#     default: empty string
#     required - false
#
#   process.type:
#     type - "baremetal" | "docker"
#     desc - the type of process which is being executed
#     required - true
#
#   process.containers:
#     type - string[]
#     desc - docker processes may initiate multiple containers from a single
#            command, e.g. `docker compose up -d`. This is the list of
#            containers started by this process that you would like cardamon
#            to measure.
#     required - true (if `process.type` is "docker")
#
#   redirect.to:
#     type - "null" | "parent" | "file"
#     desc - where to redirect this process's stdout and stderr. "null" ignores
#            output, "parent" attaches the process's output to cardamon, "file"
#            writes stdout and stderr to a file of the same name as this
#            process e.g. db.stdout.
#     default: "file"
#     required - false
#
# EXAMPLE
# -------
# [[process]]
# name = "db"
# up = "docker compose up -d"
# down = "docker compose down"
# redirect.to = "file"
# process.type = "docker"
# process.containers = ["postgres"]

[[process]]
name = "test_proc"
up = "powershell while($true) { get-random }" # Required
down = "stop-process {pid}"
redirect.to = "file"
process.type = "baremetal"

# Scenarios
# ---------
# This array of tables describes the scenarios that cardamon can run. They can
# be any kind of executable and are designed to place your application under
# consistent load each time they are run. Examples include bash scripts which
# `curl` a REST endpoint or nodejs scripts using playwright.js to control a
# webpage.
#
# scenarios contain the following properties:
#   name:
#     type - string
#     desc - must be unique
#     required - true
#
#   desc:
#     type - string
#     desc - a short description of the scenario to remind you what it does
#     required - false
#
#   command:
#     type - string
#     desc - the command to execute this scenario
#     required - true
#
#   iterations:
#     type - integer
#     desc - the number of times cardamon should execute this scenario per run.
#            It's better to run scenarios multiple times and take an average.
#     default - 1
#     required - false
#
#   processes:
#     type - string[]
#     desc - a list of the processes which need to be started before executing
#            this scenario.
#     required - true
#
[[scenario]]
name = "sleep"
desc = "Sleeps for 15 seconds, a real scenario would call your app"
command = "powershell sleep 15" # Required - commands for running scenarios
iterations = 2
processes = ["test_proc"]


# Observations
# ---------------
# This array of tables allows you to group scenarios together to make it
# easier to execute multiple scenarios in a single run.
#
# observations contain the following properties:
#   name:
#     type - string
#     desc - a unique name
#     required - true
#
#   observe.scenarios:
#     type - string[]
#     desc - a list of scenarios to execute whilst observing the processes
#            required to run all scenarios
#     required - required if observe.processes is not defined
#
#   observe.processes:
#     type - string[]
#     desc - a list of processes to execute and observe. Running an observation
#            with this property set runs Cardamon in Live mode.
#     required - required if observe.scenarios is not defined.
#
[[observation]]
name = "test_obs"
scenarios = ["sleep"]
--------------------------------------------------------------------------------
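
Both templates document a `{pid}` placeholder in `down` commands for bare-metal processes. A minimal sketch of the substitution this implies (illustrative only; render_down_command is a hypothetical helper, not a function from this crate):

// "kill {pid}" -> "kill 12345", "stop-process {pid}" -> "stop-process 12345"
fn render_down_command(down: &str, pid: u32) -> String {
    down.replace("{pid}", &pid.to_string())
}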