├── .github └── dependabot.yml ├── .gitignore ├── .golangci.yaml ├── LICENSE ├── README.md ├── SECURITY.md ├── go.mod ├── go.sum └── pkg └── power ├── README.md ├── c_states.go ├── c_states_test.go ├── cpu.go ├── cpu_test.go ├── host.go ├── host_test.go ├── integration_test.go ├── pool.go ├── pool_test.go ├── power.go ├── power_profile.go ├── power_profile_test.go ├── power_test.go ├── scaling_driver.go ├── scaling_driver_test.go ├── topology.go ├── topology_test.go ├── uncore.go └── uncore_test.go /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "gomod" # See documentation for possible values 9 | directory: "/" # Location of package manifests 10 | schedule: 11 | interval: "weekly" -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.so 2 | *.dylib 3 | 4 | # Test binary, built with `go test -c` 5 | *.test 6 | 7 | # Output of the go coverage tool, specifically when used with LiteIDE 8 | *.out 9 | 10 | # Go workspace file 11 | go.work 12 | 13 | __debug_bin 14 | 15 | .idea 16 | -------------------------------------------------------------------------------- /.golangci.yaml: -------------------------------------------------------------------------------- 1 | run: 2 | skip-files: 3 | # exclude tests 4 | - "_test\\.go$" 5 | output: 6 | sort-results: true 7 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2022 Intel Corporation 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # DISCONTINUATION OF PROJECT 2 | This project will no longer be maintained by Intel. 3 | Intel has ceased development and contributions including, but not limited to, maintenance, bug fixes, new releases, or updates, to this project. 4 | Intel no longer accepts patches to this project. 
5 | If you have an ongoing need to use this project, are interested in independently developing it, or would like to maintain patches for the open source software community, please create your own fork of this project. 6 | 7 | 8 | # Intel® Power Optimization Library 9 | 10 | The Intel Power Optimization Library is an open source library that takes the user's desired configuration and uses it to tune 11 | the frequencies and set the priority levels of the cores. 12 | 13 | # Overview 14 | 15 | The Power Optimization Library allows management of CPU power and performance via Pool-based management. 16 | 17 | This library is currently used as part of the 18 | [Kubernetes Power Manager](https://github.com/intel/kubernetes-power-manager), but could be used with other utilities. 19 | 20 | ## Features 21 | 22 | * Pool based Frequency Tuning 23 | * Facilitate use of Intel SST (Speed Select Technology) Suite 24 | * SST-CP - Speed Select Technology - Core Power 25 | * C-States control 26 | * Uncore frequency 27 | * CPU Topology discovery and awareness 28 | 29 | # Prerequisites 30 | 31 | - Linux based OS 32 | - P-State or acpi-cpufreq scaling driver enabled 33 | - C-States 34 | - ``intel_cstates`` kernel module loaded 35 | - Uncore frequency 36 | - kernel 5.6+ compiled with ``CONFIG_INTEL_UNCORE_FREQ_CONTROL`` 37 | - ``intel-uncore-frequency`` kernel module loaded 38 | 39 | **Note:** on Ubuntu systems, the Uncore frequency feature requires a ``linux-generic-hwe`` kernel 40 | 41 | # Definitions 42 | 43 | **Intel SST-CP (Speed Select Technology - Core Power)** - allows the user to group cores into levels of priority. 44 | When there is power to spare on the system, it can be distributed among the cores based on their priority level. 45 | While it is not guaranteed that the extra power will be applied to the highest priority cores, the system will do its 46 | best to do so. 47 | 48 | There are four levels of priority available: 49 | 50 | 1. Performance 51 | 2. Balance Performance 52 | 3. Balance Power 53 | 4. Power 54 | 55 | The Priority level for a core is defined using its **EPP (Energy Performance Preference)** value, which is one of the 56 | options in the Power Profiles. If not all the power is utilized on the CPU, the CPU can put the higher priority cores 57 | up to Turbo Frequency (allowing the cores to run faster). 58 | 59 | **C-States** - To save energy on a system, you can command the CPU to go into a low-power mode. Each CPU has several power 60 | modes, which 61 | are collectively called C-States. These work by cutting the clock signal and power from idle CPUs, or CPUs that are not 62 | executing commands. While you save more energy by sending CPUs into deeper C-State modes, it does take more time for the 63 | CPU to fully “wake up” from sleep mode, so there is a trade-off when it comes to deciding the depth of sleep. 64 | 65 | **Uncore** - equates to logic outside the CPU cores but residing on the same die. Traffic (for example, Data Reads) 66 | generated by threads executing on CPU cores or IO devices may be operated on by logic in the Uncore. This includes logic responsible 67 | for managing coherency, managing access to the DIMMs, managing power distribution and sleep states, and so forth. 68 | 69 | **Uncore Frequency** - the frequency of the Uncore fabric. 70 | 71 | # Usage 72 | 73 | CPU frequency/power values are managed by assigning Cores to desired pools, associated with their attached Power 74 | Profiles. The user of the Power Optimization Library can create any number of Exclusive Pools and Power Profiles.
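For illustration, here is the whole flow condensed into one snippet; a minimal sketch assuming the API walked through step by step in the Setup and Profiles sections below, with placeholder CPU IDs and frequencies, and error handling elided:

````go
import "github.com/intel/power-optimization-library/pkg/power"

host := power.CreateInstance("node1")                    // all CPUs start in the Reserved Pool
host.GetReservedPool().SetCpuIDs([]uint{0, 1})           // keep CPUs 0-1 untouched, the rest move to the Shared Pool
perfPool, _ := host.AddExclusivePool("performance-pool") // new Exclusive Pool, empty on creation
perfPool.MoveCpuIDs([]uint{3, 4})                        // take CPUs 3-4 out of the Shared Pool
profile, _ := power.NewPowerProfile("performance", 2_600_000, 2_800_000, "performance", "performance")
perfPool.SetPowerProfile(profile)                        // apply min/max frequency, governor and EPP
````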
75 | 76 | C-States are similarly managed via Pools but can also be managed on a per-CPU basis. 77 | 78 | Uncore frequencies can be set system-wide, per package or per die. 79 | 80 | ### Setup 81 | 82 | See [Object definitions](pkg/power/README.md#library-objects) for more information. 83 | 84 | To begin using the library, first create a host object with the supplied name, a reserved pool containing all CPUs marked 85 | as system reserved, and an empty list of Exclusive Pools. At this stage no changes are made to any configuration. 86 | 87 | ```go 88 | import "github.com/intel/power-optimization-library/pkg/power" 89 | host := power.CreateInstance("Name") 90 | ``` 91 | 92 | All CPUs start in the reserved pool, meaning that they cannot be managed; we first need to configure a shared Pool that can 93 | be managed. \ 94 | The below will leave CPUs with IDs 0 and 1 unmanaged by the library in the Reserved Pool and move all other CPUs to the Shared 95 | Pool. 96 | 97 | ````go 98 | host.GetReservedPool().SetCpuIDs([]uint{0, 1}) 99 | ```` 100 | 101 | Alternatively, the CPUs to be put in the Shared Pool can be provided directly: 102 | 103 | ````go 104 | host.GetSharedPool().SetCpuIDs([]uint{2, 3, 4, 5, 6, 7}) 105 | ```` 106 | 107 | Create an Exclusive pool with the name ``"performance-pool"``. No CPUs are placed in an exclusive pool upon creation. 108 | 109 | ````go 110 | performancePool, err := host.AddExclusivePool("performance-pool") 111 | ```` 112 | 113 | Move desired CPUs to the new Pool: 114 | 115 | ````go 116 | err := performancePool.MoveCpuIDs([]uint{3, 4}) 117 | ```` 118 | 119 | CPUs can only be moved to/from the shared pool; CPUs cannot be moved out of the reserved pool or directly between exclusive pools. 120 | 121 | Exclusive pools can also be removed. 122 | 123 | ````go 124 | err := performancePool.Remove() 125 | ```` 126 | 127 | All CPUs in the removed pool will be moved back to the Shared Pool. 128 | 129 | ### Profiles 130 | 131 | Power profiles can be associated with any Exclusive Pool or the Shared Pool. 132 | 133 | To set a power Profile, first create it using ``NewPowerProfile(name, minFreq, maxFreq, governor, epp)``. 134 | All frequency values are in kHz. 135 | 136 | ````go 137 | performanceProfile, err := NewPowerProfile("powerProfile", 2_600_000, 2_800_000, "performance", "performance") 138 | ```` 139 | You can also use the ``NewEcorePowerProfile(name, minFreq, maxFreq, emin, emax, governor, epp)`` constructor to 140 | create a profile that supports environments with performance and efficiency cores. 141 | 142 | ````go 143 | performanceProfile, err := NewEcorePowerProfile("powerProfile", 2_600_000, 2_800_000, 1_600_000, 1_800_000, "performance", "performance") 144 | ```` 145 | 146 | All values, and their support by the hardware, are validated during Profile creation. 147 | 148 | A power profile can now be associated with an Exclusive Pool or the Shared Pool: 149 | 150 | ````go 151 | err := host.GetExclusivePool("performance-pool").SetPowerProfile(performanceProfile) 152 | ```` 153 | 154 | Power Profiles can be unset/removed by passing ``nil``. This will restore CPU frequencies, governor and EPP to their defaults. 155 | 156 | ````go 157 | err := host.GetExclusivePool("performance-pool").SetPowerProfile(nil) 158 | ```` 159 | 160 | ### C-States 161 | 162 | C-States can be configured similarly by creating a CStates object and applying it to a pool: 163 | 164 | ````go 165 | err := host.GetExclusivePool("performance-pool").SetCStates(CStates{"C0": true}) 166 | ```` 167 | 168 | It is also possible to set CStates on a per-CPU basis.
This configuration will always take precedence over the per-Pool configuration. 169 | 170 | ````go 171 | err := host.GetAllCpus().ById(4).SetCStates(CStates{"C0": true}) 172 | ```` 173 | 174 | Multiple CPUs can be configured in a loop: 175 | 176 | ````go 177 | cStates := CStates{"C0": true} 178 | for _, cpu := range host.GetAllCpus().ManyByIDs([]uint{3, 4, 5}) { 179 | err := cpu.SetCStates(cStates) 180 | } 181 | ```` 182 | 183 | ### Uncore frequency 184 | 185 | It is possible to set uncore frequency on a system-wide basis, per-package basis or per-die basis. Higher granularity 186 | configuration, i.e. per-die config, will always take precedence over per-package configuration. 187 | 188 | First create an uncore object. 189 | **Note:** due to driver limitations, the frequency will be rounded down to the nearest multiple of 100,000 kHz 190 | 191 | ````go 192 | uncore, err := NewUncore(2_000_000, 2_500_000) 193 | ```` 194 | 195 | The uncore object will be validated against hardware capabilities during creation. 196 | 197 | The uncore can now be applied system-wide, to a package, or to a die: 198 | 199 | ````go 200 | err := host.Topology().SetUncore(uncore) 201 | err := host.Topology().Package(0).Die(0).SetUncore(uncore) 202 | ```` 203 | 204 | # References 205 | 206 | - [Intel® Speed Select Technology - Core Power (Intel® SST-CP) Overview Technology Guide](https://networkbuilders.intel.com/solutionslibrary/intel-speed-select-technology-core-power-intel-sst-cp-overview-technology-guide) 207 | 208 | # License 209 | 210 | Apache 2.0 license, see [License](LICENSE) 211 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | 19 | 20 | # Security Policy 21 | 22 | ## Report a Vulnerability 23 | 24 | Please report security issues or vulnerabilities to the [Intel® Security Center]. 25 | 26 | For more information on how Intel® works to resolve security issues, see 27 | [Vulnerability Handling Guidelines].
28 | 29 | [Intel® Security Center]:https://www.intel.com/security 30 | 31 | [Vulnerability Handling Guidelines]:https://www.intel.com/content/www/us/en/security-center/vulnerability-handling-guidelines.html 32 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/intel/power-optimization-library 2 | 3 | go 1.22.3 4 | 5 | require ( 6 | github.com/go-logr/logr v1.4.1 7 | github.com/stretchr/testify v1.9.0 8 | ) 9 | 10 | require ( 11 | github.com/davecgh/go-spew v1.1.1 // indirect 12 | github.com/pmezard/go-difflib v1.0.0 // indirect 13 | github.com/stretchr/objx v0.5.2 // indirect 14 | gopkg.in/yaml.v3 v3.0.1 // indirect 15 | ) 16 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 2 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 3 | github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= 4 | github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= 5 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 6 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 7 | github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= 8 | github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= 9 | github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= 10 | github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 11 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= 12 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 13 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 14 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 15 | -------------------------------------------------------------------------------- /pkg/power/README.md: -------------------------------------------------------------------------------- 1 | # Under the hood 2 | 3 | ## Library objects 4 | 5 | ``Host`` - top-level object representing the physical machine that is being managed 6 | 7 | ``Pool`` - an object container for CPUs and a ``Profile``; if a CPU is in a Pool, the profile of that pool is 8 | applied to it (except when the associated Profile is ``nil``, in which case the CPU config is set to its default values) 9 | 10 | * ``Reserved Pool`` - CPUs that are in the reserved pool and will never be touched by the library. By definition no profile 11 | can be configured for that pool. This pool cannot be removed or created 12 | 13 | * ``Shared pool`` - CPUs that are not system reserved but don't belong to any Exclusive Pool. This pool can (but doesn't 14 | have to) have a profile associated. It cannot be removed or created 15 | 16 | ``Topology`` - an overarching object representing the entire system topology. Calls on this object are system-wide, e.g.
17 | ``topology.SetUncore()`` would apply to all dies on the system 18 | 19 | * ``Package`` - a physical Processor package as inserted into a socket, housing CPU Die(s) 20 | 21 | * ``Die`` - a piece of integrated circuit that contains any number of cores 22 | 23 | * ``Core`` - a physical CPU core 24 | 25 | * ``CPU`` - an object representing a logical CPU/compute unit/thread as seen by the operating system. If hyperthreading 26 | is enabled there are 2 ``CPUs`` per ``Core``, otherwise there is a 1-1 correspondence. 27 | 28 | ``Profile`` or ``Power Profile`` - stores desired P-State properties (Governor, EPP, Max/Min frequency) to be set for 29 | Cores in the pool. A ``Power Profile`` has to be associated with a ``Pool`` in order to be applied. 30 | 31 | ``C-States`` - a map storing the association of each C-State with its enablement state. ``CStates`` can be associated with a ``Pool`` 32 | or with a ``CPU``. ``CPU`` associations take precedence over ``Pool`` associations. 33 | 34 | ``Uncore`` - Object storing desired uncore max and min frequency, can be applied to ``Topology`` for system-wide, 35 | ``Package``, or ``Die``. ``Die`` uncore takes precedence over ``Package`` Uncore, which in turn takes precedence over ``Topology`` 36 | system-wide uncore. 37 | 38 | ## Objects description 39 | 40 | ## Host 41 | 42 | ```` 43 | Name string 44 | ExclusivePools []Pool 45 | SharedPool Pool 46 | PowerProfiles Profiles 47 | ```` 48 | 49 | The Name value is simply the name of the Host. 50 | 51 | The Exclusive Pools value holds the information for each of the Power Profiles that are available on the cluster. The 52 | options available for a Power Profile are Performance, Balance Performance, and Balance Power. A Pool is added to this 53 | list when the associated Power Profile is created on the host. This operation is undertaken by the Power Profile 54 | controller, which will add the Pool when it detects the creation or change of a Power Profile in the cluster, and will 55 | delete the Pool when a Power Profile is deleted. 56 | 57 | Each Exclusive Pool will hold the Cores associated with the associated Power Workload. When a Guaranteed Pod is created 58 | and the Cores are added to the correct Power Workload, the Power Workload controller will move the Core objects for that 59 | Pod from the Shared Pool into the correct Pool in this list. When a Core is added to a Pool, the maximum and minimum 60 | frequency values for the Core are changed in the object, and on the actual Node. 61 | 62 | The Shared Pool holds all the Cores that are in the Kubernetes’ shared pool. When the Power Library instance is created, 63 | this Shared Pool is populated automatically, taking in all the Cores on the Node, getting their absolute maximum and 64 | minimum frequencies, and creating the Shared Pool’s Core list. The IsReservedSystemCPU value will be explained in the 65 | Pool section. Initially - without the presence of a Shared Power Workload - every Core belongs to the Default Pool, or 66 | the Pool that does not have any Power Profile associated with it and does not tune the Cores’ frequencies. When a Shared 67 | Workload is created, the Cores that are specified to be part of the ReservedSystemCPUs subset are not tuned, while 68 | every other Core in the list is. When a Core for a Guaranteed Pod comes along, it is taken from the Shared Pool, and 69 | when the Pod is terminated, it is placed back in the Shared Pool.
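As a rough sketch of the flow described above, the library calls involved when a Guaranteed Pod's Cores move into an Exclusive Pool might look as follows. ``moveGuaranteedPodCpus`` is a hypothetical helper, ``power.Host`` is assumed to be the interface returned by ``CreateInstance``, and the ``nil`` return for an unknown pool name is an assumption rather than a documented contract:

````go
import (
	"fmt"

	"github.com/intel/power-optimization-library/pkg/power"
)

// moveGuaranteedPodCpus mimics what the Power Workload controller is described
// as doing: take the Pod's CPUs out of the Shared Pool and place them into the
// Exclusive Pool matching its Power Profile.
func moveGuaranteedPodCpus(host power.Host, poolName string, cpuIDs []uint) error {
	pool := host.GetExclusivePool(poolName)
	if pool == nil { // assumption: an unknown pool name yields nil
		return fmt.Errorf("no exclusive pool named %q", poolName)
	}
	// moving the CPUs also re-applies the target pool's maximum and minimum
	// frequency values, both in the objects and on the actual Node
	return pool.MoveCpuIDs(cpuIDs)
}
````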
70 | 71 | ## Pool 72 | 73 | ```` 74 | Name string 75 | Cores []Core 76 | PowerProfile Profile 77 | ```` 78 | 79 | The Name value is simply the name of the Pool, which will either be performance, balance-performance, balance-power, or 80 | shared. 81 | 82 | The Cores value is the list of Cores that are associated with that Pool. If it is an Exclusive Pool, Cores will be taken 83 | out of the Shared Pool when a Guaranteed Pod is created and its Cores are placed in the associated Power Workload. The 84 | operation of moving the Cores from the Shared Pool to the Exclusive Pool is done in the Power Workload controller when a 85 | Power Workload is created, updated, or deleted. 86 | 87 | The Shared Pool, while a singular Pool object, technically consists of two separate pools of Cores. The first pool is 88 | the Default pool, where no frequency tuning takes place on the Cores. The second is the Shared pool, which is associated 89 | with the Cores on the Node that will be tuned down to the lower frequencies of the Shared Power Profile. The Default 90 | pool is the initial pool created for the Shared Pool when the Power Library instance is created. Every Core on the 91 | system is a part of the Default pool at the beginning, with the MaximumFrequency value and the MinimumFrequency value of 92 | the Core object being set to the absolute maximum and absolute minimum values of the Core on the system respectively. 93 | The IsReservedSystemCPU value is set to True for each Core in the Default pool. 94 | 95 | When a Shared Power Workload is created by the user, the reservedCPUs flag in the Workload spec is used to determine 96 | which Cores are to be left alone and which are to be tuned to the Shared Power Profile’s lower frequency values. This is 97 | done by changing the IsReservedSystemCPU value in the Core object to False if the Core is not part of the Power 98 | Workload’s reservedCPUs list. When the Power Library runs through a Pool to change the frequencies of all Cores in its 99 | Core list, it skips over any Cores that have a True IsReservedSystemCPU value. 100 | 101 | The PowerProfile value is simply the name of the Power Profile that is associated with the Pool. It is only the string 102 | value of the name, not the actual Power Profile object, which can be retrieved through the Node’s PowerProfiles list. 103 | 104 | ## Profile 105 | 106 | ```` 107 | Name string 108 | Max int 109 | Min int 110 | Governor string 111 | Epp string 112 | ```` 113 | 114 | The Profile object is a replica of the Power Profile CRD. It’s just a way that the Power 115 | Library can get the information about a Power Profile without having to constantly query the Kubernetes API. 116 | The NewEcorePowerProfile constructor has 2 extra frequency fields called ``emin`` and ``emax`` and generates a profile 117 | that will set different frequencies based on the core type. This is intended to be used on systems that have performance and efficiency cores. 118 | 119 | ## CPU 120 | 121 | ```` 122 | ID int 123 | MinimumFreq int 124 | MaximumFreq int 125 | IsReservedSystemCPU bool 126 | ```` 127 | 128 | The ID value is simply the Core’s ID on the system. 129 | 130 | The MaximumFrequency value is the frequency you want placed in the Core’s 131 | /sys/devices/system/cpu/cpuN/cpufreq/scaling_max_freq file, which determines the maximum frequency the Core can run at.
132 | Initially, when the Power Library is initialized and each Core object is placed into the Shared Pool’s Core list, this 133 | value will take on the number in the Core’s cpuinfo_max_freq, which is the absolute maximum frequency the Core can run 134 | at. This value is taken, as when the Core’s scaling values are not changed, this is the value that will be in the 135 | scaling_max_freq file. 136 | 137 | The MinimumFrequency value is the frequency you want placed in the Core’s 138 | /sys/devices/system/cpu/cpuN/cpufreq/scaling_min_freq file, which determines the minimum frequency the Core can run at. 139 | Initially, when the Power Library is initialized and each Core object is placed into the Shared Pool’s Core list, this 140 | value will take on the number in the Core’s cpuinfo_min_freq, which is the absolute minimum frequency the Core can run 141 | at. This value is taken, as when the Core’s scaling values are not changed, this is the value that will be in the 142 | scaling_min_freq file. 143 | 144 | The MaximumFrequency and MinimumFrequency values are updated when a Core is placed into a new Pool. For example, when a 145 | Core goes from the Shared Pool to an Exclusive Pool, the values will be changed from - for example - 1500/1000 to 146 | 3500/3300. Then when the Cores are returned to the Shared Pool, they will revert from 3500/3300 to 1500/1000. 147 | 148 | The IsReservedSystemCPU is the value which is used to determine whether the Core’s frequency values should be changed on 149 | the system. If the value is True, when the Power Library updates the frequency values on the Node, the Core will be 150 | passed over and no changes will occur. The reason for this is to determine which Cores have been delegated as the 151 | Reserved System CPUs for Kubelet, whose frequency we don’t want to update, as those cores will always be doing 152 | work for Kubernetes. If there is no Shared Power Workload and a Core is taken out of the Shared Pool and given to an 153 | Exclusive Pool, when the Core is given back to the Shared Pool, its scaling frequencies will still be updated to the 154 | absolute maximum and minimum. There can never be an instance where a Core is taken out of the Shared Pool before a 155 | Shared Power Workload is created, and then returned after the Shared Power Workload is created, accidentally setting the 156 | Core’s maximum and minimum frequencies to the absolute values instead of those of the Shared Power Profile, as an 157 | Exclusive Pool will never be given a core from the system that is a part of the Reserved System CPU list. So when 158 | returned to the Shared Pool, if there is a Shared Power Workload available, it will take on that Workload’s values; if not, 159 | it is given the absolute values. 160 | 161 | # Features 162 | 163 | ## C-States 164 | 165 | To save energy on a system, you can command the CPU to go into a low-power mode. Each CPU has several power modes, which 166 | are collectively called C-States. These work by cutting the clock signal and power from idle CPUs, or CPUs that are not 167 | executing commands. While you save more energy by sending CPUs into deeper C-State modes, it does take more time for the 168 | CPU to fully “wake up” from sleep mode, so there is a tradeoff when it comes to deciding the depth of sleep. 169 | 170 | ### C-State Implementation in the Power Optimization Library 171 | 172 | The driver that is used for C-States is the intel_idle driver.
Everything associated with C-States in Linux is stored in 173 | the /sys/devices/system/cpu/cpuN/cpuidle directory or the /sys/devices/system/cpu/cpuidle directory. To check the driver in use, 174 | the user simply has to check the /sys/devices/system/cpu/cpuidle/current_driver file. 175 | 176 | Requested C-States have to be confirmed as actually active on the system. If a user requests any C-States, the library needs to 177 | check on the system whether they are activated and, if they are not, reject the PowerConfig. The C-States are found in 178 | /sys/devices/system/cpu/cpuN/cpuidle/stateN/. 179 | 180 | ### C-State Ranges 181 | 182 | ```` 183 | C0 Operating State 184 | C1 Halt 185 | C1E Enhanced Halt 186 | C2 Stop Grant 187 | C2E Extended Stop Grant 188 | C3 Deep Sleep 189 | C4 Deeper Sleep 190 | C4E/C5 Enhanced Deeper Sleep 191 | C6 Deep Power Down 192 | ```` 193 | 194 | ## Scaling Driver 195 | 196 | ### P-state 197 | The P-state governor feature allows the user to check if the P-state driver is enabled on the system. If the P-state 198 | driver is enabled while using the Kubernetes Power Manager, users may select a P-state governor per core; these are 199 | exposed as the "performance" and "powersave" governors in the Power Profiles. 200 | 201 | * Performance governor - The CPUfreq governor "performance" sets the CPU statically to the highest frequency within the 202 | borders of scaling_min_freq and scaling_max_freq. 203 | * Powersave governor - The CPUfreq governor "powersave" sets the CPU statically to the lowest frequency within the 204 | borders of scaling_min_freq and scaling_max_freq. 205 | 206 | ### acpi-cpufreq 207 | The acpi-cpufreq driver setting operates much like the P-state driver but has a different set of available governors. For more information see [here](https://www.kernel.org/doc/html/v4.12/admin-guide/pm/cpufreq.html). 208 | One thing to note is that acpi-cpufreq reports the base clock as the hardware frequency limit, however the P-state driver uses turbo frequency limits. 209 | Both drivers can make use of turbo frequency; however, acpi-cpufreq can exceed hardware frequency limits when using turbo frequency. This is important to take into account when setting frequencies for profiles. 210 | ## Topology 211 | 212 | Topology discovery is done via reading /sys/devices/system/cpu/cpuN/topology/{physical_package_id,die_id,core_id}. Based on 213 | the values there, the power library creates objects representing the CPU package, die and core, and associates them with the 214 | corresponding cores. This mapping is tree-like, where associations are as important as IDs when identifying objects. E.g. 215 | die 0 on package 0 is a different object to die 0 on package 216 | 1, ``topology().Package(0).Die(0) != topology().Package(1).Die(0)`` 217 | 218 | ## Uncore 219 | 220 | The power library provides an abstraction to manage Uncore frequency configuration. The driver allows setting 221 | frequencies on a die-by-die basis, and the library additionally allows setting the frequency system-wide and package-wide, with 222 | higher granularity objects taking precedence. \ 223 | The frequency setting is done via interacting with a kernel interface exposed by intel_uncore_frequency in 224 | /sys/devices/system/cpu/intel_uncore_frequency/package_0N_die_0N/.
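For reference, writing that interface directly (outside the library) looks roughly like the sketch below, assuming the ``min_freq_khz``/``max_freq_khz`` attribute names from the kernel documentation linked under References:

````go
import (
	"fmt"
	"os"
	"path/filepath"
)

// setUncoreLimits writes min/max uncore frequencies (in kHz) for one
// package/die pair via the intel_uncore_frequency sysfs interface.
func setUncoreLimits(pkg, die int, minKhz, maxKhz uint) error {
	dir := filepath.Join(
		"/sys/devices/system/cpu/intel_uncore_frequency",
		fmt.Sprintf("package_%02d_die_%02d", pkg, die),
	)
	for file, khz := range map[string]uint{
		"min_freq_khz": minKhz, // the driver rounds values down to a 100,000 kHz step
		"max_freq_khz": maxKhz,
	} {
		if err := os.WriteFile(filepath.Join(dir, file), []byte(fmt.Sprint(khz)), 0o644); err != nil {
			return err
		}
	}
	return nil
}
````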
225 | 226 | ### References 227 | 228 | * [Intel Uncore Frequency Scaling](https://www.kernel.org/doc/html/next/admin-guide/pm/intel_uncore_frequency_scaling.html) -------------------------------------------------------------------------------- /pkg/power/c_states.go: -------------------------------------------------------------------------------- 1 | package power 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | "regexp" 8 | "strconv" 9 | "strings" 10 | ) 11 | 12 | const ( 13 | cStatesDir = "cpuidle" 14 | cStateDisableFileFmt = cStatesDir + "/state%d/disable" 15 | cStateNameFileFmt = cStatesDir + "/state%d/name" 16 | cStatesDrvPath = cStatesDir + "/current_driver" 17 | ) 18 | 19 | type CStates map[string]bool 20 | 21 | func isSupportedCStatesDriver(driver string) bool { 22 | for _, s := range []string{"intel_idle", "acpi_idle"} { 23 | if driver == s { 24 | return true 25 | } 26 | } 27 | return false 28 | } 29 | 30 | // map of c-state name to state number path in the sysfs 31 | // populated during library initialisation 32 | var cStatesNamesMap = map[string]int{} 33 | 34 | // populated when mapping CStates 35 | var defaultCStates = CStates{} 36 | 37 | func initCStates() featureStatus { 38 | feature := featureStatus{ 39 | name: "C-States", 40 | initFunc: initCStates, 41 | } 42 | driver, err := readStringFromFile(filepath.Join(basePath, cStatesDrvPath)) 43 | driver = strings.TrimSuffix(driver, "\n") 44 | feature.driver = driver 45 | if err != nil { 46 | feature.err = fmt.Errorf("failed to determine driver: %w", err) 47 | return feature 48 | } 49 | if !isSupportedCStatesDriver(driver) { 50 | feature.err = fmt.Errorf("unsupported driver: %s", driver) 51 | return feature 52 | } 53 | feature.err = mapAvailableCStates() 54 | 55 | return feature 56 | } 57 | 58 | // sets cStatesNamesMap and defaultCStates 59 | func mapAvailableCStates() error { 60 | dirs, err := os.ReadDir(filepath.Join(basePath, "cpu0", cStatesDir)) 61 | if err != nil { 62 | return fmt.Errorf("could not open cpu0 C-States directory: %w", err) 63 | } 64 | 65 | cStateDirNameRegex := regexp.MustCompile(`state(\d+)`) 66 | for _, stateDir := range dirs { 67 | dirName := stateDir.Name() 68 | if !stateDir.IsDir() || !cStateDirNameRegex.MatchString(dirName) { 69 | log.Info("map C-States ignoring " + dirName) 70 | continue 71 | } 72 | stateNumber, err := strconv.Atoi(cStateDirNameRegex.FindStringSubmatch(dirName)[1]) 73 | if err != nil { 74 | return fmt.Errorf("failed to extract C-State number %s: %w", dirName, err) 75 | } 76 | 77 | stateName, err := readCpuStringProperty(0, fmt.Sprintf(cStateNameFileFmt, stateNumber)) 78 | if err != nil { 79 | return fmt.Errorf("could not read C-State %d name: %w", stateNumber, err) 80 | } 81 | 82 | cStatesNamesMap[stateName] = stateNumber 83 | defaultCStates[stateName] = true 84 | } 85 | log.V(3).Info("mapped C-states", "map", cStatesNamesMap) 86 | return nil 87 | } 88 | 89 | func validateCStates(states CStates) error { 90 | for name := range states { 91 | if _, exists := cStatesNamesMap[name]; !exists { 92 | return fmt.Errorf("c-state %s does not exist on this system", name) 93 | } 94 | } 95 | return nil 96 | } 97 | func (host *hostImpl) ValidateCStates(states CStates) error { 98 | return validateCStates(states) 99 | } 100 | 101 | func (host *hostImpl) AvailableCStates() []string { 102 | if !featureList.isFeatureIdSupported(CStatesFeature) { 103 | return []string{} 104 | } 105 | cStatesList := make([]string, 0) 106 | for name := range cStatesNamesMap { 107 | cStatesList = append(cStatesList, 
name) 108 | } 109 | return cStatesList 110 | } 111 | 112 | func (pool *poolImpl) SetCStates(states CStates) error { 113 | if !IsFeatureSupported(CStatesFeature) { 114 | return featureList.getFeatureIdError(CStatesFeature) 115 | } 116 | // check if requested states are on the system 117 | if err := validateCStates(states); err != nil { 118 | return err 119 | } 120 | pool.CStatesProfile = &states 121 | for _, cpu := range pool.cpus { 122 | if err := cpu.consolidate(); err != nil { 123 | return fmt.Errorf("failed to apply c-states: %w", err) 124 | } 125 | } 126 | return nil 127 | } 128 | 129 | func (pool *poolImpl) getCStates() *CStates { 130 | return pool.CStatesProfile 131 | } 132 | 133 | func (cpu *cpuImpl) SetCStates(cStates CStates) error { 134 | if !IsFeatureSupported(CStatesFeature) { 135 | return featureList.getFeatureIdError(CStatesFeature) 136 | } 137 | if err := validateCStates(cStates); err != nil { 138 | return err 139 | } 140 | cpu.cStates = &cStates 141 | return cpu.updateCStates() 142 | } 143 | func (cpu *cpuImpl) updateCStates() error { 144 | if !IsFeatureSupported(CStatesFeature) { 145 | return nil 146 | } 147 | if cpu.cStates != nil && *cpu.cStates != nil { 148 | return cpu.applyCStates(cpu.cStates) 149 | } 150 | if cpu.pool.getCStates() != nil { 151 | return cpu.applyCStates(cpu.pool.getCStates()) 152 | } 153 | return cpu.applyCStates(&defaultCStates) 154 | } 155 | 156 | func (cpu *cpuImpl) applyCStates(desiredCStates *CStates) error { 157 | for state, enabled := range *desiredCStates { 158 | stateFilePath := filepath.Join( 159 | basePath, 160 | fmt.Sprint("cpu", cpu.id), 161 | fmt.Sprintf(cStateDisableFileFmt, cStatesNamesMap[state]), 162 | ) 163 | content := make([]byte, 1) 164 | if enabled { 165 | content[0] = '0' // write '0' to enable the c state 166 | } else { 167 | content[0] = '1' // write '1' to disable the c state 168 | } 169 | if err := os.WriteFile(stateFilePath, content, 0644); err != nil { 170 | return fmt.Errorf("could not apply cstate %s on cpu %d: %w", state, cpu.id, err) 171 | } 172 | } 173 | return nil 174 | } 175 | -------------------------------------------------------------------------------- /pkg/power/c_states_test.go: -------------------------------------------------------------------------------- 1 | package power 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | "strings" 8 | "testing" 9 | 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | func setupCpuCStatesTests(cpufiles map[string]map[string]map[string]string) func() { 14 | origBasePath := basePath 15 | basePath = "testing/cpus" 16 | 17 | origGetNumOfCpusFunc := getNumberOfCpus 18 | getNumberOfCpus = func() uint { 19 | if _, ok := cpufiles["Driver"]; ok { 20 | return uint(len(cpufiles) - 1) 21 | } else { 22 | return uint(len(cpufiles)) 23 | } 24 | } 25 | 26 | featureList[CStatesFeature].err = nil 27 | for cpu, states := range cpufiles { 28 | if cpu == "Driver" { 29 | err := os.MkdirAll(filepath.Join(basePath, strings.Split(cStatesDrvPath, "/")[0]), os.ModePerm) 30 | if err != nil { 31 | panic(err) 32 | } 33 | for driver := range states { 34 | err := os.WriteFile(filepath.Join(basePath, cStatesDrvPath), []byte(driver), 0644) 35 | if err != nil { 36 | panic(err) 37 | } 38 | break 39 | } 40 | continue 41 | } 42 | cpuStatesDir := filepath.Join(basePath, cpu, cStatesDir) 43 | err := os.MkdirAll(filepath.Join(cpuStatesDir), os.ModePerm) 44 | if err != nil { 45 | panic(err) 46 | } 47 | for state, props := range states { 48 | err := os.Mkdir(filepath.Join(cpuStatesDir, state), 
os.ModePerm) 49 | if err != nil { 50 | //panic(err) 51 | } 52 | for propFile, value := range props { 53 | err := os.WriteFile(filepath.Join(cpuStatesDir, state, propFile), []byte(value), 0644) 54 | if err != nil { 55 | panic(err) 56 | } 57 | } 58 | } 59 | } 60 | 61 | return func() { 62 | err := os.RemoveAll(strings.Split(basePath, "/")[0]) 63 | if err != nil { 64 | panic(err) 65 | } 66 | basePath = origBasePath 67 | getNumberOfCpus = origGetNumOfCpusFunc 68 | cStatesNamesMap = map[string]int{} 69 | featureList[CStatesFeature].err = uninitialisedErr 70 | } 71 | } 72 | 73 | func Test_mapAvailableCStates(t *testing.T) { 74 | states := map[string]map[string]string{ 75 | "state0": {"name": "C0"}, 76 | "state1": {"name": "C1"}, 77 | "state2": {"name": "C2"}, 78 | "state3": {"name": "POLL"}, 79 | "notState": nil, 80 | } 81 | cpufiles := map[string]map[string]map[string]string{ 82 | "cpu0": states, 83 | "cpu1": states, 84 | } 85 | teardown := setupCpuCStatesTests(cpufiles) 86 | 87 | err := mapAvailableCStates() 88 | assert.NoError(t, err) 89 | 90 | assert.Equal(t, cStatesNamesMap, map[string]int{ 91 | "C0": 0, 92 | "C1": 1, 93 | "C2": 2, 94 | "POLL": 3, 95 | }) 96 | 97 | teardown() 98 | 99 | states["state0"] = nil 100 | teardown = setupCpuCStatesTests(cpufiles) 101 | 102 | err = mapAvailableCStates() 103 | 104 | assert.Error(t, err) 105 | 106 | teardown() 107 | 108 | states["state0"] = map[string]string{"name": "C0"} 109 | delete(cpufiles, "cpu0") 110 | teardown = setupCpuCStatesTests(cpufiles) 111 | 112 | assert.Error(t, mapAvailableCStates()) 113 | teardown() 114 | } 115 | 116 | func TestCStates_preCheckCStates(t *testing.T) { 117 | teardown := setupCpuCStatesTests(map[string]map[string]map[string]string{ 118 | "cpu0": nil, 119 | "Driver": {"intel_idle\n": nil}, 120 | }) 121 | defer teardown() 122 | state := initCStates() 123 | assert.Equal(t, "C-States", state.name) 124 | assert.Equal(t, "intel_idle", state.driver) 125 | assert.Nil(t, state.FeatureError()) 126 | teardown() 127 | 128 | teardown = setupCpuCStatesTests(map[string]map[string]map[string]string{ 129 | "Driver": {"something": nil}, 130 | }) 131 | feature := initCStates() 132 | assert.ErrorContains(t, feature.FeatureError(), "unsupported") 133 | assert.Equal(t, "something", feature.driver) 134 | teardown() 135 | } 136 | 137 | func TestCpuImpl_applyCStates(t *testing.T) { 138 | states := map[string]map[string]string{ 139 | "state0": {"name": "C0", "disable": "0"}, 140 | "state2": {"name": "C2", "disable": "0"}, 141 | } 142 | cpufiles := map[string]map[string]map[string]string{ 143 | "cpu0": states, 144 | } 145 | defer setupCpuCStatesTests(cpufiles)() 146 | cStatesNamesMap = map[string]int{ 147 | "C2": 2, 148 | "C0": 0, 149 | } 150 | err := (&cpuImpl{id: 0}).applyCStates(&CStates{ 151 | "C0": false, 152 | "C2": true}) 153 | 154 | assert.NoError(t, err) 155 | 156 | stateFilePath := filepath.Join( 157 | basePath, 158 | fmt.Sprint("cpu", 0), 159 | fmt.Sprintf(cStateDisableFileFmt, 0), 160 | ) 161 | disabled, _ := readStringFromFile(stateFilePath) 162 | assert.Equal(t, "1", disabled) 163 | 164 | stateFilePath = filepath.Join( 165 | basePath, 166 | fmt.Sprint("cpu", 0), 167 | fmt.Sprintf(cStateDisableFileFmt, 2), 168 | ) 169 | disabled, _ = readStringFromFile(stateFilePath) 170 | assert.Equal(t, "0", disabled) 171 | } 172 | 173 | func TestValidateCStates(t *testing.T) { 174 | defer setupCpuCStatesTests(nil)() 175 | 176 | cStatesNamesMap = map[string]int{ 177 | "C0": 0, 178 | "C2": 2, 179 | "C3": 3, 180 | } 181 | 182 | assert.NoError(t, 
validateCStates(CStates{ 183 | "C0": true, 184 | "C2": false, 185 | })) 186 | 187 | assert.ErrorContains(t, validateCStates(CStates{ 188 | "C9": false, 189 | }), "does not exist on this system") 190 | } 191 | 192 | func TestHostImpl_AvailableCStates(t *testing.T) { 193 | cStatesNamesMap = map[string]int{ 194 | "C1": 1, 195 | "C2": 2, 196 | "C3": 3, 197 | } 198 | host := &hostImpl{} 199 | assert.Empty(t, host.AvailableCStates()) 200 | defer setupCpuCStatesTests(nil)() 201 | 202 | assert.ElementsMatch(t, host.AvailableCStates(), []string{"C1", "C2", "C3"}) 203 | } 204 | 205 | func TestPoolImpl_SetCStates(t *testing.T) { 206 | core1 := new(cpuMock) 207 | core1.On("consolidate").Return(nil) 208 | 209 | core2 := new(cpuMock) 210 | pool := &poolImpl{ 211 | cpus: CpuList{core1}, 212 | } 213 | // cstates not supported 214 | assert.ErrorIs(t, pool.SetCStates(nil), uninitialisedErr) 215 | core1.AssertNotCalled(t, "consolidate") 216 | core2.AssertNotCalled(t, "consolidate") 217 | defer setupCpuCStatesTests(nil)() 218 | 219 | // all good 220 | cStatesNamesMap = map[string]int{ 221 | "C0": 0, 222 | } 223 | assert.NoError(t, pool.SetCStates(CStates{"C0": true})) 224 | core1.AssertExpectations(t) 225 | core2.AssertNotCalled(t, "consolidate") 226 | 227 | //consolidate failed 228 | core1 = new(cpuMock) 229 | pool.cpus = CpuList{core1} 230 | core1.On("consolidate").Return(fmt.Errorf("consolidate failed")) 231 | assert.ErrorContains(t, pool.SetCStates(CStates{"C0": true}), "failed to apply c-states: consolidate failed") 232 | } 233 | 234 | func TestCpuImpl_updateCStates(t *testing.T) { 235 | core := &cpuImpl{id: 0} 236 | // cstates feature not supported 237 | assert.NoError(t, core.updateCStates()) 238 | 239 | defer setupCpuCStatesTests(map[string]map[string]map[string]string{ 240 | "cpu0": { 241 | "state0": {"name": "C0", "disable": "0"}, 242 | "state1": {"name": "C1", "disable": "0"}, 243 | }, 244 | })() 245 | 246 | cStatesNamesMap["C0"] = 0 247 | cStatesNamesMap["C1"] = 1 248 | 249 | stateFilePath := filepath.Join( 250 | basePath, 251 | fmt.Sprint("cpu", 0), 252 | fmt.Sprintf(cStateDisableFileFmt, 0), 253 | ) 254 | 255 | // read core property 256 | core.cStates = &CStates{"C0": false} 257 | assert.NoError(t, core.updateCStates()) 258 | value, _ := os.ReadFile(stateFilePath) 259 | assert.Equal(t, "1", string(value), "expecting cstate to be disabled") 260 | 261 | // read pool property 262 | pool := new(poolMock) 263 | pool.On("getCStates").Return(&CStates{"C0": true}) 264 | core.pool = pool 265 | core.cStates = nil 266 | assert.NoError(t, core.updateCStates()) 267 | value, _ = os.ReadFile(stateFilePath) 268 | assert.Equal(t, "0", string(value), "expecting cstate to be enabled") 269 | pool.AssertExpectations(t) 270 | 271 | // default 272 | defaultCStates = CStates{"C0": false} 273 | pool = new(poolMock) 274 | pool.On("getCStates").Return(nil) 275 | core.pool = pool 276 | assert.NoError(t, core.updateCStates()) 277 | value, _ = os.ReadFile(stateFilePath) 278 | assert.Equal(t, "1", string(value), "expecting cstate to be disabled") 279 | pool.AssertExpectations(t) 280 | } 281 | 282 | func TestCpuImpl_SetCStates(t *testing.T) { 283 | pool := new(poolMock) 284 | pool.On("getCStates").Return(nil) 285 | core := &cpuImpl{ 286 | id: 0, 287 | pool: pool, 288 | } 289 | assert.ErrorIs(t, core.SetCStates(nil), uninitialisedErr) 290 | defer setupCpuCStatesTests(map[string]map[string]map[string]string{ 291 | "cpu0": { 292 | "state0": {"name": "C0", "disable": "0"}, 293 | }, 294 | })() 295 | assert.NoError(t, 
core.SetCStates(nil)) 296 | 297 | } 298 | -------------------------------------------------------------------------------- /pkg/power/cpu.go: -------------------------------------------------------------------------------- 1 | package power 2 | 3 | import ( 4 | "fmt" 5 | "path/filepath" 6 | "strings" 7 | "sync" 8 | ) 9 | 10 | const ( 11 | numOfSupportedCoreTypes uint = 2 12 | ) 13 | 14 | // uints are references to an array index of frequency sets 15 | type supportedCores struct { 16 | pcore uint 17 | ecore uint 18 | } 19 | 20 | func (c *supportedCores) Pcore() uint { 21 | return c.pcore 22 | } 23 | func (c *supportedCores) Ecore() uint { 24 | return c.ecore 25 | } 26 | 27 | // public instance with read only access to supported core types 28 | var CpuTypeReferences = supportedCores{} 29 | 30 | // Cpu represents a compute unit/thread as seen by the OS 31 | // it is either a physical core or a virtual thread if hyperthreading/SMT is enabled 32 | type Cpu interface { 33 | GetID() uint 34 | GetAbsMinMax() (uint, uint) 35 | SetPool(pool Pool) error 36 | 37 | getPool() Pool 38 | doSetPool(pool Pool) error 39 | consolidate() error 40 | consolidate_unsafe() error 41 | GetCore() Core 42 | // C-States stuff 43 | SetCStates(cStates CStates) error 44 | 45 | // used only to set initial pool when creating core instance 46 | _setPoolProperty(pool Pool) 47 | } 48 | 49 | type cpuImpl struct { 50 | id uint 51 | mutex sync.Locker 52 | pool Pool 53 | core Core 54 | // C-States properties 55 | cStates *CStates 56 | } 57 | 58 | func newCpu(coreID uint, core Core) (Cpu, error) { 59 | if featureList.isFeatureIdSupported(FrequencyScalingFeature) { 60 | min, max, err := readCpuFreqLimits(coreID) 61 | if err != nil { 62 | return &cpuImpl{}, err 63 | } 64 | cType := coreTypes.appendIfUnique(min, max) 65 | core.setType(cType) 66 | } 67 | cpu := &cpuImpl{ 68 | id: coreID, 69 | mutex: &sync.Mutex{}, 70 | core: core, 71 | } 72 | 73 | return cpu, nil 74 | } 75 | 76 | func (cpu *cpuImpl) consolidate() error { 77 | cpu.mutex.Lock() 78 | defer cpu.mutex.Unlock() 79 | return cpu.consolidate_unsafe() 80 | } 81 | func (cpu *cpuImpl) consolidate_unsafe() error { 82 | if err := cpu.updateFrequencies(); err != nil { 83 | return err 84 | } 85 | if err := cpu.updateCStates(); err != nil { 86 | return err 87 | } 88 | return nil 89 | } 90 | 91 | // SetPool moves current core to a specified target pool 92 | // allowed movements are reservedPoolType <-> sharedPoolType and sharedPoolType <-> any exclusive pool 93 | func (cpu *cpuImpl) SetPool(targetPool Pool) error { 94 | /* 95 | case 0: current and target pool are the same -> do nothing 96 | 97 | case 1: target = reserved, current = reserved -> case 0 98 | case 2: target = reserved, current = shared -> do it 99 | case 3: target = reserved, current = exclusive -> error 100 | 101 | case 4: target = shared, current = exclusive -> do it 102 | case 5: target = shared, current = shared -> case 0 103 | case 6: target = shared, current = reserved -> do it 104 | 105 | case 7: target = exclusive, current = other exclusive -> error 106 | case 8: target = exclusive, current = shared -> do it 107 | case 9: target = exclusive, current = reserved -> error 108 | 109 | */ 110 | if targetPool == nil { 111 | return fmt.Errorf("target pool cannot be nil") 112 | } 113 | 114 | log.Info("Set pool", "cpu", cpu.id, "source pool", cpu.pool.Name(), "target pool", targetPool.Name()) 115 | cpu.mutex.Lock() 116 | defer cpu.mutex.Unlock() 117 | 118 | if cpu.pool == targetPool { // case 0,1,5 119 | return nil 120 | } 121
| reservedPool := cpu.pool.getHost().GetReservedPool() 122 | sharedPool := cpu.pool.getHost().GetSharedPool() 123 | if cpu.pool == reservedPool && targetPool.isExclusive() { // case 3 124 | return fmt.Errorf("cannot move from reserved to exclusive pool") 125 | } 126 | 127 | if cpu.pool.isExclusive() && targetPool.isExclusive() { // case 7 128 | return fmt.Errorf("cannot move exclusive to different exclusive pool") 129 | } 130 | 131 | if cpu.pool.isExclusive() && targetPool == reservedPool { // case 9 132 | return fmt.Errorf("cannot move from exclusive to reserved") 133 | } 134 | 135 | // cases 2,4,6,8 (case 5 already returned at the identity check above) 136 | if targetPool == sharedPool || cpu.pool == sharedPool { 137 | return cpu.doSetPool(targetPool) 138 | } 139 | panic("we should never get here") 140 | } 141 | 142 | func (cpu *cpuImpl) doSetPool(pool Pool) error { 143 | cpu.pool.poolMutex().Lock() 144 | pool.poolMutex().Lock() 145 | log.V(4).Info("acquired mutexes", "source", cpu.pool.Name(), "target", pool.Name(), "cpu", cpu.id) 146 | 147 | origPool := cpu.pool 148 | cpu.pool = pool 149 | 150 | defer func() { 151 | log.V(4).Info("releasing mutexes", "source", origPool.Name(), "target", pool.Name()) 152 | origPool.poolMutex().Unlock() 153 | pool.poolMutex().Unlock() 154 | }() 155 | 156 | origPoolCpus := origPool.Cpus() 157 | log.V(4).Info("removing cpu from pool", "pool", origPool.Name(), "coreID", cpu.id) 158 | if err := origPoolCpus.remove(cpu); err != nil { 159 | cpu.pool = origPool 160 | return err 161 | } 162 | 163 | log.V(4).Info("starting consolidation of cpu", "coreID", cpu.id) 164 | if err := cpu.consolidate_unsafe(); err != nil { 165 | cpu.pool = origPool 166 | origPoolCpus.add(cpu) 167 | return err 168 | } 169 | 170 | newPoolCpus := cpu.pool.Cpus() 171 | newPoolCpus.add(cpu) 172 | return nil 173 | } 174 | 175 | func (cpu *cpuImpl) getPool() Pool { 176 | return cpu.pool 177 | } 178 | 179 | func (cpu *cpuImpl) GetID() uint { 180 | return cpu.id 181 | } 182 | 183 | func (cpu *cpuImpl) GetAbsMinMax() (uint, uint) { 184 | // return 0,0 to prevent indexing error on coretype 185 | if !featureList.isFeatureIdSupported(FrequencyScalingFeature) { 186 | return 0, 0 187 | } 188 | typeNum := cpu.core.GetType() 189 | return coreTypes[typeNum].GetMin(), coreTypes[typeNum].GetMax() 190 | } 191 | 192 | func (cpu *cpuImpl) GetCore() Core { 193 | return cpu.core 194 | } 195 | 196 | func (cpu *cpuImpl) _setPoolProperty(pool Pool) { 197 | cpu.pool = pool 198 | } 199 | 200 | // reads a property of a specific CPU as a uint; takes the CPU id and a path to a specific file within the cpu subdirectory in sysfs 201 | func readCpuUintProperty(cpuID uint, file string) (uint, error) { 202 | path := filepath.Join(basePath, fmt.Sprint("cpu", cpuID), file) 203 | return readUintFromFile(path) 204 | } 205 | 206 | // reads content of a file and returns it as a string 207 | func readCpuStringProperty(cpuID uint, file string) (string, error) { 208 | path := filepath.Join(basePath, fmt.Sprint("cpu", cpuID), file) 209 | value, err := readStringFromFile(path) 210 | if err != nil { 211 | return "", fmt.Errorf("failed to read cpuCore %d string property: %w", cpuID, err) 212 | } 213 | value = strings.TrimSuffix(value, "\n") 214 | return value, nil 215 | } 216 | 217 | // reads the min and max frequency of a CPU 218 | func readCpuFreqLimits(id uint) (uint, uint, error) { 219 | maxFreq, err := readCpuUintProperty(id, cpuMaxFreqFile) 220 | if err != nil { 221 | return 0, 0, err 222 | } 223 | minFreq, err := readCpuUintProperty(id, cpuMinFreqFile) 224 | if err != nil { 225 | return 0, 0, err
226 | } 227 | return minFreq, maxFreq, nil 228 | } 229 | 230 | type CpuList []Cpu 231 | 232 | func (cpus *CpuList) IndexOf(cpu Cpu) int { 233 | for i, c := range *cpus { 234 | if c == cpu { 235 | return i 236 | } 237 | } 238 | return -1 239 | } 240 | 241 | func (cpus *CpuList) Contains(cpu Cpu) bool { 242 | if cpus.IndexOf(cpu) < 0 { 243 | return false 244 | } else { 245 | return true 246 | } 247 | } 248 | func (cpus *CpuList) add(cpu Cpu) { 249 | *cpus = append(*cpus, cpu) 250 | } 251 | func (cpus *CpuList) remove(cpu Cpu) error { 252 | index := cpus.IndexOf(cpu) 253 | if index < 0 { 254 | return fmt.Errorf("cpu %d is not in pool", cpu.GetID()) 255 | } 256 | size := len(*cpus) - 1 257 | (*cpus)[index] = (*cpus)[size] 258 | *cpus = (*cpus)[:size] 259 | return nil 260 | } 261 | func (cpus *CpuList) IDs() []uint { 262 | ids := make([]uint, len(*cpus)) 263 | for i, cpu := range *cpus { 264 | ids[i] = cpu.GetID() 265 | } 266 | return ids 267 | } 268 | func (cpus *CpuList) ByID(id uint) Cpu { 269 | index := int(id) 270 | // first we try index == cpuId 271 | if len(*cpus) > index && (*cpus)[index].GetID() == id { 272 | return (*cpus)[index] 273 | } 274 | // if that doesn't work we fall back to looping 275 | for _, cpu := range *cpus { 276 | if cpu.GetID() == id { 277 | return cpu 278 | } 279 | } 280 | return nil 281 | } 282 | func (cpus *CpuList) ManyByIDs(ids []uint) (CpuList, error) { 283 | targets := make(CpuList, len(ids)) 284 | 285 | for i, id := range ids { 286 | cpu := cpus.ByID(id) 287 | if cpu == nil { 288 | return nil, fmt.Errorf("cpu with id %d not in list", id) 289 | } 290 | targets[i] = cpu 291 | } 292 | return targets, nil 293 | } 294 |
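// Note added for illustration (not part of the original file): a minimal sketch of how CpuList is
// typically consumed through the public API. ByID takes the O(1) fast path when the list index equals
// the CPU id, which holds for the topology-discovered list; remove is a swap-delete, so callers must
// not rely on CpuList ordering.
//
//	cpus := host.GetAllCpus()
//	if cpu := cpus.ByID(3); cpu != nil {
//		min, max := cpu.GetAbsMinMax()
//		fmt.Printf("cpu%d abs range: %d-%d kHz\n", cpu.GetID(), min, max)
//	}
//	if subset, err := cpus.ManyByIDs([]uint{0, 1}); err == nil {
//		_ = host.GetSharedPool().MoveCpus(subset)
//	}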
-------------------------------------------------------------------------------- /pkg/power/cpu_test.go: -------------------------------------------------------------------------------- 1 | package power 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | "strconv" 8 | "strings" 9 | "sync" 10 | "testing" 11 | 12 | "github.com/stretchr/testify/assert" 13 | "github.com/stretchr/testify/mock" 14 | ) 15 | 16 | type cpuMock struct { 17 | mock.Mock 18 | } 19 | 20 | func (m *cpuMock) SetCStates(cStates CStates) error { 21 | return m.Called(cStates).Error(0) 22 | } 23 | 24 | func (m *cpuMock) _setPoolProperty(pool Pool) { 25 | m.Called(pool) 26 | } 27 | func (m *cpuMock) consolidate() error { 28 | return m.Called().Error(0) 29 | } 30 | func (m *cpuMock) consolidate_unsafe() error { 31 | return m.Called().Error(0) 32 | } 33 | func (m *cpuMock) doSetPool(pool Pool) error { 34 | return m.Called(pool).Error(0) 35 | } 36 | func (m *cpuMock) GetID() uint { 37 | args := m.Called() 38 | return args.Get(0).(uint) 39 | } 40 | func (m *cpuMock) GetAbsMinMax() (uint, uint) { 41 | args := m.Called() 42 | return args.Get(0).(uint), args.Get(1).(uint) 43 | } 44 | 45 | func (m *cpuMock) getPool() Pool { 46 | args := m.Called().Get(0) 47 | if args == nil { 48 | return nil 49 | } else { 50 | return args.(Pool) 51 | } 52 | } 53 | 54 | func (m *cpuMock) GetCore() Core { 55 | return m.Called().Get(0).(Core) 56 | } 57 | 58 | func (m *cpuMock) SetPool(pool Pool) error { 59 | return m.Called(pool).Error(0) 60 | } 61 | 62 | type mutexMock struct { 63 | mock.Mock 64 | } 65 | 66 | func (m *mutexMock) Lock() { 67 | m.Called() 68 | } 69 | 70 | func (m *mutexMock) Unlock() { 71 | m.Called() 72 | } 73 | func setupCpuScalingTests(cpufiles map[string]map[string]string) func() { 74 | origBasePath := basePath 75 | basePath = "testing/cpus" 76 | defaultDefaultPowerProfile := defaultPowerProfile 77 | typeCopy := coreTypes 78 | referenceCopy := CpuTypeReferences 79 | // backup pointer to function that gets all CPUs 80 | // replace it with our controlled function 81 | origGetNumOfCpusFunc := getNumberOfCpus 82 | getNumberOfCpus = func() uint { return uint(len(cpufiles)) } 83 | 84 | // "initialise" P-States feature 85 | featureList[FrequencyScalingFeature].err = nil 86 | 87 | // if cpu0 is here we set its values to temporary defaultPowerProfile 88 | if cpu0, ok := cpufiles["cpu0"]; ok { 89 | defaultPowerProfile = &profileImpl{} 90 | if max, ok := cpu0["max"]; ok { 91 | max, _ := strconv.Atoi(max) 92 | defaultPowerProfile.max = uint(max) 93 | } 94 | if min, ok := cpu0["min"]; ok { 95 | min, _ := strconv.Atoi(min) 96 | defaultPowerProfile.min = uint(min) 97 | } 98 | if governor, ok := cpu0["governor"]; ok { 99 | defaultPowerProfile.governor = governor 100 | } 101 | if epp, ok := cpu0["epp"]; ok { 102 | defaultPowerProfile.epp = epp 103 | } 104 | } 105 | for cpuName, cpuDetails := range cpufiles { 106 | cpudir := filepath.Join(basePath, cpuName) 107 | os.MkdirAll(filepath.Join(cpudir, "cpufreq"), os.ModePerm) 108 | os.MkdirAll(filepath.Join(cpudir, "topology"), os.ModePerm) 109 | for prop, value := range cpuDetails { 110 | switch prop { 111 | case "driver": 112 | os.WriteFile(filepath.Join(cpudir, pStatesDrvFile), []byte(value+"\n"), 0664) 113 | case "max": 114 | os.WriteFile(filepath.Join(cpudir, scalingMaxFile), []byte(value+"\n"), 0644) 115 | os.WriteFile(filepath.Join(cpudir, cpuMaxFreqFile), []byte(value+"\n"), 0644) 116 | case "min": 117 | os.WriteFile(filepath.Join(cpudir, scalingMinFile), []byte(value+"\n"), 0644) 118 | os.WriteFile(filepath.Join(cpudir, cpuMinFreqFile), []byte(value+"\n"), 0644) 119 | case "package": 120 | os.WriteFile(filepath.Join(cpudir, packageIdFile), []byte(value+"\n"), 0644) 121 | case "die": 122 | os.WriteFile(filepath.Join(cpudir, dieIdFile), []byte(value+"\n"), 0644) 123 | os.WriteFile(filepath.Join(cpudir, coreIdFile), []byte(cpuName[3:]+"\n"), 0644) 124 | case "epp": 125 | os.WriteFile(filepath.Join(cpudir, eppFile), []byte(value+"\n"), 0644) 126 | case "governor": 127 | os.WriteFile(filepath.Join(cpudir, scalingGovFile), []byte(value+"\n"), 0644) 128 | case "available_governors": 129 | os.WriteFile(filepath.Join(cpudir, availGovFile), []byte(value+"\n"), 0644) 130 | } 131 | } 132 | } 133 | return func() { 134 | // wipe created cpus dir 135 | os.RemoveAll(strings.Split(basePath, "/")[0]) 136 | // revert cpu /sys path 137 | basePath = origBasePath 138 | // revert get number of system cpus function 139 | getNumberOfCpus = origGetNumOfCpusFunc 140 | // revert scaling driver feature to uninitialised state 141 | featureList[FrequencyScalingFeature].err = uninitialisedErr 142 | coreTypes = typeCopy 143 | CpuTypeReferences = referenceCopy 144 | // revert default powerProfile 145 | defaultPowerProfile = defaultDefaultPowerProfile 146 | } 147 | } 148 | 149 | func TestNewCore(t *testing.T) { 150 | cpufiles := map[string]map[string]string{ 151 | "cpu0": { 152 | "max": "123", 153 | "min": "100", 154 | "epp": "some", 155 | }, 156 | } 157 | defer setupCpuScalingTests(cpufiles)() 158 | 159 | // happy path - ensure values from files are read correctly 160 | core := &cpuCore{} 161 | cpu, err := newCpu(0, core) 162 | assert.NoError(t, err) 163 | 164 | assert.NotNil(t, cpu.(*cpuImpl).mutex) 165 | // we don't want to compare value of new mutex, so we set it to nil 166 | cpu.(*cpuImpl).mutex = nil 167 | assert.Equal(t, &cpuImpl{ 168 | id: 0, 169 |
core: core, 170 | }, cpu) 171 | // now "break" scaling driver by setting a feature error 172 | featureList[FrequencyScalingFeature].err = fmt.Errorf("some error") 173 | 174 | cpu, err = newCpu(0, nil) 175 | 176 | assert.NoError(t, err) 177 | 178 | assert.NotNil(t, cpu.(*cpuImpl).mutex) 179 | // Ensure P-States stuff was never read by ensuring related properties are 0 180 | cpu.(*cpuImpl).mutex = nil 181 | assert.Equal(t, &cpuImpl{ 182 | id: 0, 183 | }, cpu) 184 | } 185 | 186 | func TestCpuImpl_SetPool(t *testing.T) { 187 | // feature errors are set so functions inside consolidate() return without doing anything 188 | var cpuMutex *mutexMock 189 | host := new(hostMock) 190 | 191 | sharedPool := new(poolMock) 192 | sharedPool.On("isExclusive").Return(false) 193 | sharedPool.On("getHost").Return(host) 194 | sharedPool.On("Name").Return("shared") 195 | sharedPoolCores := make(CpuList, 8) 196 | sharedPool.On("Cpus").Return(&sharedPoolCores) 197 | sharedPool.On("poolMutex").Return(&sync.Mutex{}) 198 | 199 | reservedPool := new(poolMock) 200 | reservedPool.On("isExclusive").Return(false) 201 | reservedPool.On("getHost").Return(host) 202 | reservedPool.On("Name").Return("reserved") 203 | reservedPoolCores := make(CpuList, 8) 204 | reservedPool.On("Cpus").Return(&reservedPoolCores) 205 | reservedPool.On("poolMutex").Return(&sync.Mutex{}) 206 | 207 | host.On("GetReservedPool").Return(reservedPool) 208 | host.On("GetSharedPool").Return(sharedPool) 209 | 210 | exclusivePool1 := new(poolMock) 211 | exclusivePool1.On("isExclusive").Return(true) 212 | exclusivePool1.On("getHost").Return(host) 213 | exclusivePool1.On("Name").Return("excl1") 214 | exclusivePool1Cores := make(CpuList, 8) 215 | exclusivePool1.On("Cpus").Return(&exclusivePool1Cores) 216 | exclusivePool1.On("poolMutex").Return(&sync.Mutex{}) 217 | 218 | exclusivePool2 := new(poolMock) 219 | exclusivePool2.On("isExclusive").Return(true) 220 | exclusivePool2.On("getHost").Return(host) 221 | exclusivePool2.On("Name").Return("excl2") 222 | exclusivePool2Cores := make(CpuList, 8) 223 | exclusivePool2.On("Cpus").Return(&exclusivePool2Cores) 224 | exclusivePool2.On("poolMutex").Return(&sync.Mutex{}) 225 | 226 | cpu := &cpuImpl{ 227 | id: 0, 228 | pool: sharedPool, 229 | } 230 | // nil pool 231 | // in this scenario we don't expect lock to be acquired 232 | cpu.mutex = new(mutexMock) 233 | assert.ErrorContains(t, cpu.SetPool(nil), "cannot be nil") 234 | 235 | // current == target pool, case 0 236 | cpuMutex = new(mutexMock) 237 | cpuMutex.On("Unlock").Return().NotBefore( 238 | cpuMutex.On("Lock").Return(), 239 | ) 240 | cpu.mutex = cpuMutex 241 | assert.NoError(t, cpu.SetPool(sharedPool)) 242 | sharedPool.AssertNotCalled(t, "isExclusive") 243 | assert.True(t, cpu.pool == sharedPool) 244 | cpuMutex.AssertExpectations(t) 245 | 246 | // shared to reserved 247 | cpuMutex = new(mutexMock) 248 | cpuMutex.On("Unlock").Return().NotBefore( 249 | cpuMutex.On("Lock").Return(), 250 | ) 251 | cpu.mutex = cpuMutex 252 | sharedPoolCores[0] = cpu 253 | cpu.pool = sharedPool 254 | assert.NoError(t, cpu.SetPool(reservedPool)) 255 | assert.True(t, cpu.pool == reservedPool) 256 | cpuMutex.AssertExpectations(t) 257 | 258 | // shared to shared 259 | cpuMutex = new(mutexMock) 260 | cpuMutex.On("Unlock").Return().NotBefore( 261 | cpuMutex.On("Lock").Return(), 262 | ) 263 | cpu.mutex = cpuMutex 264 | cpu.pool = sharedPool 265 | sharedPoolCores[0] = cpu 266 | assert.NoError(t, cpu.SetPool(sharedPool)) 267 | assert.True(t, cpu.pool == sharedPool) 268 | 
cpuMutex.AssertExpectations(t) 269 | 270 | // shared to exclusive 271 | cpuMutex = new(mutexMock) 272 | cpuMutex.On("Unlock").Return().NotBefore( 273 | cpuMutex.On("Lock").Return(), 274 | ) 275 | cpu.mutex = cpuMutex 276 | cpu.pool = sharedPool 277 | sharedPoolCores[0] = cpu 278 | assert.NoError(t, cpu.SetPool(exclusivePool1)) 279 | assert.True(t, cpu.pool == exclusivePool1) 280 | cpuMutex.AssertExpectations(t) 281 | 282 | // reserved to reserved 283 | cpuMutex = new(mutexMock) 284 | cpuMutex.On("Unlock").Return().NotBefore( 285 | cpuMutex.On("Lock").Return(), 286 | ) 287 | cpu.mutex = cpuMutex 288 | cpu.pool = reservedPool 289 | reservedPoolCores[0] = cpu 290 | assert.NoError(t, cpu.SetPool(reservedPool)) 291 | assert.True(t, cpu.pool == reservedPool) 292 | cpuMutex.AssertExpectations(t) 293 | 294 | // reserved to shared 295 | cpuMutex = new(mutexMock) 296 | cpuMutex.On("Unlock").Return().NotBefore( 297 | cpuMutex.On("Lock").Return(), 298 | ) 299 | cpu.mutex = cpuMutex 300 | cpu.pool = reservedPool 301 | reservedPoolCores[0] = cpu 302 | assert.NoError(t, cpu.SetPool(sharedPool)) 303 | assert.True(t, cpu.pool == sharedPool) 304 | cpuMutex.AssertExpectations(t) 305 | 306 | // reserved to exclusive 307 | cpuMutex = new(mutexMock) 308 | cpuMutex.On("Unlock").Return().NotBefore( 309 | cpuMutex.On("Lock").Return(), 310 | ) 311 | cpu.mutex = cpuMutex 312 | cpu.pool = reservedPool 313 | reservedPoolCores[0] = cpu 314 | assert.ErrorContains(t, cpu.SetPool(exclusivePool1), "reserved to exclusive") 315 | assert.True(t, cpu.pool == reservedPool) 316 | cpuMutex.AssertExpectations(t) 317 | 318 | // exclusive to reserved 319 | cpuMutex = new(mutexMock) 320 | cpuMutex.On("Unlock").Return().NotBefore( 321 | cpuMutex.On("Lock").Return(), 322 | ) 323 | cpu.mutex = cpuMutex 324 | cpu.pool = exclusivePool1 325 | exclusivePool1Cores[0] = cpu 326 | assert.ErrorContains(t, cpu.SetPool(reservedPool), "exclusive to reserved") 327 | assert.True(t, cpu.pool == exclusivePool1) 328 | cpuMutex.AssertExpectations(t) 329 | 330 | // exclusive to shared 331 | cpuMutex = new(mutexMock) 332 | cpuMutex.On("Unlock").Return().NotBefore( 333 | cpuMutex.On("Lock").Return(), 334 | ) 335 | cpu.mutex = cpuMutex 336 | cpu.pool = exclusivePool1 337 | exclusivePool1Cores[0] = cpu 338 | assert.NoError(t, cpu.SetPool(sharedPool)) 339 | assert.True(t, cpu.pool == sharedPool) 340 | cpuMutex.AssertExpectations(t) 341 | 342 | // exclusive to same exclusive 343 | cpuMutex = new(mutexMock) 344 | cpuMutex.On("Unlock").Return().NotBefore( 345 | cpuMutex.On("Lock").Return(), 346 | ) 347 | cpu.mutex = cpuMutex 348 | cpu.pool = exclusivePool1 349 | exclusivePool1Cores[0] = cpu 350 | assert.NoError(t, cpu.SetPool(exclusivePool1)) 351 | assert.True(t, cpu.pool == exclusivePool1) 352 | cpuMutex.AssertExpectations(t) 353 | 354 | //exclusive to another exclusive 355 | cpuMutex = new(mutexMock) 356 | cpuMutex.On("Unlock").Return().NotBefore( 357 | cpuMutex.On("Lock").Return(), 358 | ) 359 | cpu.mutex = cpuMutex 360 | cpu.pool = exclusivePool1 361 | exclusivePool1Cores[0] = cpu 362 | assert.ErrorContains(t, cpu.SetPool(exclusivePool2), " exclusive to different exclusive") 363 | assert.True(t, cpu.pool == exclusivePool1) 364 | cpuMutex.AssertExpectations(t) 365 | } 366 | 367 | func TestCpuImpl_doSetPool(t *testing.T) { 368 | var sourcePool, targetPool *poolMock 369 | var sourcePoolMutex, targetPoolMutex *mutexMock 370 | 371 | var cpu *cpuImpl 372 | // happy path 373 | sourcePool = new(poolMock) 374 | sourcePool.On("Name").Return("sauce") 375 | 
sourcePoolMutex = new(mutexMock) 376 | 377 | sourcePoolMutex.On("Unlock").Return().NotBefore( 378 | sourcePoolMutex.On("Lock").Return(), 379 | ) 380 | sourcePool.On("poolMutex").Return(sourcePoolMutex) 381 | 382 | targetPool = new(poolMock) 383 | targetPool.On("Name").Return("target") 384 | targetPoolMutex = new(mutexMock) 385 | 386 | targetPoolMutex.On("Unlock").Return().NotBefore( 387 | targetPoolMutex.On("Lock").Return(), 388 | ) 389 | targetPool.On("poolMutex").Return(targetPoolMutex) 390 | 391 | cpu = &cpuImpl{ 392 | pool: sourcePool, 393 | } 394 | sourcePool.On("Cpus").Return(&CpuList{cpu}) 395 | targetPool.On("Cpus").Return(&CpuList{}) 396 | 397 | assert.NoError(t, cpu.doSetPool(targetPool)) 398 | assert.True(t, cpu.pool == targetPool) 399 | sourcePoolMutex.AssertExpectations(t) 400 | targetPoolMutex.AssertExpectations(t) 401 | 402 | // remove failure 403 | sourcePool = new(poolMock) 404 | sourcePool.On("Name").Return("sauce") 405 | sourcePoolMutex.On("Unlock").Return().NotBefore( 406 | sourcePoolMutex.On("Lock").Return(), 407 | ) 408 | sourcePool.On("poolMutex").Return(sourcePoolMutex) 409 | 410 | targetPool = new(poolMock) 411 | targetPool.On("Name").Return("target") 412 | targetPoolMutex = new(mutexMock) 413 | targetPoolMutex.On("Unlock").Return().NotBefore( 414 | targetPoolMutex.On("Lock").Return(), 415 | ) 416 | targetPool.On("poolMutex").Return(targetPoolMutex) 417 | 418 | cpu = &cpuImpl{ 419 | pool: sourcePool, 420 | } 421 | sourcePool.On("Cpus").Return(&CpuList{}) 422 | targetPool.On("Cpus").Return(&CpuList{}) 423 | 424 | assert.ErrorContains(t, cpu.doSetPool(targetPool), "not in pool") 425 | assert.True(t, cpu.pool == sourcePool) 426 | sourcePoolMutex.AssertExpectations(t) 427 | targetPoolMutex.AssertExpectations(t) 428 | } 429 | 430 | func TestCoreList_IDs(t *testing.T) { 431 | cpus := CpuList{} 432 | var expectedIDs []uint 433 | for i := uint(0); i < 5; i++ { 434 | mockedCore := new(cpuMock) 435 | mockedCore.On("GetID").Return(i) 436 | cpus = append(cpus, mockedCore) 437 | expectedIDs = append(expectedIDs, i) 438 | } 439 | assert.ElementsMatch(t, cpus.IDs(), expectedIDs) 440 | } 441 | 442 | func TestCoreList_ByID(t *testing.T) { 443 | // test for quick get to skip iteration over list when index == coreId 444 | cpus := CpuList{} 445 | for i := uint(0); i < 5; i++ { 446 | mockedCore := new(cpuMock) 447 | mockedCore.On("GetID").Return(i) 448 | cpus = append(cpus, mockedCore) 449 | } 450 | 451 | assert.Equal(t, cpus[2], cpus.ByID(2)) 452 | cpus[0].(*cpuMock).AssertNotCalled(t, "GetID") 453 | cpus[1].(*cpuMock).AssertNotCalled(t, "GetID") 454 | 455 | // test for when index != coreID and have to iterate 456 | cpus = CpuList{} 457 | for _, u := range []uint{56, 1, 6, 99, 2, 11} { 458 | mocked := new(cpuMock) 459 | mocked.On("GetID").Return(u) 460 | cpus = append(cpus, mocked) 461 | } 462 | assert.Equal(t, cpus[3], cpus.ByID(99)) 463 | assert.Equal(t, cpus[5], cpus.ByID(11)) 464 | 465 | // not in list 466 | assert.Nil(t, cpus.ByID(77)) 467 | } 468 | 469 | func TestCoreList_ManyByIDs(t *testing.T) { 470 | cpus := CpuList{} 471 | for i := uint(0); i < 5; i++ { 472 | mockedCore := new(cpuMock) 473 | mockedCore.On("GetID").Return(i) 474 | cpus = append(cpus, mockedCore) 475 | } 476 | returnedList, err := cpus.ManyByIDs([]uint{1, 3}) 477 | assert.ElementsMatch(t, returnedList, []Cpu{cpus[1], cpus[3]}) 478 | assert.NoError(t, err) 479 | 480 | // out of range 481 | returnedList, err = cpus.ManyByIDs([]uint{6}) 482 | assert.Error(t, err) 483 | } 484 |
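// Sketch added for clarity (not an original test): the SetPool state machine exercised above treats
// the shared pool as the hub of all movements, so draining a CPU from an exclusive pool back to
// reserved takes two hops; the pool name "excl" is hypothetical.
//
//	cpu := instance.GetAllCpus().ByID(0)
//	_ = cpu.SetPool(instance.GetSharedPool())   // exclusive -> shared: allowed
//	_ = cpu.SetPool(instance.GetReservedPool()) // shared -> reserved: allowed
//	// calling cpu.SetPool(instance.GetExclusivePool("excl")) directly from the
//	// reserved pool would return an error instead (case 9 in cpu.go)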
-------------------------------------------------------------------------------- /pkg/power/host.go: -------------------------------------------------------------------------------- 1 | package power 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | ) 7 | 8 | // The hostImpl is the backing object of Host interface 9 | type hostImpl struct { 10 | name string 11 | exclusivePools PoolList 12 | reservedPool Pool 13 | sharedPool Pool 14 | topology Topology 15 | featureStates *FeatureSet 16 | } 17 | 18 | // Host represents the actual machine to be managed 19 | type Host interface { 20 | SetName(name string) 21 | GetName() string 22 | GetFeaturesInfo() FeatureSet 23 | 24 | GetReservedPool() Pool 25 | GetSharedPool() Pool 26 | 27 | AddExclusivePool(poolName string) (Pool, error) 28 | GetExclusivePool(poolName string) Pool 29 | GetAllExclusivePools() *PoolList 30 | 31 | GetAllCpus() *CpuList 32 | GetFreqRanges() CoreTypeList 33 | Topology() Topology 34 | // returns number of distinct core types 35 | NumCoreTypes() uint 36 | AvailableCStates() []string 37 | ValidateCStates(states CStates) error 38 | } 39 | 40 | // create a pre-populated Host object 41 | func initHost(nodeName string) (Host, error) { 42 | 43 | host := &hostImpl{ 44 | name: nodeName, 45 | exclusivePools: PoolList{}, 46 | } 47 | host.featureStates = &featureList 48 | // create predefined pools 49 | host.reservedPool = &reservedPoolType{poolImpl{ 50 | name: reservedPoolName, 51 | mutex: &sync.Mutex{}, 52 | host: host, 53 | }} 54 | host.sharedPool = &sharedPoolType{poolImpl{ 55 | name: sharedPoolName, 56 | cpus: CpuList{}, 57 | mutex: &sync.Mutex{}, 58 | host: host, 59 | }} 60 | 61 | topology, err := discoverTopology() 62 | if err != nil { 63 | log.Error(err, "failed to discover cpuTopology") 64 | return nil, fmt.Errorf("failed to init host: %w", err) 65 | } 66 | for _, cpu := range *topology.CPUs() { 67 | cpu._setPoolProperty(host.reservedPool) 68 | } 69 | // not very pretty but finds the lowest/highest core ranges 70 | var highest uint 71 | var highIndex uint 72 | var lowIndex uint 73 | for i, frequencies := range coreTypes { 74 | if frequencies.GetMax() > highest { 75 | highest = frequencies.GetMax() 76 | lowIndex = highIndex 77 | highIndex = uint(i) 78 | } 79 | if frequencies.GetMax() < highest { 80 | lowIndex = uint(i) 81 | } 82 | } 83 | CpuTypeReferences.pcore = highIndex 84 | CpuTypeReferences.ecore = lowIndex 85 | log.Info("discovered cpus", "cpus", len(*topology.CPUs())) 86 | // coretypes are populated after default profile is generated so we need to update here 87 | if featureList.isFeatureIdSupported(FrequencyScalingFeature) && host.NumCoreTypes() == 2 { 88 | defaultPowerProfile.max = coreTypes[CpuTypeReferences.Pcore()].GetMax() 89 | defaultPowerProfile.min = coreTypes[CpuTypeReferences.Pcore()].GetMax() 90 | defaultPowerProfile.efficientMax = coreTypes[CpuTypeReferences.Ecore()].GetMax() 91 | defaultPowerProfile.efficientMin = coreTypes[CpuTypeReferences.Ecore()].GetMax() 92 | } 93 | if host.NumCoreTypes() > numOfSupportedCoreTypes { 94 | log.Error(fmt.Errorf("more than %d core types detected. 
This may result in undefined behavior: %v", numOfSupportedCoreTypes, coreTypes), "topology issues detected") 95 | } 96 | host.topology = topology 97 | 98 | // create a shallow copy of pointers, changes to underlying cpu object will reflect in both lists, 99 | // changes to each list will not affect the other 100 | host.reservedPool.(*reservedPoolType).cpus = make(CpuList, len(*topology.CPUs())) 101 | copy(host.reservedPool.(*reservedPoolType).cpus, *topology.CPUs()) 102 | return host, nil 103 | } 104 | 105 | func (host *hostImpl) SetName(name string) { 106 | host.name = name 107 | } 108 | 109 | func (host *hostImpl) GetName() string { 110 | return host.name 111 | } 112 | 113 | func (host *hostImpl) GetReservedPool() Pool { 114 | return host.reservedPool 115 | } 116 | 117 | // returns the min/max frequency ranges of the discovered core types 118 | func (host *hostImpl) GetFreqRanges() CoreTypeList { 119 | return coreTypes 120 | } 121 | 122 | // AddExclusivePool creates a new empty pool 123 | func (host *hostImpl) AddExclusivePool(poolName string) (Pool, error) { 124 | if i := host.exclusivePools.IndexOfName(poolName); i >= 0 { 125 | return host.exclusivePools[i], fmt.Errorf("pool with name %s already exists", poolName) 126 | } 127 | var pool Pool = &exclusivePoolType{poolImpl{ 128 | name: poolName, 129 | mutex: &sync.Mutex{}, 130 | cpus: make([]Cpu, 0), 131 | host: host, 132 | }} 133 | 134 | host.exclusivePools.add(pool) 135 | return pool, nil 136 | } 137 | 138 | // GetExclusivePool returns the exclusive pool with the matching name supplied 139 | // returns nil if not found 140 | func (host *hostImpl) GetExclusivePool(name string) Pool { 141 | return host.exclusivePools.ByName(name) 142 | } 143 | 144 | // GetSharedPool returns shared pool 145 | func (host *hostImpl) GetSharedPool() Pool { 146 | return host.sharedPool 147 | } 148 | 149 | func (host *hostImpl) GetFeaturesInfo() FeatureSet { 150 | return *host.featureStates 151 | } 152 | 153 | func (host *hostImpl) GetAllCpus() *CpuList { 154 | return host.topology.CPUs() 155 | } 156 | 157 | func (host *hostImpl) GetAllExclusivePools() *PoolList { 158 | return &host.exclusivePools 159 | } 160 | 161 | func (host *hostImpl) NumCoreTypes() uint { 162 | return uint(len(coreTypes)) 163 | } 164 | 165 | func (host *hostImpl) Topology() Topology { 166 | return host.topology 167 | } 168 |
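// Added commentary (not in the original source): once initHost has resolved the core types,
// CpuTypeReferences indexes coreTypes by performance class, so hybrid parts can be queried without
// knowing the discovery order. A rough usage sketch:
//
//	ranges := host.GetFreqRanges()
//	if host.NumCoreTypes() == 2 {
//		pMax := ranges[CpuTypeReferences.Pcore()].GetMax() // performance cores
//		eMax := ranges[CpuTypeReferences.Ecore()].GetMax() // efficiency cores
//		log.Info("hybrid frequency limits", "pMax", pMax, "eMax", eMax)
//	}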
-------------------------------------------------------------------------------- /pkg/power/host_test.go: -------------------------------------------------------------------------------- 1 | package power 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | "github.com/stretchr/testify/mock" 10 | "github.com/stretchr/testify/suite" 11 | ) 12 | 13 | type hostMock struct { 14 | mock.Mock 15 | } 16 | 17 | func (m *hostMock) Topology() Topology { 18 | return m.Called().Get(0).(Topology) 19 | } 20 | 21 | func (m *hostMock) ValidateCStates(states CStates) error { 22 | return m.Called(states).Error(0) 23 | } 24 | 25 | func (m *hostMock) AvailableCStates() []string { 26 | return m.Called().Get(0).([]string) 27 | } 28 | 29 | func (m *hostMock) GetAllExclusivePools() *PoolList { 30 | return m.Called().Get(0).(*PoolList) 31 | } 32 | 33 | func (m *hostMock) SetName(name string) { 34 | m.Called(name) 35 | } 36 | 37 | func (m *hostMock) GetName() string { 38 | return m.Called().String(0) 39 | } 40 | 41 | func (m *hostMock) NumCoreTypes() uint { 42 | return m.Called().Get(0).(uint) 43 | } 44 | 45 | func (m *hostMock) GetFeaturesInfo() FeatureSet { 46 | ret := m.Called().Get(0) 47 | if ret == nil { 48 | return nil 49 | } else { 50 | return ret.(FeatureSet) 51 | } 52 | } 53 | 54 | func (m *hostMock) GetReservedPool() Pool { 55 | ret := m.Called().Get(0) 56 | if ret == nil { 57 | return nil 58 | } else { 59 | return ret.(Pool) 60 | } 61 | } 62 | 63 | func (m *hostMock) GetSharedPool() Pool { 64 | ret := m.Called().Get(0) 65 | if ret == nil { 66 | return nil 67 | } else { 68 | return ret.(Pool) 69 | } 70 | } 71 | 72 | func (m *hostMock) AddExclusivePool(poolName string) (Pool, error) { 73 | args := m.Called(poolName) 74 | retPool := args.Get(0) 75 | if retPool == nil { 76 | return nil, args.Error(1) 77 | } else { 78 | return retPool.(Pool), args.Error(1) 79 | } 80 | } 81 | 82 | func (m *hostMock) GetExclusivePool(poolName string) Pool { 83 | ret := m.Called(poolName).Get(0) 84 | if ret == nil { 85 | return nil 86 | } else { 87 | return ret.(Pool) 88 | } 89 | } 90 | 91 | func (m *hostMock) GetAllCpus() *CpuList { 92 | ret := m.Called().Get(0) 93 | if ret == nil { 94 | return nil 95 | } else { 96 | return ret.(*CpuList) 97 | } 98 | } 99 | 100 | func (m *hostMock) GetFreqRanges() CoreTypeList { 101 | return m.Called().Get(0).(CoreTypeList) 102 | } 103 | 104 | func TestHost_initHost(t *testing.T) { 105 | origGetAllCores := discoverTopology 106 | defer func() { discoverTopology = origGetAllCores }() 107 | const hostName = "host" 108 | 109 | // get topology fail 110 | discoverTopology = func() (Topology, error) { return new(mockCpuTopology), fmt.Errorf("error") } 111 | host, err := initHost(hostName) 112 | assert.Nil(t, host) 113 | assert.Error(t, err) 114 | 115 | core1 := new(cpuMock) 116 | core1.On("_setPoolProperty", mock.Anything).Return() 117 | core2 := new(cpuMock) 118 | core2.On("_setPoolProperty", mock.Anything).Return() 119 | 120 | mockedCores := CpuList{core1, core2} 121 | topObj := new(mockCpuTopology) 122 | topObj.On("CPUs").Return(&mockedCores) 123 | discoverTopology = func() (Topology, error) { return topObj, nil } 124 | host, err = initHost(hostName) 125 | 126 | assert.NoError(t, err) 127 | 128 | core1.AssertExpectations(t) 129 | core2.AssertExpectations(t) 130 | 131 | hostObj := host.(*hostImpl) 132 | assert.Equal(t, hostObj.name, hostName) 133 | assert.Equal(t, hostObj.topology, topObj) 134 | assert.ElementsMatch(t, hostObj.reservedPool.(*reservedPoolType).cpus, mockedCores) 135 | assert.NotNil(t, hostObj.sharedPool) 136 | } 137 | 138 | func TestHostImpl_AddExclusivePool(t *testing.T) { 139 | // happy path 140 | poolName := "poolName" 141 | host := &hostImpl{} 142 | 143 | pool, err := host.AddExclusivePool(poolName) 144 | assert.Nil(t, err) 145 | 146 | poolObj := pool.(*exclusivePoolType) 147 | assert.Contains(t, host.exclusivePools, pool) 148 | assert.Equal(t, poolObj.name, poolName) 149 | assert.Equal(t, poolObj.host, host) 150 | assert.Empty(t, poolObj.cpus) 151 | 152 | // already exists 153 | returnedPool, err := host.AddExclusivePool(poolName) 154 | assert.Equal(t, pool, returnedPool) 155 | assert.Error(t, err) 156 | } 157 | 158 | type hostTestsSuite struct { 159 | suite.Suite 160 | } 161 | 162 | func TestHost(t *testing.T) { 163 | suite.Run(t, new(hostTestsSuite)) 164 | } 165 | func (s *hostTestsSuite) TestRemoveExclusivePool() { 166 | // happy path 167 | p1 := new(poolMock) 168 | p1.On("Name").Return("pool1") 169 | p1.On("Remove").Return(nil) 170 | 171 | p2 := new(poolMock) 172 | p2.On("Name").Return("pool2") 173 | p2.On("Remove").Return(nil) 174 | host := &hostImpl{ 175 | exclusivePools: []Pool{p1, p2}, 176 | } 177 |
s.NoError(host.GetAllExclusivePools().remove(p1)) 178 | s.Assert().NotContains(host.exclusivePools, p1) 179 | s.Assert().Contains(host.exclusivePools, p2) 180 | 181 | // not existing 182 | p3 := new(poolMock) 183 | p3.On("Name").Return("pool3") 184 | p3.On("Remove").Return(nil) 185 | s.Error(new(hostImpl).GetAllExclusivePools().remove(p3)) 186 | } 187 | 188 | func (s *hostTestsSuite) TestHostImpl_SetReservedPoolCores() { 189 | cores := make(CpuList, 4) 190 | topology := new(mockCpuTopology) 191 | host := &hostImpl{topology: topology} 192 | for i := range cores { 193 | m := new(mockCpuCore) 194 | core, err := newCpu(uint(i), m) 195 | s.Nil(err) 196 | 197 | cores[i] = core 198 | } 199 | topology.On("CPUs").Return(&cores) 200 | host.reservedPool = &reservedPoolType{poolImpl{host: host, mutex: &sync.Mutex{}, cpus: make(CpuList, 0)}} 201 | host.sharedPool = &sharedPoolType{poolImpl{PowerProfile: &profileImpl{}, mutex: &sync.Mutex{}, host: host, cpus: cores}} 202 | 203 | for _, core := range cores { 204 | core._setPoolProperty(host.sharedPool) 205 | } 206 | referenceCores := make(CpuList, 4) 207 | copy(referenceCores, cores) 208 | s.Nil(host.GetReservedPool().SetCpus(referenceCores)) 209 | s.ElementsMatch(host.GetReservedPool().Cpus().IDs(), referenceCores.IDs()) 210 | s.Len(host.GetSharedPool().Cpus().IDs(), 0) 211 | 212 | } 213 | 214 | func (s *hostTestsSuite) TestAddSharedPool() { 215 | cores := make(CpuList, 4) 216 | topology := new(mockCpuTopology) 217 | host := &hostImpl{topology: topology} 218 | host.sharedPool = &sharedPoolType{poolImpl{PowerProfile: &profileImpl{}, mutex: &sync.Mutex{}, host: host}} 219 | for i := range cores { 220 | m := new(mockCpuCore) 221 | core, err := newCpu(uint(i), m) 222 | s.Nil(err) 223 | 224 | cores[i] = core 225 | } 226 | topology.On("CPUs").Return(&cores) 227 | 228 | host.reservedPool = &reservedPoolType{poolImpl{host: host, mutex: &sync.Mutex{}, cpus: cores}} 229 | for _, core := range cores { 230 | core._setPoolProperty(host.reservedPool) 231 | } 232 | 233 | referenceCores := make(CpuList, 2) 234 | copy(referenceCores, cores[0:2]) 235 | s.Nil(host.GetSharedPool().SetCpus(referenceCores)) 236 | 237 | s.ElementsMatch(host.sharedPool.Cpus().IDs(), referenceCores.IDs()) 238 | } 239 | 240 | func (s *hostTestsSuite) TestRemoveCoreFromExclusivePool() { 241 | pool := &poolImpl{ 242 | name: "test", 243 | PowerProfile: &profileImpl{}, 244 | mutex: &sync.Mutex{}, 245 | } 246 | cores := make(CpuList, 4) 247 | for i := range cores { 248 | m := new(mockCpuCore) 249 | core, err := newCpu(uint(i), m) 250 | s.Nil(err) 251 | 252 | cores[i] = core 253 | } 254 | pool.cpus = cores 255 | 256 | topology := new(mockCpuTopology) 257 | //topology.On("CPUs").Return(cores) 258 | 259 | host := &hostImpl{ 260 | name: "test_host", 261 | exclusivePools: []Pool{pool}, 262 | topology: topology, 263 | } 264 | pool.host = host 265 | for _, core := range cores { 266 | core._setPoolProperty(host.exclusivePools[0]) 267 | } 268 | 269 | host.sharedPool = &sharedPoolType{poolImpl{PowerProfile: &profileImpl{}, mutex: &sync.Mutex{}, host: host}} 270 | 271 | coresToRemove := make(CpuList, 2) 272 | copy(coresToRemove, cores[0:2]) 273 | coresToPreserve := make(CpuList, 2) 274 | copy(coresToPreserve, cores[2:]) 275 | s.Nil(host.GetSharedPool().MoveCpus(coresToRemove)) 276 | 277 | s.ElementsMatch(host.GetExclusivePool("test").Cpus().IDs(), coresToPreserve.IDs()) 278 | s.ElementsMatch(host.GetSharedPool().Cpus().IDs(), coresToRemove.IDs()) 279 | 280 | } 281 | 282 | func (s *hostTestsSuite) 
TestAddCoresToExclusivePool() { 283 | topology := new(mockCpuTopology) 284 | host := &hostImpl{ 285 | topology: topology, 286 | } 287 | host.exclusivePools = []Pool{&exclusivePoolType{poolImpl{ 288 | name: "test", 289 | cpus: make([]Cpu, 0), 290 | mutex: &sync.Mutex{}, 291 | PowerProfile: &profileImpl{}, 292 | host: host, 293 | }}} 294 | host.name = "test_node" 295 | cores := make(CpuList, 4) 296 | for i := range cores { 297 | m := new(mockCpuCore) 298 | core, err := newCpu(uint(i), m) 299 | s.Nil(err) 300 | 301 | cores[i] = core 302 | } 303 | topology.On("CPUs").Return(&cores) 304 | host.sharedPool = &sharedPoolType{poolImpl{PowerProfile: &profileImpl{}, mutex: &sync.Mutex{}, host: host, cpus: cores}} 305 | for _, core := range cores { 306 | core._setPoolProperty(host.sharedPool) 307 | } 308 | 309 | var movedCoresIds []uint 310 | for _, core := range cores[:2] { 311 | movedCoresIds = append(movedCoresIds, core.GetID()) 312 | } 313 | s.Nil(host.GetExclusivePool("test").MoveCpuIDs(movedCoresIds)) 314 | unmoved := cores[2:] 315 | s.ElementsMatch(host.GetSharedPool().Cpus().IDs(), unmoved.IDs()) 316 | s.Len(host.GetExclusivePool("test").Cpus().IDs(), 2) 317 | 318 | } 319 | 320 | 321 | func (s *hostTestsSuite) TestUpdateProfile() { 322 | //pool := new(poolMock) 323 | profile := &profileImpl{name: "powah", min: 2500, max: 3200} 324 | //pool.On("GetPowerProfile").Return(profile) 325 | //pool.On("SetPowerProfile", mock.Anything).Return(nil) 326 | //pool.On("Name").Return("powah") 327 | host := hostImpl{ 328 | sharedPool: new(poolMock), 329 | featureStates: &FeatureSet{FrequencyScalingFeature: &featureStatus{err: nil}}, 330 | } 331 | origFeatureList := featureList 332 | featureList = map[featureID]*featureStatus{ 333 | FrequencyScalingFeature: { 334 | err: nil, 335 | initFunc: initScalingDriver, 336 | }, 337 | CStatesFeature: { 338 | err: nil, 339 | initFunc: initCStates, 340 | }, 341 | } 342 | defer func() { featureList = origFeatureList }() 343 | pool := &poolImpl{name: "ex", mutex: &sync.Mutex{}, PowerProfile: profile, host: &host} 344 | host.exclusivePools = []Pool{pool} 345 | s.Equal(host.GetExclusivePool("ex").GetPowerProfile().MinFreq(), uint(2500)) 346 | s.Equal(host.GetExclusivePool("ex").GetPowerProfile().MaxFreq(), uint(3200)) 347 | 348 | s.Nil(host.GetExclusivePool("ex").SetPowerProfile(&profileImpl{name: "powah", min: 1200, max: 2500})) 349 | 350 | s.Equal(host.GetExclusivePool("ex").GetPowerProfile().MinFreq(), uint(1200)) 351 | s.Equal(host.GetExclusivePool("ex").GetPowerProfile().MaxFreq(), uint(2500)) 352 | } 353 | 354 | func (s *hostTestsSuite) TestRemoveCoresFromSharedPool() { 355 | topology := new(mockCpuTopology) 356 | host := &hostImpl{topology: topology} 357 | host.exclusivePools = []Pool{&poolImpl{ 358 | name: "test", 359 | cpus: make([]Cpu, 0), 360 | mutex: &sync.Mutex{}, 361 | PowerProfile: &profileImpl{}, 362 | host: host, 363 | }} 364 | host.name = "test_node" 365 | cores := make(CpuList, 4) 366 | for i := range cores { 367 | m := new(mockCpuCore) 368 | core, err := newCpu(uint(i), m) 369 | s.Nil(err) 370 | 371 | cores[i] = core 372 | } 373 | //topology.On("CPUs").Return(cores) 374 | host.sharedPool = &sharedPoolType{poolImpl{PowerProfile: &profileImpl{}, mutex: &sync.Mutex{}, host: host, cpus: cores}} 375 | host.reservedPool = &reservedPoolType{poolImpl{host: host, mutex: &sync.Mutex{}, cpus: make([]Cpu, 0)}} 376 | 377 | for _, core := range cores { 378 | core._setPoolProperty(host.sharedPool) 379 | } 380 | coresCopy := make(CpuList, 4) 381 | copy(coresCopy,
cores) 382 | s.Nil(host.GetReservedPool().MoveCpus(coresCopy)) 383 | s.ElementsMatch(host.GetReservedPool().Cpus().IDs(), coresCopy.IDs()) 384 | s.Len(host.GetSharedPool().Cpus().IDs(), 0) 385 | } 386 | 387 | func (s *hostTestsSuite) TestGetExclusivePool() { 388 | node := &hostImpl{ 389 | exclusivePools: []Pool{ 390 | &poolImpl{name: "p0"}, 391 | &poolImpl{name: "p1"}, 392 | &poolImpl{name: "p2"}, 393 | }, 394 | } 395 | s.Equal(node.exclusivePools[1], node.GetExclusivePool("p1")) 396 | s.Nil(node.GetExclusivePool("non existent")) 397 | } 398 | func (s *hostTestsSuite) TestGetSharedPool() { 399 | cores := make(CpuList, 4) 400 | for i := range cores { 401 | m := new(mockCpuCore) 402 | core, err := newCpu(uint(i), m) 403 | s.Nil(err) 404 | 405 | cores[i] = core 406 | } 407 | 408 | node := &hostImpl{ 409 | sharedPool: &sharedPoolType{poolImpl{ 410 | name: sharedPoolName, 411 | cpus: cores, 412 | PowerProfile: &profileImpl{}, 413 | }}, 414 | } 415 | sharedPool := node.GetSharedPool().(*sharedPoolType) 416 | s.ElementsMatch(cores.IDs(), sharedPool.cpus.IDs()) 417 | s.Equal(node.sharedPool.(*sharedPoolType).PowerProfile, sharedPool.PowerProfile) 418 | } 419 | func (s *hostTestsSuite) TestGetReservedPool() { 420 | cores := make(CpuList, 4) 421 | for i := range cores { 422 | m := new(mockCpuCore) 423 | core, err := newCpu(uint(i), m) 424 | s.Nil(err) 425 | cores[i] = core 426 | } 427 | poolImp := &poolImpl{ 428 | name: reservedPoolName, 429 | cpus: cores, 430 | PowerProfile: &profileImpl{}, 431 | } 432 | node := &hostImpl{ 433 | reservedPool: poolImp, 434 | } 435 | reservedPool := node.GetReservedPool() 436 | s.ElementsMatch(cores.IDs(), reservedPool.Cpus().IDs()) 437 | s.Equal(reservedPool.GetPowerProfile(), poolImp.PowerProfile) 438 | } 439 | func (s *hostTestsSuite) TestDeleteProfile() { 440 | allCores := make(CpuList, 12) 441 | sharedCores := make(CpuList, 4) 442 | for i := 0; i < 4; i++ { 443 | m := new(mockCpuCore) 444 | core, err := newCpu(uint(i), m) 445 | s.Nil(err) 446 | allCores[i] = core 447 | sharedCores[i] = core 448 | } 449 | sharedCoresCopy := make(CpuList, len(sharedCores)) 450 | copy(sharedCoresCopy, sharedCores) 451 | 452 | p1cores := make(CpuList, 4) 453 | for i := 4; i < 8; i++ { 454 | m := new(mockCpuCore) 455 | core, err := newCpu(uint(i), m) 456 | s.Nil(err) 457 | allCores[i] = core 458 | p1cores[i-4] = core 459 | } 460 | p1copy := make([]Cpu, len(p1cores)) 461 | copy(p1copy, p1cores) 462 | 463 | p2cores := make(CpuList, 4) 464 | for i := 8; i < 12; i++ { 465 | m := new(mockCpuCore) 466 | core, err := newCpu(uint(i), m) 467 | s.Nil(err) 468 | allCores[i] = core 469 | p2cores[i-8] = core 470 | } 471 | p2copy := make(CpuList, len(p2cores)) 472 | copy(p2copy, p2cores) 473 | 474 | host := &hostImpl{} 475 | exclusive := []Pool{ 476 | &exclusivePoolType{poolImpl{ 477 | name: "pool1", 478 | cpus: p1cores, 479 | mutex: &sync.Mutex{}, 480 | PowerProfile: &profileImpl{name: "profile1"}, 481 | host: host, 482 | }}, 483 | &exclusivePoolType{poolImpl{ 484 | name: "pool2", 485 | cpus: p2cores, 486 | mutex: &sync.Mutex{}, 487 | PowerProfile: &profileImpl{name: "profile2"}, 488 | host: host, 489 | }}, 490 | } 491 | shared := &sharedPoolType{poolImpl{ 492 | name: sharedPoolName, 493 | cpus: sharedCores, 494 | mutex: &sync.Mutex{}, 495 | PowerProfile: &profileImpl{name: sharedPoolName}, 496 | host: host, 497 | }} 498 | host.exclusivePools = exclusive 499 | host.sharedPool = shared 500 | host.reservedPool = &reservedPoolType{poolImpl{host: host}} 501 | topology := new(mockCpuTopology) 502 | 
topology.On("CPUs").Return(&allCores) 503 | host.topology = topology 504 | for i := 0; i < 4; i++ { 505 | sharedCores[i]._setPoolProperty(host.sharedPool) 506 | p1cores[i]._setPoolProperty(host.exclusivePools[0]) 507 | p2cores[i]._setPoolProperty(host.exclusivePools[1]) 508 | } 509 | s.NoError(host.GetExclusivePool("pool1").Remove()) 510 | s.Len(host.exclusivePools, 1) 511 | s.Equal("profile2", host.exclusivePools[0].(*exclusivePoolType).PowerProfile.(*profileImpl).name) 512 | s.ElementsMatch(host.exclusivePools[0].(*exclusivePoolType).cpus, p2copy) 513 | newShared := append(sharedCoresCopy, p1copy...) 514 | s.ElementsMatch(host.GetSharedPool().Cpus().IDs(), newShared.IDs()) 515 | 516 | } 517 | -------------------------------------------------------------------------------- /pkg/power/integration_test.go: -------------------------------------------------------------------------------- 1 | // this file contains integration tests pof the power library 2 | package power 3 | 4 | import ( 5 | "errors" 6 | "fmt" 7 | "maps" 8 | "testing" 9 | "time" 10 | 11 | "github.com/stretchr/testify/assert" 12 | ) 13 | 14 | // this test checks for potential race condition where one go routine moves cpus to a pool and another changes a power 15 | // profile of the target pool 16 | func TestConcurrentMoveCpusSetProfile(t *testing.T) { 17 | typecopy := CpuTypeReferences 18 | const count = 5 19 | for i := 0; i < count; i++ { 20 | doConcurrentMoveCPUSetProfile(t) 21 | } 22 | // reset feature list 23 | for _, status := range featureList { 24 | status.err = uninitialisedErr 25 | } 26 | CpuTypeReferences = typecopy 27 | 28 | } 29 | 30 | func doConcurrentMoveCPUSetProfile(t *testing.T) { 31 | const numCpus = 88 32 | emin := "11100" 33 | emax := "5550000" 34 | cpuConfig := map[string]string{ 35 | "min": "11100", 36 | "max": "9990000", 37 | "driver": "intel_pstate", 38 | "available_governors": "performance", 39 | "epp": "performance", 40 | } 41 | 42 | ecoreConfig := map[string]string{} 43 | maps.Copy(ecoreConfig, cpuConfig) 44 | ecoreConfig["min"] = emin 45 | ecoreConfig["max"] = emax 46 | 47 | cpuConfigAll := map[string]map[string]string{} 48 | 49 | cpuTopologyMap := map[string]map[string]string{} 50 | for i := 0; i < numCpus; i++ { 51 | // set e cores 52 | if i > numCpus/2 { 53 | cpuConfigAll[fmt.Sprint("cpu", i)] = ecoreConfig 54 | } else { 55 | // set p cores 56 | cpuConfigAll[fmt.Sprint("cpu", i)] = cpuConfig 57 | } 58 | // for this test we don't care about topology, so we just emulate 1 pkg, 1 die, numCpus cores, no hyperthreading 59 | cpuTopologyMap[fmt.Sprint("cpu", i)] = map[string]string{ 60 | "pkg": "0", 61 | "die": "0", 62 | "core": fmt.Sprint(i), 63 | } 64 | } 65 | defer setupCpuCStatesTests(map[string]map[string]map[string]string{})() 66 | defer setupUncoreTests(map[string]map[string]string{}, "")() 67 | defer setupCpuScalingTests(cpuConfigAll)() 68 | defer setupTopologyTest(cpuTopologyMap)() 69 | 70 | instance, err := CreateInstance("host") 71 | 72 | assert.ErrorContainsf(t, err, "failed to determine driver", "expecting c-states feature error") 73 | assert.ErrorContainsf(t, err, "intel_uncore_frequency not loaded", "expecting uncore feature error") 74 | assert.NotNil(t, instance) 75 | 76 | assert.Len(t, *instance.GetAllCpus(), numCpus) 77 | assert.ElementsMatch(t, *instance.GetReservedPool().Cpus(), *instance.GetAllCpus()) 78 | assert.Empty(t, *instance.GetSharedPool().Cpus()) 79 | 80 | profile, err := NewEcorePowerProfile("pwr", 100, 1000, 100, 500, "performance", "performance") 81 | assert.NoError(t, 
err) 82 | 83 | moveCoresErrChan := make(chan error) 84 | setPowerProfileErrChan2 := make(chan error) 85 | 86 | go func(instance Host, errChannel chan error) { 87 | errChannel <- instance.GetSharedPool().MoveCpus(*instance.GetAllCpus()) 88 | }(instance, moveCoresErrChan) 89 | 90 | go func(instance Host, profile Profile, errChannel chan error) { 91 | time.Sleep(5 * time.Millisecond) 92 | errChannel <- instance.GetSharedPool().SetPowerProfile(profile) 93 | }(instance, profile, setPowerProfileErrChan2) 94 | 95 | assert.NoError(t, <-moveCoresErrChan) 96 | close(moveCoresErrChan) 97 | 98 | assert.NoError(t, <-setPowerProfileErrChan2) 99 | close(setPowerProfileErrChan2) 100 | 101 | assert.Equal(t, profile, instance.GetSharedPool().GetPowerProfile()) 102 | assert.ElementsMatch(t, *instance.GetAllCpus(), *instance.GetSharedPool().Cpus()) 103 | for i := uint(0); i < numCpus; i++ { 104 | assert.NoError(t, verifyPowerProfile(i, profile), "cpuid", i) 105 | } 106 | } 107 | 108 | // verifies that the cpu is configured correctly 109 | // checking is done relative to basePath 110 | func verifyPowerProfile(cpuId uint, profile Profile) error { 111 | var allerrs []error 112 | var err error 113 | 114 | governor, err := readCpuStringProperty(cpuId, scalingGovFile) 115 | allerrs = append(allerrs, err) 116 | if governor != profile.Governor() { 117 | allerrs = append(allerrs, fmt.Errorf("governor mismatch expected: %s, current %s", profile.Governor(), governor)) 118 | } 119 | 120 | if profile.Epp() != "" { 121 | epp, err := readCpuStringProperty(cpuId, eppFile) 122 | allerrs = append(allerrs, err) 123 | if epp != profile.Epp() { 124 | allerrs = append(allerrs, fmt.Errorf("epp mismatch expected: %s, current %s", profile.Epp(), epp)) 125 | } 126 | } 127 | 128 | maxFreq, err := readCpuUintProperty(cpuId, scalingMaxFile) 129 | allerrs = append(allerrs, err) 130 | if maxFreq != profile.MaxFreq() && maxFreq != profile.EfficientMaxFreq() { 131 | allerrs = append(allerrs, fmt.Errorf("maxFreq mismatch expected %d, current %d", profile.MaxFreq(), maxFreq)) 132 | } 133 | minFreq, err := readCpuUintProperty(cpuId, scalingMinFile) 134 | allerrs = append(allerrs, err) 135 | if minFreq != profile.MinFreq() && minFreq != profile.EfficientMinFreq() { 136 | allerrs = append(allerrs, fmt.Errorf("minFreq mismatch expected %d, current %d", profile.MinFreq(), minFreq)) 137 | } 138 | return errors.Join(allerrs...) 139 | } 140 |
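// Sketch (illustrative only, mirroring the test above): the two-goroutine pattern generalises to any
// pair of pool mutations; buffering the channel lets both goroutines finish even when an assertion
// fails early. errors.Join above discards nil entries, so unconditionally appending read errors is safe.
//
//	errs := make(chan error, 2)
//	go func() { errs <- instance.GetSharedPool().MoveCpus(*instance.GetAllCpus()) }()
//	go func() { errs <- instance.GetSharedPool().SetPowerProfile(profile) }()
//	for i := 0; i < 2; i++ {
//		assert.NoError(t, <-errs)
//	}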
-------------------------------------------------------------------------------- /pkg/power/pool.go: -------------------------------------------------------------------------------- 1 | package power 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | ) 7 | 8 | type poolImpl struct { 9 | name string 10 | cpus CpuList 11 | mutex sync.Locker 12 | host Host 13 | // Scaling-Driver 14 | PowerProfile Profile 15 | // C-States 16 | CStatesProfile *CStates 17 | } 18 | 19 | type Pool interface { 20 | Name() string 21 | Cpus() *CpuList 22 | 23 | SetCpuIDs(cpuIDs []uint) error 24 | SetCpus(requestedCpus CpuList) error 25 | 26 | Remove() error 27 | 28 | Clear() error 29 | MoveCpus(cpus CpuList) error 30 | MoveCpuIDs(cpuIDs []uint) error 31 | 32 | SetPowerProfile(profile Profile) error 33 | GetPowerProfile() Profile 34 | 35 | poolMutex() sync.Locker 36 | 37 | // c-states 38 | SetCStates(states CStates) error 39 | getCStates() *CStates 40 | // private interface members 41 | getHost() Host 42 | isExclusive() bool 43 | } 44 | 45 | func (pool *poolImpl) Name() string { 46 | return pool.name 47 | } 48 | 49 | func (pool *poolImpl) Cpus() *CpuList { 50 | return &pool.cpus 51 | } 52 | 53 | func (pool *poolImpl) SetCpuIDs([]uint) error { 54 | panic("virtual") 55 | } // virtual 56 | 57 | func (pool *poolImpl) SetCpus(CpuList) error { 58 | // virtual function to be overwritten by exclusivePoolType, sharedPoolType and reservedPoolType 59 | panic("scuffed") 60 | } //virtual 61 | 62 | func (pool *poolImpl) MoveCpus(cpus CpuList) error { 63 | panic("virtual") 64 | } 65 | 66 | func (pool *poolImpl) MoveCpuIDs(cpuIDs []uint) error { 67 | panic("virtual") 68 | } 69 | 70 | func (pool *poolImpl) Remove() error { 71 | panic("'virtual' function") 72 | } // virtual 73 | 74 | func (pool *poolImpl) Clear() error { 75 | panic("scuffed") 76 | } // virtual 77 | 78 | func (pool *poolImpl) poolMutex() sync.Locker { 79 | return pool.mutex 80 | } 81 | 82 | func (pool *poolImpl) SetPowerProfile(profile Profile) error { 83 | log.V(4).Info("SetPowerProfile mutex lock", "pool", pool.name) 84 | pool.mutex.Lock() 85 | pool.PowerProfile = profile 86 | defer func() { 87 | pool.mutex.Unlock() 88 | log.V(4).Info("SetPowerProfile mutex unlock", "pool", pool.name) 89 | }() 90 | for _, cpu := range pool.cpus { 91 | err := cpu.consolidate() 92 | if err != nil { 93 | return err 94 | } 95 | } 96 | return nil 97 | } 98 | 99 | func (pool *poolImpl) GetPowerProfile() Profile { 100 | return pool.PowerProfile 101 | } 102 | 103 | func (pool *poolImpl) getHost() Host { 104 | return pool.host 105 | } 106 | 107 | func (pool *poolImpl) isExclusive() bool { 108 | return false 109 | } 110 |
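// Added commentary (not in the original source): poolImpl deliberately leaves the panicking methods
// above as "pure virtual" stubs; each concrete pool type embeds poolImpl and overrides them,
// template-method style. A hypothetical new pool flavour would follow the same shape:
//
//	type loggingPoolType struct{ poolImpl } // illustrative only
//
//	func (pool *loggingPoolType) MoveCpus(cpus CpuList) error {
//		log.Info("moving cpus", "pool", pool.name, "count", len(cpus))
//		for _, cpu := range cpus {
//			if err := cpu.SetPool(pool); err != nil {
//				return err
//			}
//		}
//		return nil
//	}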
111 | type sharedPoolType struct { 112 | poolImpl 113 | } 114 | 115 | func (sharedPool *sharedPoolType) MoveCpuIDs(cpuIDs []uint) error { 116 | cpus, err := sharedPool.host.GetAllCpus().ManyByIDs(cpuIDs) 117 | if err != nil { 118 | return err 119 | } 120 | return sharedPool.MoveCpus(cpus) 121 | } 122 | func (sharedPool *sharedPoolType) MoveCpus(cpus CpuList) error { 123 | for _, cpu := range cpus { 124 | if err := cpu.SetPool(sharedPool); err != nil { 125 | return err 126 | } 127 | } 128 | return nil 129 | } 130 | func (sharedPool *sharedPoolType) SetCpuIDs(cpuIDs []uint) error { 131 | cores, err := sharedPool.host.GetAllCpus().ManyByIDs(cpuIDs) 132 | if err != nil { 133 | return fmt.Errorf("cpuCore out of range: %w", err) 134 | } 135 | return sharedPool.SetCpus(cores) 136 | } 137 | 138 | // SetCpus on shared pool will place all desired cpus in the shared pool 139 | // undesired cpus that were in the shared pool will be placed in the reserved pool 140 | func (sharedPool *sharedPoolType) SetCpus(requestedCores CpuList) error { 141 | for _, cpu := range *sharedPool.host.GetAllCpus() { 142 | if requestedCores.Contains(cpu) { 143 | err := cpu.SetPool(sharedPool) 144 | if err != nil { 145 | return err 146 | } 147 | } else { 148 | if cpu.getPool() == sharedPool { // move cpus we don't want in the shared pool to reserved, don't touch any exclusive 149 | err := cpu.SetPool(sharedPool.host.GetReservedPool()) 150 | if err != nil { 151 | return err 152 | } 153 | } 154 | } 155 | } 156 | return nil 157 | } 158 | 159 | func (sharedPool *sharedPoolType) Clear() error { 160 | return sharedPool.SetCpus(CpuList{}) 161 | } 162 | func (sharedPool *sharedPoolType) Remove() error { 163 | return fmt.Errorf("shared pool cannot be removed") 164 | } 165 | 166 | type reservedPoolType struct { 167 | poolImpl 168 | } 169 | 170 | func (reservedPool *reservedPoolType) MoveCpuIDs(cpuIDs []uint) error { 171 | cpus, err := reservedPool.host.GetAllCpus().ManyByIDs(cpuIDs) 172 | if err != nil { 173 | return err 174 | } 175 | return reservedPool.MoveCpus(cpus) 176 | } 177 | func (reservedPool *reservedPoolType) MoveCpus(cpus CpuList) error { 178 | for _, cpu := range cpus { 179 | if err := cpu.SetPool(reservedPool); err != nil { 180 | return err 181 | } 182 | } 183 | return nil 184 | } 185 | func (reservedPool *reservedPoolType) SetCpuIDs(cpuIDs []uint) error { 186 | cpus, err := reservedPool.host.GetAllCpus().ManyByIDs(cpuIDs) 187 | if err != nil { 188 | return fmt.Errorf("cpuCore out of range: %w", err) 189 | } 190 | return reservedPool.SetCpus(cpus) 191 | } 192 | func (reservedPool *reservedPoolType) SetPowerProfile(Profile) error { 193 | return fmt.Errorf("cannot set power profile for reserved pool") 194 | } 195 | 196 | func (reservedPool *reservedPoolType) SetCpus(cores CpuList) error { 197 | /* 198 | case 1: cpu in any exclusive pool, not passed matching IDs -> untouched 199 | case 2: cpu in any exclusive pool, matching passed IDs -> error 200 | 201 | case 3: cpu in shared pool, not matching IDs passed -> untouched 202 | case 4: cpu in shared pool, IDs match passed -> move to reserved 203 | 204 | case 5: cpu in reserved pool, not matching IDs passed -> move to shared 205 | case 6: cpu in reserved pool, IDs match passed -> untouched 206 | */ 207 | 208 | sharedPool := reservedPool.host.GetSharedPool() 209 | 210 | for _, cpu := range *reservedPool.host.GetAllCpus() { 211 | if cores.Contains(cpu) { // case 2,4,6 212 | if cpu.getPool().isExclusive() { // case 2 213 | return fmt.Errorf("cpus cannot be moved directly from exclusive to reserved pool") 214 | } 215 | err := cpu.SetPool(reservedPool) // case 4 216 | if err != nil { 217 | return err 218 | } 219 | } else { // case 1,3,5 220 | if cpu.getPool() == reservedPool { // case 5 221 | err := cpu.SetPool(sharedPool) 222 | if err != nil { 223 | return err 224 | } 225 | } 226 | continue // 1,3 do nothing 227 | } 228 | } 229 | return nil 230 | } 231 | 232 | func (reservedPool *reservedPoolType) Remove() error { 233 | return fmt.Errorf("reserved pool cannot be removed") 234 | } 235 | 236 | func (reservedPool *reservedPoolType) Clear() error { 237 | return reservedPool.SetCpus(CpuList{}) 238 | } 239 | 240 | type exclusivePoolType struct { 241 | poolImpl 242 | } 243 | 244 | func (pool *exclusivePoolType) MoveCpuIDs(cpuIDs []uint) error { 245 | cpus, err := pool.host.GetAllCpus().ManyByIDs(cpuIDs) 246 | if err != nil { 247 | return err 248 | } 249
| return pool.MoveCpus(cpus) 250 | } 251 | func (pool *exclusivePoolType) MoveCpus(cpus CpuList) error { 252 | for _, cpu := range cpus { 253 | if err := cpu.SetPool(pool); err != nil { 254 | return err 255 | } 256 | } 257 | return nil 258 | } 259 | func (pool *exclusivePoolType) SetCpuIDs(cpuIDs []uint) error { 260 | cpus, err := pool.host.GetAllCpus().ManyByIDs(cpuIDs) 261 | if err != nil { 262 | return fmt.Errorf("cpuCore out of range: %w", err) 263 | } 264 | return pool.SetCpus(cpus) 265 | } 266 | 267 | func (pool *exclusivePoolType) SetCpus(requestedCores CpuList) error { 268 | for _, cpu := range *pool.host.GetAllCpus() { 269 | if requestedCores.Contains(cpu) { 270 | err := cpu.SetPool(pool) 271 | if err != nil { 272 | return err 273 | } 274 | } else { 275 | if cpu.getPool() != pool { 276 | continue 277 | } 278 | err := cpu.SetPool(pool.host.GetSharedPool()) 279 | if err != nil { 280 | return err 281 | } 282 | } 283 | } 284 | return nil 285 | } 286 | 287 | func (pool *exclusivePoolType) Clear() error { 288 | return pool.SetCpus(CpuList{}) 289 | } 290 | 291 | func (pool *exclusivePoolType) Remove() error { 292 | if err := pool.Clear(); err != nil { 293 | return err 294 | } 295 | if err := pool.host.GetAllExclusivePools().remove(pool); err != nil { 296 | return err 297 | } 298 | // improvement: mark current pool as invalid 299 | // *pool = nil 300 | return nil 301 | } 302 | 303 | func (pool *exclusivePoolType) isExclusive() bool { 304 | return true 305 | } 306 | 307 | type PoolList []Pool 308 | 309 | func (pools *PoolList) IndexOf(pool Pool) int { 310 | for i, p := range *pools { 311 | if p == pool { 312 | return i 313 | } 314 | } 315 | return -1 316 | } 317 | 318 | func (pools *PoolList) IndexOfName(name string) int { 319 | for i, p := range *pools { 320 | if p.Name() == name { 321 | return i 322 | } 323 | } 324 | return -1 325 | } 326 | 327 | func (pools *PoolList) Contains(pool Pool) bool { 328 | if pools.IndexOf(pool) < 0 { 329 | return false 330 | } else { 331 | return true 332 | } 333 | } 334 | 335 | func (pools *PoolList) remove(pool Pool) error { 336 | index := pools.IndexOf(pool) 337 | if index < 0 { 338 | return fmt.Errorf("pool %s not on host", pool.Name()) 339 | } 340 | size := len(*pools) - 1 341 | (*pools)[index] = (*pools)[size] 342 | *pools = (*pools)[:size] 343 | return nil 344 | } 345 | 346 | func (pools *PoolList) add(pool Pool) { 347 | *pools = append(*pools, pool) 348 | } 349 | 350 | func (pools *PoolList) ByName(name string) Pool { 351 | index := pools.IndexOfName(name) 352 | if index < 0 { 353 | return nil 354 | } 355 | return (*pools)[index] 356 | } 357 |
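// Note added for illustration: PoolList.remove mirrors CpuList.remove as a swap-delete, trading
// iteration-order stability for O(1) removal:
//
//	pools := PoolList{a, b, c} // a, b, c are placeholder Pool values
//	_ = pools.remove(a)        // pools is now {c, b}, not {b, c}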
return m.Called().Error(0) 38 | } 39 | 40 | func (m *poolMock) Name() string { 41 | return m.Called().String(0) 42 | } 43 | 44 | func (m *poolMock) Cpus() *CpuList { 45 | args := m.Called().Get(0) 46 | if args == nil { 47 | return nil 48 | } 49 | return args.(*CpuList) 50 | } 51 | 52 | func (m *poolMock) SetCpus(cores CpuList) error { 53 | return m.Called(cores).Error(0) 54 | } 55 | 56 | func (m *poolMock) SetCpuIDs(cpuIDs []uint) error { 57 | return m.Called(cpuIDs).Error(0) 58 | } 59 | 60 | func (m *poolMock) Remove() error { 61 | return m.Called().Error(0) 62 | } 63 | 64 | func (m *poolMock) MoveCpuIDs(coreIDs []uint) error { 65 | return m.Called(coreIDs).Error(0) 66 | } 67 | 68 | func (m *poolMock) MoveCpus(cores CpuList) error { 69 | return m.Called(cores).Error(0) 70 | } 71 | 72 | func (m *poolMock) getHost() Host { 73 | args := m.Called().Get(0) 74 | if args == nil { 75 | return nil 76 | } 77 | return args.(Host) 78 | } 79 | 80 | func (m *poolMock) SetPowerProfile(profile Profile) error { 81 | args := m.Called(profile) 82 | return args.Error(0) 83 | } 84 | 85 | func (m *poolMock) GetPowerProfile() Profile { 86 | args := m.Called().Get(0) 87 | if args == nil { 88 | return nil 89 | } 90 | return args.(Profile) 91 | } 92 | 93 | func TestPoolList(t *testing.T) { 94 | p1 := new(poolMock) 95 | p1.On("Name").Return("pool1") 96 | p2 := new(poolMock) 97 | p2.On("Name").Return("pool2") 98 | 99 | var pools PoolList = []Pool{p1, p2} 100 | // IndexOf 101 | assert.Equal(t, 1, pools.IndexOf(p2)) 102 | assert.Equal(t, -1, pools.IndexOf(&poolImpl{})) 103 | // IndexOfName 104 | assert.Equal(t, 1, pools.IndexOfName("pool2")) 105 | assert.Equal(t, -1, pools.IndexOfName("not existing")) 106 | // Contains 107 | assert.True(t, pools.Contains(p1)) 108 | assert.False(t, pools.Contains(&exclusivePoolType{})) 109 | // add 110 | newPool := &poolImpl{ 111 | name: "new", 112 | } 113 | pools.add(newPool) 114 | assert.Contains(t, pools, newPool) 115 | // remove 116 | assert.NoError(t, pools.remove(p2)) 117 | assert.NotContains(t, pools, p2) 118 | assert.Error(t, pools.remove(new(poolImpl))) 119 | // get by name 120 | assert.Equal(t, newPool, pools.ByName("new")) 121 | assert.Nil(t, pools.ByName("not existing")) 122 | } 123 | func TestPoolImpl_MoveCoresIDs(t *testing.T) { 124 | assert.PanicsWithValue(t, "virtual", func() { 125 | pool := poolImpl{ 126 | host: &hostImpl{}, 127 | } 128 | _ = pool.MoveCpuIDs([]uint{}) 129 | }) 130 | } 131 | 132 | func TestPoolImpl_MoveCores(t *testing.T) { 133 | assert.PanicsWithValue(t, "virtual", func() { 134 | pool := poolImpl{} 135 | _ = pool.MoveCpus(CpuList{}) 136 | }) 137 | } 138 | func TestExclusivePoolType_MoveCpuIDs(t *testing.T) { 139 | host := new(hostMock) 140 | host.On("GetAllCpus").Return(new(CpuList)) 141 | pool := &exclusivePoolType{poolImpl{ 142 | host: host, 143 | }, 144 | } 145 | assert.NoError(t, pool.MoveCpuIDs([]uint{})) 146 | assert.ErrorContains(t, pool.MoveCpuIDs([]uint{2}), "not in list") 147 | } 148 | 149 | func TestExclusivePoolType_MoveCpus(t *testing.T) { 150 | // happy path 151 | mockCore := new(cpuMock) 152 | mockCore2 := new(cpuMock) 153 | p := new(exclusivePoolType) 154 | mockCore.On("SetPool", p).Return(nil) 155 | mockCore2.On("SetPool", p).Return(nil) 156 | 157 | assert.NoError(t, p.MoveCpus(CpuList{mockCore, mockCore2})) 158 | 159 | mockCore.AssertExpectations(t) 160 | mockCore2.AssertExpectations(t) 161 | 162 | //failed to set 163 | setPoolErr := fmt.Errorf("") 164 | mockCore = new(cpuMock) 165 | mockCore.On("SetPool", p).Return(setPoolErr) 166 |
167 | assert.ErrorIs(t, p.MoveCpus(CpuList{mockCore}), setPoolErr) 168 | mockCore.AssertExpectations(t) 169 | } 170 | func TestSharedPoolType_MoveCpuIDs(t *testing.T) { 171 | host := new(hostMock) 172 | host.On("GetAllCpus").Return(new(CpuList)) 173 | pool := &sharedPoolType{poolImpl{ 174 | host: host, 175 | }, 176 | } 177 | assert.NoError(t, pool.MoveCpuIDs([]uint{})) 178 | assert.ErrorContains(t, pool.MoveCpuIDs([]uint{2}), "not in list") 179 | } 180 | 181 | func TestSharedPoolType_MoveCpus(t *testing.T) { 182 | // happy path 183 | mockCore := new(cpuMock) 184 | mockCore2 := new(cpuMock) 185 | p := new(sharedPoolType) 186 | mockCore.On("SetPool", p).Return(nil) 187 | mockCore2.On("SetPool", p).Return(nil) 188 | 189 | assert.NoError(t, p.MoveCpus(CpuList{mockCore, mockCore2})) 190 | 191 | mockCore.AssertExpectations(t) 192 | mockCore2.AssertExpectations(t) 193 | 194 | //failed to set 195 | setPoolErr := fmt.Errorf("") 196 | mockCore = new(cpuMock) 197 | mockCore.On("SetPool", p).Return(setPoolErr) 198 | 199 | assert.ErrorIs(t, p.MoveCpus(CpuList{mockCore}), setPoolErr) 200 | mockCore.AssertExpectations(t) 201 | } 202 | func TestReservedPoolType_MoveCpuIDs(t *testing.T) { 203 | host := new(hostMock) 204 | host.On("GetAllCpus").Return(new(CpuList)) 205 | pool := &reservedPoolType{poolImpl{ 206 | host: host, 207 | }, 208 | } 209 | assert.NoError(t, pool.MoveCpuIDs([]uint{})) 210 | assert.ErrorContains(t, pool.MoveCpuIDs([]uint{2}), "not in list") 211 | } 212 | 213 | func TestReservedPoolType_MoveCpus(t *testing.T) { 214 | // happy path 215 | mockCore := new(cpuMock) 216 | mockCore2 := new(cpuMock) 217 | p := new(reservedPoolType) 218 | mockCore.On("SetPool", p).Return(nil) 219 | mockCore2.On("SetPool", p).Return(nil) 220 | 221 | assert.NoError(t, p.MoveCpus(CpuList{mockCore, mockCore2})) 222 | 223 | mockCore.AssertExpectations(t) 224 | mockCore2.AssertExpectations(t) 225 | 226 | //failed to set 227 | setPoolErr := fmt.Errorf("") 228 | mockCore = new(cpuMock) 229 | mockCore.On("SetPool", p).Return(setPoolErr) 230 | 231 | assert.ErrorIs(t, p.MoveCpus(CpuList{mockCore}), setPoolErr) 232 | mockCore.AssertExpectations(t) 233 | } 234 | 235 | func TestPoolImpl_Getters(t *testing.T) { 236 | name := "pool" 237 | cores := CpuList{} 238 | powerProfile := new(profileImpl) 239 | host := new(hostMock) 240 | pool := poolImpl{ 241 | name: name, 242 | cpus: cores, 243 | PowerProfile: powerProfile, 244 | host: host, 245 | } 246 | assert.Equal(t, name, pool.Name()) 247 | assert.Equal(t, &cores, pool.Cpus()) 248 | assert.Equal(t, powerProfile, pool.GetPowerProfile()) 249 | } 250 | 251 | func TestPoolImpl_SetCoreIDs(t *testing.T) { 252 | assert.Panics(t, func() { 253 | pool := poolImpl{} 254 | _ = pool.SetCpuIDs([]uint{}) 255 | }) 256 | } 257 | func TestSharedPoolType_SetCoreIDs(t *testing.T) { 258 | host := new(hostMock) 259 | host.On("GetAllCpus").Return(new(CpuList)) 260 | 261 | pool := &sharedPoolType{poolImpl{host: host}} 262 | assert.NoError(t, pool.SetCpuIDs([]uint{})) 263 | } 264 | func TestReservedPoolType_SetCoreIDs(t *testing.T) { 265 | host := new(hostMock) 266 | host.On("GetAllCpus").Return(new(CpuList)) 267 | host.On("GetSharedPool").Return(new(poolMock)) 268 | 269 | pool := &reservedPoolType{poolImpl{host: host}} 270 | assert.NoError(t, pool.SetCpuIDs([]uint{})) 271 | } 272 | 273 | func TestExclusivePoolType_SetCoreIDs(t *testing.T) { 274 | host := new(hostMock) 275 | host.On("GetAllCpus").Return(new(CpuList)) 276 | 277 | pool := &exclusivePoolType{poolImpl{host: host}} 278 | assert.NoError(t, 
pool.SetCpuIDs([]uint{})) 279 | } 280 | 281 | func TestPoolImpl_SetCores(t *testing.T) { 282 | // base struct pool should always panic 283 | basePool := &poolImpl{} 284 | assert.Panics(t, func() { 285 | _ = basePool.SetCpus(CpuList{}) 286 | }) 287 | } 288 | 289 | func TestSharedPoolType_SetCores(t *testing.T) { 290 | reservedPool := new(poolMock) 291 | host := new(hostMock) 292 | 293 | sharedPool := &sharedPoolType{poolImpl{ 294 | host: host, 295 | }} 296 | 297 | allCores := make(CpuList, 8) 298 | for i := range allCores { 299 | core := new(cpuMock) 300 | if i >= 2 && i < 5 { 301 | core.On("SetPool", sharedPool).Return(nil) 302 | } else { 303 | core.On("SetPool", reservedPool).Return(nil) 304 | core.On("getPool").Return(sharedPool) 305 | } 306 | allCores[i] = core 307 | } 308 | 309 | host.On("GetAllCpus").Return(&allCores) 310 | host.On("GetReservedPool").Return(reservedPool) 311 | 312 | assert.NoError(t, sharedPool.SetCpus(allCores[2:5])) 313 | for _, core := range allCores { 314 | core.(*cpuMock).AssertExpectations(t) 315 | } 316 | // setPool error 317 | err := fmt.Errorf("borked") 318 | allCores[0] = new(cpuMock) 319 | allCores[0].(*cpuMock).On("SetPool", mock.Anything).Return(err) 320 | assert.ErrorIs(t, sharedPool.SetCpus(allCores), err) 321 | 322 | } 323 | 324 | func TestReservedPoolType_SetCores(t *testing.T) { 325 | sharedPool := new(poolMock) 326 | sharedPool.On("isExclusive").Return(false) 327 | 328 | exclusivePool := new(poolMock) 329 | exclusivePool.On("isExclusive").Return(true) 330 | 331 | host := new(hostMock) 332 | allCores := CpuList{} 333 | host.On("GetAllCpus").Return(&allCores) 334 | host.On("GetSharedPool").Return(sharedPool) 335 | 336 | requestedSetCores := CpuList{} 337 | reservedPool := &reservedPoolType{poolImpl{host: host}} 338 | for i := 1; i <= 6; i++ { 339 | core := new(cpuMock) 340 | switch i { 341 | case 1: 342 | core.On("getPool").Return(exclusivePool) 343 | case 2: 344 | core.On("getPool").Return(nil) 345 | // will test when testing errors, we don't want to return prematurely 346 | case 3: 347 | core.On("getPool").Return(sharedPool) 348 | case 4: 349 | core.On("getPool").Return(sharedPool) 350 | requestedSetCores.add(core) 351 | core.On("SetPool", reservedPool).Return(nil) 352 | case 5: 353 | core.On("getPool").Return(reservedPool) 354 | core.On("SetPool", sharedPool).Return(nil) 355 | case 6: 356 | core.On("getPool").Return(reservedPool) 357 | requestedSetCores.add(core) 358 | core.On("SetPool", reservedPool).Return(nil) 359 | } 360 | allCores.add(core) 361 | } 362 | 363 | assert.NoError(t, reservedPool.SetCpus(requestedSetCores)) 364 | for _, core := range allCores { 365 | core.(*cpuMock).AssertExpectations(t) 366 | } 367 | // now test case 2 where we expect error 368 | allCores[0] = new(cpuMock) 369 | allCores[0].(*cpuMock).On("getPool").Return(exclusivePool) 370 | 371 | assert.ErrorContains(t, reservedPool.SetCpus(CpuList{allCores[0]}), "exclusive to reserved") 372 | } 373 | 374 | func TestExclusivePoolType_SetCores(t *testing.T) { 375 | sharedPool := new(poolMock) 376 | host := new(hostMock) 377 | 378 | exclusivePool := &exclusivePoolType{poolImpl{ 379 | host: host, 380 | }} 381 | 382 | allCores := make(CpuList, 3) 383 | for i := range allCores { 384 | core := new(cpuMock) 385 | switch i { 386 | case 0: 387 | core.On("getPool").Return(exclusivePool) 388 | core.On("SetPool", sharedPool).Return(nil) 389 | case 1: 390 | core.On("getPool").Return(sharedPool) 391 | case 2: 392 | core.On("SetPool", exclusivePool).Return(nil) 393 | } 394 | 395 | allCores[i] = 
core 396 | } 397 | 398 | host.On("GetAllCpus").Return(&allCores) 399 | host.On("GetSharedPool").Return(sharedPool) 400 | // exclusive pool 401 | assert.NoError(t, exclusivePool.SetCpus(CpuList{allCores[2]})) 402 | for _, core := range allCores { 403 | core.(*cpuMock).AssertExpectations(t) 404 | } 405 | // setPool error 406 | err := fmt.Errorf("borked") 407 | allCores[0] = new(cpuMock) 408 | allCores[0].(*cpuMock).On("SetPool", mock.Anything).Return(err) 409 | assert.ErrorIs(t, exclusivePool.SetCpus(CpuList{allCores[0]}), err) 410 | } 411 | 412 | func TestPoolImpl_SetPowerProfile(t *testing.T) { 413 | cores := make(CpuList, 2) 414 | for i := range cores { 415 | core := new(cpuMock) 416 | core.On("consolidate").Return(nil) 417 | cores[i] = core 418 | } 419 | 420 | poolMutex := new(mutexMock) 421 | poolMutex.On("Unlock").Return().NotBefore( 422 | poolMutex.On("Lock").Return(), 423 | ) 424 | pool := &poolImpl{cpus: cores, mutex: poolMutex} 425 | powerProfile := new(profileImpl) 426 | assert.NoError(t, pool.SetPowerProfile(powerProfile)) 427 | assert.True(t, pool.PowerProfile == powerProfile) 428 | poolMutex.AssertExpectations(t) 429 | for _, core := range cores { 430 | core.(*cpuMock).AssertExpectations(t) 431 | } 432 | } 433 | 434 | func TestPoolImpl_Remove(t *testing.T) { 435 | // expecting to call the 'virtual' SetCpus that panics 436 | assert.Panics(t, func() { 437 | pool := poolImpl{} 438 | pool.Remove() 439 | }) 440 | 441 | } 442 | 443 | func TestExclusivePoolType_Remove(t *testing.T) { 444 | host := new(hostMock) 445 | host.On("GetAllCpus").Return(new(CpuList)) 446 | 447 | pool := &exclusivePoolType{poolImpl{host: host}} 448 | pools := PoolList{pool} 449 | host.On("GetAllExclusivePools").Return(&pools) 450 | assert.NoError(t, pool.Remove()) 451 | host.AssertExpectations(t) 452 | assert.NotContains(t, pools, pool) 453 | } 454 | 455 | func TestPoolList_ByName(t *testing.T) { 456 | pools := PoolList{} 457 | for _, s := range []string{"pool1", "poo2", "something"} { 458 | mockPool := new(poolMock) 459 | mockPool.On("Name").Return(s) 460 | pools = append(pools, mockPool) 461 | } 462 | assert.Equal(t, pools[2], pools.ByName("something")) 463 | assert.Nil(t, pools.ByName("not existing")) 464 | 465 | } 466 | -------------------------------------------------------------------------------- /pkg/power/power.go: -------------------------------------------------------------------------------- 1 | package power 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "os" 7 | "path" 8 | "runtime" 9 | "strconv" 10 | "strings" 11 | 12 | "github.com/go-logr/logr" 13 | ) 14 | 15 | var basePath = "/sys/devices/system/cpu" 16 | 17 | type featureID uint 18 | 19 | const ( 20 | sharedPoolName = "sharedPool" 21 | reservedPoolName = "reservedPool" 22 | FrequencyScalingFeature featureID = iota 23 | EPPFeature 24 | CStatesFeature 25 | UncoreFeature 26 | ) 27 | 28 | type LibConfig struct { 29 | CpuPath string 30 | ModulePath string 31 | Cores uint 32 | } 33 | 34 | // initialized with null logger, can be set to proper logger with SetLogger 35 | var log = logr.Discard() 36 | 37 | // default declaration of defined features, defined to uninitialized state 38 | var featureList FeatureSet = map[featureID]*featureStatus{ 39 | EPPFeature: { 40 | err: uninitialisedErr, 41 | initFunc: initEpp, 42 | }, 43 | FrequencyScalingFeature: { 44 | err: uninitialisedErr, 45 | initFunc: initScalingDriver, 46 | }, 47 | CStatesFeature: { 48 | err: uninitialisedErr, 49 | initFunc: initCStates, 50 | }, 51 | UncoreFeature: { 52 | err: uninitialisedErr, 
53 | initFunc: initUncore, 54 | }, 55 | } 56 | var uninitialisedErr = fmt.Errorf("feature uninitialized") 57 | var undefinederr = fmt.Errorf("feature undefined") 58 | 59 | // featureStatus stores the feature name, the driver in use and, if the feature is unsupported, an error describing the reason 60 | type featureStatus struct { 61 | name string 62 | driver string 63 | err error 64 | initFunc func() featureStatus 65 | } 66 | 67 | func (f *featureStatus) Name() string { 68 | return f.name 69 | } 70 | func (f *featureStatus) Driver() string { 71 | return f.driver 72 | } 73 | func (f *featureStatus) FeatureError() error { 74 | return f.err 75 | } 76 | func (f *featureStatus) isSupported() bool { 77 | return f.err == nil 78 | } 79 | 80 | // FeatureSet stores info about the functionalities supported by the power library 81 | // on the current system 82 | type FeatureSet map[featureID]*featureStatus 83 | 84 | // init initialises all defined features and returns the joined errors of every failed feature 85 | func (set *FeatureSet) init() error { 86 | if len(*set) == 0 { 87 | return fmt.Errorf("no features defined") 88 | } 89 | allErrors := make([]error, 0, len(*set)) 90 | for id, status := range *set { 91 | feature := status.initFunc() 92 | (*set)[id] = &feature 93 | allErrors = append(allErrors, feature.err) 94 | } 95 | return errors.Join(allErrors...) 96 | } 97 | 98 | // anySupported checks if any of the defined features is supported on the current machine 99 | func (set *FeatureSet) anySupported() bool { 100 | for _, status := range *set { 101 | if status.err == nil { 102 | return true 103 | } 104 | } 105 | return false 106 | } 107 | 108 | // isFeatureIdSupported takes a feature id and checks if that feature is supported on the current system 109 | func (set *FeatureSet) isFeatureIdSupported(id featureID) bool { 110 | feature, exists := (*set)[id] 111 | if !exists { 112 | return false 113 | } 114 | return feature.isSupported() 115 | } 116 | 117 | // getFeatureIdError retrieves any error associated with a feature 118 | func (set *FeatureSet) getFeatureIdError(id featureID) error { 119 | feature, exists := (*set)[id] 120 | if !exists { 121 | return undefinederr 122 | } 123 | return feature.err 124 | } 125 | 126 | // CreateInstance initialises the power library 127 | // returns a Host with an empty list of exclusive pools plus the default shared and reserved pools; 128 | // by default all cpus start in the system reserved pool 129 | // if a fatal error occurred, nil and the error are returned 130 | // if only non-fatal errors occurred, both the Host object and the errors are returned 131 | func CreateInstance(hostName string) (Host, error) { 132 | allErrors := featureList.init() 133 | if !featureList.anySupported() { 134 | return nil, allErrors 135 | } 136 | host, err := initHost(hostName) 137 | if err != nil { 138 | return nil, errors.Join(allErrors, err) 139 | } 140 | return host, allErrors 141 | } 142 | func CreateInstanceWithConf(hostname string, conf LibConfig) (Host, error) { 143 | if conf.CpuPath != "" { 144 | basePath = conf.CpuPath 145 | } 146 | if conf.ModulePath != "" { 147 | kernelModulesFilePath = conf.ModulePath 148 | } 149 | getNumberOfCpus = func() uint { return conf.Cores } 150 | return CreateInstance(hostname) 151 | } 152 | 153 | // getNumberOfCpus is defined as a var so it can be mocked by the unit tests 154 | var getNumberOfCpus = func() uint { 155 | // First, try to get CPUs from sysfs. 
If the sysfs isn't available, 156 | // return the number of CPUs from the runtime 157 | cpusAvailable, err := readStringFromFile(path.Join(basePath, "online")) 158 | if err != nil { 159 | return uint(runtime.NumCPU()) 160 | } 161 | // Delete the \n character and split the string to get the 162 | // first and last element of the range 163 | cpusAvailable = strings.Replace(cpusAvailable, "\n", "", -1) 164 | cpuSlice := strings.Split(cpusAvailable, "-") 165 | if len(cpuSlice) < 2 { 166 | return uint(runtime.NumCPU()) 167 | } 168 | // Calculate the number of CPUs; if an error occurs 169 | // return the number of CPUs from the runtime 170 | firstElement, err := strconv.Atoi(cpuSlice[0]) 171 | if err != nil { 172 | return uint(runtime.NumCPU()) 173 | } 174 | secondElement, err := strconv.Atoi(cpuSlice[1]) 175 | if err != nil { 176 | return uint(runtime.NumCPU()) 177 | } 178 | return uint((secondElement - firstElement) + 1) 179 | } 180 | 181 | // reads a file from a path, parses the contents as an int and returns the value 182 | // returns an error if any step fails 183 | func readUintFromFile(filePath string) (uint, error) { 184 | valueString, err := readStringFromFile(filePath) 185 | if err != nil { 186 | return 0, err 187 | } 188 | valueString = strings.TrimSuffix(valueString, "\n") 189 | value, err := strconv.Atoi(valueString) 190 | if err != nil { 191 | return 0, err 192 | } 193 | if value < 0 { 194 | return 0, fmt.Errorf("unexpected negative value when expecting uint") 195 | } 196 | return uint(value), nil 197 | } 198 | 199 | // reads a value from a file and returns the contents as a string 200 | func readStringFromFile(filePath string) (string, error) { 201 | valueByte, err := os.ReadFile(filePath) 202 | if err != nil { 203 | return "", err 204 | } 205 | return string(valueByte), nil 206 | } 207 | 208 | // IsFeatureSupported checks if all of the given features are supported; if any of the checked features 209 | // is not supported it returns false 210 | func IsFeatureSupported(features ...featureID) bool { 211 | for _, feature := range features { 212 | if !featureList.isFeatureIdSupported(feature) { 213 | return false 214 | } 215 | } 216 | return true 217 | } 218 | 219 | // SetLogger takes a pre-configured go-logr logr.Logger to be used by the library 220 | func SetLogger(logger logr.Logger) { 221 | log = logger 222 | } 223 | -------------------------------------------------------------------------------- /pkg/power/power_profile.go: -------------------------------------------------------------------------------- 1 | package power 2 | 3 | import ( 4 | "fmt" 5 | ) 6 | 7 | type profileImpl struct { 8 | name string 9 | max uint 10 | min uint 11 | efficientMax uint 12 | efficientMin uint 13 | epp string 14 | governor string 15 | // todo classification 16 | } 17 | 18 | // Profile contains scaling driver information 19 | type Profile interface { 20 | Name() string 21 | Epp() string 22 | MaxFreq() uint 23 | EfficientMaxFreq() uint 24 | MinFreq() uint 25 | EfficientMinFreq() uint 26 | Governor() string 27 | } 28 | 29 | var availableGovs []string 30 | 31 | // todo add simple constructor that determines frequencies automagically?
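// Editorial sketch (not part of the library): typical use of the profile constructors
// defined below, paired with the pool API from pool.go; the names and values in the
// snippet are illustrative and error handling is elided. Note that the constructors
// multiply the frequency arguments by 1000 before storing them.
//
//	host, _ := CreateInstance("node1")
//	profile, err := NewPowerProfile("high-prio", 1000, 3000, cpuPolicyPerformance, cpuPolicyPerformance)
//	if err == nil {
//		pool, _ := host.AddExclusivePool("high-prio")
//		_ = pool.SetPowerProfile(profile)
//	}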
32 | 33 | // NewPowerProfile creates a power profile with the given frequency range, governor and epp value 34 | func NewPowerProfile(name string, minFreq uint, maxFreq uint, governor string, epp string) (Profile, error) { 35 | if !featureList.isFeatureIdSupported(FrequencyScalingFeature) { 36 | return nil, featureList.getFeatureIdError(FrequencyScalingFeature) 37 | } 38 | if len(coreTypes) > 1 { 39 | log.Error(fmt.Errorf("creating standard power profile on system with multiple core types"), "undefined behavior expected") 40 | } 41 | if minFreq > maxFreq { 42 | return nil, fmt.Errorf("max Freq can't be lower than min") 43 | } 44 | if governor == "" { 45 | governor = defaultGovernor 46 | } 47 | if !checkGov(governor) { // todo: determine by reading available governors, it's different for the acpi driver 48 | return nil, fmt.Errorf("governor can only be set to the following %v", availableGovs) 49 | 50 | } 51 | if epp != "" && governor == cpuPolicyPerformance && epp != cpuPolicyPerformance { 52 | return nil, fmt.Errorf("only '%s' epp can be used with '%s' governor", cpuPolicyPerformance, cpuPolicyPerformance) 53 | } 54 | 55 | log.Info("creating powerProfile object", "name", name) 56 | return &profileImpl{ 57 | name: name, 58 | max: maxFreq * 1000, 59 | min: minFreq * 1000, 60 | efficientMax: maxFreq * 1000, 61 | efficientMin: minFreq * 1000, 62 | epp: epp, 63 | governor: governor, 64 | }, nil 65 | } 66 | 67 | // NewEcorePowerProfile creates a power profile for systems with both efficient and performance cores 68 | func NewEcorePowerProfile(name string, minFreq uint, maxFreq uint, emin uint, emax uint, governor string, epp string) (Profile, error) { 69 | if !featureList.isFeatureIdSupported(FrequencyScalingFeature) { 70 | return nil, featureList.getFeatureIdError(FrequencyScalingFeature) 71 | } 72 | if minFreq > maxFreq { 73 | return nil, fmt.Errorf("max Freq can't be lower than min") 74 | } 75 | if emin > emax { 76 | return nil, fmt.Errorf("max Freq can't be lower than min") 77 | } 78 | if governor == "" { 79 | governor = defaultGovernor 80 | } 81 | if !checkGov(governor) { // todo: determine by reading available governors, it's different for the acpi driver 82 | return nil, fmt.Errorf("governor can only be set to the following %v", availableGovs) 83 | 84 | } 85 | if epp != "" && governor == cpuPolicyPerformance && epp != cpuPolicyPerformance { 86 | return nil, fmt.Errorf("only '%s' epp can be used with '%s' governor", cpuPolicyPerformance, cpuPolicyPerformance) 87 | } 88 | 89 | log.Info("creating powerProfile object", "name", name) 90 | return &profileImpl{ 91 | name: name, 92 | max: maxFreq * 1000, 93 | min: minFreq * 1000, 94 | efficientMax: emax * 1000, 95 | efficientMin: emin * 1000, 96 | epp: epp, 97 | governor: governor, 98 | }, nil 99 | } 100 | 101 | func (p *profileImpl) Epp() string { 102 | return p.epp 103 | } 104 | 105 | func (p *profileImpl) MaxFreq() uint { 106 | return p.max 107 | } 108 | 109 | func (p *profileImpl) MinFreq() uint { 110 | return p.min 111 | } 112 | 113 | func (p *profileImpl) EfficientMaxFreq() uint { 114 | return p.efficientMax 115 | } 116 | 117 | func (p *profileImpl) EfficientMinFreq() uint { 118 | return p.efficientMin 119 | } 120 | 121 | func (p *profileImpl) Name() string { 122 | return p.name 123 | } 124 | 125 | func (p *profileImpl) Governor() string { 126 | return p.governor 127 | } 128 | 129 | func checkGov(governor string) bool { 130 | for _, element := range availableGovs { 131 | if element == governor { 132 | return true 133 | } 134 | } 135 | return false 136 | } 137 | --------------------------------------------------------------------------------
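// Editorial sketch (not part of the library): NewEcorePowerProfile, defined above,
// takes a second frequency range that is applied to efficiency cores on hybrid
// systems; the numbers here are illustrative only.
//
//	// P-cores scale between 1000 and 3000, E-cores between 800 and 2000
//	profile, err := NewEcorePowerProfile("hybrid", 1000, 3000, 800, 2000, cpuPolicyPowersave, "balance-performance")
//	if err != nil {
//		// frequency scaling unsupported, invalid range, or unknown governor/epp combination
//	}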
/pkg/power/power_profile_test.go: -------------------------------------------------------------------------------- 1 | package power 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestNewProfile(t *testing.T) { 11 | oldgovs := availableGovs 12 | availableGovs = []string{cpuPolicyPowersave, cpuPolicyPerformance} 13 | 14 | profile, err := NewPowerProfile("name", 0, 100, cpuPolicyPowersave, "epp") 15 | assert.ErrorIs(t, err, uninitialisedErr) 16 | assert.Nil(t, profile) 17 | 18 | featureList[FrequencyScalingFeature].err = nil 19 | featureList[EPPFeature].err = nil 20 | defer func() { featureList[FrequencyScalingFeature].err = uninitialisedErr }() 21 | defer func() { featureList[EPPFeature].err = uninitialisedErr }() 22 | defer func() { availableGovs = oldgovs }() 23 | 24 | profile, err = NewPowerProfile("name", 0, 100, cpuPolicyPowersave, "epp") 25 | assert.NoError(t, err) 26 | assert.Equal(t, "name", profile.(*profileImpl).name) 27 | assert.Equal(t, uint(0), profile.(*profileImpl).min) 28 | assert.Equal(t, uint(100*1000), profile.(*profileImpl).max) 29 | assert.Equal(t, "powersave", profile.(*profileImpl).governor) 30 | assert.Equal(t, "epp", profile.(*profileImpl).epp) 31 | 32 | profile, err = NewPowerProfile("name", 0, 10, cpuPolicyPerformance, cpuPolicyPerformance) 33 | assert.NoError(t, err) 34 | assert.NotNil(t, profile) 35 | 36 | profile, err = NewPowerProfile("name", 0, 100, cpuPolicyPerformance, "epp") 37 | assert.ErrorContains(t, err, fmt.Sprintf("'%s' epp can be used with '%s' governor", cpuPolicyPerformance, cpuPolicyPerformance)) 38 | assert.Nil(t, profile) 39 | 40 | profile, err = NewPowerProfile("name", 100, 0, cpuPolicyPowersave, "epp") 41 | assert.ErrorContains(t, err, "max Freq can't be lower than min") 42 | assert.Nil(t, profile) 43 | 44 | profile, err = NewPowerProfile("name", 0, 100, "something random", "epp") 45 | assert.ErrorContains(t, err, "governor can only be set to the following") 46 | assert.Nil(t, profile) 47 | } 48 | 49 | func TestEfficientProfile(t *testing.T) { 50 | oldGovs := availableGovs 51 | availableGovs = []string{cpuPolicyPowersave, cpuPolicyPerformance} 52 | featureList[FrequencyScalingFeature].err = nil 53 | featureList[EPPFeature].err = nil 54 | typeCopy := coreTypes 55 | 56 | //reset values afterwards 57 | defer func() { featureList[FrequencyScalingFeature].err = uninitialisedErr }() 58 | defer func() { featureList[EPPFeature].err = uninitialisedErr }() 59 | defer func() { coreTypes = typeCopy }() 60 | defer func() { availableGovs = oldGovs }() 61 | 62 | coreTypes = CoreTypeList{&CpuFrequencySet{min: 300, max: 1000}, &CpuFrequencySet{min: 300, max: 500}} 63 | 64 | //default scenario 65 | profile, err := NewEcorePowerProfile("name", 300, 1000, 300, 450, cpuPolicyPerformance, cpuPolicyPerformance) 66 | assert.NoError(t, err) 67 | assert.NotNil(t, profile) 68 | 69 | // invalid frequency ranges 70 | profile, err = NewEcorePowerProfile("name", 300, 1000, 430, 200, cpuPolicyPerformance, cpuPolicyPerformance) 71 | assert.ErrorContains(t, err, "max Freq can't be lower than min") 72 | assert.Nil(t, profile) 73 | 74 | } 75 | -------------------------------------------------------------------------------- /pkg/power/power_test.go: -------------------------------------------------------------------------------- 1 | package power 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestFeatureSet_init(t *testing.T) { 12 | 13 | 
assert.Error(t, (&FeatureSet{}).init()) 14 | 15 | set := FeatureSet{} 16 | set[0] = &featureStatus{} 17 | 18 | // non-existing initFunc 19 | assert.Panics(t, func() { set.init() }) 20 | 21 | // no error 22 | called := false 23 | set[0] = &featureStatus{ 24 | initFunc: func() featureStatus { 25 | called = true 26 | return featureStatus{} 27 | }, 28 | } 29 | assert.Empty(t, set.init()) 30 | assert.True(t, called) 31 | 32 | // error 33 | called = false 34 | 35 | expectedFeatureError := fmt.Errorf("error") 36 | set[0] = &featureStatus{ 37 | initFunc: func() featureStatus { 38 | called = true 39 | return featureStatus{err: expectedFeatureError} 40 | }, 41 | } 42 | 43 | featureErr := set.init() 44 | assert.ErrorIs(t, featureErr, expectedFeatureError) 45 | assert.Len(t, featureErr.(interface{ Unwrap() []error }).Unwrap(), 1) 46 | assert.True(t, called) 47 | } 48 | 49 | func TestFeatureSet_anySupported(t *testing.T) { 50 | // empty set - nothing supported 51 | set := FeatureSet{} 52 | assert.False(t, set.anySupported()) 53 | 54 | // something supported 55 | set[0] = &featureStatus{err: nil} 56 | assert.True(t, set.anySupported()) 57 | 58 | //nothing supported 59 | set[0] = &featureStatus{err: fmt.Errorf("")} 60 | set[4] = &featureStatus{err: fmt.Errorf("")} 61 | set[2] = &featureStatus{err: fmt.Errorf("")} 62 | assert.False(t, set.anySupported()) 63 | } 64 | 65 | func TestFeatureSet_isFeatureIdSupported(t *testing.T) { 66 | // non existing 67 | set := FeatureSet{} 68 | assert.False(t, set.isFeatureIdSupported(0)) 69 | 70 | // error 71 | set[0] = &featureStatus{err: fmt.Errorf("")} 72 | assert.False(t, set.isFeatureIdSupported(0)) 73 | 74 | // no error 75 | set[0] = &featureStatus{err: nil} 76 | assert.True(t, set.isFeatureIdSupported(0)) 77 | } 78 | 79 | func TestFeatureSet_getFeatureIdError(t *testing.T) { 80 | // non existing 81 | set := FeatureSet{} 82 | assert.ErrorIs(t, set.getFeatureIdError(0), undefinederr) 83 | 84 | // error 85 | set[0] = &featureStatus{err: fmt.Errorf("")} 86 | assert.Error(t, set.getFeatureIdError(0)) 87 | 88 | // no error 89 | set[0] = &featureStatus{err: nil} 90 | assert.NoError(t, set.getFeatureIdError(0)) 91 | } 92 | 93 | func TestInitialFeatureList(t *testing.T) { 94 | assert.False(t, featureList.anySupported()) 95 | 96 | for id := range featureList { 97 | assert.ErrorIs(t, featureList.getFeatureIdError(id), uninitialisedErr) 98 | } 99 | } 100 | 101 | func TestCreateInstance(t *testing.T) { 102 | origFeatureList := featureList 103 | featureList = FeatureSet{} 104 | 105 | defer func() { featureList = origFeatureList }() 106 | 107 | const machineName = "host1" 108 | host, err := CreateInstance(machineName) 109 | assert.Nil(t, host) 110 | assert.Error(t, err) 111 | 112 | featureList[4] = &featureStatus{initFunc: func() featureStatus { return featureStatus{} }} 113 | host, err = CreateInstance(machineName) 114 | assert.NoError(t, err) 115 | assert.NotNil(t, host) 116 | 117 | hostObj := host.(*hostImpl) 118 | assert.Equal(t, machineName, hostObj.name) 119 | } 120 | 121 | func Fuzz_library(f *testing.F) { 122 | states := map[string]map[string]string{ 123 | "state0": {"name": "C0"}, 124 | "state1": {"name": "C1"}, 125 | "state2": {"name": "C2"}, 126 | "state3": {"name": "POLL"}, 127 | "notState": nil, 128 | } 129 | cstatesFiles := map[string]map[string]map[string]string{ 130 | "cpu0": states, 131 | "cpu1": states, 132 | "cpu2": states, 133 | "cpu3": states, 134 | "cpu4": states, 135 | "cpu5": states, 136 | "cpu6": states, 137 | "cpu7": states, 138 | "Driver": 
{"intel_idle\n": nil}, 139 | } 140 | uncoreFreqs := map[string]string{ 141 | "initMax": "200", 142 | "initMin": "100", 143 | "max": "123", 144 | "min": "100", 145 | } 146 | uncoreFiles := map[string]map[string]string{ 147 | "package_00_die_00": uncoreFreqs, 148 | "package_01_die_00": uncoreFreqs, 149 | } 150 | cpuFreqs := map[string]string{ 151 | "max": "123", 152 | "min": "100", 153 | "epp": "some", 154 | "driver": "intel_pstate", 155 | "available_governors": "conservative ondemand userspace powersave", 156 | "package": "0", 157 | "die": "0", 158 | } 159 | cpuFreqsFiles := map[string]map[string]string{ 160 | "cpu0": cpuFreqs, 161 | "cpu1": cpuFreqs, 162 | "cpu2": cpuFreqs, 163 | "cpu3": cpuFreqs, 164 | "cpu4": cpuFreqs, 165 | "cpu5": cpuFreqs, 166 | "cpu6": cpuFreqs, 167 | "cpu7": cpuFreqs, 168 | } 169 | teardownCpu := setupCpuScalingTests(cpuFreqsFiles) 170 | teardownCstates := setupCpuCStatesTests(cstatesFiles) 171 | teardownUncore := setupUncoreTests(uncoreFiles, "intel_uncore_frequency 16384 0 - Live 0xffffffffc09c8000") 172 | defer teardownCpu() 173 | defer teardownCstates() 174 | defer teardownUncore() 175 | governorList := []string{"powersave", "performance"} 176 | eppList := []string{"power", "performance", "balance-power", "balance-performance"} 177 | f.Add("node1", "performance", uint(120000), uint(250000), uint(120000), uint(160000), uint(5), uint(10)) 178 | fuzzTarget := func(t *testing.T, nodeName string, poolName string, min uint, max uint, emin uint, emax uint, governorSeed uint, eppSeed uint) { 179 | basePath = "testing/cpus" 180 | getNumberOfCpus = func() uint { return 8 } 181 | nodeName = strings.ReplaceAll(nodeName, " ", "") 182 | nodeName = strings.ReplaceAll(nodeName, "\t", "") 183 | nodeName = strings.ReplaceAll(nodeName, "\000", "") 184 | poolName = strings.ReplaceAll(poolName, " ", "") 185 | poolName = strings.ReplaceAll(poolName, "\t", "") 186 | poolName = strings.ReplaceAll(poolName, "\000", "") 187 | if nodeName == "" || poolName == "" { 188 | return 189 | } 190 | node, _ := CreateInstance(nodeName) 191 | 192 | if node == nil { 193 | return 194 | } 195 | node.GetReservedPool().MoveCpuIDs([]uint{0}) 196 | governor := governorList[int(governorSeed)%len(governorList)] 197 | epp := eppList[int(eppSeed)%len(eppList)] 198 | pool, _ := node.AddExclusivePool(poolName) 199 | profile, _ := NewEcorePowerProfile(poolName, min, max, emin, emax, governor, epp) 200 | pool.SetPowerProfile(profile) 201 | pool.SetCStates(CStates{"C0": true, "C1": false}) 202 | states := pool.getCStates() 203 | if states != nil { 204 | node.ValidateCStates(*states) 205 | } 206 | node.GetSharedPool().MoveCpuIDs([]uint{1, 3, 5}) 207 | node.GetExclusivePool(poolName).MoveCpuIDs([]uint{1, 3, 5}) 208 | node.GetSharedPool().MoveCpuIDs([]uint{3}) 209 | node.GetExclusivePool(poolName).SetPowerProfile(nil) 210 | node.Topology().SetUncore(&uncoreFreq{max: 24000, min: 13000}) 211 | node.Topology().Package(0).SetUncore(&uncoreFreq{max: 24000, min: 12000}) 212 | node.Topology().Package(0).Die(0).SetUncore(&uncoreFreq{max: 23000, min: 11000}) 213 | 214 | } 215 | f.Fuzz(fuzzTarget) 216 | 217 | } 218 | -------------------------------------------------------------------------------- /pkg/power/scaling_driver.go: -------------------------------------------------------------------------------- 1 | package power 2 | 3 | // collection of Scaling Driver specific functions and methods 4 | 5 | import ( 6 | "errors" 7 | "fmt" 8 | "os" 9 | "path/filepath" 10 | "strings" 11 | ) 12 | 13 | const ( 14 | pStatesDrvFile = 
"cpufreq/scaling_driver" 15 | 16 | cpuMaxFreqFile = "cpufreq/cpuinfo_max_freq" 17 | cpuMinFreqFile = "cpufreq/cpuinfo_min_freq" 18 | scalingMaxFile = "cpufreq/scaling_max_freq" 19 | scalingMinFile = "cpufreq/scaling_min_freq" 20 | 21 | scalingGovFile = "cpufreq/scaling_governor" 22 | availGovFile = "cpufreq/scaling_available_governors" 23 | eppFile = "cpufreq/energy_performance_preference" 24 | 25 | defaultEpp = "default" 26 | defaultGovernor = cpuPolicyPowersave 27 | 28 | cpuPolicyPerformance = "performance" 29 | cpuPolicyPowersave = "powersave" 30 | cpuPolicyUserspace = "userspace" 31 | cpuPolicyOndemand = "ondemand" 32 | cpuPolicySchedutil = "schedutil" 33 | cpuPolicyConservative = "conservative" 34 | ) 35 | 36 | type ( 37 | CpuFrequencySet struct { 38 | min uint 39 | max uint 40 | } 41 | FreqSet interface { 42 | GetMin() uint 43 | GetMax() uint 44 | } 45 | typeSetter interface { 46 | GetType() uint 47 | setType(uint) 48 | } 49 | CoreTypeList []FreqSet 50 | ) 51 | 52 | func (s *CpuFrequencySet) GetMin() uint { 53 | return s.min 54 | } 55 | 56 | func (s *CpuFrequencySet) GetMax() uint { 57 | return s.max 58 | } 59 | 60 | // returns the index of a frequency set in a list and appends it if it's not 61 | // in the list already. this index is used to classify a core's type 62 | func (l *CoreTypeList) appendIfUnique(min uint, max uint) uint { 63 | for i, coreType := range coreTypes { 64 | if coreType.GetMin() == min && coreType.GetMax() == max { 65 | // core type exists so return index 66 | return uint(i) 67 | } 68 | } 69 | // core type doesn't exist so append it and return index 70 | coreTypes = append(coreTypes, &CpuFrequencySet{min: min, max: max}) 71 | return uint(len(coreTypes) - 1) 72 | } 73 | 74 | var defaultPowerProfile *profileImpl 75 | 76 | func isScalingDriverSupported(driver string) bool { 77 | for _, s := range []string{"intel_pstate", "intel_cpufreq", "acpi-cpufreq"} { 78 | if driver == s { 79 | return true 80 | } 81 | } 82 | return false 83 | } 84 | 85 | func initScalingDriver() featureStatus { 86 | pStates := featureStatus{ 87 | name: "Frequency-Scaling", 88 | initFunc: initScalingDriver, 89 | } 90 | var err error 91 | availableGovs, err = initAvailableGovernors() 92 | if err != nil { 93 | pStates.err = fmt.Errorf("failed to read available governors: %w", err) 94 | } 95 | driver, err := readCpuStringProperty(0, pStatesDrvFile) 96 | if err != nil { 97 | pStates.err = fmt.Errorf("%s - failed to read driver name: %w", pStates.name, err) 98 | } 99 | pStates.driver = driver 100 | if !isScalingDriverSupported(driver) { 101 | pStates.err = fmt.Errorf("%s - unsupported driver: %s", pStates.name, driver) 102 | } 103 | if err != nil { 104 | pStates.err = fmt.Errorf("%s - failed to determine driver: %w", pStates.name, err) 105 | } 106 | if pStates.err == nil { 107 | if err := generateDefaultProfile(); err != nil { 108 | pStates.err = fmt.Errorf("failed to read default frequenices: %w", err) 109 | } 110 | } 111 | return pStates 112 | } 113 | func initEpp() featureStatus { 114 | epp := featureStatus{ 115 | name: "Energy-Performance-Preference", 116 | initFunc: initEpp, 117 | } 118 | _, err := readCpuStringProperty(0, eppFile) 119 | if os.IsNotExist(errors.Unwrap(err)) { 120 | epp.err = fmt.Errorf("EPP file %s does not exist", eppFile) 121 | } 122 | return epp 123 | } 124 | 125 | func initAvailableGovernors() ([]string, error) { 126 | govs, err := readCpuStringProperty(0, availGovFile) 127 | if err != nil { 128 | return []string{}, err 129 | } 130 | return strings.Split(govs, " "), nil 131 
| } 132 | func GetAvailableGovernors() []string { 133 | return availableGovs 134 | } 135 | func generateDefaultProfile() error { 136 | maxFreq, err := readCpuUintProperty(0, cpuMaxFreqFile) 137 | if err != nil { 138 | return err 139 | } 140 | minFreq, err := readCpuUintProperty(0, cpuMinFreqFile) 141 | if err != nil { 142 | return err 143 | } 144 | 145 | _, err = readCpuStringProperty(0, eppFile) 146 | epp := defaultEpp 147 | if os.IsNotExist(errors.Unwrap(err)) { 148 | epp = "" 149 | } 150 | defaultPowerProfile = &profileImpl{ 151 | name: "default", 152 | max: maxFreq, 153 | min: minFreq, 154 | efficientMax: 0, 155 | efficientMin: 0, 156 | epp: epp, 157 | governor: defaultGovernor, 158 | } 159 | return nil 160 | } 161 | 162 | func (cpu *cpuImpl) updateFrequencies() error { 163 | if !IsFeatureSupported(FrequencyScalingFeature) { 164 | return nil 165 | } 166 | if cpu.pool.GetPowerProfile() != nil { 167 | return cpu.setDriverValues(cpu.pool.GetPowerProfile()) 168 | } 169 | return cpu.setDriverValues(defaultPowerProfile) 170 | } 171 | 172 | // setDriverValues writes the profile's governor, EPP value and scaling min/max frequencies for this cpu via sysfs 173 | func (cpu *cpuImpl) setDriverValues(powerProfile Profile) error { 174 | if err := cpu.writeGovernorValue(powerProfile.Governor()); err != nil { 175 | return fmt.Errorf("failed to set governor for cpu %d: %w", cpu.id, err) 176 | } 177 | if powerProfile.Epp() != "" { 178 | if err := cpu.writeEppValue(powerProfile.Epp()); err != nil { 179 | return fmt.Errorf("failed to set EPP value for cpu %d: %w", cpu.id, err) 180 | } 181 | } 182 | minFreq, maxFreq := cpu.getFreqsToScale(powerProfile) 183 | absMin, absMax := cpu.GetAbsMinMax() 184 | if maxFreq > absMax || minFreq < absMin { 185 | return fmt.Errorf("setting frequency %d-%d aborted as the allowed frequency range is min: %d max: %d", 186 | powerProfile.MinFreq(), powerProfile.MaxFreq(), absMin, absMax) 187 | } 188 | if err := cpu.writeScalingMaxFreq(maxFreq); err != nil { 189 | return fmt.Errorf("failed to set MaxFreq value for cpu %d: %w", cpu.id, err) 190 | } 191 | if err := cpu.writeScalingMinFreq(minFreq); err != nil { 192 | return fmt.Errorf("failed to set MinFreq value for cpu %d: %w", cpu.id, err) 193 | } 194 | return nil 195 | 196 | } 197 | 198 | func (cpu *cpuImpl) getFreqsToScale(profile Profile) (uint, uint) { 199 | switch cpu.GetCore().GetType() { 200 | case CpuTypeReferences.Pcore(): 201 | return profile.MinFreq(), profile.MaxFreq() 202 | case CpuTypeReferences.Ecore(): 203 | return profile.EfficientMinFreq(), profile.EfficientMaxFreq() 204 | default: 205 | // something went wrong. 
default to these values which will likely result in error 206 | return profile.MinFreq(), profile.MaxFreq() 207 | } 208 | } 209 | 210 | func (cpu *cpuImpl) writeGovernorValue(governor string) error { 211 | return os.WriteFile(filepath.Join(basePath, fmt.Sprint("cpu", cpu.id), scalingGovFile), []byte(governor), 0644) 212 | } 213 | func (cpu *cpuImpl) writeEppValue(eppValue string) error { 214 | return os.WriteFile(filepath.Join(basePath, fmt.Sprint("cpu", cpu.id), eppFile), []byte(eppValue), 0644) 215 | } 216 | func (cpu *cpuImpl) writeScalingMaxFreq(freq uint) error { 217 | scalingFile := filepath.Join(basePath, fmt.Sprint("cpu", cpu.id), scalingMaxFile) 218 | f, err := os.OpenFile( 219 | scalingFile, 220 | os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 221 | 0644, 222 | ) 223 | if err != nil { 224 | return err 225 | } 226 | defer f.Close() 227 | 228 | _, err = f.WriteString(fmt.Sprint(freq)) 229 | if err != nil { 230 | return err 231 | } 232 | return nil 233 | } 234 | func (cpu *cpuImpl) writeScalingMinFreq(freq uint) error { 235 | scalingFile := filepath.Join(basePath, fmt.Sprint("cpu", cpu.id), scalingMinFile) 236 | f, err := os.OpenFile( 237 | scalingFile, 238 | os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 239 | 0644, 240 | ) 241 | if err != nil { 242 | return err 243 | } 244 | defer f.Close() 245 | 246 | _, err = f.WriteString(fmt.Sprint(freq)) 247 | if err != nil { 248 | return err 249 | } 250 | return nil 251 | } 252 | -------------------------------------------------------------------------------- /pkg/power/scaling_driver_test.go: -------------------------------------------------------------------------------- 1 | package power 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | "strconv" 8 | "testing" 9 | 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | func TestIsScalingDriverSupported(t *testing.T) { 14 | assert.False(t, isScalingDriverSupported("something")) 15 | assert.True(t, isScalingDriverSupported("intel_pstate")) 16 | assert.True(t, isScalingDriverSupported("intel_cpufreq")) 17 | assert.True(t, isScalingDriverSupported("acpi-cpufreq")) 18 | } 19 | func TestPreChecksScalingDriver(t *testing.T) { 20 | var pStates featureStatus 21 | origpath := basePath 22 | basePath = "" 23 | pStates = initScalingDriver() 24 | 25 | assert.Equal(t, pStates.name, "Frequency-Scaling") 26 | assert.ErrorContains(t, pStates.err, "failed to determine driver") 27 | epp := initEpp() 28 | assert.Equal(t, epp.name, "Energy-Performance-Preference") 29 | assert.ErrorContains(t, epp.err, "EPP file cpufreq/energy_performance_preference does not exist") 30 | basePath = origpath 31 | teardown := setupCpuScalingTests(map[string]map[string]string{ 32 | "cpu0": { 33 | "min": "111", 34 | "max": "999", 35 | "driver": "intel_pstate", 36 | "available_governors": "performance", 37 | "epp": "performance", 38 | }, 39 | }) 40 | 41 | pStates = initScalingDriver() 42 | assert.Equal(t, "intel_pstate", pStates.driver) 43 | assert.NoError(t, pStates.err) 44 | epp = initEpp() 45 | assert.NoError(t, epp.err) 46 | 47 | teardown() 48 | defer setupCpuScalingTests(map[string]map[string]string{ 49 | "cpu0": { 50 | "driver": "some_unsupported_driver", 51 | }, 52 | })() 53 | 54 | pStates = initScalingDriver() 55 | assert.ErrorContains(t, pStates.err, "unsupported") 56 | assert.Equal(t, pStates.driver, "some_unsupported_driver") 57 | teardown() 58 | defer setupCpuScalingTests(map[string]map[string]string{ 59 | "cpu0": { 60 | "driver": "acpi-cpufreq", 61 | "available_governors": "powersave", 62 | "max": "3700", 63 | "min": "3200", 64 
| }, 65 | })() 66 | acpi := initScalingDriver() 67 | assert.Equal(t, "acpi-cpufreq", acpi.driver) 68 | assert.NoError(t, acpi.err) 69 | } 70 | 71 | func TestCoreImpl_updateFreqValues(t *testing.T) { 72 | var core *cpuImpl 73 | const ( 74 | maxDefault = 9990 75 | maxFreqToSet = 8888 76 | minFreqToSet = 1000 77 | ) 78 | typeCopy := coreTypes 79 | coreTypes = CoreTypeList{&CpuFrequencySet{min: minFreqToSet, max: maxDefault}} 80 | defer func() { coreTypes = typeCopy }() 81 | 82 | core = &cpuImpl{} 83 | // p-states not supported 84 | assert.NoError(t, core.updateFrequencies()) 85 | 86 | teardown := setupCpuScalingTests(map[string]map[string]string{ 87 | "cpu0": { 88 | "max": fmt.Sprint(maxDefault), 89 | "min": fmt.Sprint(minFreqToSet), 90 | }, 91 | }) 92 | 93 | defer teardown() 94 | 95 | // set desired power profile 96 | host := new(hostMock) 97 | pool := new(poolMock) 98 | core = &cpuImpl{ 99 | id: 0, 100 | pool: pool, 101 | core: &cpuCore{coreType: 0}, 102 | } 103 | pool.On("GetPowerProfile").Return(&profileImpl{max: maxFreqToSet, min: minFreqToSet}) 104 | pool.On("getHost").Return(host) 105 | host.On("NumCoreTypes").Return(uint(1)) 106 | 107 | assert.NoError(t, core.updateFrequencies()) 108 | maxFreqContent, _ := os.ReadFile(filepath.Join(basePath, "cpu0", scalingMaxFile)) 109 | maxFreqInt, _ := strconv.Atoi(string(maxFreqContent)) 110 | assert.Equal(t, maxFreqToSet, maxFreqInt) 111 | pool.AssertNumberOfCalls(t, "GetPowerProfile", 2) 112 | 113 | // set default power profile 114 | pool = new(poolMock) 115 | core.pool = pool 116 | pool.On("GetPowerProfile").Return(nil) 117 | pool.On("getHost").Return(host) 118 | assert.NoError(t, core.updateFrequencies()) 119 | maxFreqContent, _ = os.ReadFile(filepath.Join(basePath, "cpu0", scalingMaxFile)) 120 | maxFreqInt, _ = strconv.Atoi(string(maxFreqContent)) 121 | assert.Equal(t, maxDefault, maxFreqInt) 122 | pool.AssertNumberOfCalls(t, "GetPowerProfile", 1) 123 | 124 | } 125 | 126 | func TestCoreImpl_setDriverValues(t *testing.T) { 127 | const ( 128 | maxFreqToSet = 8888 129 | minFreqToSet = 1111 130 | governorToSet = "powersave" 131 | eppToSet = "testEpp" 132 | ) 133 | featureList[FrequencyScalingFeature].err = nil 134 | featureList[EPPFeature].err = nil 135 | typeCopy := coreTypes 136 | coreTypes = CoreTypeList{&CpuFrequencySet{min: 1000, max: 9000}} 137 | defer func() { coreTypes = typeCopy }() 138 | defer func() { featureList[EPPFeature].err = uninitialisedErr }() 139 | defer func() { featureList[FrequencyScalingFeature].err = uninitialisedErr }() 140 | 141 | poolmk := new(poolMock) 142 | host := new(hostMock) 143 | poolmk.On("getHost").Return(host) 144 | host.On("NumCoreTypes").Return(uint(1)) 145 | core := &cpuImpl{ 146 | id: 0, 147 | core: &cpuCore{id: 0, coreType: 0}, 148 | pool: poolmk, 149 | } 150 | 151 | teardown := setupCpuScalingTests(map[string]map[string]string{ 152 | "cpu0": { 153 | "governor": "performance", 154 | "max": "9999", 155 | "min": "999", 156 | "epp": "balance-performance", 157 | }, 158 | }) 159 | defer teardown() 160 | 161 | profile := &profileImpl{ 162 | name: "default", 163 | max: maxFreqToSet, 164 | min: minFreqToSet, 165 | epp: eppToSet, 166 | governor: governorToSet, 167 | } 168 | assert.NoError(t, core.setDriverValues(profile)) 169 | 170 | governorFileContent, _ := os.ReadFile(filepath.Join(basePath, "cpu0", scalingGovFile)) 171 | assert.Equal(t, governorToSet, string(governorFileContent)) 172 | 173 | eppFileContent, _ := os.ReadFile(filepath.Join(basePath, "cpu0", eppFile)) 174 | assert.Equal(t, eppToSet, 
string(eppFileContent)) 175 | 176 | maxFreqContent, _ := os.ReadFile(filepath.Join(basePath, "cpu0", scalingMaxFile)) 177 | maxFreqInt, _ := strconv.Atoi(string(maxFreqContent)) 178 | assert.Equal(t, maxFreqToSet, maxFreqInt) 179 | 180 | minFreqContent, _ := os.ReadFile(filepath.Join(basePath, "cpu0", scalingMinFile)) 181 | minFreqInt, _ := strconv.Atoi(string(minFreqContent)) 182 | assert.Equal(t, minFreqToSet, minFreqInt) 183 | 184 | // check that an empty epp leaves the previously written value untouched 185 | profile.epp = "" 186 | assert.NoError(t, core.setDriverValues(profile)) 187 | eppFileContent, _ = os.ReadFile(filepath.Join(basePath, "cpu0", eppFile)) 188 | assert.Equal(t, eppToSet, string(eppFileContent)) 189 | } 190 | -------------------------------------------------------------------------------- /pkg/power/topology.go: -------------------------------------------------------------------------------- 1 | package power 2 | 3 | const ( 4 | cpuTopologyDir = "topology/" 5 | packageIdFile = cpuTopologyDir + "physical_package_id" 6 | dieIdFile = cpuTopologyDir + "die_id" 7 | coreIdFile = cpuTopologyDir + "core_id" 8 | ) 9 | 10 | type topologyTypeObj interface { 11 | addCpu(uint) (Cpu, error) 12 | CPUs() *CpuList 13 | getID() uint 14 | } 15 | 16 | // this stores the frequencies of core types 17 | // cores can refer to this object using an array index 18 | var coreTypes CoreTypeList 19 | 20 | // parent struct to store system topology 21 | type ( 22 | cpuTopology struct { 23 | packages packageList 24 | allCpus CpuList 25 | uncore Uncore 26 | } 27 | 28 | Topology interface { 29 | topologyTypeObj 30 | hasUncore 31 | Packages() *[]Package 32 | Package(id uint) Package 33 | } 34 | ) 35 | 36 | func (s *cpuTopology) addCpu(cpuId uint) (Cpu, error) { 37 | var socketId uint 38 | var err error 39 | var cpu Cpu 40 | 41 | if socketId, err = readCpuUintProperty(cpuId, packageIdFile); err != nil { 42 | return nil, err 43 | } 44 | if socket, exists := s.packages[socketId]; exists { 45 | cpu, err = socket.addCpu(cpuId) 46 | } else { 47 | s.packages[socketId] = &cpuPackage{ 48 | topology: s, 49 | id: socketId, 50 | cpus: CpuList{}, 51 | dies: dieList{}, 52 | } 53 | cpu, err = s.packages[socketId].addCpu(cpuId) 54 | } 55 | if err != nil { 56 | return nil, err 57 | } 58 | s.allCpus[cpuId] = cpu 59 | return cpu, err 60 | } 61 | 62 | func (s *cpuTopology) CPUs() *CpuList { 63 | return &s.allCpus 64 | } 65 | 66 | func (s *cpuTopology) CoreTypes() CoreTypeList { 67 | return coreTypes 68 | } 69 | 70 | func (s *cpuTopology) Packages() *[]Package { 71 | pkgs := make([]Package, len(s.packages)) 72 | 73 | i := 0 74 | for _, pkg := range s.packages { 75 | pkgs[i] = pkg 76 | i++ 77 | } 78 | return &pkgs 79 | } 80 | 81 | func (s *cpuTopology) Package(id uint) Package { 82 | pkg := s.packages[id] 83 | return pkg 84 | } 85 | 86 | func (s *cpuTopology) getID() uint { 87 | return 0 88 | } 89 | 90 | // cpuPackage represents a physical cpu socket (package) 91 | type ( 92 | cpuPackage struct { 93 | topology Topology 94 | id uint 95 | uncore Uncore 96 | cpus CpuList 97 | dies dieList 98 | } 99 | Package interface { 100 | hasUncore 101 | topologyTypeObj 102 | Dies() *[]Die 103 | Die(id uint) Die 104 | } 105 | ) 106 | 107 | func (c *cpuPackage) Dies() *[]Die { 108 | dice := make([]Die, len(c.dies)) 109 | i := 0 110 | for _, die := range c.dies { 111 | dice[i] = die 112 | i++ 113 | } 114 | return &dice 115 | } 116 | 117 | func (c *cpuPackage) Die(id uint) Die { 118 | die := c.dies[id] 119 | return die 120 | } 121 | 122 | func (c *cpuPackage) addCpu(cpuId uint) (Cpu, error) { 
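// read this cpu's die id from sysfs; create the die on first encounter, then delegate the add to it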
123 | var err error 124 | var dieId uint 125 | var cpu Cpu 126 | 127 | if dieId, err = readCpuUintProperty(cpuId, dieIdFile); err != nil { 128 | return nil, err 129 | } 130 | 131 | if die, exists := c.dies[dieId]; exists { 132 | cpu, err = die.addCpu(cpuId) 133 | } else { 134 | c.dies[dieId] = &cpuDie{ 135 | parentSocket: c, 136 | id: dieId, 137 | cores: coreList{}, 138 | cpus: CpuList{}, 139 | } 140 | cpu, err = c.dies[dieId].addCpu(cpuId) 141 | } 142 | if err != nil { 143 | return nil, err 144 | } 145 | c.cpus.add(cpu) 146 | return cpu, nil 147 | } 148 | 149 | func (c *cpuPackage) CPUs() *CpuList { 150 | return &c.cpus 151 | } 152 | 153 | func (c *cpuPackage) getID() uint { 154 | return c.id 155 | } 156 | 157 | type ( 158 | cpuDie struct { 159 | parentSocket Package 160 | id uint 161 | uncore Uncore 162 | cores coreList 163 | cpus CpuList 164 | } 165 | Die interface { 166 | topologyTypeObj 167 | hasUncore 168 | Cores() *[]Core 169 | Core(id uint) Core 170 | } 171 | ) 172 | 173 | func (d *cpuDie) Cores() *[]Core { 174 | cores := make([]Core, len(d.cores)) 175 | i := 0 176 | for _, core := range d.cores { 177 | cores[i] = core 178 | i++ 179 | } 180 | return &cores 181 | } 182 | 183 | func (d *cpuDie) Core(id uint) Core { 184 | core := d.cores[id] 185 | return core 186 | } 187 | 188 | func (d *cpuDie) CPUs() *CpuList { 189 | return &d.cpus 190 | } 191 | 192 | func (d *cpuDie) addCpu(cpuId uint) (Cpu, error) { 193 | var err error 194 | var coreId uint 195 | var cpu Cpu 196 | 197 | if coreId, err = readCpuUintProperty(cpuId, coreIdFile); err != nil { 198 | return nil, err 199 | } 200 | 201 | if core, exists := d.cores[coreId]; exists { 202 | cpu, err = core.addCpu(cpuId) 203 | } else { 204 | d.cores[coreId] = &cpuCore{ 205 | parentDie: d, 206 | id: coreId, 207 | cpus: CpuList{}, 208 | } 209 | cpu, err = d.cores[coreId].addCpu(cpuId) 210 | } 211 | if err != nil { 212 | return nil, err 213 | } 214 | d.cpus.add(cpu) 215 | return cpu, nil 216 | } 217 | 218 | func (d *cpuDie) getID() uint { 219 | return d.id 220 | } 221 | 222 | type ( 223 | cpuCore struct { 224 | parentDie Die 225 | id uint 226 | cpus CpuList 227 | // an array index pointing to a frequency set 228 | coreType uint 229 | } 230 | Core interface { 231 | topologyTypeObj 232 | typeSetter 233 | } 234 | ) 235 | 236 | func (c *cpuCore) GetType() uint { 237 | return c.coreType 238 | } 239 | 240 | func (c *cpuCore) setType(t uint) { 241 | c.coreType = t 242 | } 243 | 244 | func (c *cpuCore) addCpu(cpuId uint) (Cpu, error) { 245 | cpu, err := newCpu(cpuId, c) 246 | if err != nil { 247 | return nil, err 248 | } 249 | c.cpus.add(cpu) 250 | return cpu, nil 251 | } 252 | 253 | func (c *cpuCore) CPUs() *CpuList { 254 | return &c.cpus 255 | } 256 | 257 | func (c *cpuCore) getID() uint { 258 | return c.id 259 | } 260 | 261 | type packageList map[uint]Package 262 | 263 | type dieList map[uint]Die 264 | 265 | type coreList map[uint]Core 266 | 267 | var discoverTopology = func() (Topology, error) { 268 | numOfCores := getNumberOfCpus() 269 | topology := &cpuTopology{ 270 | allCpus: make(CpuList, numOfCores), 271 | packages: packageList{}, 272 | uncore: defaultUncore, 273 | } 274 | for i := uint(0); i < numOfCores; i++ { 275 | if _, err := topology.addCpu(i); err != nil { 276 | return nil, err 277 | } 278 | } 279 | return topology, nil 280 | } 281 | -------------------------------------------------------------------------------- /pkg/power/topology_test.go: -------------------------------------------------------------------------------- 1 | package 
power 2 | 3 | import ( 4 | "os" 5 | "path/filepath" 6 | "strings" 7 | "testing" 8 | 9 | "github.com/stretchr/testify/assert" 10 | "github.com/stretchr/testify/mock" 11 | "github.com/stretchr/testify/suite" 12 | ) 13 | 14 | type mockCpuTopology struct { 15 | mock.Mock 16 | } 17 | 18 | func (m *mockCpuTopology) getID() uint { 19 | return m.Called().Get(0).(uint) 20 | } 21 | 22 | func (m *mockCpuTopology) SetUncore(uncore Uncore) error { 23 | return m.Called(uncore).Error(0) 24 | } 25 | 26 | func (m *mockCpuTopology) applyUncore() error { 27 | return m.Called().Error(0) 28 | } 29 | 30 | func (m *mockCpuTopology) getEffectiveUncore() Uncore { 31 | ret := m.Called() 32 | if ret.Get(0) != nil { 33 | return ret.Get(0).(Uncore) 34 | } 35 | return nil 36 | } 37 | 38 | func (m *mockCpuTopology) addCpu(u uint) (Cpu, error) { 39 | ret := m.Called(u) 40 | 41 | var r0 Cpu 42 | var r1 error 43 | 44 | if ret.Get(0) != nil { 45 | r0 = ret.Get(0).(Cpu) 46 | } 47 | r1 = ret.Error(1) 48 | 49 | return r0, r1 50 | } 51 | 52 | func (m *mockCpuTopology) CPUs() *CpuList { 53 | ret := m.Called() 54 | 55 | var r0 *CpuList 56 | if ret.Get(0) != nil { 57 | r0 = ret.Get(0).(*CpuList) 58 | } 59 | 60 | return r0 61 | } 62 | 63 | func (m *mockCpuTopology) Packages() *[]Package { 64 | ret := m.Called() 65 | 66 | var r0 *[]Package 67 | if ret.Get(0) != nil { 68 | r0 = ret.Get(0).(*[]Package) 69 | 70 | } 71 | return r0 72 | } 73 | 74 | func (m *mockCpuTopology) Package(id uint) Package { 75 | ret := m.Called(id) 76 | 77 | var r0 Package 78 | if ret.Get(0) != nil { 79 | r0 = ret.Get(0).(Package) 80 | } 81 | 82 | return r0 83 | } 84 | 85 | type mockCpuPackage struct { 86 | mock.Mock 87 | } 88 | 89 | func (m *mockCpuPackage) getID() uint { 90 | return m.Called().Get(0).(uint) 91 | } 92 | 93 | func (m *mockCpuPackage) SetUncore(uncore Uncore) error { 94 | return m.Called(uncore).Error(0) 95 | } 96 | 97 | func (m *mockCpuPackage) applyUncore() error { 98 | return m.Called().Error(0) 99 | } 100 | 101 | func (m *mockCpuPackage) getEffectiveUncore() Uncore { 102 | ret := m.Called() 103 | if ret.Get(0) != nil { 104 | return ret.Get(0).(Uncore) 105 | } 106 | return nil 107 | } 108 | 109 | func (m *mockCpuPackage) addCpu(u uint) (Cpu, error) { 110 | ret := m.Called(u) 111 | 112 | var r0 Cpu 113 | var r1 error 114 | 115 | if ret.Get(0) != nil { 116 | r0 = ret.Get(0).(Cpu) 117 | } 118 | r1 = ret.Error(1) 119 | 120 | return r0, r1 121 | } 122 | 123 | func (m *mockCpuPackage) CPUs() *CpuList { 124 | ret := m.Called() 125 | 126 | var r0 *CpuList 127 | if ret.Get(0) != nil { 128 | r0 = ret.Get(0).(*CpuList) 129 | } 130 | 131 | return r0 132 | } 133 | 134 | func (m *mockCpuPackage) Dies() *[]Die { 135 | ret := m.Called() 136 | 137 | var r0 *[]Die 138 | if ret.Get(0) != nil { 139 | r0 = ret.Get(0).(*[]Die) 140 | 141 | } 142 | return r0 143 | } 144 | 145 | func (m *mockCpuPackage) Die(id uint) Die { 146 | ret := m.Called(id) 147 | 148 | var r0 Die 149 | if ret.Get(0) != nil { 150 | r0 = ret.Get(0).(Die) 151 | } 152 | 153 | return r0 154 | } 155 | 156 | type mockCpuDie struct { 157 | mock.Mock 158 | } 159 | 160 | func (m *mockCpuDie) getID() uint { 161 | return m.Called().Get(0).(uint) 162 | } 163 | 164 | func (m *mockCpuDie) SetUncore(uncore Uncore) error { 165 | return m.Called(uncore).Error(0) 166 | } 167 | 168 | func (m *mockCpuDie) applyUncore() error { 169 | return m.Called().Error(0) 170 | } 171 | 172 | func (m *mockCpuDie) getEffectiveUncore() Uncore { 173 | ret := m.Called() 174 | if ret.Get(0) != nil { 175 | return ret.Get(0).(Uncore) 
176 | 	}
177 | 	return nil
178 | }
179 |
180 | func (m *mockCpuDie) addCpu(u uint) (Cpu, error) {
181 | 	ret := m.Called(u)
182 |
183 | 	var r0 Cpu
184 | 	var r1 error
185 |
186 | 	if ret.Get(0) != nil {
187 | 		r0 = ret.Get(0).(Cpu)
188 | 	}
189 | 	r1 = ret.Error(1)
190 |
191 | 	return r0, r1
192 | }
193 |
194 | func (m *mockCpuDie) CPUs() *CpuList {
195 | 	ret := m.Called()
196 |
197 | 	var r0 *CpuList
198 | 	if ret.Get(0) != nil {
199 | 		r0 = ret.Get(0).(*CpuList)
200 | 	}
201 |
202 | 	return r0
203 | }
204 |
205 | func (m *mockCpuDie) Cores() *[]Core {
206 | 	ret := m.Called()
207 |
208 | 	var r0 *[]Core
209 | 	if ret.Get(0) != nil {
210 | 		r0 = ret.Get(0).(*[]Core)
211 |
212 | 	}
213 | 	return r0
214 | }
215 |
216 | func (m *mockCpuDie) Core(id uint) Core {
217 | 	ret := m.Called(id)
218 |
219 | 	var r0 Core
220 | 	if ret.Get(0) != nil {
221 | 		r0 = ret.Get(0).(Core)
222 | 	}
223 |
224 | 	return r0
225 | }
226 |
227 | type mockCpuCore struct {
228 | 	mock.Mock
229 | 	Core
230 | }
231 |
232 | func (m *mockCpuCore) GetType() uint {
233 | 	return m.Called().Get(0).(uint)
234 | }
235 |
236 | func (m *mockCpuCore) setType(t uint) {
237 | 	// deliberate no-op: these tests never rely on the stored core type
238 | }
239 |
240 | func (m *mockCpuCore) addCpu(cpuId uint) (Cpu, error) {
241 | 	ret := m.Called(cpuId)
242 |
243 | 	var r0 Cpu
244 | 	var r1 error
245 |
246 | 	if ret.Get(0) != nil {
247 | 		r0 = ret.Get(0).(Cpu)
248 | 	}
249 | 	r1 = ret.Error(1)
250 |
251 | 	return r0, r1
252 | }
253 |
254 | func (m *mockCpuCore) CPUs() *CpuList {
255 | 	ret := m.Called()
256 |
257 | 	var r0 *CpuList
258 | 	if ret.Get(0) != nil {
259 | 		r0 = ret.Get(0).(*CpuList)
260 |
261 | 	}
262 | 	return r0
263 | }
264 |
265 | func (m *mockCpuCore) getID() uint {
266 | 	return m.Called().Get(0).(uint)
267 | }
268 |
269 | func setupTopologyTest(cpufiles map[string]map[string]string) func() {
270 | 	origBasePath := basePath
271 | 	basePath = "testing/cpus"
272 |
273 | 	// back up the pointer to the function that counts system CPUs
274 | 	// and replace it with a stub we control
275 | 	origGetNumOfCpusFunc := getNumberOfCpus
276 | 	getNumberOfCpus = func() uint { return uint(len(cpufiles)) }
277 |
278 | 	for cpuName, cpuDetails := range cpufiles {
279 | 		cpudir := filepath.Join(basePath, cpuName)
280 | 		err := os.MkdirAll(filepath.Join(cpudir, "topology"), os.ModePerm)
281 | 		if err != nil {
282 | 			panic(err)
283 | 		}
284 | 		err = os.MkdirAll(filepath.Join(cpudir, "cpufreq"), os.ModePerm)
285 | 		if err != nil {
286 | 			panic(err)
287 | 		}
288 | 		for prop, value := range cpuDetails {
289 | 			switch prop {
290 | 			case "pkg":
291 | 				err := os.WriteFile(filepath.Join(cpudir, packageIdFile), []byte(value+"\n"), 0644)
292 | 				if err != nil {
293 | 					panic(err)
294 | 				}
295 | 			case "die":
296 | 				err := os.WriteFile(filepath.Join(cpudir, dieIdFile), []byte(value+"\n"), 0644)
297 | 				if err != nil {
298 | 					panic(err)
299 | 				}
300 | 			case "core":
301 | 				err := os.WriteFile(filepath.Join(cpudir, coreIdFile), []byte(value+"\n"), 0644)
302 | 				if err != nil {
303 | 					panic(err)
304 | 				}
305 | 			case "max":
306 | 				if err := os.WriteFile(filepath.Join(cpudir, cpuMaxFreqFile), []byte(value+"\n"), 0644); err != nil { panic(err) }
307 | 			case "min":
308 | 				if err := os.WriteFile(filepath.Join(cpudir, cpuMinFreqFile), []byte(value+"\n"), 0644); err != nil { panic(err) }
309 | 			}
310 | 		}
311 | 	}
312 | 	return func() {
313 | 		// wipe created cpus dir
314 | 		err := os.RemoveAll(strings.Split(basePath, "/")[0])
315 | 		if err != nil {
316 | 			panic(err)
317 | 		}
318 | 		// revert cpu /sys path
319 | 		basePath = origBasePath
320 | 		// revert get number of system cpus function
321 | 		getNumberOfCpus = origGetNumOfCpusFunc
322 | 	}
323 | }
324 |
325 | type topologyTestSuite struct {
326 | 	suite.Suite
327 | 	origBasePath         string
328 | 	origGetNumCpus       func() uint
329 | 	origDiscoverTopology func() (Topology, error)
330 | }
331 |
332 | func TestTopologyDiscovery(t *testing.T) {
333 | 	tstSuite := &topologyTestSuite{
334 | 		origBasePath:         basePath,
335 | 		origGetNumCpus:       getNumberOfCpus,
336 | 		origDiscoverTopology: discoverTopology,
337 | 	}
338 | 	suite.Run(t, tstSuite)
339 | }
340 | func (s *topologyTestSuite) AfterTest(suiteName, testName string) {
341 | 	os.RemoveAll(strings.Split(basePath, "/")[0])
342 | 	basePath = s.origBasePath
343 | 	discoverTopology = s.origDiscoverTopology
344 | 	getNumberOfCpus = s.origGetNumCpus
345 | }
346 |
347 | func (s *topologyTestSuite) TestCpuImpl_discoverTopology() {
348 | 	t := s.T()
349 | 	// 2 packages, 1 die each, 2 cores, 2 threads; cpus 0,1,4,5 belong to pkg0 and 2,3,6,7 to pkg1; 4-7 are hyperthread cpus
350 | 	teardown := setupTopologyTest(map[string]map[string]string{
351 | 		"cpu0": {
352 | 			"pkg":  "0",
353 | 			"die":  "0",
354 | 			"core": "0",
355 | 			"max":  "900000",
356 | 			"min":  "10000",
357 | 		},
358 | 		"cpu1": {
359 | 			"pkg":  "0",
360 | 			"die":  "0",
361 | 			"core": "1",
362 | 			"max":  "900000",
363 | 			"min":  "10000",
364 | 		},
365 | 		"cpu2": {
366 | 			"pkg":  "1",
367 | 			"die":  "0",
368 | 			"core": "0",
369 | 			"max":  "900000",
370 | 			"min":  "10000",
371 | 		},
372 | 		"cpu3": {
373 | 			"pkg":  "1",
374 | 			"die":  "0",
375 | 			"core": "1",
376 | 			"max":  "900000",
377 | 			"min":  "10000",
378 | 		},
379 | 		"cpu4": {
380 | 			"pkg":  "0",
381 | 			"die":  "0",
382 | 			"core": "0",
383 | 			"max":  "500000",
384 | 			"min":  "10000",
385 | 		},
386 | 		"cpu5": {
387 | 			"pkg":  "0",
388 | 			"die":  "0",
389 | 			"core": "1",
390 | 			"max":  "500000",
391 | 			"min":  "10000",
392 | 		},
393 | 		"cpu6": {
394 | 			"pkg":  "1",
395 | 			"die":  "0",
396 | 			"core": "0",
397 | 			"max":  "500000",
398 | 			"min":  "10000",
399 | 		},
400 | 		"cpu7": {
401 | 			"pkg":  "1",
402 | 			"die":  "0",
403 | 			"core": "1",
404 | 			"max":  "500000",
405 | 			"min":  "10000",
406 | 		},
407 | 	})
408 | 	defer teardown()
409 |
410 | 	topology, err := discoverTopology()
411 | 	assert.NoError(t, err)
412 | 	topologyObj := topology.(*cpuTopology)
413 |
414 | 	assert.Len(t, topologyObj.packages, 2)
415 | 	assert.Len(t, topologyObj.allCpus, 8)
416 | 	assert.ElementsMatch(t, topologyObj.allCpus.IDs(), []uint{0, 1, 2, 3, 4, 5, 6, 7})
417 | 	assert.Equal(t, topologyObj.packages[0].(*cpuPackage).id, uint(0))
418 | 	assert.Equal(t, topologyObj.packages[1].(*cpuPackage).id, uint(1))
419 |
420 | 	assert.Len(t, topologyObj.packages[0].(*cpuPackage).dies, 1)
421 | 	assert.Len(t, topologyObj.packages[1].(*cpuPackage).dies, 1)
422 | 	assert.NotEqual(t, topologyObj.packages[0].(*cpuPackage).dies[0], topologyObj.packages[1].(*cpuPackage).dies[0])
423 | 	assert.ElementsMatch(t, topologyObj.packages[0].(*cpuPackage).cpus.IDs(), []uint{0, 1, 4, 5})
424 | 	assert.ElementsMatch(t, topologyObj.packages[1].(*cpuPackage).cpus.IDs(), []uint{2, 3, 6, 7})
425 | 	// only one die per pkg so pkg cpus == die cpus
426 | 	assert.ElementsMatch(t, topologyObj.packages[0].(*cpuPackage).dies[0].(*cpuDie).cpus, topologyObj.packages[0].(*cpuPackage).cpus)
427 | 	assert.ElementsMatch(t, topologyObj.packages[1].(*cpuPackage).dies[0].(*cpuDie).cpus, topologyObj.packages[1].(*cpuPackage).cpus)
428 |
429 | 	// emulate hyperthreading enabled so 2 cpus/threads per physical core
430 | 	// without hyperthreading we expect one thread per core
431 | 	assert.Len(t, topologyObj.packages[0].(*cpuPackage).dies[0].(*cpuDie).cores, 2)
432 | 	assert.Len(t, topologyObj.packages[1].(*cpuPackage).dies[0].(*cpuDie).cores, 2)
433 |
434 | 	assert.Len(t, topologyObj.packages[0].(*cpuPackage).dies[0].(*cpuDie).cpus, 4)
435 | 	assert.Len(t, topologyObj.packages[1].(*cpuPackage).dies[0].(*cpuDie).cpus, 4)
436 |
437 | 	assert.ElementsMatch(t, topologyObj.packages[0].(*cpuPackage).dies[0].(*cpuDie).cores[0].(*cpuCore).cpus.IDs(), []uint{0, 4})
438 | 	assert.ElementsMatch(t, topologyObj.packages[0].(*cpuPackage).dies[0].(*cpuDie).cores[1].(*cpuCore).cpus.IDs(), []uint{1, 5})
439 | 	assert.ElementsMatch(t, topologyObj.packages[1].(*cpuPackage).dies[0].(*cpuDie).cores[0].(*cpuCore).cpus.IDs(), []uint{2, 6})
440 | 	assert.ElementsMatch(t, topologyObj.packages[1].(*cpuPackage).dies[0].(*cpuDie).cores[1].(*cpuCore).cpus.IDs(), []uint{3, 7})
441 | }
442 |
443 | func (s *topologyTestSuite) TestSystemTopology_Getters() {
444 | 	cpus := make(CpuList, 2)
445 | 	cpus[0] = new(cpuMock)
446 | 	cpus[1] = new(cpuMock)
447 |
448 | 	pkgs := packageList{
449 | 		0: &cpuPackage{},
450 | 		1: &cpuPackage{},
451 | 	}
452 |
453 | 	topo := &cpuTopology{
454 | 		packages: pkgs,
455 | 		allCpus:  cpus,
456 | 	}
457 |
458 | 	assert.ElementsMatch(s.T(), *topo.CPUs(), cpus)
459 | 	assert.ElementsMatch(s.T(), *topo.Packages(), []Package{pkgs[0], pkgs[1]})
460 | 	assert.Equal(s.T(), topo.Package(1), pkgs[1])
461 | 	assert.Nil(s.T(), topo.Package(6))
462 | }
463 | func (s *topologyTestSuite) TestSystemTopology_addCpu() {
464 | 	defer setupTopologyTest(map[string]map[string]string{})()
465 | 	// fail to read fs
466 | 	topo := &cpuTopology{
467 | 		packages: packageList{},
468 | 		allCpus:  make(CpuList, 1),
469 | 	}
470 | 	cpu, err := topo.addCpu(0)
471 | 	assert.Error(s.T(), err)
472 | 	assert.Nil(s.T(), cpu)
473 | }
474 |
475 | func (s *topologyTestSuite) TestCpuPackage_Getters() {
476 | 	cpus := make(CpuList, 2)
477 | 	cpus[0] = new(cpuMock)
478 | 	cpus[1] = new(cpuMock)
479 |
480 | 	dice := dieList{
481 | 		0: &cpuDie{},
482 | 		1: &cpuDie{},
483 | 	}
484 |
485 | 	pkg := &cpuPackage{
486 | 		dies: dice,
487 | 		cpus: cpus,
488 | 	}
489 |
490 | 	assert.ElementsMatch(s.T(), *pkg.CPUs(), cpus)
491 | 	assert.ElementsMatch(s.T(), *pkg.Dies(), []Die{dice[0], dice[1]})
492 | 	assert.Equal(s.T(), pkg.Die(1), dice[1])
493 | 	assert.Nil(s.T(), pkg.Die(6))
494 | }
495 | func (s *topologyTestSuite) TestCpuPackage_addCpu() {
496 | 	defer setupTopologyTest(map[string]map[string]string{})()
497 | 	// fail to read fs
498 | 	pkg := &cpuPackage{
499 | 		dies: dieList{},
500 | 		cpus: make(CpuList, 1),
501 | 	}
502 | 	cpu, err := pkg.addCpu(0)
503 | 	assert.Error(s.T(), err)
504 | 	assert.Nil(s.T(), cpu)
505 | }
506 |
507 | func (s *topologyTestSuite) TestCpuDie_Getters() {
508 | 	cpus := make(CpuList, 2)
509 | 	cpus[0] = new(cpuMock)
510 | 	cpus[1] = new(cpuMock)
511 |
512 | 	cores := coreList{
513 | 		0: &cpuCore{},
514 | 		1: &cpuCore{},
515 | 	}
516 |
517 | 	die := &cpuDie{
518 | 		cores: cores,
519 | 		cpus:  cpus,
520 | 	}
521 |
522 | 	assert.ElementsMatch(s.T(), *die.CPUs(), cpus)
523 | 	assert.ElementsMatch(s.T(), *die.Cores(), []Core{cores[0], cores[1]})
524 | 	assert.Equal(s.T(), die.Core(1), cores[1])
525 | 	assert.Nil(s.T(), die.Core(6))
526 | }
527 | func (s *topologyTestSuite) TestCpuDie_addCpu() {
528 | 	defer setupTopologyTest(map[string]map[string]string{})()
529 | 	// fail to read fs
530 | 	die := &cpuDie{
531 | 		cores: coreList{},
532 | 		cpus:  make(CpuList, 1),
533 | 	}
534 | 	cpu, err := die.addCpu(0)
535 | 	assert.Error(s.T(), err)
536 | 	assert.Nil(s.T(), cpu)
537 | }
538 |
539 | func (s *topologyTestSuite) TestCpuCore_Getters() {
540 | 	cpus := make(CpuList, 2)
541 | 	cpus[0] = new(cpuMock)
542 | 	cpus[1] = new(cpuMock)
543 |
544 | 	core := &cpuCore{
545 | 		cpus: cpus,
546 | 	}
547 |
548 | 	assert.ElementsMatch(s.T(), *core.CPUs(), cpus)
549 | }
550 |
--------------------------------------------------------------------------------
/pkg/power/uncore.go:
--------------------------------------------------------------------------------
1 | package power
2 |
3 | import (
4 | 	"bufio"
5 | 	"fmt"
6 | 	"os"
7 | 	"path"
8 | 	"strings"
9 | )
10 |
11 | const (
12 | 	uncoreKmodName = "intel_uncore_frequency"
13 | 	uncoreDirName  = "intel_uncore_frequency"
14 |
15 | 	uncorePathFmt         = uncoreDirName + "/package_%02d_die_%02d"
16 | 	uncoreInitMaxFreqFile = "initial_max_freq_khz"
17 | 	uncoreInitMinFreqFile = "initial_min_freq_khz"
18 | 	uncoreMaxFreqFile     = "max_freq_khz"
19 | 	uncoreMinFreqFile     = "min_freq_khz"
20 | )
21 |
22 | type (
23 | 	uncoreFreq struct {
24 | 		min uint
25 | 		max uint
26 | 	}
27 | 	Uncore interface {
28 | 		write(pkgID, dieID uint) error
29 | 	}
30 | )
31 |
32 | func NewUncore(minFreq uint, maxFreq uint) (Uncore, error) {
33 | 	if !featureList.isFeatureIdSupported(UncoreFeature) {
34 | 		return nil, featureList.getFeatureIdError(UncoreFeature)
35 | 	}
36 | 	if minFreq < defaultUncore.min {
37 | 		return nil, fmt.Errorf("specified Min frequency is lower than %d kHz allowed by the hardware", defaultUncore.min)
38 | 	}
39 | 	if maxFreq > defaultUncore.max {
40 | 		return nil, fmt.Errorf("specified Max frequency is higher than %d kHz allowed by the hardware", defaultUncore.max)
41 | 	}
42 | 	if maxFreq < minFreq {
43 | 		return nil, fmt.Errorf("max freq cannot be lower than min")
44 | 	}
45 |
46 | 	normalizedMin := normalizeUncoreFreq(minFreq)
47 | 	normalizedMax := normalizeUncoreFreq(maxFreq)
48 | 	if normalizedMin != minFreq {
49 | 		log.Info("Uncore Min Frequency was normalized due to driver requirements", "requested", minFreq, "normalized", normalizedMin)
50 | 	}
51 | 	if normalizedMax != maxFreq {
52 | 		log.Info("Uncore Max Frequency was normalized due to driver requirements", "requested", maxFreq, "normalized", normalizedMax)
53 | 	}
54 | 	return &uncoreFreq{min: normalizedMin, max: normalizedMax}, nil
55 | }
56 |
57 | func (u *uncoreFreq) write(pkgID, dieID uint) error {
58 | 	if err := os.WriteFile(
59 | 		path.Join(basePath, fmt.Sprintf(uncorePathFmt, pkgID, dieID), uncoreMaxFreqFile),
60 | 		[]byte(fmt.Sprint(u.max)),
61 | 		0644,
62 | 	); err != nil {
63 | 		return err
64 | 	}
65 | 	if err := os.WriteFile(
66 | 		path.Join(basePath, fmt.Sprintf(uncorePathFmt, pkgID, dieID), uncoreMinFreqFile),
67 | 		[]byte(fmt.Sprint(u.min)),
68 | 		0644,
69 | 	); err != nil {
70 | 		return err
71 | 	}
72 | 	return nil
73 | }
74 |
75 | var (
76 | 	defaultUncore         = &uncoreFreq{}
77 | 	kernelModulesFilePath = "/proc/modules"
78 | )
79 |
80 | func initUncore() featureStatus {
81 | 	feature := featureStatus{
82 | 		name:     "Uncore frequency",
83 | 		driver:   "N/A",
84 | 		initFunc: initUncore,
85 | 	}
86 |
87 | 	if !checkKernelModuleLoaded(uncoreKmodName) {
88 | 		feature.err = fmt.Errorf("uncore feature error: %w", fmt.Errorf("kernel module %s not loaded", uncoreKmodName))
89 | 		return feature
90 | 	}
91 | 	uncoreDirPath := path.Join(basePath, uncoreDirName)
92 | 	uncoreDir, err := os.OpenFile(uncoreDirPath, os.O_RDONLY, 0)
93 | 	if err != nil {
94 | 		feature.err = fmt.Errorf("uncore feature error: %w", err)
95 | 		return feature
96 | 	}
97 | 	if _, err := uncoreDir.Readdirnames(1); err != nil {
98 | 		feature.err = fmt.Errorf("uncore feature error: %w", fmt.Errorf("uncore interface dir empty or invalid: %w", err))
99 | 		return feature
100 | 	}
101 | 	_ = uncoreDir.Close() // the handle was only needed to probe the dir; don't leak it
102 | 	if value, err := readUncoreProperty(0, 0, uncoreInitMaxFreqFile); err != nil {
103 | 		feature.err = fmt.Errorf("uncore feature error: %w", fmt.Errorf("failed to determine init freq: %w", err))
fmt.Errorf("uncore feature error %w", fmt.Errorf("failed to determine init freq: %w", err)) 104 | return feature 105 | } else { 106 | defaultUncore.max = value 107 | } 108 | if value, err := readUncoreProperty(0, 0, uncoreInitMinFreqFile); err != nil { 109 | feature.err = fmt.Errorf("uncore feature error %w", fmt.Errorf("failed to determine init freq: %w", err)) 110 | return feature 111 | } else { 112 | defaultUncore.min = value 113 | } 114 | 115 | return feature 116 | } 117 | 118 | func checkKernelModuleLoaded(module string) bool { 119 | modulesFile, err := os.Open(kernelModulesFilePath) 120 | if err != nil { 121 | return false 122 | } 123 | defer modulesFile.Close() 124 | 125 | reader := bufio.NewScanner(modulesFile) 126 | for reader.Scan() { 127 | if strings.Contains(reader.Text(), module) { 128 | return true 129 | } 130 | } 131 | return false 132 | } 133 | 134 | type hasUncore interface { 135 | SetUncore(uncore Uncore) error 136 | applyUncore() error 137 | getEffectiveUncore() Uncore 138 | } 139 | 140 | func (s *cpuTopology) SetUncore(uncore Uncore) error { 141 | s.uncore = uncore 142 | return s.applyUncore() 143 | } 144 | 145 | func (s *cpuTopology) getEffectiveUncore() Uncore { 146 | if s.uncore == nil { 147 | return defaultUncore 148 | } 149 | return s.uncore 150 | } 151 | func (s *cpuTopology) applyUncore() error { 152 | for _, pkg := range s.packages { 153 | if err := pkg.applyUncore(); err != nil { 154 | return err 155 | } 156 | } 157 | return nil 158 | } 159 | func (c *cpuPackage) SetUncore(uncore Uncore) error { 160 | c.uncore = uncore 161 | return c.applyUncore() 162 | } 163 | 164 | func (c *cpuPackage) applyUncore() error { 165 | for _, die := range c.dies { 166 | if err := die.applyUncore(); err != nil { 167 | return err 168 | } 169 | } 170 | return nil 171 | } 172 | 173 | func (c *cpuPackage) getEffectiveUncore() Uncore { 174 | if c.uncore != nil { 175 | return c.uncore 176 | } 177 | return c.topology.getEffectiveUncore() 178 | } 179 | 180 | func (d *cpuDie) SetUncore(uncore Uncore) error { 181 | d.uncore = uncore 182 | return d.applyUncore() 183 | } 184 | 185 | func (d *cpuDie) applyUncore() error { 186 | return d.getEffectiveUncore().write(d.parentSocket.getID(), d.id) 187 | } 188 | 189 | func (d *cpuDie) getEffectiveUncore() Uncore { 190 | if d.uncore != nil { 191 | return d.uncore 192 | } 193 | return d.parentSocket.getEffectiveUncore() 194 | } 195 | 196 | func readUncoreProperty(pkgID, dieID uint, property string) (uint, error) { 197 | fullPath := path.Join(basePath, fmt.Sprintf(uncorePathFmt, pkgID, dieID), property) 198 | return readUintFromFile(fullPath) 199 | } 200 | 201 | func normalizeUncoreFreq(freq uint) uint { 202 | return freq - (freq % uint(100_000)) 203 | } 204 | -------------------------------------------------------------------------------- /pkg/power/uncore_test.go: -------------------------------------------------------------------------------- 1 | package power 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path" 7 | "path/filepath" 8 | "strings" 9 | "testing" 10 | 11 | "github.com/stretchr/testify/assert" 12 | "github.com/stretchr/testify/mock" 13 | ) 14 | 15 | type mockUncore struct { 16 | mock.Mock 17 | } 18 | 19 | func (m *mockUncore) write(pkIgD, dieID uint) error { 20 | return m.Called(pkIgD, dieID).Error(0) 21 | } 22 | 23 | func setupUncoreTests(files map[string]map[string]string, modulesFileContent string) func() { 24 | origBasePath := basePath 25 | basePath = "testing/cpus" 26 | 27 | origModulesFile := kernelModulesFilePath 28 | 
29 |
30 | 	featureList[UncoreFeature].err = nil
31 |
32 | 	if err := os.MkdirAll(filepath.Join(basePath, uncoreDirName), os.ModePerm); err != nil {
33 | 		panic(err)
34 | 	}
35 |
36 | 	if modulesFileContent != "" {
37 | 		if err := os.WriteFile(kernelModulesFilePath, []byte(modulesFileContent), 0644); err != nil {
38 | 			panic(err)
39 | 		}
40 | 	}
41 |
42 | 	for pkgDie, freqFiles := range files {
43 | 		pkgUncoreDir := filepath.Join(basePath, uncoreDirName, pkgDie)
44 | 		if err := os.MkdirAll(pkgUncoreDir, os.ModePerm); err != nil {
45 | 			panic(err)
46 | 		}
47 | 		for file, value := range freqFiles {
48 | 			switch file {
49 | 			case "initMax":
50 | 				if err := os.WriteFile(path.Join(pkgUncoreDir, uncoreInitMaxFreqFile), []byte(value), 0644); err != nil {
51 | 					panic(err)
52 | 				}
53 | 			case "initMin":
54 | 				if err := os.WriteFile(path.Join(pkgUncoreDir, uncoreInitMinFreqFile), []byte(value), 0644); err != nil {
55 | 					panic(err)
56 | 				}
57 | 			case "Max":
58 | 				if err := os.WriteFile(path.Join(pkgUncoreDir, uncoreMaxFreqFile), []byte(value), 0644); err != nil {
59 | 					panic(err)
60 | 				}
61 | 			case "Min":
62 | 				if err := os.WriteFile(path.Join(pkgUncoreDir, uncoreMinFreqFile), []byte(value), 0644); err != nil {
63 | 					panic(err)
64 | 				}
65 | 			}
66 | 		}
67 | 	}
68 | 	return func() {
69 | 		if err := os.RemoveAll(strings.Split(basePath, "/")[0]); err != nil {
70 | 			panic(err)
71 | 		}
72 | 		featureList[UncoreFeature].err = uninitialisedErr
73 | 		kernelModulesFilePath = origModulesFile
74 | 		basePath = origBasePath
75 |
76 | 		defaultUncore = &uncoreFreq{}
77 | 	}
78 | }
79 | func Test_initUncore(t *testing.T) {
80 | 	var feature featureStatus
81 | 	var teardown func()
82 | 	teardown = setupUncoreTests(map[string]map[string]string{
83 | 		"package_00_die_00": {
84 | 			"initMax": "999",
85 | 			"initMin": "100",
86 | 		},
87 | 	},
88 | 		"intel_cstates 14 0 - Live 0000ffffad212d\n"+
89 | 			uncoreKmodName+" 324 0 - Live 0000ffff3ea334\n"+
90 | 			"rtscan 2342 0 -Live 0000ffff234ab4d",
91 | 	)
92 | 	defer teardown()
93 | 	// happy path
94 | 	feature = initUncore()
95 |
96 | 	assert.Equal(t, "Uncore frequency", feature.name)
97 | 	assert.Equal(t, "N/A", feature.driver)
98 |
99 | 	assert.NoError(t, feature.err)
100 | 	assert.Equal(t, uint(999), defaultUncore.max)
101 | 	assert.Equal(t, uint(100), defaultUncore.min)
102 | 	teardown()
103 |
104 | 	// module not loaded
105 | 	teardown = setupUncoreTests(map[string]map[string]string{},
106 | 		"intel_cstates 14 0 - Live 0000ffffad212d\n"+
107 | 			"rtscan 2342 0 -Live 0000ffff234ab4d",
108 | 	)
109 | 	feature = initUncore()
110 | 	assert.ErrorContains(t, feature.err, "not loaded")
111 | 	teardown()
112 |
113 | 	// no dies to manage
114 | 	teardown = setupUncoreTests(map[string]map[string]string{},
115 | 		"intel_cstates 14 0 - Live 0000ffffad212d\n"+
116 | 			uncoreKmodName+" 324 0 - Live 0000ffff3ea334\n"+
117 | 			"rtscan 2342 0 -Live 0000ffff234ab4d",
118 | 	)
119 | 	feature = initUncore()
120 | 	assert.ErrorContains(t, feature.err, "empty or invalid")
121 | 	teardown()
122 |
123 | 	// can't read init freqs
124 | 	teardown = setupUncoreTests(map[string]map[string]string{
125 | 		"package_00_die_00": {},
126 | 	},
127 | 		"intel_cstates 14 0 - Live 0000ffffad212d\n"+
128 | 			uncoreKmodName+" 324 0 - Live 0000ffff3ea334\n"+
129 | 			"rtscan 2342 0 -Live 0000ffff234ab4d",
130 | 	)
131 | 	feature = initUncore()
132 | 	assert.ErrorContains(t, feature.err, "failed to determine init freq")
133 | 	teardown()
134 | }
135 |
136 | func TestNewUncore(t *testing.T) {
137 | 	var uncore Uncore
138 | 	var err error
139 | 	defer setupUncoreTests(map[string]map[string]string{}, "")()
140 |
141 | 	// happy path
142 | 	defaultUncore.min = 1_200_000
143 | 	defaultUncore.max = 2_400_000
144 |
145 | 	uncore, err = NewUncore(1_400_000, 2_200_000)
146 | 	assert.NoError(t, err)
147 | 	assert.Equal(t, uint(1_400_000), uncore.(*uncoreFreq).min)
148 | 	assert.Equal(t, uint(2_200_000), uncore.(*uncoreFreq).max)
149 |
150 | 	// max too high
151 | 	uncore, err = NewUncore(1_400_000, 9_999_999)
152 | 	assert.Nil(t, uncore)
153 | 	assert.ErrorContains(t, err, "Max frequency is higher than")
154 |
155 | 	// min too low
156 | 	uncore, err = NewUncore(100, 2_200_000)
157 | 	assert.Nil(t, uncore)
158 | 	assert.ErrorContains(t, err, "Min frequency is lower than")
159 |
160 | 	// uncore not supported
161 | 	featureList[UncoreFeature].err = fmt.Errorf("uncore borked")
162 | 	uncore, err = NewUncore(1_400_000, 2_200_000)
163 | 	assert.ErrorIs(t, err, featureList[UncoreFeature].err)
164 | }
165 |
166 | func TestUncoreFreq_write(t *testing.T) {
167 | 	defer setupUncoreTests(map[string]map[string]string{
168 | 		"package_00_die_00": {
169 | 			"Max": "999",
170 | 			"Min": "100",
171 | 		},
172 | 		"package_01_die_00": {
173 | 			"Max": "999",
174 | 			"Min": "100",
175 | 		},
176 | 	}, "")()
177 |
178 | 	uncore := uncoreFreq{min: 1, max: 9323}
179 | 	err := uncore.write(1, 0)
180 | 	assert.NoError(t, err)
181 |
182 | 	value, _ := readUncoreProperty(1, 0, uncoreMinFreqFile)
183 | 	assert.Equal(t, uint(1), value)
184 |
185 | 	value, _ = readUncoreProperty(1, 0, uncoreMaxFreqFile)
186 | 	assert.Equal(t, uint(9323), value)
187 |
188 | 	// write to non-existing file
189 | 	err = uncore.write(2, 3)
190 | 	assert.ErrorContains(t, err, "no such file or directory")
191 | }
192 |
193 | func TestCpuTopology_SetUncoreFrequency(t *testing.T) {
194 | 	uncore := &uncoreFreq{}
195 | 	pkg1 := new(mockCpuPackage)
196 | 	pkg1.On("applyUncore").Return(nil)
197 | 	topo := cpuTopology{
198 | 		packages: packageList{0: pkg1},
199 | 	}
200 |
201 | 	assert.NoError(t, topo.SetUncore(uncore))
202 | 	assert.Equal(t, uncore, topo.uncore)
203 | 	pkg1.AssertExpectations(t)
204 | }
205 |
206 | func TestCpuTopology_applyUncore(t *testing.T) {
207 | 	pkg1 := new(mockCpuPackage)
208 | 	pkg1.On("applyUncore").Return(nil)
209 | 	pkg2 := new(mockCpuPackage)
210 | 	pkg2.On("applyUncore").Return(nil)
211 |
212 | 	topo := &cpuTopology{packages: packageList{0: pkg1, 1: pkg2}}
213 | 	assert.NoError(t, topo.applyUncore())
214 | 	pkg1.AssertExpectations(t)
215 | 	pkg2.AssertExpectations(t)
216 |
217 | 	toRetErr := fmt.Errorf("scuffed")
218 | 	pkg3 := new(mockCpuPackage)
219 | 	pkg3.On("applyUncore").Return(toRetErr)
220 | 	topo = &cpuTopology{packages: packageList{42: pkg3}}
221 | 	assert.ErrorIs(t, topo.applyUncore(), toRetErr)
222 | }
223 |
224 | func TestCpuTopology_GetEffectiveUncore(t *testing.T) {
225 | 	uncore := new(mockUncore)
226 | 	topo := &cpuTopology{uncore: uncore}
227 |
228 | 	assert.Equal(t, uncore, topo.getEffectiveUncore())
229 |
230 | 	topo.uncore = nil
231 | 	assert.Equal(t, defaultUncore, topo.getEffectiveUncore())
232 | }
233 |
234 | func TestCpuPackage_SetUncoreFrequency(t *testing.T) {
235 | 	uncore := &uncoreFreq{}
236 | 	die := new(mockCpuDie)
237 | 	die.On("applyUncore").Return(nil)
238 | 	pkg := cpuPackage{
239 | 		dies: dieList{0: die},
240 | 	}
241 |
242 | 	assert.NoError(t, pkg.SetUncore(uncore))
243 | 	assert.Equal(t, uncore, pkg.uncore)
244 | 	die.AssertExpectations(t)
245 | }
246 |
247 | func TestCpuPackage_applyUncore(t *testing.T) {
248 | 	die1 := new(mockCpuDie)
249 | 	die1.On("applyUncore").Return(nil)
250 | 	die2 := new(mockCpuDie)
251 | 	die2.On("applyUncore").Return(nil)
die2.On("applyUncore").Return(nil) 252 | 253 | pkg := &cpuPackage{dies: dieList{0: die1, 1: die2}} 254 | assert.NoError(t, pkg.applyUncore()) 255 | die1.AssertExpectations(t) 256 | die2.AssertExpectations(t) 257 | 258 | toRetErr := fmt.Errorf("scuffed") 259 | die3 := new(mockCpuDie) 260 | die3.On("applyUncore").Return(toRetErr) 261 | pkg = &cpuPackage{dies: dieList{42: die3}} 262 | assert.ErrorIs(t, pkg.applyUncore(), toRetErr) 263 | } 264 | 265 | func TestCpuPackage_getEffectiveUncore(t *testing.T) { 266 | topo := new(mockCpuTopology) 267 | uncore := new(mockUncore) 268 | pkg := &cpuPackage{ 269 | topology: topo, 270 | uncore: uncore, 271 | } 272 | topo.AssertNotCalled(t, "getEffectiveUncore") 273 | assert.Equal(t, uncore, pkg.getEffectiveUncore()) 274 | 275 | topo = new(mockCpuTopology) 276 | uncore = new(mockUncore) 277 | topo.On("getEffectiveUncore").Return(uncore) 278 | pkg = &cpuPackage{topology: topo} 279 | assert.Equal(t, uncore, pkg.getEffectiveUncore()) 280 | topo.AssertExpectations(t) 281 | } 282 | 283 | func TestCpuDie_SetUncoreFrequency(t *testing.T) { 284 | uncore := new(mockUncore) 285 | uncore.On("write", uint(1), uint(0)).Return(nil) 286 | 287 | pkg := new(mockCpuPackage) 288 | pkg.On("getID").Return(uint(1)) 289 | 290 | die := &cpuDie{ 291 | parentSocket: pkg, 292 | id: 0, 293 | } 294 | 295 | assert.NoError(t, die.SetUncore(uncore)) 296 | 297 | assert.Equal(t, uncore, die.uncore) 298 | pkg.AssertExpectations(t) 299 | uncore.AssertExpectations(t) 300 | } 301 | 302 | func TestCpuDie_getEffectiveUncore(t *testing.T) { 303 | pkg := new(mockCpuPackage) 304 | uncore := new(mockUncore) 305 | die := &cpuDie{ 306 | parentSocket: pkg, 307 | uncore: uncore, 308 | } 309 | pkg.AssertNotCalled(t, "getEffectiveUncore") 310 | assert.Equal(t, uncore, die.getEffectiveUncore()) 311 | 312 | pkg = new(mockCpuPackage) 313 | uncore = new(mockUncore) 314 | pkg.On("getEffectiveUncore").Return(uncore) 315 | die = &cpuDie{parentSocket: pkg} 316 | assert.Equal(t, uncore, die.getEffectiveUncore()) 317 | pkg.AssertExpectations(t) 318 | } 319 | 320 | func TestCpuDie_applyUncore(t *testing.T) { 321 | uncore := new(mockUncore) 322 | uncore.On("write", uint(2), uint(2)).Return(nil) 323 | 324 | pkg := new(mockCpuPackage) 325 | pkg.On("getID").Return(uint(2)) 326 | 327 | die := &cpuDie{ 328 | parentSocket: pkg, 329 | id: 2, 330 | uncore: uncore, 331 | } 332 | 333 | assert.NoError(t, die.applyUncore()) 334 | 335 | pkg.AssertExpectations(t) 336 | uncore.AssertExpectations(t) 337 | 338 | //error writing 339 | uncore = new(mockUncore) 340 | expectedErr := fmt.Errorf("") 341 | uncore.On("write", uint(2), uint(2)).Return(expectedErr) 342 | 343 | pkg = new(mockCpuPackage) 344 | pkg.On("getID").Return(uint(2)) 345 | 346 | die = &cpuDie{ 347 | parentSocket: pkg, 348 | id: 2, 349 | uncore: uncore, 350 | } 351 | 352 | assert.ErrorIs(t, die.applyUncore(), expectedErr) 353 | 354 | pkg.AssertExpectations(t) 355 | uncore.AssertExpectations(t) 356 | } 357 | 358 | func TestNormalizeUncoreFrequency(t *testing.T) { 359 | assert.Equal(t, uint(1_500_000), normalizeUncoreFreq(1_511_111)) 360 | assert.Equal(t, uint(1_500_000), normalizeUncoreFreq(1_500_000)) 361 | assert.Equal(t, uint(0), normalizeUncoreFreq(12)) 362 | assert.Equal(t, uint(1_100_000), normalizeUncoreFreq(1_100_001)) 363 | } 364 | --------------------------------------------------------------------------------