├── .gitignore ├── CODE-OF-CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE.txt ├── NOTICE.txt ├── README.md ├── Roadmap ├── cleanup.sh ├── cmd └── kbench.go ├── config ├── cp_heavy_12client │ └── config.json ├── cp_heavy_8client │ └── config.json ├── cp_light_1client │ └── config.json ├── cp_light_4client │ └── config.json ├── cp_other_resources │ └── pv_pvc │ │ ├── pv.yaml │ │ ├── pv_config.json │ │ ├── pvc.yaml │ │ └── pvc_config.json ├── default │ ├── config.json │ ├── create-4-rc-list-matchgoroutine-demo.json │ ├── deployment.yaml │ ├── docker-compose.yml │ ├── docker-compose1.yml │ ├── download-files.json │ ├── kompose-test-config.json │ ├── matchgoroutine-matchoperation-example.json │ ├── pod.yaml │ ├── replication_controller.yaml │ └── service.yaml ├── dp_fio │ ├── README │ ├── config.json │ ├── describe_fio.sh │ ├── fio_pod.yaml │ ├── fio_pvc.yaml │ ├── get_fio.sh │ └── logs_fio.sh ├── dp_netperf_internode │ ├── README │ ├── config.json │ ├── describe_netperf.sh │ ├── get_netperf.sh │ ├── logs_netperf.sh │ ├── netperf_client_pod.yaml │ ├── netperf_server_pod.yaml │ └── netperf_service.yaml ├── dp_network_internode │ ├── README │ ├── config.json │ ├── describe_netperf.sh │ ├── get_netperf.sh │ ├── iperf_client_pod.yaml │ ├── iperf_server_pod.yaml │ ├── iperf_service.yaml │ ├── logs_netperf.sh │ └── wcp-netperf-3iperfstreams-config.json ├── dp_network_interzone │ ├── README │ ├── config.json │ ├── describe_netperf.sh │ ├── get_netperf.sh │ ├── iperf_client_pod.yaml │ ├── iperf_server_pod.yaml │ ├── iperf_service.yaml │ ├── logs_netperf.sh │ └── wcp-netperf-3iperfstreams-config.json ├── dp_network_intranode │ ├── README │ ├── config.json │ ├── describe_netperf.sh │ ├── get_netperf.sh │ ├── iperf_client_pod.yaml │ ├── iperf_server_pod.yaml │ ├── iperf_service.yaml │ ├── logs_netperf.sh │ └── wcp-netperf-3iperfstreams-config.json ├── dp_redis │ ├── README │ ├── config.json │ ├── describe_redis.sh │ ├── get_redis.sh │ ├── logs_redis.sh │ └── redis_pod.yaml ├── 
dp_redis_density │ ├── README │ ├── aggregate_results.sh │ ├── config.json │ ├── describe_redis.sh │ ├── get_redis.sh │ ├── logs_redis.sh │ └── redis_pod.yaml ├── dp_redis_service │ ├── README │ ├── config.json │ ├── describe_redis.sh │ ├── get_redis.sh │ ├── logs_redis.sh │ ├── redis_client_pod.yaml │ ├── redis_deployment.yaml │ └── redis_service.yaml └── predicate_example │ ├── command_in_container_predicate.json │ ├── command_outside_container_and_resource_predicate.json │ ├── group_version_resource_predicate.json │ ├── labelled_resources_predicate.json │ └── simple_resource_predicate.json ├── documentation └── kbench-overview.jpg ├── go.mod ├── go.sum ├── infrastructure ├── gke │ └── type.go └── vmware │ ├── type.go │ └── vmw_client.go ├── install.sh ├── manager ├── deployment_manager.go ├── manager.go ├── namespace_manager.go ├── pod_manager.go ├── replicationcontroller_manager.go ├── resource_manager.go ├── service_manager.go └── statefulset_manager.go ├── perf_util ├── perf_data.go └── perf_util.go ├── pkg ├── prometheus │ ├── README.md │ ├── manifests │ │ ├── alertmanager-alertmanager.yaml │ │ ├── alertmanager-secret.yaml │ │ ├── alertmanager-service.yaml │ │ ├── alertmanager-serviceAccount.yaml │ │ ├── alertmanager-serviceMonitor.yaml │ │ ├── grafana-dashboardDatasources.yaml │ │ ├── grafana-dashboardDefinitions.yaml │ │ ├── grafana-dashboardSources.yaml │ │ ├── grafana-deployment.yaml │ │ ├── grafana-service.yaml │ │ ├── grafana-serviceAccount.yaml │ │ ├── grafana-serviceMonitor.yaml │ │ ├── kube-state-metrics-clusterRole.yaml │ │ ├── kube-state-metrics-clusterRoleBinding.yaml │ │ ├── kube-state-metrics-deployment.yaml │ │ ├── kube-state-metrics-service.yaml │ │ ├── kube-state-metrics-serviceAccount.yaml │ │ ├── kube-state-metrics-serviceMonitor.yaml │ │ ├── node-exporter-clusterRole.yaml │ │ ├── node-exporter-clusterRoleBinding.yaml │ │ ├── node-exporter-daemonset.yaml │ │ ├── node-exporter-service.yaml │ │ ├── node-exporter-serviceAccount.yaml │ │ ├── 
node-exporter-serviceMonitor.yaml │ │ ├── prometheus-adapter-apiService.yaml │ │ ├── prometheus-adapter-clusterRole.yaml │ │ ├── prometheus-adapter-clusterRoleAggregatedMetricsReader.yaml │ │ ├── prometheus-adapter-clusterRoleBinding.yaml │ │ ├── prometheus-adapter-clusterRoleBindingDelegator.yaml │ │ ├── prometheus-adapter-clusterRoleServerResources.yaml │ │ ├── prometheus-adapter-configMap.yaml │ │ ├── prometheus-adapter-deployment.yaml │ │ ├── prometheus-adapter-roleBindingAuthReader.yaml │ │ ├── prometheus-adapter-service.yaml │ │ ├── prometheus-adapter-serviceAccount.yaml │ │ ├── prometheus-clusterRole.yaml │ │ ├── prometheus-clusterRoleBinding.yaml │ │ ├── prometheus-operator-serviceMonitor.yaml │ │ ├── prometheus-prometheus.yaml │ │ ├── prometheus-roleBindingConfig.yaml │ │ ├── prometheus-roleBindingSpecificNamespaces.yaml │ │ ├── prometheus-roleConfig.yaml │ │ ├── prometheus-roleSpecificNamespaces.yaml │ │ ├── prometheus-rules.yaml │ │ ├── prometheus-service.yaml │ │ ├── prometheus-serviceAccount.yaml │ │ ├── prometheus-serviceMonitor.yaml │ │ ├── prometheus-serviceMonitorApiserver.yaml │ │ ├── prometheus-serviceMonitorCoreDNS.yaml │ │ ├── prometheus-serviceMonitorKubeControllerManager.yaml │ │ ├── prometheus-serviceMonitorKubeScheduler.yaml │ │ ├── prometheus-serviceMonitorKubelet.yaml │ │ └── setup │ │ │ ├── 0namespace-namespace.yaml │ │ │ ├── prometheus-operator-0alertmanagerCustomResourceDefinition.yaml │ │ │ ├── prometheus-operator-0podmonitorCustomResourceDefinition.yaml │ │ │ ├── prometheus-operator-0prometheusCustomResourceDefinition.yaml │ │ │ ├── prometheus-operator-0prometheusruleCustomResourceDefinition.yaml │ │ │ ├── prometheus-operator-0servicemonitorCustomResourceDefinition.yaml │ │ │ ├── prometheus-operator-0thanosrulerCustomResourceDefinition.yaml │ │ │ ├── prometheus-operator-clusterRole.yaml │ │ │ ├── prometheus-operator-clusterRoleBinding.yaml │ │ │ ├── prometheus-operator-deployment.yaml │ │ │ ├── prometheus-operator-service.yaml │ │ │ 
└── prometheus-operator-serviceAccount.yaml │ └── prometheus.go └── waverunner │ ├── WR_wcpwrapper.sh │ ├── golden │ ├── ESX │ │ ├── WC │ │ │ ├── EVENTS │ │ │ ├── WF │ │ │ │ └── telegraf.conf │ │ │ ├── WaveCounter.py │ │ │ ├── install_WaveCounter.sh │ │ │ ├── notes │ │ │ ├── perfcounters.py │ │ │ └── sample_event_files │ │ │ │ ├── .gitkeep │ │ │ │ ├── demoEvents │ │ │ │ ├── events_16.txt │ │ │ │ ├── events_24.txt │ │ │ │ ├── events_8.txt │ │ │ │ └── events_demo.txt │ │ └── esxtop_cpu.sh │ ├── LIN │ │ └── collect_guest_stats.sh │ └── WF │ │ ├── install_configure_wavefront_hyperv.sh │ │ ├── install_configure_wavefront_linux.sh │ │ ├── install_configure_wavefront_windows.sh │ │ ├── install_popVM.sh │ │ ├── python_wavefront.py │ │ ├── telegraf_HYV.conf │ │ ├── telegraf_linux.conf │ │ ├── telegraf_windows.conf │ │ └── waverunner_guest.pp │ ├── install.sh │ ├── scripts │ └── config_monitor_hosts.sh │ └── waverunner_driver.pp ├── recompile.sh ├── run.sh └── util ├── driver_util.go ├── predicate.go ├── spec_generator.go ├── testdriver.go └── wcpconfig.go /.gitignore: -------------------------------------------------------------------------------- 1 | results* 2 | results*/ 3 | kbench*.log 4 | -------------------------------------------------------------------------------- /CODE-OF-CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in K-Bench project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, sex characteristics, gender identity and expression, 9 | level of experience, education, socio-economic status, nationality, personal 10 | appearance, race, religion, or sexual identity and orientation. 
11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. 
Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at oss-coc@vmware.com. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 72 | 73 | [homepage]: https://www.contributor-covenant.org 74 | 75 | For answers to common questions about this code of conduct, see 76 | https://www.contributor-covenant.org/faq 77 | 78 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to K-Bench 2 | 3 | Before you start to contribute to K-Bench, please read our [Developer Certificate of Origin](https://cla.vmware.com/dco). 4 | All contributions to this repository must be signed as described on that page. Your signature certifies 5 | that you wrote the patch or have the right to pass it on as an open-source patch. 6 | 7 | ## Community and Communication 8 | 9 | K-Bench's success relies on community support and feedback. 
Users and contributors are welcome to join our 10 | slack channel (in construction) to discuss K-Bench collaboration and provide feedback: 11 | 12 | - [Slack](https://vmwarecode.slack.com/messages/kbench) (TBD) 13 | 14 | If you find a bug while using K-Bench, or you have an idea or proposal to improve the K-Bench project, you may 15 | also create an issue (and mark it with appropriate kind/category) using GitHub's issue tracker. 16 | 17 | ## Contributors' Guideline 18 | 19 | The K-Bench project welcomes contributions from anyone who would like to contribute. In general, 20 | bug fixes, new features, improvements in code and documentation, extensible or independent modules 21 | that can be integrated into the current framework are all acceptable changes. 22 | 23 | ### Coding Style 24 | 25 | All go source files should be formatted using the go fmt tool. 26 | 27 | ### Contribution Workflow 28 | 29 | To contribute to the code base, you may submit a pull request after thoroughly testing your change. 30 | The project maintainers will review and approve the request if everything looks fine with the change.
31 | In particular, below is a high level workflow that contributors should follow to make contributions 32 | to the project: 33 | 34 | - Create a new branch from the repository (e.g., your fork of K-Bench) where you would like to work on the development 35 | - Make changes, run tests, and make commits to your local repo 36 | - Push your changes to the branch in your fork of the repository 37 | - Submit a pull request 38 | 39 | Example: 40 | 41 | ``` shell 42 | git remote add upstream https://github.com/vmware/k-bench.git 43 | git checkout -b my-new-feature master 44 | git commit -a 45 | git push origin my-new-feature 46 | ``` 47 | 48 | ### Staying In Sync With Upstream 49 | 50 | When your branch gets out of sync with the vmware/master branch, use the following to update: 51 | 52 | ``` shell 53 | git checkout my-new-feature 54 | git fetch --all 55 | git pull --rebase upstream master 56 | git push --force-with-lease origin my-new-feature 57 | ``` 58 | 59 | ### Updating pull requests 60 | 61 | If your PR needs changes based on code review or testing, you'll most likely want to squash these 62 | changes into existing commits. 63 | 64 | If your pull request contains a single commit or your changes are related to the most recent commit, 65 | you can simply amend the commit. 66 | 67 | ``` shell 68 | git add . 69 | git commit --amend 70 | git push --force-with-lease origin my-new-feature 71 | ``` 72 | 73 | If you need to squash changes into an earlier commit, you can use: 74 | 75 | ``` shell 76 | git add . 77 | git commit --fixup <commit> 78 | git rebase -i --autosquash master 79 | git push --force-with-lease origin my-new-feature 80 | ``` 81 | 82 | Be sure to add a comment to the PR indicating your new changes are ready to review, as GitHub 83 | does not generate a notification when you git push. 84 | 85 | ## Contribution Testing 86 | All code changes have to be tested locally before being pushed upstream.
In addition, more 87 | testing may be conducted before merging into the production branches. K-Bench maintainers 88 | run regular tests and checks to ensure code stability. Any regressions or issues, once detected, 89 | should be reported in the Github issue tracker. 90 | 91 | ## Reporting Bugs and Creating Issues 92 | 93 | You may use the Github issue tracker to report issues or bugs you find while using and testing K-Bench. 94 | Before reporting the issue, please check whether it is in the existing issue list. 95 | -------------------------------------------------------------------------------- /NOTICE.txt: -------------------------------------------------------------------------------- 1 | Workload Benchmark for Kubernetes 2 | Copyright (c) 2020 VMware, Inc. All Rights Reserved. 3 | 4 | This product is licensed to you under the Apache 2.0 license (the "License"). You may not use this product except in compliance with the Apache 2.0 License. 5 | 6 | This product may include a number of subcomponents with separate copyright notices and license terms. Your use of these subcomponents is subject to the terms and conditions of the subcomponent's license, as noted in the LICENSE file. 7 | 8 | -------------------------------------------------------------------------------- /Roadmap: -------------------------------------------------------------------------------- 1 | Roadmap: 2 | 3 | Control plane 4 | 1. Support for tracking events and latencies across CRDs like VMs (WIP) 5 | 6 | Data plane 7 | 8 | 1. Like we have workloads to stress Compute, memory, I/O, network components, we should also include workloads to stress accelerators like GPUs from K8s 9 | 2. 
Include workloads with most common network patterns between pods, like mesh etc 10 | -------------------------------------------------------------------------------- /cleanup.sh: -------------------------------------------------------------------------------- 1 | kubectl delete all --all -n kbench-pod-namespace; 2 | kubectl delete pvc fio-block-pvc -n kbench-pod-namespace; 3 | kubectl delete all --all -n kbench-deployment-namespace; 4 | kubectl delete all --all -n kbench-service-namespace; 5 | kubectl delete all --all -n kbench-rc-namespace; 6 | kubectl delete all --all -n kbench-resource-namespace; 7 | -------------------------------------------------------------------------------- /config/cp_heavy_12client/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "BlockingLevel": "operation", 3 | "Timeout": 540000, 4 | "CheckingInterval": 3000, 5 | "Cleanup": false, 6 | "Operations": [ 7 | { 8 | "Pods": { 9 | "Actions": [ 10 | { 11 | "Act": "CREATE", 12 | "Spec": { 13 | "ImagePullPolicy": "IfNotPresent", 14 | "Image": "k8s.gcr.io/pause:3.1" 15 | } 16 | }, 17 | { 18 | "Act": "LIST" 19 | }, 20 | { 21 | "Act": "GET" 22 | }, 23 | { 24 | "Act": "UPDATE" 25 | }, 26 | { 27 | "Act": "DELETE" 28 | } 29 | ], 30 | "SleepTimes": [ 31 | 30000, 32 | 3000, 33 | 3000, 34 | 3000, 35 | 9000, 36 | 3000 37 | ], 38 | "Count": 12 39 | } 40 | }, 41 | { 42 | "Deployments": { 43 | "Actions": [ 44 | { 45 | "Act": "CREATE", 46 | "Spec": { 47 | "ImagePullPolicy": "IfNotPresent", 48 | "Image": "k8s.gcr.io/pause:3.1", 49 | "NumReplicas": 5 50 | } 51 | }, 52 | { 53 | "Act": "LIST" 54 | }, 55 | { 56 | "Act": "GET" 57 | }, 58 | { 59 | "Act": "UPDATE" 60 | }, 61 | { 62 | "Act": "DELETE" 63 | } 64 | ], 65 | "SleepTimes": [ 66 | 60000, 67 | 3000, 68 | 3000, 69 | 3000, 70 | 9000, 71 | 3000 72 | ], 73 | "Count": 12 74 | } 75 | }, 76 | { 77 | "Namespaces": { 78 | "Actions": [ 79 | { 80 | "Act": "CREATE" 81 | }, 82 | { 83 | "Act": "LIST" 84 | }, 85 | { 86 | 
"Act": "GET" 87 | }, 88 | { 89 | "Act": "UPDATE" 90 | }, 91 | { 92 | "Act": "DELETE" 93 | } 94 | ], 95 | "SleepTimes": [ 96 | 5000, 97 | 3000, 98 | 3000, 99 | 3000, 100 | 3000 101 | ], 102 | "Count": 12 103 | } 104 | }, 105 | { 106 | "Services": { 107 | "Actions": [ 108 | { 109 | "Act": "CREATE" 110 | }, 111 | { 112 | "Act": "LIST" 113 | }, 114 | { 115 | "Act": "GET" 116 | }, 117 | { 118 | "Act": "UPDATE" 119 | }, 120 | { 121 | "Act": "DELETE" 122 | } 123 | ], 124 | "SleepTimes": [ 125 | 5000, 126 | 3000, 127 | 3000, 128 | 3000, 129 | 3000 130 | ], 131 | "Count": 12 132 | } 133 | }, 134 | { 135 | "Pods": { 136 | "Actions": [ 137 | { 138 | "Act": "CREATE", 139 | "Spec": { 140 | "ImagePullPolicy": "IfNotPresent", 141 | "Image": "k8s.gcr.io/pause:3.1" 142 | } 143 | }, 144 | { 145 | "Act": "LIST" 146 | }, 147 | { 148 | "Act": "GET" 149 | }, 150 | { 151 | "Act": "UPDATE" 152 | }, 153 | { 154 | "Act": "DELETE" 155 | } 156 | ], 157 | "SleepTimes": [ 158 | 30000, 159 | 3000, 160 | 3000, 161 | 3000, 162 | 9000, 163 | 3000 164 | ], 165 | "Count": 12 166 | } 167 | }, 168 | { 169 | "Deployments": { 170 | "Actions": [ 171 | { 172 | "Act": "CREATE", 173 | "Spec": { 174 | "ImagePullPolicy": "IfNotPresent", 175 | "Image": "k8s.gcr.io/pause:3.1", 176 | "NumReplicas": 5 177 | } 178 | }, 179 | { 180 | "Act": "LIST" 181 | }, 182 | { 183 | "Act": "GET" 184 | }, 185 | { 186 | "Act": "UPDATE" 187 | }, 188 | { 189 | "Act": "DELETE" 190 | } 191 | ], 192 | "SleepTimes": [ 193 | 60000, 194 | 3000, 195 | 3000, 196 | 3000, 197 | 9000, 198 | 3000 199 | ], 200 | "Count": 12 201 | } 202 | }, 203 | { 204 | "Namespaces": { 205 | "Actions": [ 206 | { 207 | "Act": "CREATE" 208 | }, 209 | { 210 | "Act": "LIST" 211 | }, 212 | { 213 | "Act": "GET" 214 | }, 215 | { 216 | "Act": "UPDATE" 217 | }, 218 | { 219 | "Act": "DELETE" 220 | } 221 | ], 222 | "SleepTimes": [ 223 | 5000, 224 | 3000, 225 | 3000, 226 | 3000, 227 | 3000 228 | ], 229 | "Count": 12 230 | } 231 | }, 232 | { 233 | "Services": { 234 | 
"Actions": [ 235 | { 236 | "Act": "CREATE" 237 | }, 238 | { 239 | "Act": "LIST" 240 | }, 241 | { 242 | "Act": "GET" 243 | }, 244 | { 245 | "Act": "UPDATE" 246 | }, 247 | { 248 | "Act": "DELETE" 249 | } 250 | ], 251 | "SleepTimes": [ 252 | 5000, 253 | 3000, 254 | 3000, 255 | 3000, 256 | 3000 257 | ], 258 | "Count": 12 259 | } 260 | } 261 | ] 262 | } 263 | -------------------------------------------------------------------------------- /config/cp_heavy_8client/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "BlockingLevel": "operation", 3 | "Timeout": 540000, 4 | "CheckingInterval": 3000, 5 | "Cleanup": false, 6 | "Operations": [ 7 | { 8 | "Pods": { 9 | "Actions": [ 10 | { 11 | "Act": "CREATE", 12 | "Spec": { 13 | "ImagePullPolicy": "IfNotPresent", 14 | "Image": "k8s.gcr.io/pause:3.1" 15 | } 16 | }, 17 | { 18 | "Act": "LIST" 19 | }, 20 | { 21 | "Act": "GET" 22 | }, 23 | { 24 | "Act": "UPDATE" 25 | }, 26 | { 27 | "Act": "DELETE" 28 | } 29 | ], 30 | "SleepTimes": [ 31 | 20000, 32 | 3000, 33 | 3000, 34 | 3000, 35 | 9000, 36 | 3000 37 | ], 38 | "Count": 8 39 | } 40 | }, 41 | { 42 | "Deployments": { 43 | "Actions": [ 44 | { 45 | "Act": "CREATE", 46 | "Spec": { 47 | "ImagePullPolicy": "IfNotPresent", 48 | "Image": "k8s.gcr.io/pause:3.1", 49 | "NumReplicas": 5 50 | } 51 | }, 52 | { 53 | "Act": "LIST" 54 | }, 55 | { 56 | "Act": "GET" 57 | }, 58 | { 59 | "Act": "UPDATE" 60 | }, 61 | { 62 | "Act": "DELETE" 63 | } 64 | ], 65 | "SleepTimes": [ 66 | 40000, 67 | 3000, 68 | 3000, 69 | 3000, 70 | 9000, 71 | 3000 72 | ], 73 | "Count": 8 74 | } 75 | }, 76 | { 77 | "Namespaces": { 78 | "Actions": [ 79 | { 80 | "Act": "CREATE" 81 | }, 82 | { 83 | "Act": "LIST" 84 | }, 85 | { 86 | "Act": "GET" 87 | }, 88 | { 89 | "Act": "UPDATE" 90 | }, 91 | { 92 | "Act": "DELETE" 93 | } 94 | ], 95 | "SleepTimes": [ 96 | 5000, 97 | 3000, 98 | 3000, 99 | 3000, 100 | 3000 101 | ], 102 | "Count": 8 103 | } 104 | }, 105 | { 106 | "Services": { 107 | 
"Actions": [ 108 | { 109 | "Act": "CREATE" 110 | }, 111 | { 112 | "Act": "LIST" 113 | }, 114 | { 115 | "Act": "GET" 116 | }, 117 | { 118 | "Act": "UPDATE" 119 | }, 120 | { 121 | "Act": "DELETE" 122 | } 123 | ], 124 | "SleepTimes": [ 125 | 5000, 126 | 3000, 127 | 3000, 128 | 3000, 129 | 3000 130 | ], 131 | "Count": 8 132 | } 133 | }, 134 | { 135 | "Pods": { 136 | "Actions": [ 137 | { 138 | "Act": "CREATE", 139 | "Spec": { 140 | "ImagePullPolicy": "IfNotPresent", 141 | "Image": "k8s.gcr.io/pause:3.1" 142 | } 143 | }, 144 | { 145 | "Act": "LIST" 146 | }, 147 | { 148 | "Act": "GET" 149 | }, 150 | { 151 | "Act": "UPDATE" 152 | }, 153 | { 154 | "Act": "DELETE" 155 | } 156 | ], 157 | "SleepTimes": [ 158 | 20000, 159 | 3000, 160 | 3000, 161 | 3000, 162 | 9000, 163 | 3000 164 | ], 165 | "Count": 8 166 | } 167 | }, 168 | { 169 | "Deployments": { 170 | "Actions": [ 171 | { 172 | "Act": "CREATE", 173 | "Spec": { 174 | "ImagePullPolicy": "IfNotPresent", 175 | "Image": "k8s.gcr.io/pause:3.1", 176 | "NumReplicas": 5 177 | } 178 | }, 179 | { 180 | "Act": "LIST" 181 | }, 182 | { 183 | "Act": "GET" 184 | }, 185 | { 186 | "Act": "UPDATE" 187 | }, 188 | { 189 | "Act": "DELETE" 190 | } 191 | ], 192 | "SleepTimes": [ 193 | 40000, 194 | 3000, 195 | 3000, 196 | 3000, 197 | 9000, 198 | 3000 199 | ], 200 | "Count": 8 201 | } 202 | }, 203 | { 204 | "Namespaces": { 205 | "Actions": [ 206 | { 207 | "Act": "CREATE" 208 | }, 209 | { 210 | "Act": "LIST" 211 | }, 212 | { 213 | "Act": "GET" 214 | }, 215 | { 216 | "Act": "UPDATE" 217 | }, 218 | { 219 | "Act": "DELETE" 220 | } 221 | ], 222 | "SleepTimes": [ 223 | 5000, 224 | 3000, 225 | 3000, 226 | 3000, 227 | 3000 228 | ], 229 | "Count": 8 230 | } 231 | }, 232 | { 233 | "Services": { 234 | "Actions": [ 235 | { 236 | "Act": "CREATE" 237 | }, 238 | { 239 | "Act": "LIST" 240 | }, 241 | { 242 | "Act": "GET" 243 | }, 244 | { 245 | "Act": "UPDATE" 246 | }, 247 | { 248 | "Act": "DELETE" 249 | } 250 | ], 251 | "SleepTimes": [ 252 | 5000, 253 | 3000, 254 | 
3000, 255 | 3000, 256 | 3000 257 | ], 258 | "Count": 8 259 | } 260 | } 261 | ], 262 | "Tags": [ 263 | { 264 | "Key": "testProfile", 265 | "Value": "heavy-8" 266 | } 267 | ] 268 | } 269 | -------------------------------------------------------------------------------- /config/cp_light_1client/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "BlockingLevel": "operation", 3 | "Timeout": 540000, 4 | "CheckingInterval": 3000, 5 | "Cleanup": false, 6 | "Operations": [ 7 | { 8 | "Pods": { 9 | "Actions": [ 10 | { 11 | "Act": "CREATE", 12 | "Spec": { 13 | "ImagePullPolicy": "IfNotPresent", 14 | "Image": "k8s.gcr.io/pause:3.1" 15 | } 16 | }, 17 | { 18 | "Act": "LIST" 19 | }, 20 | { 21 | "Act": "GET" 22 | }, 23 | { 24 | "Act": "UPDATE" 25 | }, 26 | { 27 | "Act": "DELETE" 28 | } 29 | ], 30 | "SleepTimes": [ 31 | 10000, 32 | 3000, 33 | 3000, 34 | 3000, 35 | 9000, 36 | 3000 37 | ], 38 | "Count": 1 39 | } 40 | }, 41 | { 42 | "Deployments": { 43 | "Actions": [ 44 | { 45 | "Act": "CREATE", 46 | "Spec": { 47 | "ImagePullPolicy": "IfNotPresent", 48 | "Image": "k8s.gcr.io/pause:3.1", 49 | "NumReplicas": 5 50 | } 51 | }, 52 | { 53 | "Act": "LIST" 54 | }, 55 | { 56 | "Act": "GET" 57 | }, 58 | { 59 | "Act": "UPDATE" 60 | }, 61 | { 62 | "Act": "DELETE" 63 | } 64 | ], 65 | "SleepTimes": [ 66 | 30000, 67 | 3000, 68 | 3000, 69 | 3000, 70 | 9000, 71 | 3000 72 | ], 73 | "Count": 1 74 | } 75 | }, 76 | { 77 | "Namespaces": { 78 | "Actions": [ 79 | { 80 | "Act": "CREATE" 81 | }, 82 | { 83 | "Act": "LIST" 84 | }, 85 | { 86 | "Act": "GET" 87 | }, 88 | { 89 | "Act": "UPDATE" 90 | }, 91 | { 92 | "Act": "DELETE" 93 | } 94 | ], 95 | "SleepTimes": [ 96 | 5000, 97 | 3000, 98 | 3000, 99 | 3000, 100 | 3000 101 | ], 102 | "Count": 1 103 | } 104 | }, 105 | { 106 | "Services": { 107 | "Actions": [ 108 | { 109 | "Act": "CREATE" 110 | }, 111 | { 112 | "Act": "LIST" 113 | }, 114 | { 115 | "Act": "GET" 116 | }, 117 | { 118 | "Act": "UPDATE" 119 | }, 120 | { 
121 | "Act": "DELETE" 122 | } 123 | ], 124 | "SleepTimes": [ 125 | 5000, 126 | 3000, 127 | 3000, 128 | 3000, 129 | 3000 130 | ], 131 | "Count": 1 132 | } 133 | }, 134 | { 135 | "Pods": { 136 | "Actions": [ 137 | { 138 | "Act": "CREATE", 139 | "Spec": { 140 | "ImagePullPolicy": "IfNotPresent", 141 | "Image": "k8s.gcr.io/pause:3.1" 142 | } 143 | }, 144 | { 145 | "Act": "LIST" 146 | }, 147 | { 148 | "Act": "GET" 149 | }, 150 | { 151 | "Act": "UPDATE" 152 | }, 153 | { 154 | "Act": "DELETE" 155 | } 156 | ], 157 | "SleepTimes": [ 158 | 10000, 159 | 3000, 160 | 3000, 161 | 3000, 162 | 9000, 163 | 3000 164 | ], 165 | "Count": 1 166 | } 167 | }, 168 | { 169 | "Deployments": { 170 | "Actions": [ 171 | { 172 | "Act": "CREATE", 173 | "Spec": { 174 | "ImagePullPolicy": "IfNotPresent", 175 | "Image": "k8s.gcr.io/pause:3.1", 176 | "NumReplicas": 5 177 | } 178 | }, 179 | { 180 | "Act": "LIST" 181 | }, 182 | { 183 | "Act": "GET" 184 | }, 185 | { 186 | "Act": "UPDATE" 187 | }, 188 | { 189 | "Act": "DELETE" 190 | } 191 | ], 192 | "SleepTimes": [ 193 | 30000, 194 | 3000, 195 | 3000, 196 | 3000, 197 | 9000, 198 | 3000 199 | ], 200 | "Count": 1 201 | } 202 | }, 203 | { 204 | "Namespaces": { 205 | "Actions": [ 206 | { 207 | "Act": "CREATE" 208 | }, 209 | { 210 | "Act": "LIST" 211 | }, 212 | { 213 | "Act": "GET" 214 | }, 215 | { 216 | "Act": "UPDATE" 217 | }, 218 | { 219 | "Act": "DELETE" 220 | } 221 | ], 222 | "SleepTimes": [ 223 | 5000, 224 | 3000, 225 | 3000, 226 | 3000, 227 | 3000 228 | ], 229 | "Count": 1 230 | } 231 | }, 232 | { 233 | "Services": { 234 | "Actions": [ 235 | { 236 | "Act": "CREATE" 237 | }, 238 | { 239 | "Act": "LIST" 240 | }, 241 | { 242 | "Act": "GET" 243 | }, 244 | { 245 | "Act": "UPDATE" 246 | }, 247 | { 248 | "Act": "DELETE" 249 | } 250 | ], 251 | "SleepTimes": [ 252 | 5000, 253 | 3000, 254 | 3000, 255 | 3000, 256 | 3000 257 | ], 258 | "Count": 1 259 | } 260 | } 261 | ] 262 | } 263 | -------------------------------------------------------------------------------- 
/config/cp_light_4client/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "BlockingLevel": "operation", 3 | "Timeout": 540000, 4 | "CheckingInterval": 3000, 5 | "Cleanup": false, 6 | "Operations": [ 7 | { 8 | "Pods": { 9 | "Actions": [ 10 | { 11 | "Act": "CREATE", 12 | "Spec": { 13 | "ImagePullPolicy": "IfNotPresent", 14 | "Image": "k8s.gcr.io/pause:3.1" 15 | } 16 | }, 17 | { 18 | "Act": "LIST" 19 | }, 20 | { 21 | "Act": "GET" 22 | }, 23 | { 24 | "Act": "UPDATE" 25 | }, 26 | { 27 | "Act": "DELETE" 28 | } 29 | ], 30 | "SleepTimes": [ 31 | 10000, 32 | 3000, 33 | 3000, 34 | 3000, 35 | 9000, 36 | 3000 37 | ], 38 | "Count": 4 39 | } 40 | }, 41 | { 42 | "Deployments": { 43 | "Actions": [ 44 | { 45 | "Act": "CREATE", 46 | "Spec": { 47 | "ImagePullPolicy": "IfNotPresent", 48 | "Image": "k8s.gcr.io/pause:3.1", 49 | "NumReplicas": 5 50 | } 51 | }, 52 | { 53 | "Act": "LIST" 54 | }, 55 | { 56 | "Act": "GET" 57 | }, 58 | { 59 | "Act": "UPDATE" 60 | }, 61 | { 62 | "Act": "DELETE" 63 | } 64 | ], 65 | "SleepTimes": [ 66 | 30000, 67 | 3000, 68 | 3000, 69 | 3000, 70 | 9000, 71 | 3000 72 | ], 73 | "Count": 4 74 | } 75 | }, 76 | { 77 | "Namespaces": { 78 | "Actions": [ 79 | { 80 | "Act": "CREATE" 81 | }, 82 | { 83 | "Act": "LIST" 84 | }, 85 | { 86 | "Act": "GET" 87 | }, 88 | { 89 | "Act": "UPDATE" 90 | }, 91 | { 92 | "Act": "DELETE" 93 | } 94 | ], 95 | "SleepTimes": [ 96 | 5000, 97 | 3000, 98 | 3000, 99 | 3000, 100 | 3000 101 | ], 102 | "Count": 4 103 | } 104 | }, 105 | { 106 | "Services": { 107 | "Actions": [ 108 | { 109 | "Act": "CREATE" 110 | }, 111 | { 112 | "Act": "LIST" 113 | }, 114 | { 115 | "Act": "GET" 116 | }, 117 | { 118 | "Act": "UPDATE" 119 | }, 120 | { 121 | "Act": "DELETE" 122 | } 123 | ], 124 | "SleepTimes": [ 125 | 5000, 126 | 3000, 127 | 3000, 128 | 3000, 129 | 3000 130 | ], 131 | "Count": 4 132 | } 133 | }, 134 | { 135 | "Pods": { 136 | "Actions": [ 137 | { 138 | "Act": "CREATE", 139 | "Spec": { 140 | 
"ImagePullPolicy": "IfNotPresent", 141 | "Image": "k8s.gcr.io/pause:3.1" 142 | } 143 | }, 144 | { 145 | "Act": "LIST" 146 | }, 147 | { 148 | "Act": "GET" 149 | }, 150 | { 151 | "Act": "UPDATE" 152 | }, 153 | { 154 | "Act": "DELETE" 155 | } 156 | ], 157 | "SleepTimes": [ 158 | 10000, 159 | 3000, 160 | 3000, 161 | 3000, 162 | 9000, 163 | 3000 164 | ], 165 | "Count": 4 166 | } 167 | }, 168 | { 169 | "Deployments": { 170 | "Actions": [ 171 | { 172 | "Act": "CREATE", 173 | "Spec": { 174 | "ImagePullPolicy": "IfNotPresent", 175 | "Image": "k8s.gcr.io/pause:3.1", 176 | "NumReplicas": 5 177 | } 178 | }, 179 | { 180 | "Act": "LIST" 181 | }, 182 | { 183 | "Act": "GET" 184 | }, 185 | { 186 | "Act": "UPDATE" 187 | }, 188 | { 189 | "Act": "DELETE" 190 | } 191 | ], 192 | "SleepTimes": [ 193 | 30000, 194 | 3000, 195 | 3000, 196 | 3000, 197 | 9000, 198 | 3000 199 | ], 200 | "Count": 4 201 | } 202 | }, 203 | { 204 | "Namespaces": { 205 | "Actions": [ 206 | { 207 | "Act": "CREATE" 208 | }, 209 | { 210 | "Act": "LIST" 211 | }, 212 | { 213 | "Act": "GET" 214 | }, 215 | { 216 | "Act": "UPDATE" 217 | }, 218 | { 219 | "Act": "DELETE" 220 | } 221 | ], 222 | "SleepTimes": [ 223 | 5000, 224 | 3000, 225 | 3000, 226 | 3000, 227 | 3000 228 | ], 229 | "Count": 4 230 | } 231 | }, 232 | { 233 | "Services": { 234 | "Actions": [ 235 | { 236 | "Act": "CREATE" 237 | }, 238 | { 239 | "Act": "LIST" 240 | }, 241 | { 242 | "Act": "GET" 243 | }, 244 | { 245 | "Act": "UPDATE" 246 | }, 247 | { 248 | "Act": "DELETE" 249 | } 250 | ], 251 | "SleepTimes": [ 252 | 5000, 253 | 3000, 254 | 3000, 255 | 3000, 256 | 3000 257 | ], 258 | "Count": 4 259 | } 260 | } 261 | ] 262 | } 263 | -------------------------------------------------------------------------------- /config/cp_other_resources/pv_pvc/pv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: test-pv-volume 5 | labels: 6 | type: local 7 | spec: 8 | 
storageClassName: manual 9 | capacity: 10 | storage: 10Gi 11 | accessModes: 12 | - ReadWriteOnce 13 | hostPath: 14 | path: "/mnt/data" 15 | -------------------------------------------------------------------------------- /config/cp_other_resources/pv_pvc/pv_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "BlockingLevel": "operation", 3 | "Timeout": 540000, 4 | "CheckingInterval": 3000, 5 | "Cleanup": false, 6 | "Operations": [ 7 | { 8 | "PersistentVolumes": { 9 | "Actions": [ 10 | { 11 | "Act": "CREATE", 12 | "Spec": { 13 | "YamlSpec": "./config/cp_other_resources/pv_pvc/pv.yaml" 14 | } 15 | }, 16 | { 17 | "Act": "LIST" 18 | } 19 | ], 20 | "SleepTimes": [ 21 | 10000, 22 | 3000 23 | ], 24 | "Count": 2 25 | }, 26 | "RepeatTimes": 0 27 | } 28 | ] 29 | } 30 | -------------------------------------------------------------------------------- /config/cp_other_resources/pv_pvc/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: test-pv-claim 5 | spec: 6 | storageClassName: manual 7 | accessModes: 8 | - ReadWriteOnce 9 | resources: 10 | requests: 11 | storage: 3Gi 12 | -------------------------------------------------------------------------------- /config/cp_other_resources/pv_pvc/pvc_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "BlockingLevel": "operation", 3 | "Timeout": 540000, 4 | "CheckingInterval": 3000, 5 | "Cleanup": false, 6 | "Operations": [ 7 | { 8 | "PersistentVolumeClaims": { 9 | "Actions": [ 10 | { 11 | "Act": "CREATE", 12 | "Spec": { 13 | "YamlSpec": "./config/cp_other_resources/pv_pvc/pvc.yaml" 14 | } 15 | }, 16 | { 17 | "Act": "LIST" 18 | } 19 | ], 20 | "Namespace": "test-namespace", 21 | "SleepTimes": [ 22 | 10000, 23 | 3000 24 | ], 25 | "Count": 2 26 | }, 27 | "RepeatTimes": 0 28 | } 29 | ] 30 | } 31 | 
-------------------------------------------------------------------------------- /config/default/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "BlockingLevel": "operation", 3 | "Timeout": 60000, 4 | "CheckingInterval": 3000, 5 | "Cleanup": true, 6 | "Operations": [ 7 | { 8 | "Deployments": { 9 | "Actions": [ 10 | { 11 | "Act": "CREATE", 12 | "Spec": { 13 | "ImagePullPolicy": "IfNotPresent", 14 | "Image": "k8s.gcr.io/pause:3.1", 15 | "YamlSpec": "./config/default/deployment.yaml", 16 | "NumReplicas": 3 17 | } 18 | }, 19 | { 20 | "Act": "LIST", 21 | "Spec": { 22 | "Namespace": "default" 23 | } 24 | }, 25 | { 26 | "Act": "UPDATE", 27 | "Spec": { 28 | "Namespace": "default" 29 | } 30 | }, 31 | { 32 | "Act": "SCALE", 33 | "Spec": { 34 | "Namespace": "default" 35 | } 36 | }, 37 | { 38 | "Act": "DELETE", 39 | "Spec": { 40 | "Namespace": "default" 41 | } 42 | } 43 | ], 44 | "SleepTimes": [ 45 | 20000, 46 | 300, 47 | 300, 48 | 300, 49 | 10000, 50 | 3000 51 | ], 52 | "Count": 3 53 | }, 54 | "Pods": { 55 | "Actions": [ 56 | { 57 | "Act": "CREATE", 58 | "Spec": { 59 | "ImagePullPolicy": "IfNotPresent", 60 | "Image": "k8s.gcr.io/pause:3.1" 61 | } 62 | }, 63 | { 64 | "Act": "LIST" 65 | }, 66 | { 67 | "Act": "GET" 68 | }, 69 | { 70 | "Act": "UPDATE" 71 | }, 72 | { 73 | "Act": "DELETE" 74 | } 75 | ], 76 | "SleepTimes": [ 77 | 20000, 78 | 300, 79 | 300, 80 | 3000, 81 | 3000 82 | ], 83 | "Count": 4, 84 | "ContainerNamePrefix": "data-plane-" 85 | }, 86 | "Services": { 87 | "Actions": [ 88 | { 89 | "Act": "CREATE", 90 | "Spec": { 91 | "YamlSpec": "./config/default/service.yaml" 92 | } 93 | }, 94 | { 95 | "Act": "LIST" 96 | }, 97 | { 98 | "Act": "GET" 99 | }, 100 | { 101 | "Act": "UPDATE" 102 | }, 103 | { 104 | "Act": "DELETE" 105 | } 106 | ], 107 | "SleepTimes": [ 108 | 20000, 109 | 300, 110 | 300, 111 | 3000, 112 | 3000 113 | ], 114 | "Count": 5 115 | }, 116 | "RepeatTimes": 0 117 | } 118 | ] 119 | } 120 | 
-------------------------------------------------------------------------------- /config/default/create-4-rc-list-matchgoroutine-demo.json: -------------------------------------------------------------------------------- 1 | { 2 | "BlockingLevel": "operation", 3 | "Timeout": 60000, 4 | "CheckingInterval": 3000, 5 | "Cleanup": true, 6 | "Operations" : [ 7 | { 8 | "ReplicationControllers": 9 | { 10 | "Actions": ["CREATE", "LIST", "GET", "UPDATE", "SCALE"], 11 | "SleepTimes": [30000, 3000, 3000, 3000, 5000, 5000], 12 | "ImagePullPolicy": "IfNotPresent", 13 | "Count": 4, 14 | "YamlSpec": "./config/replication_controller.yaml" 15 | }, 16 | "RepeatTimes": 0 17 | }, 18 | { 19 | "ReplicationControllers": 20 | { 21 | "Actions": ["LIST"], 22 | "SleepTimes": [3000], 23 | "Namespace": "test-simple", 24 | "Count": 4, 25 | "MatchGoroutine": false 26 | } 27 | }, 28 | { 29 | "ReplicationControllers": 30 | { 31 | "Actions": ["LIST"], 32 | "SleepTimes": [3000], 33 | "Namespace": "test-simple", 34 | "Count": 4, 35 | "MatchGoroutine": true 36 | } 37 | } 38 | ], 39 | "Tags" : [ 40 | { 41 | "Key": "testProfile", 42 | "Value": "heavy" 43 | } 44 | ], 45 | "SleepTimeAfterRun": 1000 46 | } 47 | -------------------------------------------------------------------------------- /config/default/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: hello-dep 5 | namespace: default 6 | spec: 7 | replicas: 2 8 | selector: 9 | matchLabels: 10 | app: hello-dep 11 | template: 12 | metadata: 13 | labels: 14 | app: hello-dep 15 | spec: 16 | containers: 17 | - image: gcr.io/google-samples/hello-app:2.0 18 | imagePullPolicy: Always 19 | name: hello-dep 20 | ports: 21 | - containerPort: 8080 22 | -------------------------------------------------------------------------------- /config/default/docker-compose.yml: -------------------------------------------------------------------------------- 1 | 
version: '3' 2 | services: 3 | redis: 4 | image: "redis:alpine" 5 | deploy: 6 | replicas: 1 7 | 8 | mysql: 9 | image: "mysql:5.7" 10 | ports: 11 | - "3306:3306" -------------------------------------------------------------------------------- /config/default/docker-compose1.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | redis: 4 | image: "redis:alpine" 5 | deploy: 6 | replicas: 3 7 | -------------------------------------------------------------------------------- /config/default/download-files.json: -------------------------------------------------------------------------------- 1 | { 2 | "BlockingLevel": "operation", 3 | "Timeout": 60000, 4 | "CheckingInterval": 3000, 5 | "Cleanup": true, 6 | "Operations": [ 7 | { 8 | "Pods": { 9 | "Actions": [ 10 | { 11 | "Act": "COPY", 12 | "Spec": { 13 | "LabelKey": "env", 14 | "LabelValue": "test", 15 | "Namespace": "test-simple", 16 | "LocalPath": "./test-data-download/", 17 | "ContainerPath": "/tmp/", 18 | "Upload": false 19 | } 20 | } 21 | ], 22 | "SleepTimes": [ 23 | 2000 24 | ], 25 | "Count": 1 26 | } 27 | } 28 | ], 29 | "Tags": [ 30 | { 31 | "Key": "testProfile", 32 | "Value": "test-download" 33 | } 34 | ], 35 | "WavefrontPathDir": "/home/vmware", 36 | "SleepTimeAfterRun": 10000 37 | } 38 | -------------------------------------------------------------------------------- /config/default/kompose-test-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "BlockingLevel": "operation", 3 | "Timeout": 540000, 4 | "CheckingInterval": 3000, 5 | "Cleanup": true, 6 | "DockerCompose":"./config/docker-compose1.yml", 7 | "Operations": [ 8 | { 9 | "Pods": { 10 | "Actions": [ 11 | { 12 | "Act": "CREATE", 13 | "Spec": { 14 | "ImagePullPolicy": "IfNotPresent", 15 | "Image": "k8s.gcr.io/pause:3.1" 16 | } 17 | }, 18 | { 19 | "Act": "LIST" 20 | }, 21 | { 22 | "Act": "GET" 23 | }, 24 | { 25 | "Act": "UPDATE" 26 | }, 27 | 
{ 28 | "Act": "DELETE" 29 | } 30 | ], 31 | "SleepTimes": [ 32 | 5000, 33 | 300, 34 | 300, 35 | 300, 36 | 900, 37 | 300 38 | ], 39 | "Count": 1 40 | } 41 | }, 42 | { 43 | "Deployments": { 44 | "Actions": [ 45 | { 46 | "Act": "CREATE", 47 | "Spec": { 48 | "ImagePullPolicy": "IfNotPresent", 49 | "Image": "k8s.gcr.io/pause:3.1", 50 | "NumReplicas": 2 51 | } 52 | }, 53 | { 54 | "Act": "LIST" 55 | }, 56 | { 57 | "Act": "GET" 58 | }, 59 | { 60 | "Act": "UPDATE" 61 | }, 62 | { 63 | "Act": "DELETE" 64 | } 65 | ], 66 | "SleepTimes": [ 67 | 10000, 68 | 1000, 69 | 1000, 70 | 1000, 71 | 1000, 72 | 1000 73 | ], 74 | "Count": 1 75 | } 76 | }, 77 | { 78 | "Namespaces": { 79 | "Actions": [ 80 | { 81 | "Act": "CREATE" 82 | }, 83 | { 84 | "Act": "LIST" 85 | }, 86 | { 87 | "Act": "GET" 88 | }, 89 | { 90 | "Act": "UPDATE" 91 | }, 92 | { 93 | "Act": "DELETE" 94 | } 95 | ], 96 | "SleepTimes": [ 97 | 10000, 98 | 3000, 99 | 3000, 100 | 3000, 101 | 3000 102 | ], 103 | "Count": 1 104 | } 105 | } 106 | ], 107 | "Tags": [ 108 | { 109 | "Key": "testProfile", 110 | "Value": "light-1" 111 | } 112 | ], 113 | "WavefrontPathDir": "/home/vmware", 114 | "SleepTimeAfterRun": 10000 115 | } 116 | -------------------------------------------------------------------------------- /config/default/matchgoroutine-matchoperation-example.json: -------------------------------------------------------------------------------- 1 | { 2 | "BlockingLevel": "operation", 3 | "Timeout": 540000, 4 | "CheckingInterval": 3000, 5 | "Cleanup": false, 6 | "Operations": [ 7 | { 8 | "Pods": { 9 | "Actions": [ 10 | { 11 | "Act": "CREATE", 12 | "Spec": { 13 | "ImagePullPolicy": "IfNotPresent", 14 | "Image": "k8s.gcr.io/pause:3.1" 15 | } 16 | }, 17 | { 18 | "Act": "LIST" 19 | }, 20 | { 21 | "Act": "GET" 22 | }, 23 | { 24 | "Act": "UPDATE" 25 | } 26 | ], 27 | "SleepTimes": [ 28 | 10000, 29 | 3000, 30 | 3000, 31 | 3000 32 | ], 33 | "Count": 16 34 | } 35 | }, 36 | { 37 | "Pods": { 38 | "Actions": [ 39 | { 40 | "Act": "DELETE", 41 | 
"Spec": { 42 | "MatchGoroutine": true 43 | } 44 | } 45 | ], 46 | "SleepTimes": [ 47 | 1000 48 | ], 49 | "Count": 1 50 | } 51 | }, 52 | { 53 | "Pods": { 54 | "Actions": [ 55 | { 56 | "Act": "CREATE", 57 | "Spec": { 58 | "ImagePullPolicy": "IfNotPresent", 59 | "Image": "k8s.gcr.io/pause:3.1" 60 | } 61 | }, 62 | { 63 | "Act": "LIST", 64 | "Spec": { 65 | "MatchOperation": "ALL", 66 | "MatchGoroutine": true 67 | } 68 | }, 69 | { 70 | "Act": "LIST", 71 | "Spec": { 72 | "MatchOperation": "ALL", 73 | "MatchGoroutine": false 74 | } 75 | }, 76 | { 77 | "Act": "LIST", 78 | "Spec": { 79 | "MatchOperation": "CURRENT", 80 | "MatchGoroutine": true 81 | } 82 | }, 83 | { 84 | "Act": "LIST", 85 | "Spec": { 86 | "MatchOperation": "CURRENT", 87 | "MatchGoroutine": false 88 | } 89 | } 90 | ], 91 | "SleepTimes": [ 92 | 10000 93 | ], 94 | "Count": 2 95 | } 96 | }, 97 | { 98 | "Pods": { 99 | "Actions": [ 100 | { 101 | "Act": "CREATE", 102 | "Spec": { 103 | "ImagePullPolicy": "IfNotPresent", 104 | "Image": "k8s.gcr.io/pause:3.1" 105 | } 106 | }, 107 | { 108 | "Act": "LIST", 109 | "Spec": { 110 | "MatchOperation": "ALL", 111 | "MatchGoroutine": true 112 | } 113 | }, 114 | { 115 | "Act": "LIST", 116 | "Spec": { 117 | "MatchOperation": "ALL", 118 | "MatchGoroutine": false 119 | } 120 | }, 121 | { 122 | "Act": "LIST", 123 | "Spec": { 124 | "MatchOperation": "CURRENT", 125 | "MatchGoroutine": true 126 | } 127 | }, 128 | { 129 | "Act": "LIST", 130 | "Spec": { 131 | "MatchOperation": "CURRENT", 132 | "MatchGoroutine": false 133 | } 134 | } 135 | ], 136 | "SleepTimes": [ 137 | 10000 138 | ], 139 | "Count": 2 140 | } 141 | } 142 | ], 143 | "Tags": [ 144 | { 145 | "Key": "testProfile", 146 | "Value": "example-config" 147 | } 148 | ] 149 | } 150 | -------------------------------------------------------------------------------- /config/default/pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | namespace: 
test-simple 5 | name: nginx 6 | labels: 7 | env: test 8 | spec: 9 | containers: 10 | - name: nginx 11 | image: nginx 12 | imagePullPolicy: IfNotPresent 13 | -------------------------------------------------------------------------------- /config/default/replication_controller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | namespace: test-simple 5 | name: nginx 6 | spec: 7 | replicas: 3 8 | selector: 9 | app: nginx 10 | template: 11 | metadata: 12 | name: nginx 13 | labels: 14 | app: nginx 15 | spec: 16 | containers: 17 | - name: nginx 18 | image: nginx 19 | ports: 20 | - containerPort: 80 21 | -------------------------------------------------------------------------------- /config/default/service.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: my-service 5 | spec: 6 | selector: 7 | app: kbench 8 | ports: 9 | - protocol: TCP 10 | port: 80 11 | targetPort: 9376 12 | -------------------------------------------------------------------------------- /config/dp_fio/README: -------------------------------------------------------------------------------- 1 | Run fio individually using the following command: 2 | cd ../../; ./run.sh -t "dp_fio" -r -o " 3 | -------------------------------------------------------------------------------- /config/dp_fio/describe_fio.sh: -------------------------------------------------------------------------------- 1 | kubectl --namespace=kbench-pod-namespace describe po kbench-pod-oid-0-tid-0 2 | -------------------------------------------------------------------------------- /config/dp_fio/fio_pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: myfiopod 5 | namespace: kbench-pod-namespace 6 | labels: 7 | name: myfio 8 | spec: 9 | volumes: 10 | 
- name: fio-volume 11 | persistentVolumeClaim: 12 | claimName: fio-block-pvc 13 | containers: 14 | - image: nginx 15 | resources: 16 | limits: 17 | memory: "4200Mi" 18 | cpu: "8000m" 19 | requests: 20 | memory: "4200Mi" 21 | cpu: "1000m" 22 | ephemeral-storage: "10Gi" 23 | volumeMounts: 24 | - name: fio-volume 25 | mountPath: /mnt/fio-volume 26 | command: ["/bin/sh","-c"] 27 | args: 28 | - apt-get update; 29 | apt-get install -y fio ioping; 30 | apt-get install -y fio ioping; 31 | apt-get install -y fio ioping; 32 | sleep infinity; 33 | name: fiocontainer 34 | -------------------------------------------------------------------------------- /config/dp_fio/fio_pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: fio-block-pvc 5 | namespace: kbench-pod-namespace 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | resources: 10 | requests: 11 | storage: 10Gi 12 | storageClassName: wcp-policy 13 | -------------------------------------------------------------------------------- /config/dp_fio/get_fio.sh: -------------------------------------------------------------------------------- 1 | kubectl get pvc -n kbench-pod-namespace 2 | kubectl get pods --namespace=kbench-pod-namespace 3 | -------------------------------------------------------------------------------- /config/dp_fio/logs_fio.sh: -------------------------------------------------------------------------------- 1 | kubectl logs --namespace=kbench-pod-namespace kbench-pod-oid-0-tid-0 2 | -------------------------------------------------------------------------------- /config/dp_netperf_internode/README: -------------------------------------------------------------------------------- 1 | Run dp_netperf_internode individually using the following command: 2 | cd ../../; ./run.sh -t "dp_netperf_internode" -r -o 3 | 4 | Please use the netperf results for network performance measurement. 
5 | 6 | -------------------------------------------------------------------------------- /config/dp_netperf_internode/describe_netperf.sh: -------------------------------------------------------------------------------- 1 | kubectl --namespace=kbench-pod-namespace describe po kbench-pod-oid-1-tid-0 2 | echo "SECOND POD" 3 | echo "" 4 | echo "" 5 | echo "" 6 | echo "" 7 | echo "" 8 | echo "" 9 | echo "" 10 | kubectl --namespace=kbench-pod-namespace describe po kbench-pod-oid-2-tid-0 11 | -------------------------------------------------------------------------------- /config/dp_netperf_internode/get_netperf.sh: -------------------------------------------------------------------------------- 1 | kubectl get pods --namespace=kbench-pod-namespace 2 | kubectl get services --namespace=kbench-pod-namespace 3 | -------------------------------------------------------------------------------- /config/dp_netperf_internode/logs_netperf.sh: -------------------------------------------------------------------------------- 1 | kubectl logs --namespace=kbench-pod-namespace kbench-pod-oid-1-tid-0 2 | echo "SECOND POD" 3 | echo "" 4 | echo "" 5 | echo "" 6 | echo "" 7 | echo "" 8 | echo "" 9 | echo "" 10 | kubectl logs --namespace=kbench-pod-namespace kbench-pod-oid-2-tid-0 11 | -------------------------------------------------------------------------------- /config/dp_netperf_internode/netperf_client_pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: mynetperfclientpod 5 | namespace: kbench-pod-namespace 6 | labels: 7 | name: mynetperf 8 | podtype: client 9 | spec: 10 | affinity: 11 | podAntiAffinity: 12 | requiredDuringSchedulingIgnoredDuringExecution: 13 | - labelSelector: 14 | matchExpressions: 15 | - key: podtype 16 | operator: In 17 | values: 18 | - server 19 | topologyKey: kubernetes.io/hostname 20 | hostname: netperfclient 21 | subdomain: kbench-service-oid-0-tid-0 22 | 
containers: 23 | - image: gcr.io/google-containers/nginx 24 | resources: 25 | limits: 26 | memory: "4200Mi" 27 | cpu: "16000m" 28 | requests: 29 | memory: "4200Mi" 30 | cpu: "1000m" 31 | ephemeral-storage: "5Gi" 32 | command: ["/bin/sh","-c"] 33 | args: 34 | - wget https://github.com/HewlettPackard/netperf/archive/netperf-2.7.0.tar.gz; 35 | tar xf netperf-2.7.0.tar.gz && cd netperf-netperf-2.7.0; 36 | ./configure && make && make install; 37 | sleep infinity; 38 | name: netperfclientcontainer 39 | -------------------------------------------------------------------------------- /config/dp_netperf_internode/netperf_server_pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: mynetperfserverpod 5 | namespace: kbench-pod-namespace 6 | labels: 7 | name: mynetperf 8 | podtype: server 9 | spec: 10 | hostname: netperfserver 11 | subdomain: kbench-service-oid-0-tid-0 12 | containers: 13 | - image: gcr.io/google-containers/nginx 14 | resources: 15 | limits: 16 | memory: "4200Mi" 17 | cpu: "16000m" 18 | requests: 19 | memory: "4200Mi" 20 | cpu: "1000m" 21 | ephemeral-storage: "5Gi" 22 | command: ["/bin/sh","-c"] 23 | args: 24 | - wget https://github.com/HewlettPackard/netperf/archive/netperf-2.7.0.tar.gz; 25 | tar xf netperf-2.7.0.tar.gz && cd netperf-netperf-2.7.0; 26 | ./configure && make && make install; 27 | sleep infinity; 28 | name: netperfservercontainer 29 | -------------------------------------------------------------------------------- /config/dp_netperf_internode/netperf_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kbench-service-oid-0-tid-0 5 | namespace: kbench-pod-namespace 6 | spec: 7 | selector: 8 | name: mynetperf 9 | clusterIP: None 10 | ports: 11 | - name: foo # Actually, no port is needed. 
12 | port: 1234 13 | targetPort: 1234 14 | -------------------------------------------------------------------------------- /config/dp_network_internode/README: -------------------------------------------------------------------------------- 1 | Run dp_network_internode individually using the following command: 2 | cd ../../; ./run.sh -t "dp_network_internode" -r -o " 3 | 4 | Please use the qperf results for the network latency and the iperf3 results for bandwidth. 5 | 6 | -------------------------------------------------------------------------------- /config/dp_network_internode/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "BlockingLevel": "operation", 3 | "Timeout": 60000, 4 | "CheckingInterval": 3000, 5 | "Cleanup": false, 6 | "Operations" : [ 7 | { 8 | "Services": 9 | { 10 | "Actions": [ 11 | { 12 | "Act": "CREATE", 13 | "Spec": { 14 | "YamlSpec": "./config/dp_network_internode/iperf_service.yaml" 15 | } 16 | } 17 | ], 18 | "SleepTimes": [100], 19 | "Count": 1 20 | } 21 | }, 22 | { 23 | "Pods": 24 | { 25 | "Actions": [ 26 | { 27 | "Act": "CREATE", 28 | "Spec": { 29 | "ImagePullPolicy": "IfNotPresent", 30 | "Image": "nginx", 31 | "Namespace": "kbench-pod-namespace", 32 | "PodNamePrefix": "server", 33 | "YamlSpec": "./config/dp_network_internode/iperf_server_pod.yaml" 34 | } 35 | } 36 | ], 37 | "SleepTimes": [100], 38 | "Count": 1 39 | } 40 | }, 41 | { 42 | "Pods": 43 | { 44 | "Actions": [ 45 | { 46 | "Act": "CREATE", 47 | "Spec": { 48 | "ImagePullPolicy": "IfNotPresent", 49 | "Image": "nginx", 50 | "Namespace": "kbench-pod-namespace", 51 | "PodNamePrefix": "client", 52 | "YamlSpec": "./config/dp_network_internode/iperf_client_pod.yaml" 53 | } 54 | } 55 | ], 56 | "SleepTimes": [100000], 57 | "Count": 1 58 | } 59 | }, 60 | { 61 | "Pods": 62 | { 63 | "Actions": [ 64 | { 65 | "Act": "RUN", 66 | "Spec": { 67 | "Namespace": "kbench-pod-namespace", 68 | "LabelKey": "podtype", 69 | "LabelValue": "server", 70 | 
"MatchGoroutine": true, 71 | "Command": "mkdir /tmp/perfoutput; ifconfig > /tmp/perfoutput/ifconfig_server; iperf3 -s -p 5101 >> /tmp/perfoutput/iperfserver.out 2>> /tmp/perfoutput/iperfserver.err &" 72 | } 73 | } 74 | ], 75 | "SleepTimes": [10000], 76 | "Count": 1 77 | } 78 | }, 79 | { 80 | "Pods": 81 | { 82 | "Actions": [ 83 | { 84 | "Act": "RUN", 85 | "Spec": { 86 | "Namespace": "kbench-pod-namespace", 87 | "LabelKey": "podtype", 88 | "LabelValue": "client", 89 | "MatchGoroutine": true, 90 | "Command": "mkdir /tmp/perfoutput; ifconfig > /tmp/perfoutput/ifconfig_client; ping -c 10 iperfserver.kbench-service-oid-0-tid-0.kbench-pod-namespace.svc.cluster.local > /tmp/perfoutput/ping.out 2> /tmp/perfoutput/ping.err; iperf3 -t 90 -T s1 -p 5101 -O 10 -c iperfserver.kbench-service-oid-0-tid-0.kbench-pod-namespace.svc.cluster.local >> /tmp/perfoutput/iperfclient.out 2>> /tmp/perfoutput/iperfclient.err; iperf3 -t 90 -T s1 -p 5101 -O 10 -u -w256k -b 0 -c iperfserver.kbench-service-oid-0-tid-0.kbench-pod-namespace.svc.cluster.local >> /tmp/perfoutput/iperfclient_UDP.out 2>> /tmp/perfoutput/iperfclient_UDP.err; iperf3 -t 90 -T s1 -p 5101 -M 8800 -O 10 -c iperfserver.kbench-service-oid-0-tid-0.kbench-pod-namespace.svc.cluster.local >> /tmp/perfoutput/iperfclient_Jumbo.out 2>> /tmp/perfoutput/iperfclient_Jumbo.err; iperf3 -t 90 -T s1 -p 5101 -O 10 -u -l 8800 -w256k -b 0 -c iperfserver.kbench-service-oid-0-tid-0.kbench-pod-namespace.svc.cluster.local >> /tmp/perfoutput/iperfclient_Jumbo_UDP.out 2>> /tmp/perfoutput/iperfclient_Jumbo_UDP.err" 91 | } 92 | } 93 | ], 94 | "SleepTimes": [10000], 95 | "Count": 1 96 | } 97 | }, 98 | { 99 | "Pods": 100 | { 101 | "Actions": [ 102 | { 103 | "Act": "RUN", 104 | "Spec": { 105 | "Namespace": "kbench-pod-namespace", 106 | "LabelKey": "podtype", 107 | "LabelValue": "server", 108 | "MatchGoroutine": true, 109 | "Command": "pkill iperf3; /tmp/qperf-0.4.9/src/qperf >> /tmp/perfoutput/qperfserver.out 2>> /tmp/perfoutput/qperfserver.err &" 110 | } 
111 | } 112 | ], 113 | "SleepTimes": [10000], 114 | "Count": 1 115 | } 116 | }, 117 | { 118 | "Pods": 119 | { 120 | "Actions": [ 121 | { 122 | "Act": "RUN", 123 | "Spec": { 124 | "Namespace": "kbench-pod-namespace", 125 | "LabelKey": "podtype", 126 | "LabelValue": "client", 127 | "MatchGoroutine": true, 128 | "Command": "/tmp/qperf-0.4.9/src/qperf -v iperfserver.kbench-service-oid-0-tid-0.kbench-pod-namespace.svc.cluster.local tcp_lat udp_lat tcp_bw udp_bw >> /tmp/perfoutput/qperfclient.out 2>> /tmp/perfoutput/qperfclient.err" 129 | } 130 | } 131 | ], 132 | "SleepTimes": [10000], 133 | "Count": 1 134 | } 135 | }, 136 | { 137 | "Pods": 138 | { 139 | "Actions": [ 140 | { 141 | "Act": "COPY", 142 | "Spec": { 143 | "Namespace": "kbench-pod-namespace", 144 | "LocalPath": "./", 145 | "ContainerPath": "/tmp/perfoutput", 146 | "Upload": false, 147 | "MatchOperation": "ALL", 148 | "MatchGoroutine": false 149 | } 150 | } 151 | ], 152 | "SleepTimes": [20000], 153 | "Count": 1 154 | } 155 | } 156 | ], 157 | "Tags" : [ 158 | { 159 | "Key": "testProfile", 160 | "Value": "dp_network_internode" 161 | } 162 | ], 163 | "WavefrontPathDir": "/home/vmware" 164 | } 165 | -------------------------------------------------------------------------------- /config/dp_network_internode/describe_netperf.sh: -------------------------------------------------------------------------------- 1 | kubectl --namespace=kbench-pod-namespace describe po kbench-pod-oid-1-tid-0 2 | echo "SECOND POD" 3 | echo "" 4 | echo "" 5 | echo "" 6 | echo "" 7 | echo "" 8 | echo "" 9 | echo "" 10 | kubectl --namespace=kbench-pod-namespace describe po kbench-pod-oid-2-tid-0 11 | -------------------------------------------------------------------------------- /config/dp_network_internode/get_netperf.sh: -------------------------------------------------------------------------------- 1 | kubectl get pods --namespace=kbench-pod-namespace 2 | kubectl get services --namespace=kbench-pod-namespace 3 | 
-------------------------------------------------------------------------------- /config/dp_network_internode/iperf_client_pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: myiperfclientpod 5 | namespace: kbench-pod-namespace 6 | labels: 7 | name: myiperf 8 | podtype: client 9 | spec: 10 | affinity: 11 | podAntiAffinity: 12 | requiredDuringSchedulingIgnoredDuringExecution: 13 | - labelSelector: 14 | matchExpressions: 15 | - key: podtype 16 | operator: In 17 | values: 18 | - server 19 | topologyKey: kubernetes.io/hostname 20 | hostname: iperfclient 21 | subdomain: kbench-service-oid-0-tid-0 22 | containers: 23 | - image: nginx 24 | resources: 25 | limits: 26 | memory: "4200Mi" 27 | cpu: "8000m" 28 | requests: 29 | memory: "4200Mi" 30 | cpu: "1000m" 31 | ephemeral-storage: "5Gi" 32 | command: ["/bin/sh","-c"] 33 | args: 34 | - sleep 2s; 35 | apt-get update; 36 | apt-get install -y iperf3; 37 | apt-get install -y procps; 38 | apt-get install -y net-tools; 39 | apt-get install -y iputils-ping; 40 | apt-get install -y qperf; 41 | apt-get clean; 42 | apt-get install -y wget gawk make; 43 | apt-get clean; 44 | apt-get install -y gcc libpath-tiny-perl; 45 | cd /tmp; 46 | wget https://www.openfabrics.org/downloads/qperf/qperf-0.4.9.tar.gz; 47 | tar xvf qperf-0.4.9.tar.gz; 48 | cd qperf-0.4.9; 49 | ./configure; 50 | make; 51 | sleep infinity; 52 | name: iperfclientcontainer 53 | -------------------------------------------------------------------------------- /config/dp_network_internode/iperf_server_pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: myiperfserverpod 5 | namespace: kbench-pod-namespace 6 | labels: 7 | name: myiperf 8 | podtype: server 9 | spec: 10 | hostname: iperfserver 11 | subdomain: kbench-service-oid-0-tid-0 12 | containers: 13 | - image: nginx 14 | resources: 15 
| limits: 16 | memory: "4200Mi" 17 | cpu: "8000m" 18 | requests: 19 | memory: "4200Mi" 20 | cpu: "1000m" 21 | ephemeral-storage: "5Gi" 22 | command: ["/bin/sh","-c"] 23 | args: 24 | - sleep 2s; 25 | apt-get update; 26 | apt-get install -y iperf3; 27 | apt-get install -y procps; 28 | apt-get install -y net-tools; 29 | apt-get install -y qperf; 30 | apt-get install -y iputils-ping; 31 | apt-get clean; 32 | apt-get install -y wget gawk make; 33 | apt-get clean; 34 | apt-get install -y gcc libpath-tiny-perl; 35 | cd /tmp; 36 | wget https://www.openfabrics.org/downloads/qperf/qperf-0.4.9.tar.gz; 37 | wget https://www.openfabrics.org/downloads/qperf/qperf-0.4.9.tar.gz; 38 | tar xvf qperf-0.4.9.tar.gz; 39 | cd qperf-0.4.9; 40 | ./configure; 41 | make; 42 | sleep infinity; 43 | name: iperfservercontainer 44 | -------------------------------------------------------------------------------- /config/dp_network_internode/iperf_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kbench-service-oid-0-tid-0 5 | namespace: kbench-pod-namespace 6 | spec: 7 | selector: 8 | name: myiperf 9 | clusterIP: None 10 | ports: 11 | - name: foo # Actually, no port is needed. 
12 | port: 1234 13 | targetPort: 1234 14 | -------------------------------------------------------------------------------- /config/dp_network_internode/logs_netperf.sh: -------------------------------------------------------------------------------- 1 | kubectl logs --namespace=kbench-pod-namespace kbench-pod-oid-1-tid-0 2 | echo "SECOND POD" 3 | echo "" 4 | echo "" 5 | echo "" 6 | echo "" 7 | echo "" 8 | echo "" 9 | echo "" 10 | kubectl logs --namespace=kbench-pod-namespace kbench-pod-oid-2-tid-0 11 | -------------------------------------------------------------------------------- /config/dp_network_internode/wcp-netperf-3iperfstreams-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "BlockingLevel": "operation", 3 | "Timeout": 60000, 4 | "CheckingInterval": 3000, 5 | "Cleanup": false, 6 | "Operations" : [ 7 | { 8 | "Services": 9 | { 10 | "Actions": [ 11 | { 12 | "Act": "CREATE", 13 | "Spec": { 14 | "YamlSpec": "./config/dp_network_internode/iperf_service.yaml" 15 | } 16 | } 17 | ], 18 | "SleepTimes": [100], 19 | "Count": 1 20 | } 21 | }, 22 | { 23 | "Pods": 24 | { 25 | "Actions": [ 26 | { 27 | "Act": "CREATE", 28 | "Spec": { 29 | "ImagePullPolicy": "IfNotPresent", 30 | "Image": "nginx", 31 | "Namespace": "kbench-pod-namespace", 32 | "PodNamePrefix": "server", 33 | "LabelKey": "podtype", 34 | "LabelValue": "server", 35 | "YamlSpec": "./config/dp_network_internode/iperf_server_pod.yaml" 36 | } 37 | } 38 | ], 39 | "SleepTimes": [100000], 40 | "Count": 1 41 | } 42 | }, 43 | { 44 | "Pods": 45 | { 46 | "Actions": [ 47 | { 48 | "Act": "RUN", 49 | "Spec": { 50 | "Namespace": "kbench-pod-namespace", 51 | "LabelKey": "podtype", 52 | "LabelValue": "server", 53 | "MatchGoroutine": true, 54 | "Command": "mkdir /tmp/iperfoutput; iperf3 -s -p 5101 >> /tmp/iperfoutput/iperfserver.out 2>> /tmp/iperfoutput/iperfserver.err & iperf3 -s -p 5102 >> /tmp/iperfoutput/iperfserver.out 2>> /tmp/iperfoutput/iperfserver.err & iperf3 -s 
-p 5103 >> /tmp/iperfoutput/iperfserver.out 2>> /tmp/iperfoutput/iperfserver.err &" 55 | } 56 | } 57 | ], 58 | "SleepTimes": [100], 59 | "Count": 1 60 | } 61 | }, 62 | { 63 | "Pods": 64 | { 65 | "Actions": [ 66 | { 67 | "Act": "CREATE", 68 | "Spec": { 69 | "ImagePullPolicy": "IfNotPresent", 70 | "Image": "nginx", 71 | "Namespace": "kbench-pod-namespace", 72 | "PodNamePrefix": "client", 73 | "LabelKey": "podtype", 74 | "LabelValue": "client", 75 | "YamlSpec": "./config/dp_network_internode/iperf_client_pod.yaml" 76 | } 77 | } 78 | ], 79 | "SleepTimes": [200000], 80 | "Count": 1 81 | } 82 | }, 83 | { 84 | "Pods": 85 | { 86 | "Actions": [ 87 | { 88 | "Act": "RUN", 89 | "Spec": { 90 | "Namespace": "kbench-pod-namespace", 91 | "LabelKey": "podtype", 92 | "LabelValue": "client", 93 | "MatchGoroutine": true, 94 | "Command": "mkdir /tmp/iperfoutput; iperf3 -t 50 -T s1 -P2 -w 2M -p 5101 -O 10 -c iperfserver.kbench-service-oid-0-tid-0.kbench-pod-namespace.svc.cluster.local >> /tmp/iperfoutput/iperfclient.out 2>> /tmp/iperfoutput/iperfclient.err & iperf3 -t 50 -T s2 -P2 -w 2M -p 5102 -O 10 -c iperfserver.kbench-service-oid-0-tid-0.kbench-pod-namespace.svc.cluster.local >> /tmp/iperfoutput/iperfclient.out 2>> /tmp/iperfoutput/iperfclient.err & iperf3 -t 50 -T s3-P2 -w 2M -p 5103 -O 10 -c iperfserver.kbench-service-oid-0-tid-0.kbench-pod-namespace.svc.cluster.local >> /tmp/iperfoutput/iperfclient.out 2>> /tmp/iperfoutput/iperfclient.err; " 95 | } 96 | } 97 | ], 98 | "SleepTimes": [100000], 99 | "Count": 1 100 | } 101 | }, 102 | { 103 | "Pods": 104 | { 105 | "Actions": [ 106 | { 107 | "Act": "COPY", 108 | "Spec": { 109 | "Namespace": "kbench-pod-namespace", 110 | "LocalPath": "./", 111 | "ContainerPath": "/tmp/iperfoutput", 112 | "Upload": false, 113 | "MatchOperation": "ALL", 114 | "MatchGoroutine": false 115 | } 116 | } 117 | ], 118 | "SleepTimes": [20000], 119 | "Count": 1 120 | } 121 | } 122 | ], 123 | "Tags" : [ 124 | { 125 | "Key": "testProfile", 126 | "Value": "heavy" 127 
| } 128 | ], 129 | "WavefrontPathDir": "/home/vmware" 130 | } 131 | -------------------------------------------------------------------------------- /config/dp_network_interzone/README: -------------------------------------------------------------------------------- 1 | Run dp_network_interzone individually using the following command: 2 | cd ../../; ./run.sh -t "dp_network_interzone" -r -o " 3 | 4 | Please use the qperf results for the network latency and the iperf3 results for bandwidth. 5 | 6 | -------------------------------------------------------------------------------- /config/dp_network_interzone/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "BlockingLevel": "operation", 3 | "Timeout": 60000, 4 | "CheckingInterval": 3000, 5 | "Cleanup": false, 6 | "Operations" : [ 7 | { 8 | "Services": 9 | { 10 | "Actions": [ 11 | { 12 | "Act": "CREATE", 13 | "Spec": { 14 | "YamlSpec": "./config/dp_network_interzone/iperf_service.yaml" 15 | } 16 | } 17 | ], 18 | "SleepTimes": [100], 19 | "Count": 1 20 | } 21 | }, 22 | { 23 | "Pods": 24 | { 25 | "Actions": [ 26 | { 27 | "Act": "CREATE", 28 | "Spec": { 29 | "ImagePullPolicy": "IfNotPresent", 30 | "Image": "nginx", 31 | "Namespace": "kbench-pod-namespace", 32 | "PodNamePrefix": "server", 33 | "YamlSpec": "./config/dp_network_interzone/iperf_server_pod.yaml" 34 | } 35 | } 36 | ], 37 | "SleepTimes": [100], 38 | "Count": 1 39 | } 40 | }, 41 | { 42 | "Pods": 43 | { 44 | "Actions": [ 45 | { 46 | "Act": "CREATE", 47 | "Spec": { 48 | "ImagePullPolicy": "IfNotPresent", 49 | "Image": "nginx", 50 | "Namespace": "kbench-pod-namespace", 51 | "PodNamePrefix": "client", 52 | "YamlSpec": "./config/dp_network_interzone/iperf_client_pod.yaml" 53 | } 54 | } 55 | ], 56 | "SleepTimes": [100000], 57 | "Count": 1 58 | } 59 | }, 60 | { 61 | "Pods": 62 | { 63 | "Actions": [ 64 | { 65 | "Act": "RUN", 66 | "Spec": { 67 | "Namespace": "kbench-pod-namespace", 68 | "LabelKey": "podtype", 69 | 
"LabelValue": "server", 70 | "MatchGoroutine": true, 71 | "Command": "mkdir /tmp/perfoutput; ifconfig > /tmp/perfoutput/ifconfig_server; iperf3 -s -p 5101 >> /tmp/perfoutput/iperfserver.out 2>> /tmp/perfoutput/iperfserver.err &" 72 | } 73 | } 74 | ], 75 | "SleepTimes": [10000], 76 | "Count": 1 77 | } 78 | }, 79 | { 80 | "Pods": 81 | { 82 | "Actions": [ 83 | { 84 | "Act": "RUN", 85 | "Spec": { 86 | "Namespace": "kbench-pod-namespace", 87 | "LabelKey": "podtype", 88 | "LabelValue": "client", 89 | "MatchGoroutine": true, 90 | "Command": "mkdir /tmp/perfoutput; ifconfig > /tmp/perfoutput/ifconfig_client; ping -c 10 iperfserver.kbench-service-oid-0-tid-0.kbench-pod-namespace.svc.cluster.local > /tmp/perfoutput/ping.out 2> /tmp/perfoutput/ping.err; iperf3 -t 90 -T s1 -p 5101 -O 10 -c iperfserver.kbench-service-oid-0-tid-0.kbench-pod-namespace.svc.cluster.local >> /tmp/perfoutput/iperfclient.out 2>> /tmp/perfoutput/iperfclient.err; iperf3 -t 90 -T s1 -p 5101 -O 10 -u -w256k -b 0 -c iperfserver.kbench-service-oid-0-tid-0.kbench-pod-namespace.svc.cluster.local >> /tmp/perfoutput/iperfclient_UDP.out 2>> /tmp/perfoutput/iperfclient_UDP.err; iperf3 -t 90 -T s1 -p 5101 -M 8800 -O 10 -c iperfserver.kbench-service-oid-0-tid-0.kbench-pod-namespace.svc.cluster.local >> /tmp/perfoutput/iperfclient_Jumbo.out 2>> /tmp/perfoutput/iperfclient_Jumbo.err; iperf3 -t 90 -T s1 -p 5101 -O 10 -u -l 8800 -w256k -b 0 -c iperfserver.kbench-service-oid-0-tid-0.kbench-pod-namespace.svc.cluster.local >> /tmp/perfoutput/iperfclient_Jumbo_UDP.out 2>> /tmp/perfoutput/iperfclient_Jumbo_UDP.err" 91 | } 92 | } 93 | ], 94 | "SleepTimes": [10000], 95 | "Count": 1 96 | } 97 | }, 98 | { 99 | "Pods": 100 | { 101 | "Actions": [ 102 | { 103 | "Act": "RUN", 104 | "Spec": { 105 | "Namespace": "kbench-pod-namespace", 106 | "LabelKey": "podtype", 107 | "LabelValue": "server", 108 | "MatchGoroutine": true, 109 | "Command": "pkill iperf3; /tmp/qperf-0.4.9/src/qperf >> /tmp/perfoutput/qperfserver.out 2>> 
/tmp/perfoutput/qperfserver.err &" 110 | } 111 | } 112 | ], 113 | "SleepTimes": [10000], 114 | "Count": 1 115 | } 116 | }, 117 | { 118 | "Pods": 119 | { 120 | "Actions": [ 121 | { 122 | "Act": "RUN", 123 | "Spec": { 124 | "Namespace": "kbench-pod-namespace", 125 | "LabelKey": "podtype", 126 | "LabelValue": "client", 127 | "MatchGoroutine": true, 128 | "Command": "/tmp/qperf-0.4.9/src/qperf -v iperfserver.kbench-service-oid-0-tid-0.kbench-pod-namespace.svc.cluster.local tcp_lat udp_lat tcp_bw udp_bw >> /tmp/perfoutput/qperfclient.out 2>> /tmp/perfoutput/qperfclient.err" 129 | } 130 | } 131 | ], 132 | "SleepTimes": [10000], 133 | "Count": 1 134 | } 135 | }, 136 | { 137 | "Pods": 138 | { 139 | "Actions": [ 140 | { 141 | "Act": "COPY", 142 | "Spec": { 143 | "Namespace": "kbench-pod-namespace", 144 | "LocalPath": "./", 145 | "ContainerPath": "/tmp/perfoutput", 146 | "Upload": false, 147 | "MatchOperation": "ALL", 148 | "MatchGoroutine": false 149 | } 150 | } 151 | ], 152 | "SleepTimes": [20000], 153 | "Count": 1 154 | } 155 | } 156 | ], 157 | "Tags" : [ 158 | { 159 | "Key": "testProfile", 160 | "Value": "dp_network_interzone" 161 | } 162 | ], 163 | "WavefrontPathDir": "/home/vmware" 164 | } 165 | -------------------------------------------------------------------------------- /config/dp_network_interzone/describe_netperf.sh: -------------------------------------------------------------------------------- 1 | kubectl --namespace=kbench-pod-namespace describe po kbench-pod-oid-1-tid-0 2 | echo "SECOND POD" 3 | echo "" 4 | echo "" 5 | echo "" 6 | echo "" 7 | echo "" 8 | echo "" 9 | echo "" 10 | kubectl --namespace=kbench-pod-namespace describe po kbench-pod-oid-2-tid-0 11 | -------------------------------------------------------------------------------- /config/dp_network_interzone/get_netperf.sh: -------------------------------------------------------------------------------- 1 | kubectl get pods --namespace=kbench-pod-namespace 2 | kubectl get services 
--namespace=kbench-pod-namespace 3 | -------------------------------------------------------------------------------- /config/dp_network_interzone/iperf_client_pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: myiperfclientpod 5 | namespace: kbench-pod-namespace 6 | labels: 7 | name: myiperf 8 | podtype: client 9 | spec: 10 | affinity: 11 | podAntiAffinity: 12 | requiredDuringSchedulingIgnoredDuringExecution: 13 | - labelSelector: 14 | matchExpressions: 15 | - key: podtype 16 | operator: In 17 | values: 18 | - server 19 | topologyKey: failure-domain.beta.kubernetes.io/zone 20 | hostname: iperfclient 21 | subdomain: kbench-service-oid-0-tid-0 22 | containers: 23 | - image: nginx 24 | resources: 25 | limits: 26 | memory: "4200Mi" 27 | cpu: "8000m" 28 | requests: 29 | memory: "4200Mi" 30 | cpu: "1000m" 31 | ephemeral-storage: "5Gi" 32 | command: ["/bin/sh","-c"] 33 | args: 34 | - sleep 2s; 35 | apt-get update; 36 | apt-get install -y iperf3; 37 | apt-get install -y procps; 38 | apt-get install -y net-tools; 39 | apt-get install -y iputils-ping; 40 | apt-get install -y qperf; 41 | apt-get clean; 42 | apt-get install -y wget gawk make; 43 | apt-get clean; 44 | apt-get install -y gcc libpath-tiny-perl; 45 | cd /tmp; 46 | wget https://www.openfabrics.org/downloads/qperf/qperf-0.4.9.tar.gz; 47 | tar xvf qperf-0.4.9.tar.gz; 48 | cd qperf-0.4.9; 49 | ./configure; 50 | make; 51 | sleep infinity; 52 | name: iperfclientcontainer 53 | -------------------------------------------------------------------------------- /config/dp_network_interzone/iperf_server_pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: myiperfserverpod 5 | namespace: kbench-pod-namespace 6 | labels: 7 | name: myiperf 8 | podtype: server 9 | spec: 10 | hostname: iperfserver 11 | subdomain: kbench-service-oid-0-tid-0 12 
| containers: 13 | - image: nginx 14 | resources: 15 | limits: 16 | memory: "4200Mi" 17 | cpu: "8000m" 18 | requests: 19 | memory: "4200Mi" 20 | cpu: "1000m" 21 | ephemeral-storage: "5Gi" 22 | command: ["/bin/sh","-c"] 23 | args: 24 | - sleep 2s; 25 | apt-get update; 26 | apt-get install -y iperf3; 27 | apt-get install -y procps; 28 | apt-get install -y net-tools; 29 | apt-get install -y qperf; 30 | apt-get install -y iputils-ping; 31 | apt-get clean; 32 | apt-get install -y wget gawk make; 33 | apt-get clean; 34 | apt-get install -y gcc libpath-tiny-perl; 35 | cd /tmp; 36 | wget https://www.openfabrics.org/downloads/qperf/qperf-0.4.9.tar.gz; 37 | wget https://www.openfabrics.org/downloads/qperf/qperf-0.4.9.tar.gz; 38 | tar xvf qperf-0.4.9.tar.gz; 39 | cd qperf-0.4.9; 40 | ./configure; 41 | make; 42 | sleep infinity; 43 | name: iperfservercontainer 44 | -------------------------------------------------------------------------------- /config/dp_network_interzone/iperf_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kbench-service-oid-0-tid-0 5 | namespace: kbench-pod-namespace 6 | spec: 7 | selector: 8 | name: myiperf 9 | clusterIP: None 10 | ports: 11 | - name: foo # Actually, no port is needed. 
12 | port: 1234 13 | targetPort: 1234 14 | -------------------------------------------------------------------------------- /config/dp_network_interzone/logs_netperf.sh: -------------------------------------------------------------------------------- 1 | kubectl logs --namespace=kbench-pod-namespace kbench-pod-oid-1-tid-0 2 | echo "SECOND POD" 3 | echo "" 4 | echo "" 5 | echo "" 6 | echo "" 7 | echo "" 8 | echo "" 9 | echo "" 10 | kubectl logs --namespace=kbench-pod-namespace kbench-pod-oid-2-tid-0 11 | -------------------------------------------------------------------------------- /config/dp_network_interzone/wcp-netperf-3iperfstreams-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "BlockingLevel": "operation", 3 | "Timeout": 60000, 4 | "CheckingInterval": 3000, 5 | "Cleanup": false, 6 | "Operations" : [ 7 | { 8 | "Services": 9 | { 10 | "Actions": [ 11 | { 12 | "Act": "CREATE", 13 | "Spec": { 14 | "YamlSpec": "./config/dp_network_interzone/iperf_service.yaml" 15 | } 16 | } 17 | ], 18 | "SleepTimes": [100], 19 | "Count": 1 20 | } 21 | }, 22 | { 23 | "Pods": 24 | { 25 | "Actions": [ 26 | { 27 | "Act": "CREATE", 28 | "Spec": { 29 | "ImagePullPolicy": "IfNotPresent", 30 | "Image": "nginx", 31 | "Namespace": "kbench-pod-namespace", 32 | "PodNamePrefix": "server", 33 | "LabelKey": "podtype", 34 | "LabelValue": "server", 35 | "YamlSpec": "./config/dp_network_interzone/iperf_server_pod.yaml" 36 | } 37 | } 38 | ], 39 | "SleepTimes": [100000], 40 | "Count": 1 41 | } 42 | }, 43 | { 44 | "Pods": 45 | { 46 | "Actions": [ 47 | { 48 | "Act": "RUN", 49 | "Spec": { 50 | "Namespace": "kbench-pod-namespace", 51 | "LabelKey": "podtype", 52 | "LabelValue": "server", 53 | "MatchGoroutine": true, 54 | "Command": "mkdir /tmp/iperfoutput; iperf3 -s -p 5101 >> /tmp/iperfoutput/iperfserver.out 2>> /tmp/iperfoutput/iperfserver.err & iperf3 -s -p 5102 >> /tmp/iperfoutput/iperfserver.out 2>> /tmp/iperfoutput/iperfserver.err & iperf3 -s 
-p 5103 >> /tmp/iperfoutput/iperfserver.out 2>> /tmp/iperfoutput/iperfserver.err &" 55 | } 56 | } 57 | ], 58 | "SleepTimes": [100], 59 | "Count": 1 60 | } 61 | }, 62 | { 63 | "Pods": 64 | { 65 | "Actions": [ 66 | { 67 | "Act": "CREATE", 68 | "Spec": { 69 | "ImagePullPolicy": "IfNotPresent", 70 | "Image": "nginx", 71 | "Namespace": "kbench-pod-namespace", 72 | "PodNamePrefix": "client", 73 | "LabelKey": "podtype", 74 | "LabelValue": "client", 75 | "YamlSpec": "./config/dp_network_interzone/iperf_client_pod.yaml" 76 | } 77 | } 78 | ], 79 | "SleepTimes": [200000], 80 | "Count": 1 81 | } 82 | }, 83 | { 84 | "Pods": 85 | { 86 | "Actions": [ 87 | { 88 | "Act": "RUN", 89 | "Spec": { 90 | "Namespace": "kbench-pod-namespace", 91 | "LabelKey": "podtype", 92 | "LabelValue": "client", 93 | "MatchGoroutine": true, 94 | "Command": "mkdir /tmp/iperfoutput; iperf3 -t 50 -T s1 -P2 -w 2M -p 5101 -O 10 -c iperfserver.kbench-service-oid-0-tid-0.kbench-pod-namespace.svc.cluster.local >> /tmp/iperfoutput/iperfclient.out 2>> /tmp/iperfoutput/iperfclient.err & iperf3 -t 50 -T s2 -P2 -w 2M -p 5102 -O 10 -c iperfserver.kbench-service-oid-0-tid-0.kbench-pod-namespace.svc.cluster.local >> /tmp/iperfoutput/iperfclient.out 2>> /tmp/iperfoutput/iperfclient.err & iperf3 -t 50 -T s3 -P2 -w 2M -p 5103 -O 10 -c iperfserver.kbench-service-oid-0-tid-0.kbench-pod-namespace.svc.cluster.local >> /tmp/iperfoutput/iperfclient.out 2>> /tmp/iperfoutput/iperfclient.err; " 95 | } 96 | } 97 | ], 98 | "SleepTimes": [100000], 99 | "Count": 1 100 | } 101 | }, 102 | { 103 | "Pods": 104 | { 105 | "Actions": [ 106 | { 107 | "Act": "COPY", 108 | "Spec": { 109 | "Namespace": "kbench-pod-namespace", 110 | "LocalPath": "./", 111 | "ContainerPath": "/tmp/iperfoutput", 112 | "Upload": false, 113 | "MatchOperation": "ALL", 114 | "MatchGoroutine": false 115 | } 116 | } 117 | ], 118 | "SleepTimes": [20000], 119 | "Count": 1 120 | } 121 | } 122 | ], 123 | "Tags" : [ 124 | { 125 | "Key": "testProfile", 126 | "Value": "heavy" 127
| } 128 | ], 129 | "WavefrontPathDir": "/home/vmware" 130 | } 131 | -------------------------------------------------------------------------------- /config/dp_network_intranode/README: -------------------------------------------------------------------------------- 1 | Run dp_network_intranode individually using the following command: 2 | cd ../../; ./run.sh -t "dp_network_intranode" -r -o " 3 | 4 | Please use the qperf results for the network latency and the iperf3 results for bandwidth. 5 | -------------------------------------------------------------------------------- /config/dp_network_intranode/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "BlockingLevel": "operation", 3 | "Timeout": 60000, 4 | "CheckingInterval": 3000, 5 | "Cleanup": false, 6 | "Operations" : [ 7 | { 8 | "Services": 9 | { 10 | "Actions": [ 11 | { 12 | "Act": "CREATE", 13 | "Spec": { 14 | "YamlSpec": "./config/dp_network_intranode/iperf_service.yaml" 15 | } 16 | } 17 | ], 18 | "SleepTimes": [100], 19 | "Count": 1 20 | } 21 | }, 22 | { 23 | "Pods": 24 | { 25 | "Actions": [ 26 | { 27 | "Act": "CREATE", 28 | "Spec": { 29 | "ImagePullPolicy": "IfNotPresent", 30 | "Image": "nginx", 31 | "Namespace": "kbench-pod-namespace", 32 | "PodNamePrefix": "server", 33 | "YamlSpec": "./config/dp_network_intranode/iperf_server_pod.yaml" 34 | } 35 | } 36 | ], 37 | "SleepTimes": [100], 38 | "Count": 1 39 | } 40 | }, 41 | { 42 | "Pods": 43 | { 44 | "Actions": [ 45 | { 46 | "Act": "CREATE", 47 | "Spec": { 48 | "ImagePullPolicy": "IfNotPresent", 49 | "Image": "nginx", 50 | "Namespace": "kbench-pod-namespace", 51 | "PodNamePrefix": "client", 52 | "YamlSpec": "./config/dp_network_intranode/iperf_client_pod.yaml" 53 | } 54 | } 55 | ], 56 | "SleepTimes": [100000], 57 | "Count": 1 58 | } 59 | }, 60 | { 61 | "Pods": 62 | { 63 | "Actions": [ 64 | { 65 | "Act": "RUN", 66 | "Spec": { 67 | "Namespace": "kbench-pod-namespace", 68 | "LabelKey": "podtype", 69 | 
"LabelValue": "server", 70 | "MatchGoroutine": true, 71 | "Command": "mkdir /tmp/perfoutput; ifconfig > /tmp/perfoutput/ifconfig_server; iperf3 -s -p 5101 >> /tmp/perfoutput/iperfserver.out 2>> /tmp/perfoutput/iperfserver.err &" 72 | } 73 | } 74 | ], 75 | "SleepTimes": [10000], 76 | "Count": 1 77 | } 78 | }, 79 | { 80 | "Pods": 81 | { 82 | "Actions": [ 83 | { 84 | "Act": "RUN", 85 | "Spec": { 86 | "Namespace": "kbench-pod-namespace", 87 | "LabelKey": "podtype", 88 | "LabelValue": "client", 89 | "MatchGoroutine": true, 90 | "Command": "mkdir /tmp/perfoutput; ifconfig > /tmp/perfoutput/ifconfig_client; ping -c 10 iperfserver.kbench-service-oid-0-tid-0.kbench-pod-namespace.svc.cluster.local > /tmp/perfoutput/ping.out 2> /tmp/perfoutput/ping.err; iperf3 -t 90 -T s1 -p 5101 -O 10 -c iperfserver.kbench-service-oid-0-tid-0.kbench-pod-namespace.svc.cluster.local >> /tmp/perfoutput/iperfclient.out 2>> /tmp/perfoutput/iperfclient.err; iperf3 -t 90 -T s1 -p 5101 -O 10 -u -w256k -b 0 -c iperfserver.kbench-service-oid-0-tid-0.kbench-pod-namespace.svc.cluster.local >> /tmp/perfoutput/iperfclient_UDP.out 2>> /tmp/perfoutput/iperfclient_UDP.err; iperf3 -t 90 -T s1 -p 5101 -M 8800 -O 10 -c iperfserver.kbench-service-oid-0-tid-0.kbench-pod-namespace.svc.cluster.local >> /tmp/perfoutput/iperfclient_Jumbo.out 2>> /tmp/perfoutput/iperfclient_Jumbo.err; iperf3 -t 90 -T s1 -p 5101 -O 10 -u -l 8800 -w256k -b 0 -c iperfserver.kbench-service-oid-0-tid-0.kbench-pod-namespace.svc.cluster.local >> /tmp/perfoutput/iperfclient_Jumbo_UDP.out 2>> /tmp/perfoutput/iperfclient_Jumbo_UDP.err" 91 | } 92 | } 93 | ], 94 | "SleepTimes": [10000], 95 | "Count": 1 96 | } 97 | }, 98 | { 99 | "Pods": 100 | { 101 | "Actions": [ 102 | { 103 | "Act": "RUN", 104 | "Spec": { 105 | "Namespace": "kbench-pod-namespace", 106 | "LabelKey": "podtype", 107 | "LabelValue": "server", 108 | "MatchGoroutine": true, 109 | "Command": "pkill iperf3; /tmp/qperf-0.4.9/src/qperf >> /tmp/perfoutput/qperfserver.out 2>> 
/tmp/perfoutput/qperfserver.err &" 110 | } 111 | } 112 | ], 113 | "SleepTimes": [10000], 114 | "Count": 1 115 | } 116 | }, 117 | { 118 | "Pods": 119 | { 120 | "Actions": [ 121 | { 122 | "Act": "RUN", 123 | "Spec": { 124 | "Namespace": "kbench-pod-namespace", 125 | "LabelKey": "podtype", 126 | "LabelValue": "client", 127 | "MatchGoroutine": true, 128 | "Command": "/tmp/qperf-0.4.9/src/qperf -v iperfserver.kbench-service-oid-0-tid-0.kbench-pod-namespace.svc.cluster.local tcp_lat udp_lat tcp_bw udp_bw >> /tmp/perfoutput/qperfclient.out 2>> /tmp/perfoutput/qperfclient.err" 129 | } 130 | } 131 | ], 132 | "SleepTimes": [10000], 133 | "Count": 1 134 | } 135 | }, 136 | { 137 | "Pods": 138 | { 139 | "Actions": [ 140 | { 141 | "Act": "COPY", 142 | "Spec": { 143 | "Namespace": "kbench-pod-namespace", 144 | "LocalPath": "./", 145 | "ContainerPath": "/tmp/perfoutput", 146 | "Upload": false, 147 | "MatchOperation": "ALL", 148 | "MatchGoroutine": false 149 | } 150 | } 151 | ], 152 | "SleepTimes": [20000], 153 | "Count": 1 154 | } 155 | } 156 | ], 157 | "Tags" : [ 158 | { 159 | "Key": "testProfile", 160 | "Value": "dp_network_intranode" 161 | } 162 | ], 163 | "WavefrontPathDir": "/home/vmware" 164 | } 165 | -------------------------------------------------------------------------------- /config/dp_network_intranode/describe_netperf.sh: -------------------------------------------------------------------------------- 1 | kubectl --namespace=kbench-pod-namespace describe po kbench-pod-oid-1-tid-0 2 | echo "SECOND POD" 3 | echo "" 4 | echo "" 5 | echo "" 6 | echo "" 7 | echo "" 8 | echo "" 9 | echo "" 10 | kubectl --namespace=kbench-pod-namespace describe po kbench-pod-oid-2-tid-0 11 | -------------------------------------------------------------------------------- /config/dp_network_intranode/get_netperf.sh: -------------------------------------------------------------------------------- 1 | kubectl get pods --namespace=kbench-pod-namespace 2 | kubectl get services 
--namespace=kbench-pod-namespace 3 | -------------------------------------------------------------------------------- /config/dp_network_intranode/iperf_client_pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: myiperfclientpod 5 | namespace: kbench-pod-namespace 6 | labels: 7 | name: myiperf 8 | podtype: client 9 | spec: 10 | affinity: 11 | podAffinity: 12 | requiredDuringSchedulingIgnoredDuringExecution: 13 | - labelSelector: 14 | matchExpressions: 15 | - key: podtype 16 | operator: In 17 | values: 18 | - server 19 | topologyKey: kubernetes.io/hostname 20 | hostname: iperfclient 21 | subdomain: kbench-service-oid-0-tid-0 22 | containers: 23 | - image: nginx 24 | resources: 25 | limits: 26 | memory: "4200Mi" 27 | cpu: "8000m" 28 | requests: 29 | memory: "4200Mi" 30 | cpu: "1000m" 31 | ephemeral-storage: "5Gi" 32 | command: ["/bin/sh","-c"] 33 | args: 34 | - sleep 2s; 35 | apt-get update; 36 | apt-get install -y iperf3; 37 | apt-get install -y procps; 38 | apt-get install -y net-tools; 39 | apt-get install -y iputils-ping; 40 | apt-get install -y qperf; 41 | apt-get clean; 42 | apt-get install -y wget gawk make; 43 | apt-get clean; 44 | apt-get install -y gcc libpath-tiny-perl; 45 | cd /tmp; 46 | wget https://www.openfabrics.org/downloads/qperf/qperf-0.4.9.tar.gz; 47 | tar xvf qperf-0.4.9.tar.gz; 48 | cd qperf-0.4.9; 49 | ./configure; 50 | make; 51 | sleep infinity; 52 | name: iperfclientcontainer 53 | -------------------------------------------------------------------------------- /config/dp_network_intranode/iperf_server_pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: myiperfserverpod 5 | namespace: kbench-pod-namespace 6 | labels: 7 | name: myiperf 8 | podtype: server 9 | spec: 10 | hostname: iperfserver 11 | subdomain: kbench-service-oid-0-tid-0 12 | containers: 13 | 
- image: nginx 14 | resources: 15 | limits: 16 | memory: "4200Mi" 17 | cpu: "8000m" 18 | requests: 19 | memory: "4200Mi" 20 | cpu: "1000m" 21 | ephemeral-storage: "5Gi" 22 | command: ["/bin/sh","-c"] 23 | args: 24 | - sleep 2s; 25 | apt-get update; 26 | apt-get install -y iperf3; 27 | apt-get install -y procps; 28 | apt-get install -y net-tools; 29 | apt-get install -y qperf; 30 | apt-get install -y iputils-ping; 31 | apt-get clean; 32 | apt-get install -y wget gawk make; 33 | apt-get clean; 34 | apt-get install -y gcc libpath-tiny-perl; 35 | cd /tmp; 36 | wget https://www.openfabrics.org/downloads/qperf/qperf-0.4.9.tar.gz; 37 | wget https://www.openfabrics.org/downloads/qperf/qperf-0.4.9.tar.gz; 38 | tar xvf qperf-0.4.9.tar.gz; 39 | cd qperf-0.4.9; 40 | ./configure; 41 | make; 42 | sleep infinity; 43 | name: iperfservercontainer 44 | -------------------------------------------------------------------------------- /config/dp_network_intranode/iperf_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kbench-service-oid-0-tid-0 5 | namespace: kbench-pod-namespace 6 | spec: 7 | selector: 8 | name: myiperf 9 | clusterIP: None 10 | ports: 11 | - name: foo # Actually, no port is needed. 
12 | port: 1234 13 | targetPort: 1234 14 | -------------------------------------------------------------------------------- /config/dp_network_intranode/logs_netperf.sh: -------------------------------------------------------------------------------- 1 | kubectl logs --namespace=kbench-pod-namespace kbench-pod-oid-1-tid-0 2 | echo "SECOND POD" 3 | echo "" 4 | echo "" 5 | echo "" 6 | echo "" 7 | echo "" 8 | echo "" 9 | echo "" 10 | kubectl logs --namespace=kbench-pod-namespace kbench-pod-oid-2-tid-0 11 | -------------------------------------------------------------------------------- /config/dp_network_intranode/wcp-netperf-3iperfstreams-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "BlockingLevel": "operation", 3 | "Timeout": 60000, 4 | "CheckingInterval": 3000, 5 | "Cleanup": false, 6 | "Operations" : [ 7 | { 8 | "Services": 9 | { 10 | "Actions": [ 11 | { 12 | "Act": "CREATE", 13 | "Spec": { 14 | "YamlSpec": "./config/dp_network_intranode/iperf_service.yaml" 15 | } 16 | } 17 | ], 18 | "SleepTimes": [100], 19 | "Count": 1 20 | } 21 | }, 22 | { 23 | "Pods": 24 | { 25 | "Actions": [ 26 | { 27 | "Act": "CREATE", 28 | "Spec": { 29 | "ImagePullPolicy": "IfNotPresent", 30 | "Image": "nginx", 31 | "Namespace": "kbench-pod-namespace", 32 | "PodNamePrefix": "server", 33 | "YamlSpec": "./config/dp_network_intranode/iperf_server_pod.yaml" 34 | } 35 | } 36 | ], 37 | "SleepTimes": [100], 38 | "Count": 1 39 | } 40 | }, 41 | { 42 | "Pods": 43 | { 44 | "Actions": [ 45 | { 46 | "Act": "CREATE", 47 | "Spec": { 48 | "ImagePullPolicy": "IfNotPresent", 49 | "Image": "nginx", 50 | "Namespace": "kbench-pod-namespace", 51 | "PodNamePrefix": "client", 52 | "YamlSpec": "./config/dp_network_intranode/iperf_client_pod.yaml" 53 | } 54 | } 55 | ], 56 | "SleepTimes": [100000], 57 | "Count": 1 58 | } 59 | }, 60 | { 61 | "Pods": 62 | { 63 | "Actions": [ 64 | { 65 | "Act": "RUN", 66 | "Spec": { 67 | "Namespace": "kbench-pod-namespace", 68 | 
"LabelKey": "podtype", 69 | "LabelValue": "server", 70 | "MatchGoroutine": true, 71 | "Command": "mkdir /tmp/perfoutput; iperf3 -s -p 5101 >> /tmp/perfoutput/iperfserver.out 2>> /tmp/perfoutput/iperfserver.err & iperf3 -s -p 5102 >> /tmp/perfoutput/iperfserver.out 2>> /tmp/perfoutput/iperfserver.err & iperf3 -s -p 5103 >> /tmp/perfoutput/iperfserver.out 2>> /tmp/perfoutput/iperfserver.err &" 72 | } 73 | } 74 | ], 75 | "SleepTimes": [10000], 76 | "Count": 1 77 | } 78 | }, 79 | { 80 | "Pods": 81 | { 82 | "Actions": [ 83 | { 84 | "Act": "RUN", 85 | "Spec": { 86 | "Namespace": "kbench-pod-namespace", 87 | "LabelKey": "podtype", 88 | "LabelValue": "client", 89 | "MatchGoroutine": true, 90 | "Command": "mkdir /tmp/perfoutput; iperf3 -t 90 -T s1 -P2 -p 5101 -O 10 -c iperfserver.kbench-service-oid-0-tid-0.kbench-pod-namespace.svc.cluster.local >> /tmp/perfoutput/iperfclient.out 2>> /tmp/perfoutput/iperfclient.err & iperf3 -t 90 -T s2 -P2 -p 5102 -O 10 -c iperfserver.kbench-service-oid-0-tid-0.kbench-pod-namespace.svc.cluster.local >> /tmp/perfoutput/iperfclient.out 2>> /tmp/perfoutput/iperfclient.err & iperf3 -t 90 -T s3 -P2 -p 5103 -O 10 -c iperfserver.kbench-service-oid-0-tid-0.kbench-pod-namespace.svc.cluster.local >> /tmp/perfoutput/iperfclient.out 2>> /tmp/perfoutput/iperfclient.err; " 91 | } 92 | } 93 | ], 94 | "SleepTimes": [10000], 95 | "Count": 1 96 | } 97 | }, 98 | { 99 | "Pods": 100 | { 101 | "Actions": [ 102 | { 103 | "Act": "RUN", 104 | "Spec": { 105 | "Namespace": "kbench-pod-namespace", 106 | "LabelKey": "podtype", 107 | "LabelValue": "client", 108 | "MatchGoroutine": true, 109 | "Command": "mkdir /tmp/perfoutput; iperf3 -u -b 0 -w 256k -t 90 -T s1 -P2 -p 5101 -O 10 -c iperfserver.kbench-service-oid-0-tid-0.kbench-pod-namespace.svc.cluster.local >> /tmp/perfoutput/iperfclient_UDP.out 2>> /tmp/perfoutput/iperfclient_UDP.err & iperf3 -u -b 0 -w 256k -t 90 -T s2 -P2 -p 5102 -O 10 -c iperfserver.kbench-service-oid-0-tid-0.kbench-pod-namespace.svc.cluster.local 
>> /tmp/perfoutput/iperfclient_UDP.out 2>> /tmp/perfoutput/iperfclient_UDP.err & iperf3 -u -b 0 -w 256k -t 90 -T s3 -P2 -p 5103 -O 10 -c iperfserver.kbench-service-oid-0-tid-0.kbench-pod-namespace.svc.cluster.local >> /tmp/perfoutput/iperfclient_UDP.out 2>> /tmp/perfoutput/iperfclient_UDP.err; " 110 | } 111 | } 112 | ], 113 | "SleepTimes": [10000], 114 | "Count": 1 115 | } 116 | }, 117 | { 118 | "Pods": 119 | { 120 | "Actions": [ 121 | { 122 | "Act": "COPY", 123 | "Spec": { 124 | "Namespace": "kbench-pod-namespace", 125 | "LocalPath": "./", 126 | "ContainerPath": "/tmp/perfoutput", 127 | "Upload": false, 128 | "MatchOperation": "ALL", 129 | "MatchGoroutine": false 130 | } 131 | } 132 | ], 133 | "SleepTimes": [20000], 134 | "Count": 1 135 | } 136 | } 137 | ], 138 | "Tags" : [ 139 | { 140 | "Key": "testProfile", 141 | "Value": "dp_network_intranode" 142 | } 143 | ], 144 | "WavefrontPathDir": "/home/vmware" 145 | } 146 | -------------------------------------------------------------------------------- /config/dp_redis/README: -------------------------------------------------------------------------------- 1 | Run redis individually using the following command: 2 | cd ../../; ./run.sh -t "dp_redis" -r -o " 3 | -------------------------------------------------------------------------------- /config/dp_redis/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "BlockingLevel": "operation", 3 | "Timeout": 60000, 4 | "CheckingInterval": 3000, 5 | "Cleanup": false, 6 | "Operations" : [ 7 | { 8 | "Pods": 9 | { 10 | "Actions": [ 11 | { 12 | "Act": "CREATE", 13 | "Spec": { 14 | "ImagePullPolicy": "IfNotPresent", 15 | "Image": "nginx", 16 | "Namespace": "kbench-pod-namespace", 17 | "PodNamePrefix": "myredis", 18 | "LabelKey": "podtype", 19 | "LabelValue": "redisworker", 20 | "YamlSpec": "./config/dp_redis/redis_pod.yaml" 21 | } 22 | } 23 | ], 24 | "SleepTimes": [50000], 25 | "Count": 1 26 | } 27 | }, 28 | { 29 | "Predicate": 30 
| { 31 | "Resource": "kbench-pod-namespace/pod/kbench-pod-oid-0-tid-0/rediscontainer", 32 | "Command": "ls /memtier_benchmark/memtier_benchmark", 33 | "Expect": "contains:memtier_benchmark" 34 | }, 35 | "Pods": 36 | { 37 | "Actions": [ 38 | { 39 | "Act": "RUN", 40 | "Spec": { 41 | "Namespace": "kbench-pod-namespace", 42 | "LabelKey": "podtype", 43 | "LabelValue": "redisworker", 44 | "MatchGoroutine": true, 45 | "Command": "mkdir /tmp/redisoutput; redis-server > /tmp/redisoutput/redisserver.out 2> /tmp/redisoutput/redisserver.err &" 46 | } 47 | } 48 | ], 49 | "SleepTimes": [5000], 50 | "Count": 1 51 | } 52 | }, 53 | { 54 | "Pods": 55 | { 56 | "Actions": [ 57 | { 58 | "Act": "RUN", 59 | "Spec": { 60 | "Namespace": "kbench-pod-namespace", 61 | "LabelKey": "podtype", 62 | "LabelValue": "redisworker", 63 | "MatchGoroutine": true, 64 | "Command": "cd /memtier_benchmark; ./memtier_benchmark > /tmp/redisoutput/memtier.out 2> /tmp/redisoutput/memtier.err;" 65 | } 66 | } 67 | ], 68 | "SleepTimes": [5000], 69 | "Count": 1 70 | } 71 | }, 72 | { 73 | "Pods": 74 | { 75 | "Actions": [ 76 | { 77 | "Act": "COPY", 78 | "Spec": { 79 | "Namespace": "kbench-pod-namespace", 80 | "LocalPath": "./", 81 | "ContainerPath": "/tmp/redisoutput", 82 | "Upload": false, 83 | "MatchOperation": "ALL", 84 | "MatchGoroutine": false 85 | } 86 | } 87 | ], 88 | "SleepTimes": [20000], 89 | "Count": 1 90 | } 91 | } 92 | ], 93 | "Tags" : [ 94 | { 95 | "Key": "testProfile", 96 | "Value": "redis" 97 | } 98 | ], 99 | "WavefrontPathDir": "/home/vmware" 100 | } 101 | -------------------------------------------------------------------------------- /config/dp_redis/describe_redis.sh: -------------------------------------------------------------------------------- 1 | kubectl --namespace=kbench-pod-namespace describe po kbench-pod-oid-0-tid-0 2 | -------------------------------------------------------------------------------- /config/dp_redis/get_redis.sh: 
-------------------------------------------------------------------------------- 1 | kubectl get pods --namespace=kbench-pod-namespace 2 | -------------------------------------------------------------------------------- /config/dp_redis/logs_redis.sh: -------------------------------------------------------------------------------- 1 | kubectl logs --namespace=kbench-pod-namespace kbench-pod-oid-0-tid-0 2 | -------------------------------------------------------------------------------- /config/dp_redis/redis_pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: myredispod 5 | namespace: kbench-pod-namespace 6 | labels: 7 | name: myredisserverclient 8 | spec: 9 | containers: 10 | - image: nginx 11 | resources: 12 | limits: 13 | memory: "4000Mi" 14 | cpu: "8000m" 15 | requests: 16 | memory: "4000Mi" 17 | cpu: "2000m" 18 | ephemeral-storage: "10Gi" 19 | command: ["/bin/sh","-c"] 20 | args: 21 | - apt-get update; 22 | apt-get install -y redis-server; 23 | apt-get install -y git libssl-dev build-essential autoconf automake libpcre3-dev libevent-dev pkg-config zlib1g-dev; 24 | git clone https://github.com/RedisLabs/memtier_benchmark.git; 25 | cd memtier_benchmark/; 26 | autoreconf -ivf; 27 | ./configure; 28 | make; 29 | sleep infinity; 30 | name: rediscontainer 31 | -------------------------------------------------------------------------------- /config/dp_redis_density/README: -------------------------------------------------------------------------------- 1 | Run redis individually using the following command: 2 | cd ../../; ./run.sh -t "dp_redis_density" -r -o " 3 | -------------------------------------------------------------------------------- /config/dp_redis_density/aggregate_results.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | Num_Instances=25; 3 | 4 | Agg_throughput=0; 5 | for((num=0;num < 
${Num_Instances};num++)) 6 | { 7 | throughput=`cat */kbench-pod-oid-0-tid-${num}/memtier.out | grep "Totals" | awk {'print $2'}` 8 | echo "Throughput of pod $num is $throughput"; 9 | Agg_throughput=`echo "$throughput + $Agg_throughput" | bc` 10 | } 11 | echo "Aggregate throughput = $Agg_throughput"; 12 | -------------------------------------------------------------------------------- /config/dp_redis_density/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "BlockingLevel": "operation", 3 | "Timeout": 60000, 4 | "CheckingInterval": 3000, 5 | "Cleanup": false, 6 | "Operations" : [ 7 | { 8 | "Pods": 9 | { 10 | "Actions": [ 11 | { 12 | "Act": "CREATE", 13 | "Spec": { 14 | "ImagePullPolicy": "IfNotPresent", 15 | "Image": "nginx", 16 | "Namespace": "kbench-pod-namespace", 17 | "PodNamePrefix": "myredis", 18 | "LabelKey": "podtype", 19 | "LabelValue": "redisworker", 20 | "YamlSpec": "./config/dp_redis_density/redis_pod.yaml" 21 | } 22 | } 23 | ], 24 | "SleepTimes": [100000], 25 | "Count": 25 26 | } 27 | }, 28 | { 29 | "Predicate": 30 | { 31 | "Resource": "kbench-pod-namespace/pod/kbench-pod-oid-0-tid-0/rediscontainer", 32 | "Command": "ls /memtier_benchmark/memtier_benchmark", 33 | "Expect": "contains:memtier_benchmark" 34 | }, 35 | "Pods": 36 | { 37 | "Actions": [ 38 | { 39 | "Act": "RUN", 40 | "Spec": { 41 | "Namespace": "kbench-pod-namespace", 42 | "LabelKey": "podtype", 43 | "LabelValue": "redisworker", 44 | "MatchGoroutine": true, 45 | "Command": "mkdir /tmp/redisoutput; redis-server > /tmp/redisoutput/redisserver.out 2> /tmp/redisoutput/redisserver.err &" 46 | } 47 | } 48 | ], 49 | "SleepTimes": [5000], 50 | "Count": 25 51 | } 52 | }, 53 | { 54 | "Pods": 55 | { 56 | "Actions": [ 57 | { 58 | "Act": "RUN", 59 | "Spec": { 60 | "Namespace": "kbench-pod-namespace", 61 | "LabelKey": "podtype", 62 | "LabelValue": "redisworker", 63 | "MatchGoroutine": true, 64 | "Command": "cd /memtier_benchmark; ./memtier_benchmark 
--test-time=600 > /tmp/redisoutput/memtier.out 2> /tmp/redisoutput/memtier.err;" 65 | } 66 | } 67 | ], 68 | "SleepTimes": [5000], 69 | "Count": 25 70 | } 71 | }, 72 | { 73 | "Pods": 74 | { 75 | "Actions": [ 76 | { 77 | "Act": "COPY", 78 | "Spec": { 79 | "Namespace": "kbench-pod-namespace", 80 | "LocalPath": "./", 81 | "ContainerPath": "/tmp/redisoutput", 82 | "Upload": false, 83 | "MatchOperation": "ALL", 84 | "MatchGoroutine": true 85 | } 86 | } 87 | ], 88 | "SleepTimes": [20000], 89 | "Count": 25 90 | } 91 | } 92 | ], 93 | "Tags" : [ 94 | { 95 | "Key": "testProfile", 96 | "Value": "redis" 97 | } 98 | ], 99 | "WavefrontPathDir": "/home/vmware" 100 | } 101 | -------------------------------------------------------------------------------- /config/dp_redis_density/describe_redis.sh: -------------------------------------------------------------------------------- 1 | kubectl --namespace=kbench-pod-namespace describe po kbench-pod-oid-0-tid-0 2 | -------------------------------------------------------------------------------- /config/dp_redis_density/get_redis.sh: -------------------------------------------------------------------------------- 1 | kubectl get pods --namespace=kbench-pod-namespace 2 | -------------------------------------------------------------------------------- /config/dp_redis_density/logs_redis.sh: -------------------------------------------------------------------------------- 1 | kubectl logs --namespace=kbench-pod-namespace kbench-pod-oid-0-tid-0 2 | -------------------------------------------------------------------------------- /config/dp_redis_density/redis_pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: myredispod 5 | namespace: kbench-pod-namespace 6 | labels: 7 | name: myredisserverclient 8 | spec: 9 | affinity: 10 | nodeAffinity: 11 | requiredDuringSchedulingIgnoredDuringExecution: 12 | nodeSelectorTerms: 13 | - matchExpressions: 14 | - 
key: kubernetes.io/hostname 15 | operator: In 16 | values: 17 | - w4-hs2-k0705.eng.vmware.com 18 | containers: 19 | - image: nginx 20 | resources: 21 | limits: 22 | memory: "12000Mi" 23 | cpu: "2000m" 24 | requests: 25 | cpu: "50m" 26 | memory: "4000Mi" 27 | ephemeral-storage: "10Gi" 28 | command: ["/bin/sh","-c"] 29 | args: 30 | - apt-get update; 31 | apt-get install -y redis-server; 32 | apt-get install -y git libssl-dev build-essential autoconf automake libpcre3-dev libevent-dev pkg-config zlib1g-dev; 33 | git clone https://github.com/RedisLabs/memtier_benchmark.git; 34 | cd memtier_benchmark/; 35 | autoreconf -ivf; 36 | ./configure; 37 | make; 38 | sleep infinity; 39 | name: rediscontainer 40 | -------------------------------------------------------------------------------- /config/dp_redis_service/README: -------------------------------------------------------------------------------- 1 | Run redis service using the following command: 2 | cd ../../; ./run.sh -t "dp_redis_service" -r -o " 3 | -------------------------------------------------------------------------------- /config/dp_redis_service/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "BlockingLevel": "operation", 3 | "Timeout": 60000, 4 | "CheckingInterval": 3000, 5 | "Cleanup": false, 6 | "Operations" : [ 7 | { 8 | "Deployments": 9 | { 10 | "Actions": [ 11 | { 12 | "Act": "CREATE", 13 | "Spec": { 14 | "YamlSpec": "./config/dp_redis_service/redis_deployment.yaml" 15 | } 16 | } 17 | ], 18 | "SleepTimes": [50000], 19 | "Count": 1 20 | } 21 | }, 22 | { 23 | "Pods": 24 | { 25 | "Actions": [ 26 | { 27 | "Act": "CREATE", 28 | "Spec": { 29 | "PodNamePrefix": "client", 30 | "LabelKey": "podtype", 31 | "LabelValue": "redisworker", 32 | "YamlSpec": "./config/dp_redis_service/redis_client_pod.yaml" 33 | } 34 | } 35 | ], 36 | "SleepTimes": [100000], 37 | "Count": 1 38 | } 39 | }, 40 | { 41 | "Services": 42 | { 43 | "Actions": [ 44 | { 45 | "Act": "CREATE", 
46 | "Spec": { 47 | "YamlSpec": "./config/dp_redis_service/redis_service.yaml" 48 | } 49 | } 50 | ], 51 | "SleepTimes": [10000], 52 | "Count": 1 53 | } 54 | }, 55 | { 56 | "Pods": 57 | { 58 | "Actions": [ 59 | { 60 | "Act": "RUN", 61 | "Spec": { 62 | "Namespace": "kbench-pod-namespace", 63 | "LabelKey": "podtype", 64 | "LabelValue": "redisworker", 65 | "MatchGoroutine": true, 66 | "Command": "mkdir /tmp/redisoutput; ping -c 10 kbench-service-oid-2-tid-0.kbench-pod-namespace.svc.cluster.local > /tmp/redisoutput/ping.out 2> /tmp/redisoutput/ping.err; cd /etc/nginx/memtier_benchmark; ./memtier_benchmark -s kbench-service-oid-2-tid-0.kbench-pod-namespace.svc.cluster.local --ratio=1:0 --hide-histogram --key-pattern P:P --key-minimum=100000 --key-maximum=200000 -n allkeys --data-size=10 > /tmp/redisoutput/memtier-warmup.out 2> /tmp/redisoutput/memtier-warmup.err" 67 | } 68 | } 69 | ], 70 | "SleepTimes": [10000], 71 | "Count": 1 72 | } 73 | }, 74 | { 75 | "Pods": 76 | { 77 | "Actions": [ 78 | { 79 | "Act": "RUN", 80 | "Spec": { 81 | "Namespace": "kbench-pod-namespace", 82 | "LabelKey": "podtype", 83 | "LabelValue": "redisworker", 84 | "MatchGoroutine": true, 85 | "Command": "cd /etc/nginx/memtier_benchmark; ./memtier_benchmark -s kbench-service-oid-2-tid-0.kbench-pod-namespace.svc.cluster.local --ratio=10:90 --threads=5 --test-time=300 --key-minimum=100000 --key-maximum=200000 --pipeline=5 -c 20 --hide-histogram -x 1 --key-pattern R:R --distinct-client-seed --data-size=10 > /tmp/redisoutput/memtier.out 2> /tmp/redisoutput/memtier.err;" 86 | } 87 | } 88 | ], 89 | "SleepTimes": [10000], 90 | "Count": 1 91 | } 92 | }, 93 | { 94 | "Pods": 95 | { 96 | "Actions": [ 97 | { 98 | "Act": "COPY", 99 | "Spec": { 100 | "Namespace": "kbench-pod-namespace", 101 | "LocalPath": "./", 102 | "ContainerPath": "/tmp/redisoutput", 103 | "Upload": false, 104 | "MatchOperation": "ALL", 105 | "MatchGoroutine": false 106 | } 107 | } 108 | ], 109 | "SleepTimes": [20000], 110 | "Count": 1 111 | } 
112 | } 113 | ], 114 | "Tags" : [ 115 | { 116 | "Key": "testProfile", 117 | "Value": "redis" 118 | } 119 | ], 120 | "WavefrontPathDir": "/home/vmware" 121 | } 122 | -------------------------------------------------------------------------------- /config/dp_redis_service/describe_redis.sh: -------------------------------------------------------------------------------- 1 | kubectl --namespace=kbench-pod-namespace describe po kbench-pod-oid-0-tid-0 2 | -------------------------------------------------------------------------------- /config/dp_redis_service/get_redis.sh: -------------------------------------------------------------------------------- 1 | kubectl get pods --namespace=kbench-pod-namespace 2 | -------------------------------------------------------------------------------- /config/dp_redis_service/logs_redis.sh: -------------------------------------------------------------------------------- 1 | kubectl logs --namespace=kbench-pod-namespace kbench-pod-oid-0-tid-0 2 | -------------------------------------------------------------------------------- /config/dp_redis_service/redis_client_pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: myredispod 5 | namespace: kbench-pod-namespace 6 | labels: 7 | name: myredisserverclient 8 | app: redis 9 | spec: 10 | affinity: 11 | podAntiAffinity: 12 | requiredDuringSchedulingIgnoredDuringExecution: 13 | - labelSelector: 14 | matchExpressions: 15 | - key: app 16 | operator: In 17 | values: 18 | - redis 19 | topologyKey: "kubernetes.io/hostname" 20 | containers: 21 | - image: gcr.io/google-containers/nginx 22 | resources: 23 | limits: 24 | memory: "12000Mi" 25 | cpu: "8000m" 26 | requests: 27 | memory: "4000Mi" 28 | cpu: "2000m" 29 | ephemeral-storage: "10Gi" 30 | command: ["/bin/sh","-c"] 31 | args: 32 | - apt-get update; 33 | apt-get install -y redis-server; 34 | apt-get install -y git libssl-dev build-essential autoconf 
automake libpcre3-dev libevent-dev pkg-config zlib1g-dev; 35 | git clone https://github.com/RedisLabs/memtier_benchmark.git; 36 | cd memtier_benchmark/; 37 | autoreconf -ivf; 38 | ./configure; 39 | make; 40 | sleep infinity; 41 | name: rediscontainer 42 | -------------------------------------------------------------------------------- /config/dp_redis_service/redis_deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | component: cache 6 | name: redis 7 | name: redis 8 | namespace: kbench-pod-namespace 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | name: redis 14 | template: 15 | metadata: 16 | labels: 17 | component: cache 18 | name: redis 19 | app: redis 20 | spec: 21 | affinity: 22 | podAntiAffinity: 23 | requiredDuringSchedulingIgnoredDuringExecution: 24 | - labelSelector: 25 | matchExpressions: 26 | - key: app 27 | operator: In 28 | values: 29 | - redis 30 | topologyKey: "kubernetes.io/hostname" 31 | containers: 32 | - args: 33 | env: 34 | - name: MASTER 35 | value: "true" 36 | image: gcr.io/google-containers/redis 37 | imagePullPolicy: Always 38 | name: redis 39 | ports: 40 | - containerPort: 6379 41 | name: redis 42 | resources: 43 | limits: 44 | memory: "12000Mi" 45 | cpu: "8000m" 46 | requests: 47 | memory: "4000Mi" 48 | cpu: "2000m" 49 | -------------------------------------------------------------------------------- /config/dp_redis_service/redis_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | component: cache 6 | name: redis 7 | name: redis 8 | namespace: kbench-pod-namespace 9 | spec: 10 | ports: 11 | - nodePort: 31115 12 | port: 6379 13 | protocol: TCP 14 | targetPort: 6379 15 | selector: 16 | name: redis 17 | type: LoadBalancer 18 | 
-------------------------------------------------------------------------------- /config/predicate_example/command_in_container_predicate.json: -------------------------------------------------------------------------------- 1 | { 2 | "BlockingLevel": "operation", 3 | "Timeout": 540000, 4 | "CheckingInterval": 3000, 5 | "Cleanup": true, 6 | "Operations": [ 7 | { 8 | "Predicate": { 9 | "Resource": "kube-system/pod/etcd-k8s-master/etcd", 10 | "Command": "ls", 11 | "Expect": "contains:etc" 12 | }, 13 | "Deployments": { 14 | "Actions": [ 15 | { 16 | "Act": "CREATE", 17 | "Spec": { 18 | "ImagePullPolicy": "IfNotPresent", 19 | "Image": "k8s.gcr.io/pause:3.1", 20 | "NumReplicas": 2, 21 | "Namespace": "test-simple" 22 | } 23 | }, 24 | { 25 | "Act": "DELETE", 26 | "Spec": { 27 | "Namespace": "test-simple" 28 | } 29 | } 30 | ], 31 | "SleepTimes": [ 32 | 10000, 33 | 3000 34 | ], 35 | "Count": 2 36 | }, 37 | "RepeatTimes": 0 38 | } 39 | ] 40 | } 41 | -------------------------------------------------------------------------------- /config/predicate_example/command_outside_container_and_resource_predicate.json: -------------------------------------------------------------------------------- 1 | { 2 | "BlockingLevel": "operation", 3 | "Timeout": 540000, 4 | "CheckingInterval": 3000, 5 | "Cleanup": false, 6 | "Operations": [ 7 | { 8 | "Predicate": { 9 | "Resource": "kube-system/pod/etcd-k8s-master", 10 | "Command": "pwd", 11 | "Expect": "!contains:root" 12 | }, 13 | "Deployments": { 14 | "Actions": [ 15 | { 16 | "Act": "CREATE", 17 | "Spec": { 18 | "ImagePullPolicy": "IfNotPresent", 19 | "Image": "k8s.gcr.io/pause:3.1", 20 | "NumReplicas": 2, 21 | "Namespace": "test-simple" 22 | } 23 | }, 24 | { 25 | "Act": "DELETE", 26 | "Spec": { 27 | "Namespace": "test-simple" 28 | } 29 | } 30 | ], 31 | "SleepTimes": [ 32 | 10000, 33 | 3000 34 | ], 35 | "Count": 2 36 | }, 37 | "RepeatTimes": 0 38 | } 39 | ] 40 | } 41 | 
-------------------------------------------------------------------------------- /config/predicate_example/group_version_resource_predicate.json: -------------------------------------------------------------------------------- 1 | { 2 | "BlockingLevel": "operation", 3 | "Timeout": 540000, 4 | "CheckingInterval": 3000, 5 | "Cleanup": true, 6 | "Operations": [ 7 | { 8 | "Predicate": { 9 | "Resource": "apps/v1/namespaces/kube-system/deployment/coredns" 10 | }, 11 | "Deployments": { 12 | "Actions": [ 13 | { 14 | "Act": "CREATE", 15 | "Spec": { 16 | "ImagePullPolicy": "IfNotPresent", 17 | "Image": "k8s.gcr.io/pause:3.1", 18 | "NumReplicas": 2, 19 | "Namespace": "test-simple" 20 | } 21 | }, 22 | { 23 | "Act": "DELETE", 24 | "Spec": { 25 | "Namespace": "test-simple" 26 | } 27 | } 28 | ], 29 | "SleepTimes": [ 30 | 10000, 31 | 3000 32 | ], 33 | "Count": 2 34 | }, 35 | "RepeatTimes": 0 36 | } 37 | ] 38 | } 39 | -------------------------------------------------------------------------------- /config/predicate_example/labelled_resources_predicate.json: -------------------------------------------------------------------------------- 1 | { 2 | "BlockingLevel": "operation", 3 | "Timeout": 540000, 4 | "CheckingInterval": 3000, 5 | "Cleanup": true, 6 | "Operations": [ 7 | { 8 | "Predicate": { 9 | "Resource": "test-namespace/pod", 10 | "Labels": "key1=value1;key2=value2" 11 | }, 12 | "Deployments": { 13 | "Actions": [ 14 | { 15 | "Act": "CREATE", 16 | "Spec": { 17 | "ImagePullPolicy": "IfNotPresent", 18 | "Image": "k8s.gcr.io/pause:3.1", 19 | "NumReplicas": 2, 20 | "Namespace": "test-simple" 21 | } 22 | }, 23 | { 24 | "Act": "DELETE", 25 | "Spec": { 26 | "Namespace": "test-simple" 27 | } 28 | } 29 | ], 30 | "SleepTimes": [ 31 | 10000, 32 | 3000 33 | ], 34 | "Count": 2 35 | }, 36 | "RepeatTimes": 0 37 | } 38 | ] 39 | } 40 | -------------------------------------------------------------------------------- /config/predicate_example/simple_resource_predicate.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "BlockingLevel": "operation", 3 | "Timeout": 540000, 4 | "CheckingInterval": 3000, 5 | "Cleanup": true, 6 | "Operations": [ 7 | { 8 | "Predicate": { 9 | "Resource": "kube-public/configmap/cluster-info" 10 | }, 11 | "Deployments": { 12 | "Actions": [ 13 | { 14 | "Act": "CREATE", 15 | "Spec": { 16 | "ImagePullPolicy": "IfNotPresent", 17 | "Image": "k8s.gcr.io/pause:3.1", 18 | "NumReplicas": 2, 19 | "Namespace": "test-simple" 20 | } 21 | }, 22 | { 23 | "Act": "DELETE", 24 | "Spec": { 25 | "Namespace": "test-simple" 26 | } 27 | } 28 | ], 29 | "SleepTimes": [ 30 | 10000, 31 | 3000 32 | ], 33 | "Count": 2 34 | }, 35 | "RepeatTimes": 0 36 | } 37 | ] 38 | } 39 | -------------------------------------------------------------------------------- /documentation/kbench-overview.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vmware-tanzu/k-bench/53a82d316effaaf562d81a7cd306bf5f0d40cfc6/documentation/kbench-overview.jpg -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module k-bench 2 | 3 | go 1.13 4 | 5 | require ( 6 | github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c // indirect 7 | github.com/gogo/protobuf v1.3.2 // indirect 8 | github.com/hashicorp/golang-lru v0.5.4 // indirect 9 | github.com/imdario/mergo v0.3.8 // indirect 10 | github.com/sirupsen/logrus v1.4.2 11 | golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect 12 | k8s.io/api v0.20.0-alpha.2 13 | k8s.io/apimachinery v0.20.0-alpha.2 14 | k8s.io/client-go v0.20.0-alpha.2 15 | ) 16 | -------------------------------------------------------------------------------- /infrastructure/gke/type.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019-2020 
VMware, Inc. 3 | 4 | SPDX-License-Identifier: Apache-2.0 5 | 6 | Licensed under the Apache License, Version 2.0 (the "License"); 7 | you may not use this file except in compliance with the License. 8 | You may obtain a copy of the License at 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package gke 18 | 19 | import () 20 | 21 | // TODO: add cluster provisioning support for gke 22 | -------------------------------------------------------------------------------- /infrastructure/vmware/type.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019-2020 VMware, Inc. 3 | 4 | SPDX-License-Identifier: Apache-2.0 5 | 6 | Licensed under the Apache License, Version 2.0 (the "License"); 7 | you may not use this file except in compliance with the License. 8 | You may obtain a copy of the License at 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | package vmware 18 | 19 | import () 20 | 21 | /* Below are structs for the VMware infrastructure part in benchmark config */ 22 | 23 | type Cidr struct { 24 | Address string 25 | Prefix int 26 | } 27 | 28 | type NetworkConfig struct { 29 | Mode string 30 | VcNetwork string 31 | Gateway string 32 | Start string 33 | Count int 34 | NetworkMask string 35 | } 36 | 37 | type EsxConfig struct { 38 | HostIp string 39 | UserName string 40 | Password string 41 | } 42 | 43 | type VsphereConfig struct { 44 | VcUser string 45 | VcPassword string 46 | VcIp string 47 | Cluster string 48 | Hosts []EsxConfig 49 | Size string 50 | MasterDNS []string 51 | WorkerDNS []string 52 | MasterNTP []string 53 | WorkerNTP []string 54 | MasterNetwork NetworkConfig 55 | ManagementNetwork NetworkConfig 56 | WorkerNetwork NetworkConfig 57 | MasterStore string // Only support using one datastore at this point 58 | WorkerStore string // Only support using one datastore at this point 59 | PodCidr Cidr 60 | ServiceCidr Cidr 61 | } 62 | 63 | /* Below are structs that represent json body in wcp request */ 64 | 65 | type WcpAddressRange struct { 66 | AddressCount int `json:"address_count"` 67 | Gateway string `json:"gateway"` 68 | StartingAddress string `json:"starting_address"` 69 | SubnetMask string `json:"subnet_mask"` 70 | } 71 | 72 | type WcpNetwork struct { 73 | Range WcpAddressRange `json:"address_range"` 74 | Mode string `json:"mode"` 75 | VcNetwork string `json:"network"` 76 | } 77 | 78 | type WcpCidr struct { 79 | Address string `json:"address"` 80 | Prefix int `json:"prefix"` 81 | } 82 | 83 | type WcpSpec struct { 84 | SizeHint string `json:"size_hint"` 85 | MasterDNS []string `json:"master_DNS"` 86 | MasterNTP []string `json:"master_NTP_servers"` 87 | MasterNetwork WcpNetwork `json:"master_cluster_network"` 88 | ManagementNetwork WcpNetwork `json:"master_management_network"` 89 | MasterStore []string `json:"master_storage"` 90 | PodCidr WcpCidr `json:"pod_cidr"` 91 | 
ServiceCidr WcpCidr `json:"service_cidr"` 92 | VxlanPort int `json:"vxlan_port"` 93 | WorkerDNS []string `json:"worker_DNS"` 94 | WorkerNTP []string `json:"worker_NTP_servers"` 95 | WorkerNetwork WcpNetwork `json:"worker_cluster_network"` 96 | WorkerStore []string `json:"worker_storage"` 97 | } 98 | 99 | type WcpConfig struct { 100 | Cluster string `json:"cluster"` 101 | Spec WcpSpec `json:"spec"` 102 | } 103 | 104 | /* Below are structs that capture json responses from vCenter Server */ 105 | 106 | type Cluster struct { 107 | Id string `json:"cluster"` 108 | Name string `json:"name"` 109 | DrsEnabled bool `json:"drs_enabled"` 110 | HaEnabled bool `json:"ha_enabled"` 111 | } 112 | 113 | type Clusters struct { 114 | AllClusters []Cluster `json:"value"` 115 | } 116 | 117 | type Datastore struct { 118 | Id string `json:"datastore"` 119 | Name string `json:"name"` 120 | Type string `json:"type"` 121 | FreeSpace int64 `json:"free_space"` 122 | Capacity int64 `json:"capacity"` 123 | } 124 | 125 | type Datastores struct { 126 | AllDatastores []Datastore `json:"value"` 127 | } 128 | 129 | type Network struct { 130 | Id string `json:"network"` 131 | Name string `json:"name"` 132 | Type string `json:"type"` 133 | } 134 | 135 | type Networks struct { 136 | AllNetworks []Network `json:"value"` 137 | } 138 | -------------------------------------------------------------------------------- /install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | GOVERSION="1.13.7" 4 | MY_GOPATH="$HOME/go" 5 | MY_GOROOT="$HOME/go-root/go" 6 | 7 | DIR="$( cd "$(dirname "$0")" ; pwd -P )"; 8 | 9 | set_env () { 10 | echo "Checking env..." 
11 | if [[ -z "${GOPATH}" ]]; then 12 | echo "Environment variable GOPATH not found, use $MY_GOPATH" 13 | else 14 | MY_GOPATH=$GOPATH 15 | fi 16 | if [[ -z "${GOROOT}" ]]; then 17 | echo "Environment variable GOROOT not found, use $MY_GOROOT" 18 | else 19 | MY_GOROOT=$GOROOT 20 | fi 21 | } 22 | 23 | install () { 24 | mkdir -p $MY_GOPATH/src/ 25 | cp -r $DIR $MY_GOPATH/src/ 26 | cd $MY_GOPATH/src/k-bench 27 | #echo "Getting benchmark's Go dependencies..." 28 | #go get -d ./... 29 | if [ -d "$MY_GOPATH/pkg/mod/github.com/kubernetes/kompose"* ]; then 30 | echo "Kompose already installed." 31 | else 32 | echo "Installing kompose..." 33 | if [ -f "$MY_GOROOT/bin/go" ]; then 34 | $MY_GOROOT/bin/go get -u github.com/kubernetes/kompose 35 | else 36 | go get -u github.com/kubernetes/kompose 37 | fi 38 | fi 39 | echo "Installing k-bench..." 40 | if [ -f "$MY_GOROOT/bin/go" ]; then 41 | $MY_GOROOT/bin/go install cmd/kbench.go 42 | else 43 | go install cmd/kbench.go 44 | fi 45 | 46 | #$MY_GOROOT/bin/go install cmd/kbench.go 47 | #go build ./kbench 48 | cp $MY_GOPATH/bin/kbench /usr/local/bin/ 49 | if [ $? -ne 0 ]; then 50 | echo "Tried to copy kbench to /usr/local/bin but failed. If you are not root, run from $MY_GOPATH/bin/kbench" 51 | fi 52 | echo "Completed k-bench installation. To rebuild the benchmark, just run \"go install cmd/kbench.go\"" 53 | } 54 | 55 | install_go () { 56 | GOFILE="go$GOVERSION.linux-amd64.tar.gz" 57 | echo "" 58 | if [ -d $MY_GOROOT ]; then 59 | echo "Installation directories already exist $MY_GOROOT" 60 | read -p "Would you like to overwrite (Y/n)? " -n 1 -r 61 | echo 62 | if [[ ! $REPLY =~ ^[Yy]$ ]]; then 63 | exit 1 64 | fi 65 | fi 66 | 67 | mkdir -p "$MY_GOROOT" "$MY_GOPATH" "$MY_GOPATH/src" "$MY_GOPATH/pkg" "$MY_GOPATH/bin" 68 | 69 | wget https://dl.google.com/go/$GOFILE -O $HOME/$GOFILE 70 | if [ $? -ne 0 ]; then 71 | echo "Go download failed! Exiting." 
72 | exit 1 73 | fi 74 | 75 | tar -C $(dirname $MY_GOROOT) -xzf $HOME/$GOFILE 76 | # remove go tarball 77 | if [ -f $HOME/$GOFILE ]; then 78 | rm $HOME/$GOFILE 79 | echo "$HOME/$GOFILE is removed" 80 | fi 81 | 82 | read -p "Would you like to add Go to your profile .bashrc (Y/n)? " -n 1 -r 83 | echo 84 | if [[ $REPLY =~ ^[Yy]$ ]]; then 85 | if grep -q "MY_GOROOT" "$HOME/.bashrc"; then 86 | echo "GOROOT already found in .bashrc." 87 | else 88 | cp -f "$HOME/.bashrc" "$HOME/.bashrc.bkp" 89 | 90 | touch "$HOME/.bashrc" 91 | { 92 | echo '' 93 | echo '# GOLANG' 94 | echo 'export GOROOT='$MY_GOROOT 95 | echo 'export GOPATH='$MY_GOPATH 96 | echo 'export GOBIN=$GOPATH/bin' 97 | echo 'export PATH=$PATH:$GOROOT/bin:$GOBIN' 98 | echo '' 99 | } >> "$HOME/.bashrc" 100 | fi 101 | fi 102 | source $HOME/.bashrc 103 | 104 | #cp $MY_GOROOT/bin/go /usr/local/bin/ 105 | } 106 | 107 | check () { 108 | if ! which $1 > /dev/null; then 109 | echo "$1 is not installed, let me try to install"; 110 | install_go 111 | else 112 | echo "$1 is installed" 113 | fi 114 | } 115 | 116 | set_env; 117 | 118 | echo "Start to install the benchmark and tools..."; 119 | 120 | check go; 121 | 122 | install; 123 | 124 | #echo "Start to install waverunner for host stats collection..." 125 | 126 | #sudo $dir/waverunner/install.sh; 127 | 128 | #if [ $? -ne 0 ]; then 129 | # echo "Waverunner installation failed, you can still run the benchmark. Exiting." 130 | # exit 1 131 | #fi 132 | -------------------------------------------------------------------------------- /manager/manager.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019-2020 VMware, Inc. 3 | 4 | SPDX-License-Identifier: Apache-2.0 5 | 6 | Licensed under the Apache License, Version 2.0 (the "License"); 7 | you may not use this file except in compliance with the License. 
8 | You may obtain a copy of the License at 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package manager 18 | 19 | import ( 20 | "fmt" 21 | "strconv" 22 | "strings" 23 | 24 | log "github.com/sirupsen/logrus" 25 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 26 | "k8s.io/apimachinery/pkg/fields" 27 | "k8s.io/apimachinery/pkg/labels" 28 | //"k8s.io/client-go/kubernetes" 29 | "k-bench/perf_util" 30 | "time" 31 | ) 32 | 33 | // Supported actions 34 | const ( 35 | CREATE_ACTION string = "create" 36 | DELETE_ACTION string = "delete" 37 | LIST_ACTION string = "list" 38 | GET_ACTION string = "get" 39 | UPDATE_ACTION string = "update" 40 | SCALE_ACTION string = "scale" 41 | RUN_ACTION string = "run" 42 | COPY_ACTION string = "copy" 43 | ) 44 | 45 | // Supported k8s resource kinds 46 | const ( 47 | POD string = "Pod" 48 | DEPLOYMENT string = "Deployment" 49 | STATEFUL_SET string = "StatefulSet" 50 | REPLICATION_CONTROLLER string = "ReplicationController" 51 | SERVICE string = "Service" 52 | NAMESPACE string = "Namespace" 53 | CONFIG_MAP string = "ConfigMap" 54 | ENDPOINTS string = "Endpoints" 55 | EVENT string = "Event" 56 | COMPONENT_STATUS string = "ComponentStatus" 57 | NODE string = "Node" 58 | LIMIT_RANGE string = "LimitRange" 59 | PERSISTENT_VOLUME_CLAIM string = "PersistentVolumeClaim" 60 | PERSISTENT_VOLUME string = "PersistentVolume" 61 | POD_TEMPLATE string = "PodTemplate" 62 | RESOURCE_QUOTA string = "ResourceQuota" 63 | SECRET string = "Secret" 64 | SERVICE_ACCOUNT string = "ServiceAccount" 65 | ROLE string = "Role" 66 | ROLE_BINDING string = "RoleBinding" 67 | CLUSTER_ROLE string = "ClusterRole" 68 | 
CLUSTER_ROLE_BINDING string = "ClusterRoleBinding" 69 | ) 70 | 71 | const ( 72 | ALL_OPERATION string = "all" 73 | CURR_OPERATION string = "current" 74 | ) 75 | 76 | var PodRelatedResources = map[string]bool{ 77 | POD: true, 78 | DEPLOYMENT: true, 79 | REPLICATION_CONTROLLER: true, 80 | STATEFUL_SET: true, 81 | } 82 | 83 | const AppName string = "kbench" 84 | 85 | type Manager interface { 86 | // Create the specified resource 87 | Create(spec interface{}) error 88 | 89 | // Delete the specified resource 90 | Delete(name interface{}) error 91 | 92 | // Calculate metrics 93 | CalculateStats() 94 | 95 | // Log stats for the manager 96 | LogStats() 97 | 98 | SendMetricToWavefront(now time.Time, wfTags []perf_util.WavefrontTag, wavefrontPathDir string, prefix string) 99 | 100 | // Delete all the resources created by this manager 101 | DeleteAll() error 102 | 103 | // TODO: add a method to report stats 104 | 105 | CalculateSuccessRate() int 106 | } 107 | 108 | type NewManagerFunc func() Manager 109 | 110 | var Managers = map[string]NewManagerFunc{ 111 | "Pod": NewPodManager, 112 | "Deployment": NewDeploymentManager, 113 | "StatefulSet": NewStatefulSetManager, 114 | "Namespace": NewNamespaceManager, 115 | "Service": NewServiceManager, 116 | "ReplicationController": NewReplicationControllerManager, 117 | "Resource": NewResourceManager, 118 | } 119 | 120 | type ActionSpec struct { 121 | Name string 122 | Tid int 123 | Oid int 124 | Namespace string 125 | LabelKey string 126 | LabelValue string 127 | MatchGoroutine bool 128 | MatchOperation string 129 | Kind string 130 | } 131 | 132 | type RunSpec struct { 133 | RunCommand string 134 | ActionFilter ActionSpec 135 | } 136 | 137 | type CopySpec struct { 138 | ParentOutDir string 139 | LocalPath string 140 | ContainerPath string 141 | Upload bool 142 | ActionFilter ActionSpec 143 | } 144 | 145 | func GetListOptions(s ActionSpec) metav1.ListOptions { 146 | filters := make(map[string]string, 0) 147 | if s.LabelKey != "" && 
s.LabelValue != "" { 148 | filters[s.LabelKey] = s.LabelValue 149 | } 150 | if s.MatchGoroutine == true { 151 | filters["tid"] = strconv.Itoa(s.Tid) 152 | } 153 | if strings.ToLower(s.MatchOperation) == CURR_OPERATION { 154 | filters["opnum"] = strconv.Itoa(s.Oid) 155 | } else if strings.ToLower(s.MatchOperation) == ALL_OPERATION { 156 | filters["app"] = AppName 157 | } 158 | if len(filters) == 0 { 159 | selector := fields.Set{"metadata.name": s.Name}.AsSelector().String() 160 | options := metav1.ListOptions{FieldSelector: selector} 161 | return options 162 | } else { 163 | selector := labels.Set(filters).AsSelector().String() 164 | options := metav1.ListOptions{LabelSelector: selector} 165 | return options 166 | } 167 | } 168 | 169 | func GetManager(managerName string) (Manager, error) { 170 | newManagerFn, ok := Managers[managerName] 171 | 172 | if !ok { 173 | log.Errorf("%s manager not found.", managerName) 174 | return nil, fmt.Errorf("No such provider: %s", managerName) 175 | } 176 | 177 | log.Infof("Created a new %s manager.", managerName) 178 | return newManagerFn(), nil 179 | } 180 | -------------------------------------------------------------------------------- /perf_util/perf_data.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019-2020 VMware, Inc. 3 | 4 | SPDX-License-Identifier: Apache-2.0 5 | 6 | Licensed under the Apache License, Version 2.0 (the "License"); 7 | you may not use this file except in compliance with the License. 8 | You may obtain a copy of the License at 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | package perf_util 18 | 19 | import ( 20 | "fmt" 21 | "strconv" 22 | "time" 23 | ) 24 | 25 | type WavefrontTag struct { 26 | Key string 27 | Value string 28 | } 29 | 30 | type WavefrontDataPoint struct { 31 | MetricName string 32 | MetricValue float32 33 | Timestamp time.Time 34 | Source string 35 | Tags []WavefrontTag 36 | } 37 | 38 | func (this WavefrontDataPoint) String() string { 39 | str := this.MetricName + " " + fmt.Sprintf("%.3f", this.MetricValue) + " " + strconv.FormatInt(this.Timestamp.Unix(), 10) + " source=" + this.Source 40 | for _, tag := range this.Tags { 41 | str += " " + tag.Key + "=" + tag.Value 42 | } 43 | return str 44 | } 45 | 46 | type LatencyMetric struct { 47 | Mid float32 48 | Min float32 49 | Max float32 50 | P99 float32 51 | } 52 | 53 | type OperationLatencyMetric struct { 54 | Latency LatencyMetric 55 | Valid bool 56 | } 57 | -------------------------------------------------------------------------------- /perf_util/perf_util.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019-2020 VMware, Inc. 3 | 4 | SPDX-License-Identifier: Apache-2.0 5 | 6 | Licensed under the Apache License, Version 2.0 (the "License"); 7 | you may not use this file except in compliance with the License. 8 | You may obtain a copy of the License at 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | package perf_util 18 | 19 | import ( 20 | log "github.com/sirupsen/logrus" 21 | "os" 22 | "strings" 23 | "time" 24 | ) 25 | 26 | const wavefrontInputFileNamePrefix = "kbench-wavefront-" 27 | 28 | func WriteDataPoints(now time.Time, points []WavefrontDataPoint, wavefrontPathDir string, prefix string) { 29 | //compose filename with timestamp 30 | timeFormat := now.Format("2006-01-02_15:04:05") 31 | wavefrontInputFileName := wavefrontPathDir + "/" + wavefrontInputFileNamePrefix + timeFormat + ".log" 32 | // fmt.Printf("Sending to wavefront ...\n") 33 | // Write performance metrics to telegraf file which will be processed by wavefront agent 34 | file, err := os.OpenFile(wavefrontInputFileName, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) 35 | if err != nil { 36 | log.Fatal(err) 37 | } 38 | 39 | var metricLines []string 40 | for _, point := range points { 41 | metricLine := point.String() 42 | if len(prefix) > 0 { 43 | metricLine = prefix + metricLine 44 | } 45 | metricLines = append(metricLines, metricLine) 46 | metricLines = append(metricLines, "\n") 47 | } 48 | file.WriteString(strings.Join(metricLines, "")) 49 | defer file.Close() 50 | } 51 | 52 | //Get hostname from url, for example, https://master.eng.vmware.com:8443, extract master.eng.vmware.com 53 | func GetHostnameFromUrl(url string) string { 54 | urlComponents := strings.Split(url, "https://") 55 | hostnameAndPorts := strings.Split(urlComponents[1], ":") 56 | hostname := hostnameAndPorts[0] 57 | return hostname 58 | } 59 | -------------------------------------------------------------------------------- /pkg/prometheus/README.md: -------------------------------------------------------------------------------- 1 | # Prometheus Integration 2 | This directory contains coreos' prometheus for monitoring kubernetes clusters using 3 | Prometheus Operators. 
To setup and enable prometheus monitoring before running your 4 | workload, include the below _PrometheusManifestPaths_ configuration option in your 5 | benchmark config file: 6 | 7 | ``` 8 | "PrometheusManifestPaths": [ 9 | "pkg/prometheus/manifests/setup", 10 | "pkg/prometheus/manifests" 11 | ], 12 | ``` 13 | 14 | For general instructions and more details on kube-prometheus, please check: 15 | [kube-prometheus](https://github.com/coreos/kube-prometheus). 16 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/alertmanager-alertmanager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: Alertmanager 3 | metadata: 4 | labels: 5 | alertmanager: main 6 | name: main 7 | namespace: monitoring 8 | spec: 9 | image: quay.io/prometheus/alertmanager:v0.20.0 10 | nodeSelector: 11 | kubernetes.io/os: linux 12 | replicas: 3 13 | securityContext: 14 | fsGroup: 2000 15 | runAsNonRoot: true 16 | runAsUser: 1000 17 | serviceAccountName: alertmanager-main 18 | version: v0.20.0 19 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/alertmanager-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: {} 3 | kind: Secret 4 | metadata: 5 | name: alertmanager-main 6 | namespace: monitoring 7 | stringData: 8 | alertmanager.yaml: |- 9 | "global": 10 | "resolve_timeout": "5m" 11 | "inhibit_rules": 12 | - "equal": 13 | - "namespace" 14 | - "alertname" 15 | "source_match": 16 | "severity": "critical" 17 | "target_match_re": 18 | "severity": "warning|info" 19 | - "equal": 20 | - "namespace" 21 | - "alertname" 22 | "source_match": 23 | "severity": "warning" 24 | "target_match_re": 25 | "severity": "info" 26 | "receivers": 27 | - "name": "Default" 28 | - "name": "Watchdog" 29 | - "name": "Critical" 30 | "route": 31 | "group_by": 32 | 
- "namespace" 33 | "group_interval": "5m" 34 | "group_wait": "30s" 35 | "receiver": "Default" 36 | "repeat_interval": "12h" 37 | "routes": 38 | - "match": 39 | "alertname": "Watchdog" 40 | "receiver": "Watchdog" 41 | - "match": 42 | "severity": "critical" 43 | "receiver": "Critical" 44 | type: Opaque 45 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/alertmanager-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | alertmanager: main 6 | name: alertmanager-main 7 | namespace: monitoring 8 | spec: 9 | ports: 10 | - name: web 11 | port: 9093 12 | targetPort: web 13 | selector: 14 | alertmanager: main 15 | app: alertmanager 16 | sessionAffinity: ClientIP 17 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/alertmanager-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: alertmanager-main 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/alertmanager-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: alertmanager 6 | name: alertmanager 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - interval: 30s 11 | port: web 12 | selector: 13 | matchLabels: 14 | alertmanager: main 15 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/grafana-dashboardDatasources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | datasources.yaml: 
ewogICAgImFwaVZlcnNpb24iOiAxLAogICAgImRhdGFzb3VyY2VzIjogWwogICAgICAgIHsKICAgICAgICAgICAgImFjY2VzcyI6ICJwcm94eSIsCiAgICAgICAgICAgICJlZGl0YWJsZSI6IGZhbHNlLAogICAgICAgICAgICAibmFtZSI6ICJwcm9tZXRoZXVzIiwKICAgICAgICAgICAgIm9yZ0lkIjogMSwKICAgICAgICAgICAgInR5cGUiOiAicHJvbWV0aGV1cyIsCiAgICAgICAgICAgICJ1cmwiOiAiaHR0cDovL3Byb21ldGhldXMtazhzLm1vbml0b3Jpbmcuc3ZjOjkwOTAiLAogICAgICAgICAgICAidmVyc2lvbiI6IDEKICAgICAgICB9CiAgICBdCn0= 4 | kind: Secret 5 | metadata: 6 | name: grafana-datasources 7 | namespace: monitoring 8 | type: Opaque 9 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/grafana-dashboardSources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | dashboards.yaml: |- 4 | { 5 | "apiVersion": 1, 6 | "providers": [ 7 | { 8 | "folder": "Default", 9 | "name": "0", 10 | "options": { 11 | "path": "/grafana-dashboard-definitions/0" 12 | }, 13 | "orgId": 1, 14 | "type": "file" 15 | } 16 | ] 17 | } 18 | kind: ConfigMap 19 | metadata: 20 | name: grafana-dashboards 21 | namespace: monitoring 22 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/grafana-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: grafana 6 | name: grafana 7 | namespace: monitoring 8 | spec: 9 | ports: 10 | - name: http 11 | port: 3000 12 | targetPort: http 13 | selector: 14 | app: grafana 15 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/grafana-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: grafana 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- 
/pkg/prometheus/manifests/grafana-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | name: grafana 5 | namespace: monitoring 6 | spec: 7 | endpoints: 8 | - interval: 15s 9 | port: http 10 | selector: 11 | matchLabels: 12 | app: grafana 13 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/kube-state-metrics-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-state-metrics 6 | app.kubernetes.io/version: 1.9.5 7 | name: kube-state-metrics 8 | rules: 9 | - apiGroups: 10 | - "" 11 | resources: 12 | - configmaps 13 | - secrets 14 | - nodes 15 | - pods 16 | - services 17 | - resourcequotas 18 | - replicationcontrollers 19 | - limitranges 20 | - persistentvolumeclaims 21 | - persistentvolumes 22 | - namespaces 23 | - endpoints 24 | verbs: 25 | - list 26 | - watch 27 | - apiGroups: 28 | - extensions 29 | resources: 30 | - daemonsets 31 | - deployments 32 | - replicasets 33 | - ingresses 34 | verbs: 35 | - list 36 | - watch 37 | - apiGroups: 38 | - apps 39 | resources: 40 | - statefulsets 41 | - daemonsets 42 | - deployments 43 | - replicasets 44 | verbs: 45 | - list 46 | - watch 47 | - apiGroups: 48 | - batch 49 | resources: 50 | - cronjobs 51 | - jobs 52 | verbs: 53 | - list 54 | - watch 55 | - apiGroups: 56 | - autoscaling 57 | resources: 58 | - horizontalpodautoscalers 59 | verbs: 60 | - list 61 | - watch 62 | - apiGroups: 63 | - authentication.k8s.io 64 | resources: 65 | - tokenreviews 66 | verbs: 67 | - create 68 | - apiGroups: 69 | - authorization.k8s.io 70 | resources: 71 | - subjectaccessreviews 72 | verbs: 73 | - create 74 | - apiGroups: 75 | - policy 76 | resources: 77 | - poddisruptionbudgets 78 | 
verbs: 79 | - list 80 | - watch 81 | - apiGroups: 82 | - certificates.k8s.io 83 | resources: 84 | - certificatesigningrequests 85 | verbs: 86 | - list 87 | - watch 88 | - apiGroups: 89 | - storage.k8s.io 90 | resources: 91 | - storageclasses 92 | - volumeattachments 93 | verbs: 94 | - list 95 | - watch 96 | - apiGroups: 97 | - admissionregistration.k8s.io 98 | resources: 99 | - mutatingwebhookconfigurations 100 | - validatingwebhookconfigurations 101 | verbs: 102 | - list 103 | - watch 104 | - apiGroups: 105 | - networking.k8s.io 106 | resources: 107 | - networkpolicies 108 | verbs: 109 | - list 110 | - watch 111 | - apiGroups: 112 | - coordination.k8s.io 113 | resources: 114 | - leases 115 | verbs: 116 | - list 117 | - watch 118 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/kube-state-metrics-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-state-metrics 6 | app.kubernetes.io/version: 1.9.5 7 | name: kube-state-metrics 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: ClusterRole 11 | name: kube-state-metrics 12 | subjects: 13 | - kind: ServiceAccount 14 | name: kube-state-metrics 15 | namespace: monitoring 16 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/kube-state-metrics-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-state-metrics 6 | app.kubernetes.io/version: 1.9.5 7 | name: kube-state-metrics 8 | namespace: monitoring 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | app.kubernetes.io/name: kube-state-metrics 14 | template: 15 | metadata: 16 | labels: 17 | 
app.kubernetes.io/name: kube-state-metrics 18 | app.kubernetes.io/version: 1.9.5 19 | spec: 20 | containers: 21 | - args: 22 | - --host=127.0.0.1 23 | - --port=8081 24 | - --telemetry-host=127.0.0.1 25 | - --telemetry-port=8082 26 | image: quay.io/coreos/kube-state-metrics:v1.9.5 27 | name: kube-state-metrics 28 | securityContext: 29 | runAsUser: 65534 30 | - args: 31 | - --logtostderr 32 | - --secure-listen-address=:8443 33 | - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 34 | - --upstream=http://127.0.0.1:8081/ 35 | image: quay.io/coreos/kube-rbac-proxy:v0.4.1 36 | name: kube-rbac-proxy-main 37 | ports: 38 | - containerPort: 8443 39 | name: https-main 40 | securityContext: 41 | runAsUser: 65534 42 | - args: 43 | - --logtostderr 44 | - --secure-listen-address=:9443 45 | - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 46 | - --upstream=http://127.0.0.1:8082/ 47 | image: quay.io/coreos/kube-rbac-proxy:v0.4.1 48 | name: kube-rbac-proxy-self 49 | ports: 50 | - containerPort: 9443 51 | name: https-self 52 | securityContext: 53 | runAsUser: 65534 54 | nodeSelector: 55 | kubernetes.io/os: linux 56 | serviceAccountName: kube-state-metrics 57 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/kube-state-metrics-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-state-metrics 6 | app.kubernetes.io/version: 1.9.5 7 | name: kube-state-metrics 8 | namespace: monitoring 9 | spec: 10 | clusterIP: None 11 | ports: 12 | - name: https-main 13 | port: 8443 14 | 
targetPort: https-main 15 | - name: https-self 16 | port: 9443 17 | targetPort: https-self 18 | selector: 19 | app.kubernetes.io/name: kube-state-metrics 20 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/kube-state-metrics-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-state-metrics 6 | app.kubernetes.io/version: 1.9.5 7 | name: kube-state-metrics 8 | namespace: monitoring 9 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/kube-state-metrics-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-state-metrics 6 | app.kubernetes.io/version: 1.9.5 7 | name: kube-state-metrics 8 | namespace: monitoring 9 | spec: 10 | endpoints: 11 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 12 | honorLabels: true 13 | interval: 30s 14 | port: https-main 15 | relabelings: 16 | - action: labeldrop 17 | regex: (pod|service|endpoint|namespace) 18 | scheme: https 19 | scrapeTimeout: 30s 20 | tlsConfig: 21 | insecureSkipVerify: true 22 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 23 | interval: 30s 24 | port: https-self 25 | scheme: https 26 | tlsConfig: 27 | insecureSkipVerify: true 28 | jobLabel: app.kubernetes.io/name 29 | selector: 30 | matchLabels: 31 | app.kubernetes.io/name: kube-state-metrics 32 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/node-exporter-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | 
metadata: 4 | name: node-exporter 5 | rules: 6 | - apiGroups: 7 | - authentication.k8s.io 8 | resources: 9 | - tokenreviews 10 | verbs: 11 | - create 12 | - apiGroups: 13 | - authorization.k8s.io 14 | resources: 15 | - subjectaccessreviews 16 | verbs: 17 | - create 18 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/node-exporter-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: node-exporter 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: node-exporter 9 | subjects: 10 | - kind: ServiceAccount 11 | name: node-exporter 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/node-exporter-daemonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: node-exporter 6 | app.kubernetes.io/version: v0.18.1 7 | name: node-exporter 8 | namespace: monitoring 9 | spec: 10 | selector: 11 | matchLabels: 12 | app.kubernetes.io/name: node-exporter 13 | app.kubernetes.io/version: v0.18.1 14 | template: 15 | metadata: 16 | labels: 17 | app.kubernetes.io/name: node-exporter 18 | app.kubernetes.io/version: v0.18.1 19 | spec: 20 | containers: 21 | - args: 22 | - --web.listen-address=127.0.0.1:9100 23 | - --path.procfs=/host/proc 24 | - --path.sysfs=/host/sys 25 | - --path.rootfs=/host/root 26 | - --no-collector.wifi 27 | - --no-collector.hwmon 28 | - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/) 29 | - 
--collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$ 30 | image: quay.io/prometheus/node-exporter:v0.18.1 31 | name: node-exporter 32 | resources: 33 | limits: 34 | cpu: 250m 35 | memory: 180Mi 36 | requests: 37 | cpu: 102m 38 | memory: 180Mi 39 | volumeMounts: 40 | - mountPath: /host/proc 41 | name: proc 42 | readOnly: false 43 | - mountPath: /host/sys 44 | name: sys 45 | readOnly: false 46 | - mountPath: /host/root 47 | mountPropagation: HostToContainer 48 | name: root 49 | readOnly: true 50 | - args: 51 | - --logtostderr 52 | - --secure-listen-address=[$(IP)]:9100 53 | - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 54 | - --upstream=http://127.0.0.1:9100/ 55 | env: 56 | - name: IP 57 | valueFrom: 58 | fieldRef: 59 | fieldPath: status.podIP 60 | image: quay.io/coreos/kube-rbac-proxy:v0.4.1 61 | name: kube-rbac-proxy 62 | ports: 63 | - containerPort: 9100 64 | hostPort: 9100 65 | name: https 66 | resources: 67 | limits: 68 | cpu: 20m 69 | memory: 40Mi 70 | requests: 71 | cpu: 10m 72 | memory: 20Mi 73 | hostNetwork: true 74 | hostPID: true 75 | nodeSelector: 76 | kubernetes.io/os: linux 77 | securityContext: 78 | runAsNonRoot: true 79 | runAsUser: 65534 80 | serviceAccountName: node-exporter 81 | tolerations: 82 | - operator: Exists 83 | volumes: 84 | - hostPath: 85 | path: /proc 86 | name: proc 87 | - hostPath: 88 | path: /sys 89 | name: sys 90 | - hostPath: 91 | path: / 92 | name: root 93 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/node-exporter-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | 
app.kubernetes.io/name: node-exporter 6 | app.kubernetes.io/version: v0.18.1 7 | name: node-exporter 8 | namespace: monitoring 9 | spec: 10 | clusterIP: None 11 | ports: 12 | - name: https 13 | port: 9100 14 | targetPort: https 15 | selector: 16 | app.kubernetes.io/name: node-exporter 17 | app.kubernetes.io/version: v0.18.1 18 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/node-exporter-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: node-exporter 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/node-exporter-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: node-exporter 6 | app.kubernetes.io/version: v0.18.1 7 | name: node-exporter 8 | namespace: monitoring 9 | spec: 10 | endpoints: 11 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 12 | interval: 15s 13 | port: https 14 | relabelings: 15 | - action: replace 16 | regex: (.*) 17 | replacement: $1 18 | sourceLabels: 19 | - __meta_kubernetes_pod_node_name 20 | targetLabel: instance 21 | scheme: https 22 | tlsConfig: 23 | insecureSkipVerify: true 24 | jobLabel: app.kubernetes.io/name 25 | selector: 26 | matchLabels: 27 | app.kubernetes.io/name: node-exporter 28 | app.kubernetes.io/version: v0.18.1 29 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/prometheus-adapter-apiService.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiregistration.k8s.io/v1 2 | kind: APIService 3 | metadata: 4 | name: v1beta1.metrics.k8s.io 5 | spec: 6 | group: 
metrics.k8s.io 7 | groupPriorityMinimum: 100 8 | insecureSkipTLSVerify: true 9 | service: 10 | name: prometheus-adapter 11 | namespace: monitoring 12 | version: v1beta1 13 | versionPriority: 100 14 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/prometheus-adapter-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: prometheus-adapter 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - nodes 10 | - namespaces 11 | - pods 12 | - services 13 | verbs: 14 | - get 15 | - list 16 | - watch 17 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/prometheus-adapter-clusterRoleAggregatedMetricsReader.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | rbac.authorization.k8s.io/aggregate-to-admin: "true" 6 | rbac.authorization.k8s.io/aggregate-to-edit: "true" 7 | rbac.authorization.k8s.io/aggregate-to-view: "true" 8 | name: system:aggregated-metrics-reader 9 | rules: 10 | - apiGroups: 11 | - metrics.k8s.io 12 | resources: 13 | - pods 14 | - nodes 15 | verbs: 16 | - get 17 | - list 18 | - watch 19 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/prometheus-adapter-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: prometheus-adapter 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: prometheus-adapter 9 | subjects: 10 | - kind: ServiceAccount 11 | name: prometheus-adapter 12 | namespace: monitoring 13 | 
-------------------------------------------------------------------------------- /pkg/prometheus/manifests/prometheus-adapter-clusterRoleBindingDelegator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: resource-metrics:system:auth-delegator 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: system:auth-delegator 9 | subjects: 10 | - kind: ServiceAccount 11 | name: prometheus-adapter 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/prometheus-adapter-clusterRoleServerResources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: resource-metrics-server-resources 5 | rules: 6 | - apiGroups: 7 | - metrics.k8s.io 8 | resources: 9 | - '*' 10 | verbs: 11 | - '*' 12 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/prometheus-adapter-configMap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | config.yaml: | 4 | resourceRules: 5 | cpu: 6 | containerQuery: sum(irate(container_cpu_usage_seconds_total{<<.LabelMatchers>>,container!="POD",container!="",pod!=""}[5m])) by (<<.GroupBy>>) 7 | nodeQuery: sum(1 - irate(node_cpu_seconds_total{mode="idle"}[5m]) * on(namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{<<.LabelMatchers>>}) by (<<.GroupBy>>) 8 | resources: 9 | overrides: 10 | node: 11 | resource: node 12 | namespace: 13 | resource: namespace 14 | pod: 15 | resource: pod 16 | containerLabel: container 17 | memory: 18 | containerQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>,container!="POD",container!="",pod!=""}) by (<<.GroupBy>>) 19 
| nodeQuery: sum(node_memory_MemTotal_bytes{job="node-exporter",<<.LabelMatchers>>} - node_memory_MemAvailable_bytes{job="node-exporter",<<.LabelMatchers>>}) by (<<.GroupBy>>) 20 | resources: 21 | overrides: 22 | instance: 23 | resource: node 24 | namespace: 25 | resource: namespace 26 | pod: 27 | resource: pod 28 | containerLabel: container 29 | window: 5m 30 | kind: ConfigMap 31 | metadata: 32 | name: adapter-config 33 | namespace: monitoring 34 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/prometheus-adapter-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: prometheus-adapter 5 | namespace: monitoring 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | name: prometheus-adapter 11 | strategy: 12 | rollingUpdate: 13 | maxSurge: 1 14 | maxUnavailable: 0 15 | template: 16 | metadata: 17 | labels: 18 | name: prometheus-adapter 19 | spec: 20 | containers: 21 | - args: 22 | - --cert-dir=/var/run/serving-cert 23 | - --config=/etc/adapter/config.yaml 24 | - --logtostderr=true 25 | - --metrics-relist-interval=1m 26 | - --prometheus-url=http://prometheus-k8s.monitoring.svc:9090/ 27 | - --secure-port=6443 28 | image: quay.io/coreos/k8s-prometheus-adapter-amd64:v0.5.0 29 | name: prometheus-adapter 30 | ports: 31 | - containerPort: 6443 32 | volumeMounts: 33 | - mountPath: /tmp 34 | name: tmpfs 35 | readOnly: false 36 | - mountPath: /var/run/serving-cert 37 | name: volume-serving-cert 38 | readOnly: false 39 | - mountPath: /etc/adapter 40 | name: config 41 | readOnly: false 42 | nodeSelector: 43 | kubernetes.io/os: linux 44 | serviceAccountName: prometheus-adapter 45 | volumes: 46 | - emptyDir: {} 47 | name: tmpfs 48 | - emptyDir: {} 49 | name: volume-serving-cert 50 | - configMap: 51 | name: adapter-config 52 | name: config 53 | 
-------------------------------------------------------------------------------- /pkg/prometheus/manifests/prometheus-adapter-roleBindingAuthReader.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: resource-metrics-auth-reader 5 | namespace: kube-system 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: extension-apiserver-authentication-reader 10 | subjects: 11 | - kind: ServiceAccount 12 | name: prometheus-adapter 13 | namespace: monitoring 14 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/prometheus-adapter-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | name: prometheus-adapter 6 | name: prometheus-adapter 7 | namespace: monitoring 8 | spec: 9 | ports: 10 | - name: https 11 | port: 443 12 | targetPort: 6443 13 | selector: 14 | name: prometheus-adapter 15 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/prometheus-adapter-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: prometheus-adapter 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/prometheus-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: prometheus-k8s 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - nodes/metrics 10 | verbs: 11 | - get 12 | - nonResourceURLs: 13 | - /metrics 14 | verbs: 15 | - get 16 | 
-------------------------------------------------------------------------------- /pkg/prometheus/manifests/prometheus-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: prometheus-k8s 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: prometheus-k8s 9 | subjects: 10 | - kind: ServiceAccount 11 | name: prometheus-k8s 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/prometheus-operator-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/version: v0.38.0 8 | name: prometheus-operator 9 | namespace: monitoring 10 | spec: 11 | endpoints: 12 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 13 | honorLabels: true 14 | port: https 15 | scheme: https 16 | tlsConfig: 17 | insecureSkipVerify: true 18 | selector: 19 | matchLabels: 20 | app.kubernetes.io/component: controller 21 | app.kubernetes.io/name: prometheus-operator 22 | app.kubernetes.io/version: v0.38.0 23 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/prometheus-prometheus.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: Prometheus 3 | metadata: 4 | labels: 5 | prometheus: k8s 6 | name: k8s 7 | namespace: monitoring 8 | spec: 9 | alerting: 10 | alertmanagers: 11 | - name: alertmanager-main 12 | namespace: monitoring 13 | port: web 14 | image: quay.io/prometheus/prometheus:v2.15.2 15 | nodeSelector: 16 | kubernetes.io/os: 
linux 17 | podMonitorNamespaceSelector: {} 18 | podMonitorSelector: {} 19 | replicas: 2 20 | resources: 21 | requests: 22 | memory: 400Mi 23 | ruleSelector: 24 | matchLabels: 25 | prometheus: k8s 26 | role: alert-rules 27 | securityContext: 28 | fsGroup: 2000 29 | runAsNonRoot: true 30 | runAsUser: 1000 31 | serviceAccountName: prometheus-k8s 32 | serviceMonitorNamespaceSelector: {} 33 | serviceMonitorSelector: {} 34 | version: v2.15.2 35 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/prometheus-roleBindingConfig.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: prometheus-k8s-config 5 | namespace: monitoring 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: prometheus-k8s-config 10 | subjects: 11 | - kind: ServiceAccount 12 | name: prometheus-k8s 13 | namespace: monitoring 14 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/prometheus-roleBindingSpecificNamespaces.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | items: 3 | - apiVersion: rbac.authorization.k8s.io/v1 4 | kind: RoleBinding 5 | metadata: 6 | name: prometheus-k8s 7 | namespace: default 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: Role 11 | name: prometheus-k8s 12 | subjects: 13 | - kind: ServiceAccount 14 | name: prometheus-k8s 15 | namespace: monitoring 16 | - apiVersion: rbac.authorization.k8s.io/v1 17 | kind: RoleBinding 18 | metadata: 19 | name: prometheus-k8s 20 | namespace: kube-system 21 | roleRef: 22 | apiGroup: rbac.authorization.k8s.io 23 | kind: Role 24 | name: prometheus-k8s 25 | subjects: 26 | - kind: ServiceAccount 27 | name: prometheus-k8s 28 | namespace: monitoring 29 | - apiVersion: 
rbac.authorization.k8s.io/v1 30 | kind: RoleBinding 31 | metadata: 32 | name: prometheus-k8s 33 | namespace: monitoring 34 | roleRef: 35 | apiGroup: rbac.authorization.k8s.io 36 | kind: Role 37 | name: prometheus-k8s 38 | subjects: 39 | - kind: ServiceAccount 40 | name: prometheus-k8s 41 | namespace: monitoring 42 | kind: RoleBindingList 43 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/prometheus-roleConfig.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: prometheus-k8s-config 5 | namespace: monitoring 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | verbs: 12 | - get 13 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/prometheus-roleSpecificNamespaces.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | items: 3 | - apiVersion: rbac.authorization.k8s.io/v1 4 | kind: Role 5 | metadata: 6 | name: prometheus-k8s 7 | namespace: default 8 | rules: 9 | - apiGroups: 10 | - "" 11 | resources: 12 | - services 13 | - endpoints 14 | - pods 15 | verbs: 16 | - get 17 | - list 18 | - watch 19 | - apiVersion: rbac.authorization.k8s.io/v1 20 | kind: Role 21 | metadata: 22 | name: prometheus-k8s 23 | namespace: kube-system 24 | rules: 25 | - apiGroups: 26 | - "" 27 | resources: 28 | - services 29 | - endpoints 30 | - pods 31 | verbs: 32 | - get 33 | - list 34 | - watch 35 | - apiVersion: rbac.authorization.k8s.io/v1 36 | kind: Role 37 | metadata: 38 | name: prometheus-k8s 39 | namespace: monitoring 40 | rules: 41 | - apiGroups: 42 | - "" 43 | resources: 44 | - services 45 | - endpoints 46 | - pods 47 | verbs: 48 | - get 49 | - list 50 | - watch 51 | kind: RoleList 52 | 
-------------------------------------------------------------------------------- /pkg/prometheus/manifests/prometheus-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | prometheus: k8s 6 | name: prometheus-k8s 7 | namespace: monitoring 8 | spec: 9 | ports: 10 | - name: web 11 | port: 9090 12 | targetPort: web 13 | selector: 14 | app: prometheus 15 | prometheus: k8s 16 | sessionAffinity: ClientIP 17 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/prometheus-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: prometheus-k8s 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/prometheus-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: prometheus 6 | name: prometheus 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - interval: 30s 11 | port: web 12 | selector: 13 | matchLabels: 14 | prometheus: k8s 15 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/prometheus-serviceMonitorCoreDNS.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: coredns 6 | name: coredns 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 11 | interval: 15s 12 | port: metrics 13 | jobLabel: k8s-app 14 | namespaceSelector: 15 | matchNames: 16 | - kube-system 17 | selector: 18 | matchLabels: 19 | 
k8s-app: kube-dns 20 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/prometheus-serviceMonitorKubeControllerManager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: kube-controller-manager 6 | name: kube-controller-manager 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - interval: 30s 11 | metricRelabelings: 12 | - action: drop 13 | regex: kubelet_(pod_worker_latency_microseconds|pod_start_latency_microseconds|cgroup_manager_latency_microseconds|pod_worker_start_latency_microseconds|pleg_relist_latency_microseconds|pleg_relist_interval_microseconds|runtime_operations|runtime_operations_latency_microseconds|runtime_operations_errors|eviction_stats_age_microseconds|device_plugin_registration_count|device_plugin_alloc_latency_microseconds|network_plugin_operations_latency_microseconds) 14 | sourceLabels: 15 | - __name__ 16 | - action: drop 17 | regex: scheduler_(e2e_scheduling_latency_microseconds|scheduling_algorithm_predicate_evaluation|scheduling_algorithm_priority_evaluation|scheduling_algorithm_preemption_evaluation|scheduling_algorithm_latency_microseconds|binding_latency_microseconds|scheduling_latency_seconds) 18 | sourceLabels: 19 | - __name__ 20 | - action: drop 21 | regex: apiserver_(request_count|request_latencies|request_latencies_summary|dropped_requests|storage_data_key_generation_latencies_microseconds|storage_transformation_failures_total|storage_transformation_latencies_microseconds|proxy_tunnel_sync_latency_secs) 22 | sourceLabels: 23 | - __name__ 24 | - action: drop 25 | regex: kubelet_docker_(operations|operations_latency_microseconds|operations_errors|operations_timeout) 26 | sourceLabels: 27 | - __name__ 28 | - action: drop 29 | regex: 
reflector_(items_per_list|items_per_watch|list_duration_seconds|lists_total|short_watches_total|watch_duration_seconds|watches_total) 30 | sourceLabels: 31 | - __name__ 32 | - action: drop 33 | regex: etcd_(helper_cache_hit_count|helper_cache_miss_count|helper_cache_entry_count|request_cache_get_latencies_summary|request_cache_add_latencies_summary|request_latencies_summary) 34 | sourceLabels: 35 | - __name__ 36 | - action: drop 37 | regex: transformation_(transformation_latencies_microseconds|failures_total) 38 | sourceLabels: 39 | - __name__ 40 | - action: drop 41 | regex: (admission_quota_controller_adds|crd_autoregistration_controller_work_duration|APIServiceOpenAPIAggregationControllerQueue1_adds|AvailableConditionController_retries|crd_openapi_controller_unfinished_work_seconds|APIServiceRegistrationController_retries|admission_quota_controller_longest_running_processor_microseconds|crdEstablishing_longest_running_processor_microseconds|crdEstablishing_unfinished_work_seconds|crd_openapi_controller_adds|crd_autoregistration_controller_retries|crd_finalizer_queue_latency|AvailableConditionController_work_duration|non_structural_schema_condition_controller_depth|crd_autoregistration_controller_unfinished_work_seconds|AvailableConditionController_adds|DiscoveryController_longest_running_processor_microseconds|autoregister_queue_latency|crd_autoregistration_controller_adds|non_structural_schema_condition_controller_work_duration|APIServiceRegistrationController_adds|crd_finalizer_work_duration|crd_naming_condition_controller_unfinished_work_seconds|crd_openapi_controller_longest_running_processor_microseconds|DiscoveryController_adds|crd_autoregistration_controller_longest_running_processor_microseconds|autoregister_unfinished_work_seconds|crd_naming_condition_controller_queue_latency|crd_naming_condition_controller_retries|non_structural_schema_condition_controller_queue_latency|crd_naming_condition_controller_depth|AvailableConditionController_longest_running_pr
ocessor_microseconds|crdEstablishing_depth|crd_finalizer_longest_running_processor_microseconds|crd_naming_condition_controller_adds|APIServiceOpenAPIAggregationControllerQueue1_longest_running_processor_microseconds|DiscoveryController_queue_latency|DiscoveryController_unfinished_work_seconds|crd_openapi_controller_depth|APIServiceOpenAPIAggregationControllerQueue1_queue_latency|APIServiceOpenAPIAggregationControllerQueue1_unfinished_work_seconds|DiscoveryController_work_duration|autoregister_adds|crd_autoregistration_controller_queue_latency|crd_finalizer_retries|AvailableConditionController_unfinished_work_seconds|autoregister_longest_running_processor_microseconds|non_structural_schema_condition_controller_unfinished_work_seconds|APIServiceOpenAPIAggregationControllerQueue1_depth|AvailableConditionController_depth|DiscoveryController_retries|admission_quota_controller_depth|crdEstablishing_adds|APIServiceOpenAPIAggregationControllerQueue1_retries|crdEstablishing_queue_latency|non_structural_schema_condition_controller_longest_running_processor_microseconds|autoregister_work_duration|crd_openapi_controller_retries|APIServiceRegistrationController_work_duration|crdEstablishing_work_duration|crd_finalizer_adds|crd_finalizer_depth|crd_openapi_controller_queue_latency|APIServiceOpenAPIAggregationControllerQueue1_work_duration|APIServiceRegistrationController_queue_latency|crd_autoregistration_controller_depth|AvailableConditionController_queue_latency|admission_quota_controller_queue_latency|crd_naming_condition_controller_work_duration|crd_openapi_controller_work_duration|DiscoveryController_depth|crd_naming_condition_controller_longest_running_processor_microseconds|APIServiceRegistrationController_depth|APIServiceRegistrationController_longest_running_processor_microseconds|crd_finalizer_unfinished_work_seconds|crdEstablishing_retries|admission_quota_controller_unfinished_work_seconds|non_structural_schema_condition_controller_adds|APIServiceRegistrationController
_unfinished_work_seconds|admission_quota_controller_work_duration|autoregister_depth|autoregister_retries|kubeproxy_sync_proxy_rules_latency_microseconds|rest_client_request_latency_seconds|non_structural_schema_condition_controller_retries) 42 | sourceLabels: 43 | - __name__ 44 | - action: drop 45 | regex: etcd_(debugging|disk|request|server).* 46 | sourceLabels: 47 | - __name__ 48 | port: http-metrics 49 | jobLabel: k8s-app 50 | namespaceSelector: 51 | matchNames: 52 | - kube-system 53 | selector: 54 | matchLabels: 55 | k8s-app: kube-controller-manager 56 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/prometheus-serviceMonitorKubeScheduler.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: kube-scheduler 6 | name: kube-scheduler 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - interval: 30s 11 | port: http-metrics 12 | jobLabel: k8s-app 13 | namespaceSelector: 14 | matchNames: 15 | - kube-system 16 | selector: 17 | matchLabels: 18 | k8s-app: kube-scheduler 19 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/setup/0namespace-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: monitoring 5 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/setup/prometheus-operator-0prometheusruleCustomResourceDefinition.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1beta1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | annotations: 5 | controller-gen.kubebuilder.io/version: v0.2.4 6 | creationTimestamp: null 7 | name: prometheusrules.monitoring.coreos.com 8 | spec: 9 | 
group: monitoring.coreos.com 10 | names: 11 | kind: PrometheusRule 12 | listKind: PrometheusRuleList 13 | plural: prometheusrules 14 | singular: prometheusrule 15 | scope: Namespaced 16 | validation: 17 | openAPIV3Schema: 18 | description: PrometheusRule defines alerting rules for a Prometheus instance 19 | properties: 20 | apiVersion: 21 | description: 'APIVersion defines the versioned schema of this representation 22 | of an object. Servers should convert recognized schemas to the latest 23 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 24 | type: string 25 | kind: 26 | description: 'Kind is a string value representing the REST resource this 27 | object represents. Servers may infer this from the endpoint the client 28 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 29 | type: string 30 | metadata: 31 | type: object 32 | spec: 33 | description: Specification of desired alerting rule definitions for Prometheus. 34 | properties: 35 | groups: 36 | description: Content of Prometheus rule file 37 | items: 38 | description: 'RuleGroup is a list of sequentially evaluated recording 39 | and alerting rules. Note: PartialResponseStrategy is only used by 40 | ThanosRuler and will be ignored by Prometheus instances. Valid 41 | values for this field are ''warn'' or ''abort''. More info: https://github.com/thanos-io/thanos/blob/master/docs/components/rule.md#partial-response' 42 | properties: 43 | interval: 44 | type: string 45 | name: 46 | type: string 47 | partial_response_strategy: 48 | type: string 49 | rules: 50 | items: 51 | description: Rule describes an alerting or recording rule. 
52 | properties: 53 | alert: 54 | type: string 55 | annotations: 56 | additionalProperties: 57 | type: string 58 | type: object 59 | expr: 60 | anyOf: 61 | - type: integer 62 | - type: string 63 | x-kubernetes-int-or-string: true 64 | for: 65 | type: string 66 | labels: 67 | additionalProperties: 68 | type: string 69 | type: object 70 | record: 71 | type: string 72 | required: 73 | - expr 74 | type: object 75 | type: array 76 | required: 77 | - name 78 | - rules 79 | type: object 80 | type: array 81 | type: object 82 | required: 83 | - spec 84 | type: object 85 | version: v1 86 | versions: 87 | - name: v1 88 | served: true 89 | storage: true 90 | status: 91 | acceptedNames: 92 | kind: "" 93 | plural: "" 94 | conditions: [] 95 | storedVersions: [] 96 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/setup/prometheus-operator-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/version: v0.38.0 8 | name: prometheus-operator 9 | rules: 10 | - apiGroups: 11 | - apiextensions.k8s.io 12 | resources: 13 | - customresourcedefinitions 14 | verbs: 15 | - create 16 | - apiGroups: 17 | - apiextensions.k8s.io 18 | resourceNames: 19 | - alertmanagers.monitoring.coreos.com 20 | - podmonitors.monitoring.coreos.com 21 | - prometheuses.monitoring.coreos.com 22 | - prometheusrules.monitoring.coreos.com 23 | - servicemonitors.monitoring.coreos.com 24 | - thanosrulers.monitoring.coreos.com 25 | resources: 26 | - customresourcedefinitions 27 | verbs: 28 | - get 29 | - update 30 | - apiGroups: 31 | - monitoring.coreos.com 32 | resources: 33 | - alertmanagers 34 | - alertmanagers/finalizers 35 | - prometheuses 36 | - prometheuses/finalizers 37 | - thanosrulers 38 | - 
thanosrulers/finalizers 39 | - servicemonitors 40 | - podmonitors 41 | - prometheusrules 42 | verbs: 43 | - '*' 44 | - apiGroups: 45 | - apps 46 | resources: 47 | - statefulsets 48 | verbs: 49 | - '*' 50 | - apiGroups: 51 | - "" 52 | resources: 53 | - configmaps 54 | - secrets 55 | verbs: 56 | - '*' 57 | - apiGroups: 58 | - "" 59 | resources: 60 | - pods 61 | verbs: 62 | - list 63 | - delete 64 | - apiGroups: 65 | - "" 66 | resources: 67 | - services 68 | - services/finalizers 69 | - endpoints 70 | verbs: 71 | - get 72 | - create 73 | - update 74 | - delete 75 | - apiGroups: 76 | - "" 77 | resources: 78 | - nodes 79 | verbs: 80 | - list 81 | - watch 82 | - apiGroups: 83 | - "" 84 | resources: 85 | - namespaces 86 | verbs: 87 | - get 88 | - list 89 | - watch 90 | - apiGroups: 91 | - authentication.k8s.io 92 | resources: 93 | - tokenreviews 94 | verbs: 95 | - create 96 | - apiGroups: 97 | - authorization.k8s.io 98 | resources: 99 | - subjectaccessreviews 100 | verbs: 101 | - create 102 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/setup/prometheus-operator-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/version: v0.38.0 8 | name: prometheus-operator 9 | roleRef: 10 | apiGroup: rbac.authorization.k8s.io 11 | kind: ClusterRole 12 | name: prometheus-operator 13 | subjects: 14 | - kind: ServiceAccount 15 | name: prometheus-operator 16 | namespace: monitoring 17 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/setup/prometheus-operator-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | 
metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/version: v0.38.0 8 | name: prometheus-operator 9 | namespace: monitoring 10 | spec: 11 | replicas: 1 12 | selector: 13 | matchLabels: 14 | app.kubernetes.io/component: controller 15 | app.kubernetes.io/name: prometheus-operator 16 | template: 17 | metadata: 18 | labels: 19 | app.kubernetes.io/component: controller 20 | app.kubernetes.io/name: prometheus-operator 21 | app.kubernetes.io/version: v0.38.0 22 | spec: 23 | containers: 24 | - args: 25 | - --kubelet-service=kube-system/kubelet 26 | - --logtostderr=true 27 | - --config-reloader-image=jimmidyson/configmap-reload:v0.3.0 28 | - --prometheus-config-reloader=quay.io/coreos/prometheus-config-reloader:v0.38.0 29 | image: quay.io/coreos/prometheus-operator:v0.38.0 30 | name: prometheus-operator 31 | ports: 32 | - containerPort: 8080 33 | name: http 34 | resources: 35 | limits: 36 | cpu: 200m 37 | memory: 200Mi 38 | requests: 39 | cpu: 100m 40 | memory: 100Mi 41 | securityContext: 42 | allowPrivilegeEscalation: false 43 | - args: 44 | - --logtostderr 45 | - --secure-listen-address=:8443 46 | - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 47 | - --upstream=http://127.0.0.1:8080/ 48 | image: quay.io/coreos/kube-rbac-proxy:v0.4.1 49 | name: kube-rbac-proxy 50 | ports: 51 | - containerPort: 8443 52 | name: https 53 | securityContext: 54 | runAsUser: 65534 55 | nodeSelector: 56 | beta.kubernetes.io/os: linux 57 | securityContext: 58 | runAsNonRoot: true 59 | runAsUser: 65534 60 | serviceAccountName: prometheus-operator 61 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/setup/prometheus-operator-service.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/version: v0.38.0 8 | name: prometheus-operator 9 | namespace: monitoring 10 | spec: 11 | clusterIP: None 12 | ports: 13 | - name: https 14 | port: 8443 15 | targetPort: https 16 | selector: 17 | app.kubernetes.io/component: controller 18 | app.kubernetes.io/name: prometheus-operator 19 | -------------------------------------------------------------------------------- /pkg/prometheus/manifests/setup/prometheus-operator-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/version: v0.38.0 8 | name: prometheus-operator 9 | namespace: monitoring 10 | -------------------------------------------------------------------------------- /pkg/prometheus/prometheus.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019-2020 VMware, Inc. 3 | 4 | SPDX-License-Identifier: Apache-2.0 5 | 6 | Licensed under the Apache License, Version 2.0 (the "License"); 7 | you may not use this file except in compliance with the License. 8 | You may obtain a copy of the License at 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | package prometheus 18 | 19 | import ( 20 | log "github.com/sirupsen/logrus" 21 | "k-bench/util" 22 | "k8s.io/client-go/dynamic" 23 | "k8s.io/client-go/kubernetes" 24 | restclient "k8s.io/client-go/rest" 25 | "os/exec" 26 | "strings" 27 | "time" 28 | ) 29 | 30 | /* 31 | * PrometheusController controls prometheus stack for the cluster 32 | */ 33 | type PrometheusController struct { 34 | // This is the client used to manage prometheus stack on the cluster 35 | client *kubernetes.Clientset 36 | 37 | dynClient *dynamic.Interface 38 | 39 | kubeConfig *restclient.Config 40 | 41 | testConfig *util.TestConfig 42 | } 43 | 44 | func NewPrometheusController(c *kubernetes.Clientset, 45 | dc *dynamic.Interface, kc *restclient.Config, 46 | tc *util.TestConfig) PrometheusController { 47 | return PrometheusController{ 48 | client: c, 49 | dynClient: dc, 50 | kubeConfig: kc, 51 | testConfig: tc, 52 | } 53 | } 54 | 55 | /* 56 | * This function setup prometheus stack by applying the specified prometheus manifests using kubectl. 
57 | * TODO: Add a way to create prometheus objects using dynamic client in generic resource manager 58 | */ 59 | func (controller *PrometheusController) EnablePrometheus() { 60 | manifests := controller.testConfig.PrometheusManifestPaths 61 | log.Info("Setting up Prometheus on the cluster...") 62 | 63 | prometheusPred := util.PredicateSpec{Command: "kubectl get pods --all-namespaces", 64 | Expect: "!contains:prometheus"} 65 | noPrometheus := util.HandlePredicate(controller.client, *controller.dynClient, 66 | controller.kubeConfig, prometheusPred, 1000, 2000) 67 | 68 | if !noPrometheus { 69 | log.Warnf("Prometheus may be already installed, skipped prometheus enabling.") 70 | return 71 | } 72 | 73 | for _, mf := range manifests { 74 | cmd := exec.Command("kubectl", "create", "-f", mf) 75 | out, err := cmd.CombinedOutput() 76 | outStr := strings.ToLower(string(out)) 77 | if err != nil || strings.Contains(outStr, "error") { 78 | log.Errorf("Error while enabling prometheus, %v", err) 79 | break 80 | } 81 | // TODO: replace below with predicate 82 | log.Info("Sleep 5 seconds after each step...") 83 | time.Sleep(time.Duration(5) * time.Second) 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /pkg/waverunner/WR_wcpwrapper.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | usage () { 4 | echo "Usage: $0 -r -i -w [-o -k -p ]"; 5 | echo "run_tag is a tag you give to tag your data for each run on Wavefront" 6 | echo "Host_IP_String is a comma separated list of host IPs that you want to monitor" 7 | echo "Wavefront source is a unique string you provide to identify your data source on Wavefront" 8 | echo "Defaults to /tmp for output folder and a null host password" 9 | exit; 10 | } 11 | 12 | post_run() 13 | { 14 | if [ "${HOST_OS}" == "ESXi" ]; then 15 | kill -TERM ${WCPID[${host_ip_arr[0]}]}; 16 | echo "Waiting on ESX monitor process to terminate"; 17 | wait 
${WCPID[${host_ip_arr[0]}]}; 18 | else 19 | echo "Terminating monitoring processes"; 20 | for((num=0;num < $tot_hosts;num++)) 21 | { 22 | $SSHCMD ${USER}@${host_ip_arr[$num]} "ps -elf | grep collect_guest | grep -v grep | awk {'print \$4'} | xargs kill -TERM & > /dev/null" 23 | kill -TERM ${WCPID[${host_ip_arr[$num]}]}; 24 | } 25 | fi 26 | } 27 | 28 | # MAIN SCRIPT 29 | if test $# -lt 1; then 30 | usage; 31 | exit 1 32 | fi 33 | 34 | #Default values 35 | DEBUG_MODE=0; 36 | USER="root" 37 | HOSTPASS=''; 38 | tot_hosts=0; 39 | SSHCMD="sshpass -e ssh -o LogLevel=quiet -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"; 40 | SCPCMD="sshpass -e scp -o LogLevel=quiet -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"; 41 | declare -A WCPID; 42 | DEBUG="tee" 43 | folder='/tmp/' 44 | runtag="kbench-run"; 45 | dir=`dirname $0`; 46 | if [ $DEBUG_MODE -eq 1 ]; then 47 | SSHCMD="sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"; 48 | SCPCMD="sshpass -e scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"; 49 | DEBUG="tee /dev/tty" 50 | fi 51 | 52 | #Catch when the script gets the TERM signal and do the post processing 53 | trap "post_run" TERM SIGINT 54 | ssh_key="" 55 | 56 | while getopts "o:r:p:i:w:k:t:" ARGOPTS ; do 57 | case ${ARGOPTS} in 58 | o) folder="$OPTARG" 59 | ;; 60 | r) runtag=$OPTARG 61 | ;; 62 | k) ssh_key=$OPTARG 63 | ;; 64 | p) HOSTPASS=$OPTARG 65 | ;; 66 | i) ip_string=$OPTARG 67 | ;; 68 | w) WFsource=$OPTARG 69 | ;; 70 | t) WFtoken=$OPTARG 71 | ;; 72 | ?) usage 73 | ;; 74 | esac 75 | done; 76 | if [ "$ip_string" == "" ]; then 77 | echo "Please provide a list of hosts to monitor in a comma separated format using -i"; 78 | usage; 79 | exit; 80 | fi 81 | #Get the Host IP addresses into an array 82 | IFS=',' read -a host_ip_arr <<<"${ip_string}" 83 | tot_hosts=${#host_ip_arr[@]}; 84 | 85 | if [ "$ssh_key" != "" ]; then 86 | if ! 
stat $ssh_key > /dev/null 2>&1; then 87 | if stat ~/.ssh/$ssh_key > /dev/null 2>&1; then 88 | echo "Looks like the ssh_key file is not provided with an absolute path, picking up $ssh_key from ~/.ssh folder"; 89 | ssh_key="~/.ssh/$ssh_key" 90 | else 91 | echo "Looks like the config file is not provided with an absolute path. No $ssh_key in ~/.ssh folder either. I will try using the password"; 92 | fi 93 | fi 94 | SSHCMD="$SSHCMD -i $ssh_key" 95 | SCPCMD="$SCPCMD -i $ssh_key" 96 | fi 97 | 98 | export SSHPASS=$HOSTPASS; 99 | HOST_OS=`$SSHCMD root@${host_ip_arr[0]} "uname -o"`; 100 | if [ "${HOST_OS}" == "ESXi" ]; then 101 | echo "Node OS is ESXi, preparing to monitor"; 102 | if [ "${ssh_key}" != "" ]; then 103 | $dir/scripts/config_monitor_hosts.sh -r $runtag -i ${ip_string} -w $WFsource -o $folder -k ${ssh_key} -p "$HOSTPASS" & 104 | else 105 | $dir/scripts/config_monitor_hosts.sh -r $runtag -i ${ip_string} -w $WFsource -o $folder -p "$HOSTPASS" & 106 | fi 107 | WCPID[${host_ip_arr[0]}]=$!; 108 | else 109 | #Launch subprocesses too install the needed bits on each Linux node to go in parallel 110 | echo "Node OS is ${HOST_OS}, I am starting to monitor"; 111 | for((num=0;num < $tot_hosts;num++)) 112 | { 113 | #configure WF wiring 114 | $SCPCMD $dir/golden/WF/install_configure_wavefront_linux.sh ${USER}@${host_ip_arr[$num]}:/tmp/ 115 | $SCPCMD $dir/golden/WF/telegraf_linux.conf ${USER}@${host_ip_arr[$num]}:/tmp/ 116 | $SCPCMD $dir/golden/WF/waverunner_guest.pp ${USER}@${host_ip_arr[$num]}:/tmp/ 117 | $SCPCMD $dir/golden/LIN/collect_guest_stats.sh ${USER}@${host_ip_arr[$num]}:/tmp/ 118 | if [ "${WFtoken}" != "" ]; then 119 | $SSHCMD ${USER}@${host_ip_arr[$num]} "/tmp/install_configure_wavefront_linux.sh -k $WFtoken" & 120 | else 121 | $SSHCMD ${USER}@${host_ip_arr[$num]} "/tmp/install_configure_wavefront_linux.sh" & 122 | fi 123 | } 124 | 125 | #Wait until all installations are done 126 | wait; 127 | 128 | echo "Starting to monitor"; 129 | #Start stats collection 130 | 
for((num=0;num < $tot_hosts;num++)) 131 | { 132 | $SSHCMD ${USER}@${host_ip_arr[$num]} "ps -elf | grep collect_guest | grep -v grep | awk {'print \$4'} | xargs kill -TERM > /dev/null &" 133 | $SSHCMD ${USER}@${host_ip_arr[$num]} "/tmp/collect_guest_stats.sh -t 30 -o '/tmp/stats' -w $WFsource -d "Kubenode-stats-collector" -i Node$num -r $runtag" & 134 | 135 | WCPID[${host_ip_arr[$num]}]=$!; 136 | echo "launched, PID is ${WCPID[${host_ip_arr[$num]}]}" 137 | } 138 | fi 139 | wait; 140 | -------------------------------------------------------------------------------- /pkg/waverunner/golden/ESX/WC/EVENTS: -------------------------------------------------------------------------------- 1 | -c e:L2_RQSTS.MISS 2 | -c e:L2_RQSTS.CODE_RD_MISS 3 | -c e:LONGEST_LAT_CACHE.MISS 4 | -c e:L1D_PEND_MISS.PENDING 5 | -c e:L1D_PEND_MISS.PENDING_CYCLES 6 | -c e:CYCLE_ACTIVITY.CYCLES_LDM_PENDING 7 | -c e:OFFCORE_REQUESTS.DEMAND_DATA_RD 8 | -c e:MEM_LOAD_UOPS_RETIRED.L1_MISS 9 | -c e:MEM_LOAD_UOPS_RETIRED.L2_MISS 10 | -c e:MEM_LOAD_UOPS_RETIRED.L3_MISS 11 | -c e:MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM 12 | -c e:MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM 13 | -c e:MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM 14 | -c e:MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM 15 | -c e:MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD 16 | -c e:CPU_CLK_UNHALTED.THREAD_P 17 | -c e:CYCLE_ACTIVITY.STALLS_L2_MISS 18 | -c e:CYCLE_ACTIVITY.STALLS_L3_MISS 19 | -c e:CYCLE_ACTIVITY.STALLS_L1D_MISS 20 | -c e:CYCLE_ACTIVITY.STALLS_MEM_ANY 21 | -c e:CYCLE_ACTIVITY.STALLS_TOTAL 22 | -c e:ICACHE.MISSES 23 | -c e:ICACHE.IFDATA_STALL 24 | -c e:IDQ_UOPS_NOT_DELIVERED.CORE 25 | -c e:IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE 26 | -c e:IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE 27 | -c e:IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE 28 | -c e:IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE 29 | -c e:INST_RETIRED.ANY 30 | -c e:INT_MISC.RAT_STALL_CYCLES 31 | -c e:RESOURCE_STALLS.ANY 32 | -c e:RESOURCE_STALLS.ROB 33 | -c 
e:RESOURCE_STALLS.RS 34 | -c e:RESOURCE_STALLS.SB 35 | -c e:UOPS_ISSUED.ANY 36 | -c e:UOPS_ISSUED.STALL_CYCLES 37 | -c e:UOPS_RETIRED.RETIRE_SLOTS 38 | -c e:INT_MISC.RECOVERY_CYCLES 39 | -------------------------------------------------------------------------------- /pkg/waverunner/golden/ESX/WC/install_WaveCounter.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | dir=`dirname $0`; 3 | 4 | check () { 5 | if ! command -v "$1" > /dev/null; then 6 | echo "Please install $1 and try again"; 7 | exit 1; 8 | else 9 | echo "$1 is installed" 10 | fi 11 | } 12 | 13 | echo "Ensure env variable HOME is set to your home directory, proceeding under that assumption..."; 14 | 15 | check curl; 16 | check socat; 17 | check sshpass; 18 | 19 | sudo $dir/WF/install_configure_wavefront.sh; 20 | -------------------------------------------------------------------------------- /pkg/waverunner/golden/ESX/WC/notes: -------------------------------------------------------------------------------- 1 | Use Install_WaveCounter.sh if you did not install Waverunner's Install.sh. If you did, this is redundant. 
2 | -------------------------------------------------------------------------------- /pkg/waverunner/golden/ESX/WC/sample_event_files/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vmware-tanzu/k-bench/53a82d316effaaf562d81a7cd306bf5f0d40cfc6/pkg/waverunner/golden/ESX/WC/sample_event_files/.gitkeep -------------------------------------------------------------------------------- /pkg/waverunner/golden/ESX/WC/sample_event_files/demoEvents: -------------------------------------------------------------------------------- 1 | ( Original requested Events: Tuesday - July 10, 2018) 2 | 3 | -c e:DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK 4 | -c e:DTLB_LOAD_MISSES.WALK_COMPLETED_4K 5 | -c e:DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M 6 | -c e:DTLB_LOAD_MISSES.WALK_COMPLETED_1G 7 | -c e:DTLB_LOAD_MISSES.WALK_COMPLETED 8 | -c e:DTLB_LOAD_MISSES.WALK_DURATION 9 | -c e:DTLB_LOAD_MISSES.STLB_HIT_4K 10 | -c e:DTLB_LOAD_MISSES.STLB_HIT_2M 11 | -c e:DTLB_LOAD_MISSES.STLB_HIT 12 | -c e:DTLB_STORE_MISSES.MISS_CAUSES_A_WALK 13 | -c e:DTLB_STORE_MISSES.WALK_COMPLETED_4K 14 | -c e:DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M 15 | -c e:DTLB_STORE_MISSES.WALK_COMPLETED_1G 16 | -c e:DTLB_STORE_MISSES.WALK_COMPLETED 17 | -c e:DTLB_STORE_MISSES.WALK_DURATION 18 | -c e:DTLB_STORE_MISSES.STLB_HIT_4K 19 | -c e:DTLB_STORE_MISSES.STLB_HIT_2M 20 | -c e:DTLB_STORE_MISSES.STLB_HIT 21 | -c e:PAGE_WALKER_LOADS.DTLB_L1 22 | -c e:PAGE_WALKER_LOADS.DTLB_L2 23 | -c e:PAGE_WALKER_LOADS.DTLB_L3 24 | -c e:PAGE_WALKER_LOADS.DTLB_MEMORY 25 | -c e:PAGE_WALKER_LOADS.EPT_DTLB_L1 26 | -c e:PAGE_WALKER_LOADS.EPT_DTLB_L2 27 | -c e:PAGE_WALKER_LOADS.EPT_DTLB_L3 28 | -c e:PAGE_WALKER_LOADS.EPT_DTLB_MEMORY 29 | -c e:TLB_FLUSH.DTLB_THREAD 30 | -c e:TLB_FLUSH.STLB_ANY 31 | -c e:MEM_UOPS_RETIRED.STLB_MISS_LOADS 32 | -c e:MEM_UOPS_RETIRED.STLB_MISS_STORES 33 | 34 | ( New Events + Modifiers: Friday - July 13, 2018) 35 | 36 | -c e:DTLB_LOAD_MISSES.WALK_COMPLETED,d:u 
37 | -c e:DTLB_LOAD_MISSES.WALK_COMPLETED,d:k 38 | -c e:DTLB_LOAD_MISSES.WALK_DURATION,d:u 39 | -c e:DTLB_LOAD_MISSES.WALK_DURATION,d:k 40 | -c e:DTLB_LOAD_MISSES.STLB_HIT,d:u 41 | -c e:DTLB_LOAD_MISSES.STLB_HIT,d:k 42 | -c e:DTLB_STORE_MISSES.WALK_COMPLETED,d:u 43 | -c e:DTLB_STORE_MISSES.WALK_COMPLETED,d:k 44 | -c e:DTLB_STORE_MISSES.WALK_DURATION,d:u 45 | -c e:DTLB_STORE_MISSES.WALK_DURATION,d:k 46 | -c e:DTLB_STORE_MISSES.STLB_HIT,d:u 47 | -c e:DTLB_STORE_MISSES.STLB_HIT,d:k 48 | -c e:INST_RETIRED.ANY_P 49 | -c e:CPU_CLK_UNHALTED.THREAD_P,d:u 50 | -c e:CPU_CLK_UNHALTED.THREAD_P,d:k 51 | -c e:MEM_UOPS_RETIRED.ALL_LOADS 52 | -c e:MEM_UOPS_RETIRED.ALL_STORES 53 | -c e:EPT.WALK_CYCLES 54 | -c e:CPL_CYCLES.RING123,d:e,c:1 55 | -c e:CPL_CYCLES.RING0,d:e,c:1 56 | -------------------------------------------------------------------------------- /pkg/waverunner/golden/ESX/WC/sample_event_files/events_16.txt: -------------------------------------------------------------------------------- 1 | -c e:fixed_instr_retired 2 | -c e:fixed_unhalted_core_cycles 3 | -c e:fixed_unhalted_ref_cycles 4 | -c e:unhalted_core_cycles 5 | -c e:resource_stalls 6 | -c e:uops_retired 7 | -c e:REHABQ.LD_BLOCK_ST_FORWARD 8 | -c e:REHABQ.LD_SPLITS 9 | -c e:MEM_UOPS_RETIRED.L2_HIT_LOADS 10 | -c e:MEM_UOPS_RETIRED.L2_MISS_LOADS 11 | -c e:MEM_UOPS_RETIRED.DTLB_MISS_LOADS 12 | -c e:MEM_UOPS_RETIRED.HITM 13 | -c e:DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M 14 | -c e:DTLB_LOAD_MISSES.WALK_COMPLETED 15 | -c e:DTLB_LOAD_MISSES.WALK_DURATION 16 | -c e:DTLB_LOAD_MISSES.STLB_HIT -------------------------------------------------------------------------------- /pkg/waverunner/golden/ESX/WC/sample_event_files/events_24.txt: -------------------------------------------------------------------------------- 1 | -c e:fixed_instr_retired 2 | -c e:fixed_unhalted_core_cycles 3 | -c e:fixed_unhalted_ref_cycles 4 | -c e:unhalted_core_cycles 5 | -c e:resource_stalls 6 | -c e:uops_retired 7 | -c e:REHABQ.LD_BLOCK_ST_FORWARD 8 
| -c e:REHABQ.LD_SPLITS 9 | -c e:MEM_UOPS_RETIRED.L2_HIT_LOADS 10 | -c e:MEM_UOPS_RETIRED.L2_MISS_LOADS 11 | -c e:MEM_UOPS_RETIRED.DTLB_MISS_LOADS 12 | -c e:MEM_UOPS_RETIRED.HITM 13 | -c e:DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M 14 | -c e:DTLB_LOAD_MISSES.WALK_COMPLETED 15 | -c e:DTLB_LOAD_MISSES.WALK_DURATION 16 | -c e:DTLB_LOAD_MISSES.STLB_HIT 17 | -c e:UOPS_RETIRED 18 | -c e:L2_RQSTS.CODE_RD_MISS 19 | -c e:L2_RQSTS.ALL_CODE_RD 20 | -c e:LONGEST_LAT_CACHE.MISS 21 | -c e:LONGEST_LAT_CACHE.REFERENCE 22 | -c e:UNHALTED_CORE.CYCLES_P 23 | -c e:UNHALTED_REFERENCE.CYCLES_P 24 | -c e:DTLB_STORE_MISSES.WALK_DURATION -------------------------------------------------------------------------------- /pkg/waverunner/golden/ESX/WC/sample_event_files/events_8.txt: -------------------------------------------------------------------------------- 1 | -c e:fixed_instr_retired 2 | -c e:fixed_unhalted_core_cycles 3 | -c e:fixed_unhalted_ref_cycles 4 | -c e:unhalted_core_cycles 5 | -c e:resource_stalls 6 | -c e:uops_retired 7 | -c e:REHABQ.LD_BLOCK_ST_FORWARD 8 | -c e:REHABQ.LD_SPLITS -------------------------------------------------------------------------------- /pkg/waverunner/golden/ESX/WC/sample_event_files/events_demo.txt: -------------------------------------------------------------------------------- 1 | -c e:fixed_instr_retired,d:k 2 | -c e:fixed_unhalted_core_cycles 3 | -c e:BR_INST_RETIRED.CONDITIONAL 4 | -c e:BR_MISS_RETIRED.ALL_BRANCHES 5 | -c e:BR_MISS_RETIRED.CONDITIONAL 6 | -c e:RESOURCE_STALLS.ANY 7 | -c e:MEM_LOAD_UOPS_RETIRED.L1_HIT 8 | -c e:MEM_LOAD_UOPS_RETIRED.L1_MISS -------------------------------------------------------------------------------- /pkg/waverunner/golden/ESX/esxtop_cpu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | dir=`dirname $0`; 3 | esxtop_op=`esxtop -b -n 1` 4 | 5 | UTIL_FIELD=`echo "$esxtop_op" | head -1 | awk -F ',' '{for (i=1; i<=NF; i++) if($i ~ "Physical Cpu._Total....Util Time") 
print i}'` 6 | CORE_FIELD=`echo "$esxtop_op" | head -1 | awk -F ',' '{for (i=1; i<=NF; i++) if($i ~ "Physical Cpu._Total....Core Util Time") print i}'` 7 | PROCESSOR_FIELD=`echo "$esxtop_op" | head -1 | awk -F ',' '{for (i=1; i<=NF; i++) if($i ~ "Physical Cpu._Total....Processor Time") print i}'` 8 | if test "X${CORE_FIELD}" = "X"; then 9 | echo "$esxtop_op" | awk -F ',' -v UTIL_FIELD=$UTIL_FIELD -v CORE_FIELD=$CORE_FIELD -v PROCESSOR_FIELD=$PROCESSOR_FIELD 'BEGIN{printf("%-20s%7s %7s %7s\n", "Timestamp", "pcpu-used", "pcpu-util", "core-util")}{if(NR==1)next; printf("%-22s %9s %9s %9s\n", $1, $(PROCESSOR_FIELD), $(UTIL_FIELD), 0)}' | sed -e 's/"//g' 10 | else 11 | echo "$esxtop_op" | awk -F ',' -v UTIL_FIELD=$UTIL_FIELD -v CORE_FIELD=$CORE_FIELD -v PROCESSOR_FIELD=$PROCESSOR_FIELD 'BEGIN{printf("%-20s%7s %7s %7s\n", "Timestamp", "pcpu-used", "pcpu-util", "core-util")}{if(NR==1)next; printf("%-22s %9s %9s %9s\n", $1, $(PROCESSOR_FIELD), $(UTIL_FIELD), $(CORE_FIELD))}' | sed -e 's/"//g' 12 | fi 13 | -------------------------------------------------------------------------------- /pkg/waverunner/golden/WF/install_popVM.sh: -------------------------------------------------------------------------------- 1 | [ $# -eq 0 ] && { echo "Usage: $0 "; echo "Please specify run tag in the future"; } 2 | dir=`dirname $0`; 3 | $dir/install_configure_wavefront_linux.sh -p "localhost" -r "$1" -h "CICD-perf-host" -f "1"; 4 | -------------------------------------------------------------------------------- /pkg/waverunner/golden/WF/python_wavefront.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 VMware, Inc. All rights reserved. 
-- VMware Confidential 2 | # Description: Perf CICD WaveFront Example 3 | # Group-perf: optional 4 | # Timeout: 3000 5 | 6 | from telegraf.client import TelegrafClient 7 | client = TelegrafClient(host='localhost', port=8094, tags={'host': 'diffhost'}) 8 | print 'Client created' 9 | 10 | # Records a single value with one tag 11 | client.metric('GK.testmetric', float(60), tags={'app_name_descr': 'CICD_test-app'}, timestamp=long(1528483840794000)) 12 | print 'Metric sent to Wavefront' 13 | -------------------------------------------------------------------------------- /pkg/waverunner/golden/WF/waverunner_guest.pp: -------------------------------------------------------------------------------- 1 | package { ['curl','sysstat','numactl','socat']: 2 | ensure => present, 3 | } 4 | -------------------------------------------------------------------------------- /pkg/waverunner/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | dir=`dirname $0`; 3 | 4 | usage () { 5 | echo "Usage: $0 -k <wavefront_API_token> -u <wavefront_URL>" 6 | echo "Wavefront URL defaults to https://vmware.wavefront.com" 7 | echo "To get your Wavefront API token, please refer to https://docs.wavefront.com/wavefront_api.html#generating-an-api-token" 8 | echo ""; 9 | } 10 | 11 | 12 | if [ $# -eq 0 ] 13 | then 14 | usage 15 | exit; 16 | fi 17 | 18 | url="https://vmware.wavefront.com" 19 | 20 | while getopts "k:u:" ARGOPTS ; do 21 | case ${ARGOPTS} in 22 | k) token=$OPTARG 23 | ;; 24 | u) url=$OPTARG 25 | ;; 26 | ?) usage; exit; 27 | ;; 28 | esac 29 | done 30 | 31 | 32 | check () { 33 | if ! command -v "$1" > /dev/null; then 34 | echo "$1 is not installed, Let me try installing all dependencies using puppet"; 35 | if ! command -v puppet > /dev/null; then 36 | echo "Please install puppet and try again"; 37 | exit 1; 38 | else 39 | sudo puppet apply $dir/waverunner_driver.pp -v 40 | if ! 
command -v "$1" > /dev/null; then 41 | echo "Please install $1 manually and try again"; 42 | exit 1; 43 | fi 44 | fi 45 | else 46 | echo "$1 is installed" 47 | fi 48 | } 49 | 50 | echo "Ensure env variable HOME is set to your home directory, proceeding under that assumption..."; 51 | 52 | check curl; 53 | check socat; 54 | check rsync; 55 | check sshpass; 56 | check bc; 57 | 58 | #sudo $dir/golden/WF/install_configure_wavefront_linux.sh $url $token; 59 | echo "sudo $dir/golden/WF/install_configure_wavefront_linux.sh -u $url -k $token"; 60 | -------------------------------------------------------------------------------- /pkg/waverunner/waverunner_driver.pp: -------------------------------------------------------------------------------- 1 | package { ['socat','curl','sshpass','rsync']: 2 | ensure => present, 3 | } 4 | -------------------------------------------------------------------------------- /recompile.sh: -------------------------------------------------------------------------------- 1 | go install cmd/kbench.go 2 | -------------------------------------------------------------------------------- /run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | dir=`dirname $0`; 3 | 4 | usage () { 5 | echo "Usage: $0 -r <run-tag> [-t <comma-separated-test-names> -o <output-dir>]" 6 | echo "Example: $0 -r \"kbench-run-on-XYZ-cluster\" -t \"cp_heavy16,dp_netperf_internode,dp_fio\" -o \"./\"" 7 | echo ""; 8 | echo "Valid test names:" 9 | echo ""; 10 | echo "all|all_control_plane|all_data_plane|$tests" | sed 's/|/ || /g' 11 | } 12 | 13 | tests=`ls -b $dir/config/ | tr '\n' '|'` 14 | tag="run"; 15 | outdir="$dir"; 16 | 17 | if [ $# -eq 0 ] 18 | then 19 | usage 20 | echo "Since no tests specified, I am running the default workload: config/default"; 21 | tests="default" 22 | fi 23 | 24 | while getopts "r:t:o:h" ARGOPTS ; do 25 | case ${ARGOPTS} in 26 | t) tests=$OPTARG 27 | ;; 28 | r) tag=$OPTARG 29 | ;; 30 | o) outdir=$OPTARG 31 | ;; 32 | h) usage; exit; 33 | ;; 34 | ?) 
usage; exit; 35 | ;; 36 | esac 37 | done 38 | 39 | folder=`date '+%d-%b-%Y-%I-%M-%S-%P'` 40 | folder="$outdir/results_${tag}_$folder" 41 | mkdir -p "$folder" 42 | 43 | if grep -q "all_data_plane" <<< $tests; then 44 | dptests=`ls -b $dir/config/ | grep "dp_" | tr '\n' ','` 45 | tests=`echo $tests | sed "s/all_data_plane/$dptests/g"` 46 | fi 47 | 48 | if grep -q "all_control_plane" <<< $tests; then 49 | cptests=`ls -b $dir/config/ | grep "cp_" | tr '\n' ','` 50 | cptests="default,$cptests" 51 | tests=`echo $tests | sed "s/all_control_plane/$cptests/g"` 52 | fi 53 | 54 | if grep -q "all" <<< $tests; then 55 | alltests=`ls -b $dir/config/ | tr '\n' ','` 56 | tests=`echo $tests | sed "s/all/$alltests/g"` 57 | fi 58 | 59 | tests=`echo $tests | sed "s/,/ /g"` 60 | 61 | for test in $tests; do 62 | mkdir -p "$folder/$test"; 63 | cp $dir/config/$test/config.json $folder/$test/; 64 | cp $dir/config/$test/*.yaml $folder/$test/ > /dev/null 2>&1; 65 | cp $dir/config/$test/*.sh $folder/$test/ > /dev/null 2>&1; 66 | echo "Running test $test and results redirected to \"$folder/$test\""; 67 | if [ "$test" == "dp_fio" ]; then 68 | kubectl apply -f ./config/dp_fio/fio_pvc.yaml 69 | fi 70 | kbench -benchconfig="$dir/config/$test/config.json" -outdir="$folder/$test"; 71 | $dir/cleanup.sh > /dev/null 2>&1; 72 | done 73 | --------------------------------------------------------------------------------