├── .github
│   └── ISSUE_TEMPLATE
│       ├── bug_report.md
│       └── feature_request.md
├── .gitignore
├── .goreleaser.yml
├── .pre-commit-config.yaml
├── Dockerfile
├── LICENSE
├── Makefile
├── Readme.md
├── TODO.md
├── common
│   ├── configFile.go
│   └── configFile_test.go
├── examples
│   ├── Bandwidth.png
│   ├── Gosbench_Dashboard.jpg
│   ├── Latency.png
│   ├── example_config.json
│   ├── example_config.yaml
│   ├── example_prom_exporter.log
│   └── grafana_dashboard.json
├── go.mod
├── go.sum
├── k8s
│   ├── Readme.md
│   ├── gosbench.yaml
│   ├── gosbench_template.yaml.j2
│   └── monitoring.yaml
├── server
│   ├── .gitignore
│   └── main.go
└── worker
    ├── .gitignore
    ├── main.go
    ├── prometheus.go
    ├── s3.go
    └── workItems.go

/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: bug
6 | assignees: mulbc
7 | 
8 | ---
9 | 
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 | 
13 | **To Reproduce**
14 | Steps to reproduce the behavior:
15 | 1. Started server
16 | 2. Started X workers
17 | 3. See error
18 | 
19 | **Expected behavior**
20 | A clear and concise description of what you expected to happen.
21 | 
22 | **Screenshots**
23 | If applicable, add screenshots to help explain your problem.
24 | 
25 | **Config**
26 | Please attach your test config so we can understand your issue
27 | 
28 | **Firewall**
29 | Please attach your firewall config for the server and worker hosts
30 | 
31 | **Environment (please complete the following information):**
32 |  - OS: (e.g. RHEL 7)
33 |  - Golang version (get this with `go version`)
34 |  - Network and firewall setup
35 | 
36 | **Additional context**
37 | Add any other context about the problem here.
38 | 
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: enhancement
6 | assignees: ''
7 | 
8 | ---
9 | 
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 | 
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 | 
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 | 
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .vscode/launch.json
2 | testconf.yaml
3 | modd.conf
4 | main
5 | dist/
6 | k8s/gosbench.yaml
--------------------------------------------------------------------------------
/.goreleaser.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | 
3 | before:
4 |   hooks:
5 |     # You may remove this if you don't use go modules.
6 |     - go mod tidy
7 |     # you may remove this if you don't need go generate
8 |     # - go generate ./...
9 | builds:
10 |   - id: server
11 |     dir: server
12 |     binary: server
13 |     goarch:
14 |       - amd64
15 |       - arm
16 |       - arm64
17 |     goarm:
18 |       - 6
19 |       - 7
20 |     env:
21 |       - CGO_ENABLED=0
22 |   - id: worker
23 |     dir: worker
24 |     binary: worker
25 |     goarch:
26 |       - amd64
27 |       - arm
28 |       - arm64
29 |     goarm:
30 |       - 6
31 |       - 7
32 |     env:
33 |       - CGO_ENABLED=0
34 | 
35 | dockers:
36 |   -
37 | archives:
38 |   - format: tar.gz
39 |     # this name template makes the OS and Arch compatible with the results of `uname`.
40 |     name_template: >-
41 |       {{ .ProjectName }}_
42 |       {{- title .Os }}_
43 |       {{- if eq .Arch "amd64" }}x86_64
44 |       {{- else if eq .Arch "386" }}i386
45 |       {{- else }}{{ .Arch }}{{ end }}
46 |       {{- if .Arm }}v{{ .Arm }}{{ end }}
47 |     # use zip for windows archives
48 |     format_overrides:
49 |       - goos: windows
50 |         format: zip
51 | checksum:
52 |   name_template: 'checksums.txt'
53 | snapshot:
54 |   version_template: "{{ .Tag }}-next"
55 | changelog:
56 |   sort: asc
57 |   filters:
58 |     exclude:
59 |       - '^docs:'
60 |       - '^test:'
61 | signs:
62 |   - artifacts: checksum
63 | release:
64 |   draft: false
65 |   prerelease: true
66 |   disable: false
67 |   github:
68 | env_files:
69 |   # use only one or release will fail!
70 |   # github_token: ~/.path/to/my/gh_token
71 |   gitlab_token: ignoreme
72 |   # gitea_token: ~/.path/to/my/gitea_token
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | # See https://pre-commit.com for more information
2 | # See https://pre-commit.com/hooks.html for more hooks
3 | repos:
4 |   - repo: https://github.com/pre-commit/pre-commit-hooks
5 |     rev: v2.2.3
6 |     hooks:
7 |       - id: trailing-whitespace
8 |       - id: end-of-file-fixer
9 |       - id: check-added-large-files
10 |       - id: check-case-conflict
11 |       - id: check-executables-have-shebangs
12 |       - id: check-merge-conflict
13 |       - id: check-yaml
14 |         args: ['--allow-multiple-documents']
15 |       - id: detect-aws-credentials
16 |   - repo: https://github.com/syntaqx/git-hooks
17 |     rev: v0.0.16
18 |     hooks:
19 |       - id: forbid-binary
20 |       - id: go-fmt
21 |       - id: go-mod-tidy
22 |       - id: shellcheck
23 |       - id: shfmt
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM golang:alpine as builder
2 | ARG TYPE
3 | ENV GOBIN=/go/bin
4 | ENV GOPATH=/go/src
5 | RUN mkdir /build
6 | WORKDIR /build
7 | RUN apk add --upgrade git
8 | RUN go version
9 | # Copy and download dependency using go mod
10 | COPY go.mod .
11 | COPY go.sum .
12 | RUN go mod download
13 | 
14 | ADD . /build/
15 | RUN echo $TYPE
16 | RUN cd /build/$TYPE; go build -o main .
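# Second build stage: copy only the compiled binary out of the builder stage
# into a minimal Alpine image, so the final container ships without the Go toolchain or sources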
17 | 18 | FROM alpine 19 | ARG TYPE 20 | LABEL maintainer="Chris Blum " 21 | 22 | LABEL org.label-schema.build-date=$BUILD_DATE \ 23 | org.label-schema.name="goroom-$TYPE" \ 24 | org.label-schema.vcs-ref=$VCS_REF \ 25 | org.label-schema.vcs-url="https://github.com/mulbc/gosbench" \ 26 | org.label-schema.schema-version="1.0" 27 | 28 | RUN adduser -S -D -H -h /app appuser 29 | USER appuser 30 | COPY --from=builder /build/$TYPE/main /app/ 31 | WORKDIR /app 32 | ENTRYPOINT ["./main"] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. 
The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 
117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 
174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 
234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 
296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. 
If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 
414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. 
The 475 | work thus licensed is called the contributor's "contributor version". 476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. 
You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 
583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 628 | 629 | To do so, attach the following notices to the program. It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 633 | 634 | 635 | Copyright (C) 636 | 637 | This program is free software: you can redistribute it and/or modify 638 | it under the terms of the GNU General Public License as published by 639 | the Free Software Foundation, either version 3 of the License, or 640 | (at your option) any later version. 641 | 642 | This program is distributed in the hope that it will be useful, 643 | but WITHOUT ANY WARRANTY; without even the implied warranty of 644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 645 | GNU General Public License for more details. 646 | 647 | You should have received a copy of the GNU General Public License 648 | along with this program. If not, see . 649 | 650 | Also add information on how to contact you by electronic and paper mail. 
651 | 652 | If the program does terminal interaction, make it output a short 653 | notice like this when it starts in an interactive mode: 654 | 655 | Copyright (C) 656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 657 | This is free software, and you are welcome to redistribute it 658 | under certain conditions; type `show c' for details. 659 | 660 | The hypothetical commands `show w' and `show c' should show the appropriate 661 | parts of the General Public License. Of course, your program's commands 662 | might be different; for a GUI interface, you would use an "about box". 663 | 664 | You should also get your employer (if you work as a programmer) or school, 665 | if any, to sign a "copyright disclaimer" for the program, if necessary. 666 | For more information on this, and how to apply and follow the GNU GPL, see 667 | . 668 | 669 | The GNU General Public License does not permit incorporating your program 670 | into proprietary programs. If your program is a subroutine library, you 671 | may consider it more useful to permit linking proprietary applications with 672 | the library. If this is what you want to do, use the GNU Lesser General 673 | Public License instead of this License. But first, please read 674 | . 675 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | BUILD_DATE := $(shell date -u +"%Y-%m-%dT%H:%M:%SZ") 2 | UNIX_DATE := $(shell date -u +"%s") 3 | VCS_REF := $(shell git rev-parse HEAD) 4 | 5 | build: 6 | docker pull golang:alpine 7 | docker build --tag quay.io/mulbc/gosbench-server:$(VCS_REF) --build-arg "TYPE=server" --build-arg "BUILD_DATE=$(BUILD_DATE)" --build-arg "VCS_REF=$(VCS_REF)" . 8 | docker build --tag quay.io/mulbc/gosbench-worker:$(VCS_REF) --build-arg "TYPE=worker" --build-arg "BUILD_DATE=$(BUILD_DATE)" --build-arg "VCS_REF=$(VCS_REF)" . 9 | debug-server: 10 | docker run --rm --name=gosbench-server -it quay.io/mulbc/gosbench-server:$(VCS_REF) sh 11 | debug-worker: 12 | docker run --rm --name=gosbench-worker -it quay.io/mulbc/gosbench-worker:$(VCS_REF) sh 13 | release: 14 | docker tag quay.io/mulbc/gosbench-server:$(VCS_REF) quay.io/mulbc/gosbench-server:latest 15 | docker tag quay.io/mulbc/gosbench-worker:$(VCS_REF) quay.io/mulbc/gosbench-worker:latest 16 | docker push quay.io/mulbc/gosbench-server:latest 17 | docker push quay.io/mulbc/gosbench-worker:latest 18 | push-dev: 19 | docker build --tag quay.io/mulbc/gosbench-server:$(UNIX_DATE) --build-arg "TYPE=server" --build-arg "BUILD_DATE=$(BUILD_DATE)" --build-arg "VCS_REF=$(VCS_REF)" . 20 | docker build --tag quay.io/mulbc/gosbench-worker:$(UNIX_DATE) --build-arg "TYPE=worker" --build-arg "BUILD_DATE=$(BUILD_DATE)" --build-arg "VCS_REF=$(VCS_REF)" . 21 | docker push quay.io/mulbc/gosbench-server:$(UNIX_DATE) 22 | docker push quay.io/mulbc/gosbench-worker:$(UNIX_DATE) 23 | test: 24 | go test -v `go list ./...` 25 | -------------------------------------------------------------------------------- /Readme.md: -------------------------------------------------------------------------------- 1 | # Gosbench 2 | 3 | Gosbench is the Golang reimplementation of [Cosbench](https://github.com/intel-cloud/cosbench). 
4 | It is a distributed S3 performance benchmark tool with a [Prometheus exporter](https://opencensus.io/exporters/supported-exporters/go/prometheus/), leveraging the official [Golang AWS SDK](https://aws.amazon.com/sdk-for-go/).
5 | 
6 | ## Usage
7 | 
8 | Gosbench consists of two parts:
9 | 
10 | * Server: Coordinates the workers and the general test queue
11 | * Workers: Actually connect to S3 and perform reading, writing, deleting and listing of objects
12 | 
13 | INFO: `-d` activates debug logging, `-t` activates trace logging
14 | 
15 | ### Running a test
16 | 
17 | 1. Build the server: `go install github.com/mulbc/gosbench/server`
18 | 1. Run the server, specifying a config file: `server -c path/to/config.yaml` - you can find an example config [in the example folder](examples/example_config.yaml)
19 | 1. The server will open port 2000 for workers to connect to - make sure this port is not blocked by your firewall!
20 | 1. Build the worker: `go install github.com/mulbc/gosbench/worker`
21 | 1. Run the worker, specifying the server connection details: `worker -s 192.168.1.1:2000`
22 | 1. The worker will immediately connect to the server and start working.
23 |    The worker opens port 8888 for the Prometheus exporter. Please make sure this port is allowed in your firewall and that you have added the worker to the Prometheus config.
24 | 
25 | #### Prometheus configuration
26 | 
27 | Make sure your Prometheus configuration looks similar to this:
28 | 
29 | ```yaml
30 | global:
31 |   scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
32 |   evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
33 | scrape_configs:
34 |   - job_name: 'prometheus'
35 |     static_configs:
36 |       - targets:
37 |           - localhost:9090
38 | 
39 |   - job_name: 'gosbench'
40 |     scrape_interval: 1s
41 |     static_configs:
42 |       - targets:
43 |           - WORKER1.example.com:8888
44 |           - WORKER2.example.com:8888
45 | ```
46 | 
47 | To reload the configuration, you can either send a SIGHUP to your Prometheus server or just restart it ;)
48 | Afterwards, ensure that your Gosbench workers are listed at http://your.prometheus.server.example.com:9090/targets
49 | 
50 | It is expected that the workers are in state `DOWN` most of the time... they are only scrapeable during a test run.
51 | 
52 | It is best practice to run the [Prometheus Node Exporter](https://github.com/prometheus/node_exporter) on all hosts as well, to gather common system metrics during the tests. This will help you identify bottlenecks. Please consult the Node Exporter manuals on how to install and configure it on your platform.
53 | 
54 | ### Evaluating a test
55 | 
56 | During a test, Prometheus will continuously scrape the performance data from the workers.
57 | You can visualize this data in Grafana. To get an overview of what the provided data looks like, check out [the example scrape](examples/example_prom_exporter.log).
58 | 
59 | There is also an [example Grafana dashboard](examples/grafana_dashboard.json) that you can import and use.
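If you would rather pull raw numbers out of Prometheus than read them off a dashboard, the Prometheus HTTP API is enough. The following is a minimal sketch using the official `client_golang` API client - the Prometheus address and the `gosbench_...` metric name in the query are placeholders, so substitute your own server and the real metric names from [the example scrape](examples/example_prom_exporter.log):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/prometheus/client_golang/api"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
)

func main() {
	// Point this at the Prometheus server that scrapes your Gosbench workers.
	client, err := api.NewClient(api.Config{Address: "http://your.prometheus.server.example.com:9090"})
	if err != nil {
		fmt.Println("error creating Prometheus client:", err)
		return
	}
	promAPI := v1.NewAPI(client)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// "gosbench_operations_total" is a placeholder metric name - look up the
	// real ones in examples/example_prom_exporter.log.
	result, warnings, err := promAPI.Query(ctx, `rate(gosbench_operations_total[1m])`, time.Now())
	if err != nil {
		fmt.Println("query failed:", err)
		return
	}
	if len(warnings) > 0 {
		fmt.Println("warnings:", warnings)
	}
	fmt.Println(result)
}
```

Remember to run such queries while a test is in flight - as noted above, the workers only expose metrics during a test run.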
The dashboard gives a basic overview of the most common stats that people are interested in:
60 | 
61 | ![Gosbench Dashboard in action](examples/Gosbench_Dashboard.jpg)
62 | 
63 | ### Docker
64 | 
65 | There are now Docker container images available for easy consumption:
66 | 
67 | ```shell
68 | docker pull quay.io/mulbc/goroom-server
69 | docker pull quay.io/mulbc/goroom-worker
70 | ```
71 | 
72 | In the `k8s` folder you will find example files to deploy Gosbench on OpenShift and Kubernetes.
73 | Be sure to modify the ConfigMaps in `gosbench.yaml` to use your S3 endpoint credentials.
74 | 
75 | ### Reading pre-existing files from buckets
76 | 
77 | Due to popular demand, reading pre-existing files has been added. You activate this special mode by setting `existing_read_weight` to something higher than 0.
78 | 
79 | There are some important things to consider though ;)
80 | 
81 | Just like with other operations, the `bucket_prefix` value will be evaluated to determine the bucket names to search for pre-existing objects.
82 | 
83 | **Example:** This is an excerpt of your config:
84 | 
85 | ```yaml
86 | objects:
87 |   size_min: 5
88 |   size_max: 100
89 |   part_size: 0
90 |   # distribution: constant, random, sequential
91 |   size_distribution: random
92 |   unit: KB
93 |   number_min: 10
94 |   number_max: 100
95 |   # distribution: constant, random, sequential
96 |   number_distribution: constant
97 | buckets:
98 |   number_min: 2
99 |   number_max: 10
100 |   # distribution: constant, random, sequential
101 |   number_distribution: constant
102 | bucket_prefix: myBucket-
103 | ```
104 | 
105 | Note: Due to the constant distribution, we will only consider the `_min` values.
106 | 
107 | This will cause each worker to search for pre-existing files in the buckets `myBucket-0` and `myBucket-1` and to read 10 objects from these buckets. If there are fewer than 10 objects in any of these buckets, some objects will be read multiple times. The object size given in your config will be ignored when reading pre-existing files.
108 | 
109 | ## Cosbench vs Gosbench benchmark comparison
110 | When a new tool is presented, it's essential to compare it to existing tools for accuracy. For this reason, we ran a comparison between Cosbench and Gosbench. Both benchmarks were tasked to do a 100% write test and a 100% read test on 4KB, 16KB, 256KB, 1MB and 4MB objects for 60 seconds each. The tests ran against a single RGW of a Ceph storage cluster using the S3 protocol, with the same parallel-client test configuration for both tools. The figures below show the write and read results, respectively. From these charts, it's apparent that the performance metrics for all object sizes are similar for both tools.
111 | 112 | ![Latency](examples/Latency.png) 113 | 114 | ![Bandwidth](examples/Bandwidth.png) 115 | 116 | ## Contributing 117 | 118 | * Be aware that this repo uses pre-commit hooks - install them via `pre-commit install` 119 | * [More info](https://pre-commit.com/) 120 | * We are using Go modules in this repository - read up on it [here](https://blog.golang.org/using-go-modules) 121 | * Check out the open [TODOs](TODO.md) for hints on what to work on 122 | 123 | ## Known issues 124 | 125 | * Workers will error out when the config's min value is larger than the max value (even for a constant distribution) 126 | -------------------------------------------------------------------------------- /TODO.md: -------------------------------------------------------------------------------- 1 | # Open TODOs 2 | 3 | ## Worker TODOs 4 | 5 | * Never exit when in preparation step as this could deadlock the server 6 | * Implement S3 timeout variable 7 | * ~~Change S3 config to generic []aws.Config{} type~~ Not parseable from Yaml 8 | * Add second exporter that is measuring exec time of AWS functions instead of using the HTTP client 9 | 10 | ## Server TODOs 11 | 12 | * Set Grafana annotations when tests start and when they end (at best as region) 13 | * Add timeout when waiting for workers (or whenever we could deadlock) 14 | 15 | ## Misc 16 | 17 | * Convert the above TODOs to Github tasks 18 | -------------------------------------------------------------------------------- /common/configFile.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "encoding/json" 5 | "errors" 6 | "fmt" 7 | "math/rand" 8 | "os" 9 | "strings" 10 | "time" 11 | 12 | "gopkg.in/yaml.v3" 13 | 14 | log "github.com/sirupsen/logrus" 15 | ) 16 | 17 | // This uses the Base 2 calculation where 18 | // 1 kB = 1024 Byte 19 | const ( 20 | BYTE = 1 << (10 * iota) 21 | KILOBYTE 22 | MEGABYTE 23 | GIGABYTE 24 | TERABYTE 25 | ) 26 | 27 | // S3Configuration contains all information to connect to a certain S3 endpoint 28 | type S3Configuration struct { 29 | AccessKey string `yaml:"access_key" json:"access_key"` 30 | SecretKey string `yaml:"secret_key" json:"secret_key"` 31 | Region string `yaml:"region" json:"region"` 32 | Endpoint string `yaml:"endpoint" json:"endpoint"` 33 | Timeout time.Duration `yaml:"timeout" json:"timeout"` 34 | SkipSSLVerify bool `yaml:"skipSSLverify" json:"skipSSLverify"` 35 | UsePathStyle bool `yaml:"usePathStyle" json:"usePathStyle"` 36 | } 37 | 38 | // GrafanaConfiguration contains all information necessary to add annotations 39 | // via the Grafana HTTP API 40 | type GrafanaConfiguration struct { 41 | Username string `yaml:"username" json:"username"` 42 | Password string `yaml:"password" json:"password"` 43 | Endpoint string `yaml:"endpoint" json:"endpoint"` 44 | } 45 | 46 | // TestCaseConfiguration is the configuration of a performance test 47 | type TestCaseConfiguration struct { 48 | Objects struct { 49 | SizeMin uint64 `yaml:"size_min" json:"size_min"` 50 | SizeMax uint64 `yaml:"size_max" json:"size_max"` 51 | PartSize uint64 `yaml:"part_size" json:"part_size"` 52 | SizeLast uint64 53 | SizeDistribution string `yaml:"size_distribution" json:"size_distribution"` 54 | NumberMin uint64 `yaml:"number_min" json:"number_min"` 55 | NumberMax uint64 `yaml:"number_max" json:"number_max"` 56 | NumberLast uint64 57 | NumberDistribution string `yaml:"number_distribution" json:"number_distribution"` 58 | Unit string `yaml:"unit" json:"unit"` 59 | } 
`yaml:"objects" json:"objects"`
60 | 	Buckets struct {
61 | 		NumberMin          uint64 `yaml:"number_min" json:"number_min"`
62 | 		NumberMax          uint64 `yaml:"number_max" json:"number_max"`
63 | 		NumberLast         uint64
64 | 		NumberDistribution string `yaml:"number_distribution" json:"number_distribution"`
65 | 	} `yaml:"buckets" json:"buckets"`
66 | 	Name               string   `yaml:"name" json:"name"`
67 | 	BucketPrefix       string   `yaml:"bucket_prefix" json:"bucket_prefix"`
68 | 	ObjectPrefix       string   `yaml:"object_prefix" json:"object_prefix"`
69 | 	Runtime            Duration `yaml:"stop_with_runtime" json:"stop_with_runtime"`
70 | 	OpsDeadline        uint64   `yaml:"stop_with_ops" json:"stop_with_ops"`
71 | 	Workers            int      `yaml:"workers" json:"workers"`
72 | 	WorkerShareBuckets bool     `yaml:"workers_share_buckets" json:"workers_share_buckets"`
73 | 	ParallelClients    int      `yaml:"parallel_clients" json:"parallel_clients"`
74 | 	CleanAfter         bool     `yaml:"clean_after" json:"clean_after"`
75 | 	ReadWeight         int      `yaml:"read_weight" json:"read_weight"`
76 | 	ExistingReadWeight int      `yaml:"existing_read_weight" json:"existing_read_weight"`
77 | 	WriteWeight        int      `yaml:"write_weight" json:"write_weight"`
78 | 	ListWeight         int      `yaml:"list_weight" json:"list_weight"`
79 | 	DeleteWeight       int      `yaml:"delete_weight" json:"delete_weight"`
80 | }
81 | 
82 | // Testconf contains all the information necessary to set up a distributed test
83 | type Testconf struct {
84 | 	S3Config      []*S3Configuration       `yaml:"s3_config" json:"s3_config"`
85 | 	GrafanaConfig *GrafanaConfiguration    `yaml:"grafana_config" json:"grafana_config"`
86 | 	Tests         []*TestCaseConfiguration `yaml:"tests" json:"tests"`
87 | }
88 | 
89 | // WorkerConf is the configuration that is sent to each worker
90 | // It includes a subset of information from the Testconf
91 | type WorkerConf struct {
92 | 	S3Config *S3Configuration
93 | 	Test     *TestCaseConfiguration
94 | 	WorkerID string
95 | }
96 | 
97 | // BenchmarkResult is the struct that will contain the benchmark results from a
98 | // worker after it has finished its benchmark
99 | type BenchmarkResult struct {
100 | 	TestName   string
101 | 	Operations float64
102 | 	Bytes      float64
103 | 	// Bandwidth is the amount of Bytes per second of runtime
104 | 	Bandwidth  float64
105 | 	LatencyAvg float64
106 | 	Duration   time.Duration
107 | }
108 | 
109 | // WorkerMessage is the struct that is exchanged in the communication between
110 | // server and worker.
It usually only contains a message, but during the init 111 | // phase, also contains the config for the worker 112 | type WorkerMessage struct { 113 | Message string 114 | Config *WorkerConf 115 | BenchResult BenchmarkResult 116 | } 117 | 118 | // CheckConfig checks the global config 119 | func CheckConfig(config *Testconf) { 120 | for _, testcase := range config.Tests { 121 | // log.Debugf("Checking testcase with prefix %s", testcase.BucketPrefix) 122 | err := checkTestCase(testcase) 123 | if err != nil { 124 | log.WithError(err).Fatalf("Issue detected when scanning through the config file:") 125 | } 126 | } 127 | } 128 | 129 | func checkTestCase(testcase *TestCaseConfiguration) error { 130 | if testcase.Runtime == 0 && testcase.OpsDeadline == 0 { 131 | return fmt.Errorf("Either stop_with_runtime or stop_with_ops needs to be set") 132 | } 133 | if testcase.ReadWeight == 0 && testcase.WriteWeight == 0 && testcase.ListWeight == 0 && testcase.DeleteWeight == 0 && testcase.ExistingReadWeight == 0 { 134 | return fmt.Errorf("At least one weight needs to be set - Read / Existing Read / Write / List / Delete") 135 | } 136 | if testcase.ExistingReadWeight != 0 && testcase.BucketPrefix == "" { 137 | return fmt.Errorf("When using existing_read_weight, setting the bucket_prefix is mandatory") 138 | } 139 | if testcase.Buckets.NumberMin == 0 { 140 | return fmt.Errorf("Please set minimum number of Buckets") 141 | } 142 | if testcase.Objects.SizeMin == 0 { 143 | return fmt.Errorf("Please set minimum size of Objects") 144 | } 145 | if testcase.Objects.SizeMax == 0 { 146 | return fmt.Errorf("Please set maximum size of Objects") 147 | } 148 | if testcase.Objects.NumberMin == 0 { 149 | return fmt.Errorf("Please set minimum number of Objects") 150 | } 151 | if err := checkDistribution(testcase.Objects.SizeDistribution, "Object size_distribution"); err != nil { 152 | return err 153 | } 154 | if err := checkDistribution(testcase.Objects.NumberDistribution, "Object number_distribution"); err != nil { 155 | return err 156 | } 157 | if err := checkDistribution(testcase.Buckets.NumberDistribution, "Bucket number_distribution"); err != nil { 158 | return err 159 | } 160 | if testcase.Objects.Unit == "" { 161 | return fmt.Errorf("Please set the Objects unit") 162 | } 163 | 164 | var toByteMultiplicator uint64 165 | switch strings.ToUpper(testcase.Objects.Unit) { 166 | case "B": 167 | toByteMultiplicator = BYTE 168 | case "KB", "K": 169 | toByteMultiplicator = KILOBYTE 170 | case "MB", "M": 171 | toByteMultiplicator = MEGABYTE 172 | case "GB", "G": 173 | toByteMultiplicator = GIGABYTE 174 | case "TB", "T": 175 | toByteMultiplicator = TERABYTE 176 | default: 177 | return fmt.Errorf("Could not parse unit size - please use one of B/KB/MB/GB/TB") 178 | } 179 | 180 | testcase.Objects.SizeMin = testcase.Objects.SizeMin * toByteMultiplicator 181 | testcase.Objects.SizeMax = testcase.Objects.SizeMax * toByteMultiplicator 182 | testcase.Objects.PartSize = testcase.Objects.PartSize * toByteMultiplicator 183 | return nil 184 | } 185 | 186 | // Checks if a given string is of type distribution 187 | func checkDistribution(distribution string, keyname string) error { 188 | switch distribution { 189 | case "constant", "random", "sequential": 190 | return nil 191 | } 192 | return fmt.Errorf("%s is not a valid distribution. Allowed options are constant, random, sequential", keyname) 193 | } 194 | 195 | // EvaluateDistribution looks at the given distribution and returns a meaningful next number 196 | func EvaluateDistribution(min uint64, max uint64, lastNumber *uint64, increment uint64, distribution string) uint64 { 197 | switch distribution { 198 | case "constant": 199 | return min 200 | case "random": 201 | if max <= min { return min } // guard against a modulo-by-zero panic when min >= max 202 | return (rand.Uint64() % (max - min)) + min 203 | case "sequential": 204 | if *lastNumber+increment > max { 205 | return max 206 | } 207 | *lastNumber = *lastNumber + increment 208 | return *lastNumber 209 | } 210 | return 0 211 | } 212 | 213 | // The JSON package does not currently marshal/unmarshal time.Duration, so we provide a way to do it here 214 | type Duration time.Duration 215 | 216 | func (d Duration) MarshalJSON() ([]byte, error) { 217 | return json.Marshal(time.Duration(d).String()) 218 | } 219 | 220 | func (d *Duration) UnmarshalJSON(b []byte) error { 221 | var v interface{} 222 | if err := json.Unmarshal(b, &v); err != nil { 223 | return err 224 | } 225 | switch value := v.(type) { 226 | case int: 227 | *d = Duration(time.Duration(value)) 228 | case float64: 229 | *d = Duration(time.Duration(value)) 230 | case string: 231 | tmp, err := time.ParseDuration(value) 232 | if err != nil { 233 | return err 234 | } 235 | *d = Duration(tmp) 236 | default: 237 | return errors.New("invalid duration") 238 | } 239 | return nil 240 | } 241 | 242 | func (d Duration) MarshalYAML() (interface{}, error) { 243 | return time.Duration(d).String(), nil 244 | } 245 | 246 | func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { 247 | var v interface{} 248 | err := unmarshal(&v) 249 | if err != nil { 250 | return err 251 | } 252 | switch value := v.(type) { 253 | case int: 254 | *d = Duration(time.Duration(value)) 255 | case float64: 256 | *d = Duration(time.Duration(value)) 257 | case string: 258 | tmp, err := time.ParseDuration(value) 259 | if err != nil { 260 | return err 261 | } 262 | *d = Duration(tmp) 263 | default: 264 | return errors.New("invalid duration") 265 | } 266 | return nil 267 | } 268 | 269 | var ReadFile = os.ReadFile 270 | 271 | func LoadConfigFromFile(configFile string) *Testconf { 272 | configFileContent, err := ReadFile(configFile) 273 | if err != nil { 274 | log.WithError(err).Fatalf("Error reading config file:") 275 | } 276 | var config Testconf 277 | 278 | if strings.HasSuffix(configFile, ".yaml") || strings.HasSuffix(configFile, ".yml") { 279 | err = yaml.Unmarshal(configFileContent, &config) 280 | if err != nil { 281 | log.WithError(err).Fatalf("Error unmarshaling yaml config file:") 282 | } 283 | } else if strings.HasSuffix(configFile, ".json") { 284 | err = json.Unmarshal(configFileContent, &config) 285 | if err != nil { 286 | log.WithError(err).Fatalf("Error unmarshaling json config file:") 287 | } 288 | } else { 289 | log.Fatalf("Configuration file must be a yaml or json formatted file") 290 | } 291 | 292 | return &config 293 | } 294 | 
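For orientation, a minimal runnable sketch of how `EvaluateDistribution` behaves when driven from a separate main package; the import path follows this repository's `go.mod`, and the commented results follow from the implementation above:

```go
package main

import (
	"fmt"

	"github.com/mulbc/gosbench/common"
)

func main() {
	last := uint64(1)
	// "sequential" advances *lastNumber by increment and clamps at max
	fmt.Println(common.EvaluateDistribution(1, 4, &last, 1, "sequential")) // 2
	fmt.Println(common.EvaluateDistribution(1, 4, &last, 1, "sequential")) // 3
	fmt.Println(common.EvaluateDistribution(1, 4, &last, 9, "sequential")) // 4 (clamped, last stays 3)
	// "constant" always returns min; "random" draws from [min, max)
	fmt.Println(common.EvaluateDistribution(5, 100, &last, 1, "constant")) // 5
}
```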
-------------------------------------------------------------------------------- /common/configFile_test.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "os" 5 | "reflect" 6 | "testing" 7 | "time" 8 | ) 9 | 10 | func Test_checkTestCase(t *testing.T) { 11 | type args struct { 12 | testcase *TestCaseConfiguration 13 | } 14 | tests := []struct { 15 | name string 16 | args args 17 | wantErr bool 18 | }{ 19 | {"No end 
defined", args{new(TestCaseConfiguration)}, true}, 20 | {"No weights defined", args{&TestCaseConfiguration{Runtime: Duration(time.Second), OpsDeadline: 10}}, true}, 21 | {"No Bucket Numbers defined", args{&TestCaseConfiguration{Runtime: Duration(time.Second), OpsDeadline: 10, ReadWeight: 1}}, true}, 22 | {"No Object size min defined", args{&TestCaseConfiguration{Runtime: Duration(time.Second), OpsDeadline: 10, ReadWeight: 1, 23 | Buckets: struct { 24 | NumberMin uint64 `yaml:"number_min" json:"number_min"` 25 | NumberMax uint64 `yaml:"number_max" json:"number_max"` 26 | NumberLast uint64 27 | NumberDistribution string `yaml:"number_distribution" json:"number_distribution"` 28 | }{ 29 | NumberMin: 1, 30 | }}}, true}, 31 | {"No Object size max defined", args{&TestCaseConfiguration{Runtime: Duration(time.Second), OpsDeadline: 10, ReadWeight: 1, 32 | Buckets: struct { 33 | NumberMin uint64 `yaml:"number_min" json:"number_min"` 34 | NumberMax uint64 `yaml:"number_max" json:"number_max"` 35 | NumberLast uint64 36 | NumberDistribution string `yaml:"number_distribution" json:"number_distribution"` 37 | }{ 38 | NumberMin: 1, 39 | }, 40 | Objects: struct { 41 | SizeMin uint64 `yaml:"size_min" json:"size_min"` 42 | SizeMax uint64 `yaml:"size_max" json:"size_max"` 43 | PartSize uint64 `yaml:"part_size" json:"part_size"` 44 | SizeLast uint64 45 | SizeDistribution string `yaml:"size_distribution" json:"size_distribution"` 46 | NumberMin uint64 `yaml:"number_min" json:"number_min"` 47 | NumberMax uint64 `yaml:"number_max" json:"number_max"` 48 | NumberLast uint64 49 | NumberDistribution string `yaml:"number_distribution" json:"number_distribution"` 50 | Unit string `yaml:"unit" json:"unit"` 51 | }{ 52 | SizeMin: 1, 53 | }}}, true}, 54 | {"No Object number min defined", args{&TestCaseConfiguration{Runtime: Duration(time.Second), OpsDeadline: 10, ReadWeight: 1, 55 | Buckets: struct { 56 | NumberMin uint64 `yaml:"number_min" json:"number_min"` 57 | NumberMax uint64 `yaml:"number_max" json:"number_max"` 58 | NumberLast uint64 59 | NumberDistribution string `yaml:"number_distribution" json:"number_distribution"` 60 | }{ 61 | NumberMin: 1, 62 | }, 63 | Objects: struct { 64 | SizeMin uint64 `yaml:"size_min" json:"size_min"` 65 | SizeMax uint64 `yaml:"size_max" json:"size_max"` 66 | PartSize uint64 `yaml:"part_size" json:"part_size"` 67 | SizeLast uint64 68 | SizeDistribution string `yaml:"size_distribution" json:"size_distribution"` 69 | NumberMin uint64 `yaml:"number_min" json:"number_min"` 70 | NumberMax uint64 `yaml:"number_max" json:"number_max"` 71 | NumberLast uint64 72 | NumberDistribution string `yaml:"number_distribution" json:"number_distribution"` 73 | Unit string `yaml:"unit" json:"unit"` 74 | }{ 75 | SizeMin: 1, 76 | SizeMax: 2, 77 | }}}, true}, 78 | {"No Object size distributions defined", args{&TestCaseConfiguration{Runtime: Duration(time.Second), OpsDeadline: 10, ReadWeight: 1, 79 | Buckets: struct { 80 | NumberMin uint64 `yaml:"number_min" json:"number_min"` 81 | NumberMax uint64 `yaml:"number_max" json:"number_max"` 82 | NumberLast uint64 83 | NumberDistribution string `yaml:"number_distribution" json:"number_distribution"` 84 | }{ 85 | NumberMin: 1, 86 | }, 87 | Objects: struct { 88 | SizeMin uint64 `yaml:"size_min" json:"size_min"` 89 | SizeMax uint64 `yaml:"size_max" json:"size_max"` 90 | PartSize uint64 `yaml:"part_size" json:"part_size"` 91 | SizeLast uint64 92 | SizeDistribution string `yaml:"size_distribution" json:"size_distribution"` 93 | NumberMin uint64 `yaml:"number_min" 
json:"number_min"` 94 | NumberMax uint64 `yaml:"number_max" json:"number_max"` 95 | NumberLast uint64 96 | NumberDistribution string `yaml:"number_distribution" json:"number_distribution"` 97 | Unit string `yaml:"unit" json:"unit"` 98 | }{ 99 | SizeMin: 1, 100 | SizeMax: 2, 101 | NumberMin: 3, 102 | }}}, true}, 103 | {"No Object number distributions defined", args{&TestCaseConfiguration{Runtime: Duration(time.Second), OpsDeadline: 10, ReadWeight: 1, 104 | Buckets: struct { 105 | NumberMin uint64 `yaml:"number_min" json:"number_min"` 106 | NumberMax uint64 `yaml:"number_max" json:"number_max"` 107 | NumberLast uint64 108 | NumberDistribution string `yaml:"number_distribution" json:"number_distribution"` 109 | }{ 110 | NumberMin: 1, 111 | }, 112 | Objects: struct { 113 | SizeMin uint64 `yaml:"size_min" json:"size_min"` 114 | SizeMax uint64 `yaml:"size_max" json:"size_max"` 115 | PartSize uint64 `yaml:"part_size" json:"part_size"` 116 | SizeLast uint64 117 | SizeDistribution string `yaml:"size_distribution" json:"size_distribution"` 118 | NumberMin uint64 `yaml:"number_min" json:"number_min"` 119 | NumberMax uint64 `yaml:"number_max" json:"number_max"` 120 | NumberLast uint64 121 | NumberDistribution string `yaml:"number_distribution" json:"number_distribution"` 122 | Unit string `yaml:"unit" json:"unit"` 123 | }{ 124 | SizeMin: 1, 125 | SizeMax: 2, 126 | NumberMin: 3, 127 | SizeDistribution: "constant", 128 | }}}, true}, 129 | {"No Bucket distribution defined", args{&TestCaseConfiguration{Runtime: Duration(time.Second), OpsDeadline: 10, ReadWeight: 1, 130 | Buckets: struct { 131 | NumberMin uint64 `yaml:"number_min" json:"number_min"` 132 | NumberMax uint64 `yaml:"number_max" json:"number_max"` 133 | NumberLast uint64 134 | NumberDistribution string `yaml:"number_distribution" json:"number_distribution"` 135 | }{ 136 | NumberMin: 1, 137 | }, 138 | Objects: struct { 139 | SizeMin uint64 `yaml:"size_min" json:"size_min"` 140 | SizeMax uint64 `yaml:"size_max" json:"size_max"` 141 | PartSize uint64 `yaml:"part_size" json:"part_size"` 142 | SizeLast uint64 143 | SizeDistribution string `yaml:"size_distribution" json:"size_distribution"` 144 | NumberMin uint64 `yaml:"number_min" json:"number_min"` 145 | NumberMax uint64 `yaml:"number_max" json:"number_max"` 146 | NumberLast uint64 147 | NumberDistribution string `yaml:"number_distribution" json:"number_distribution"` 148 | Unit string `yaml:"unit" json:"unit"` 149 | }{ 150 | SizeMin: 1, 151 | SizeMax: 2, 152 | NumberMin: 3, 153 | SizeDistribution: "constant", 154 | NumberDistribution: "constant", 155 | }}}, true}, 156 | {"No Object Unit defined", args{&TestCaseConfiguration{Runtime: Duration(time.Second), OpsDeadline: 10, ReadWeight: 1, 157 | Buckets: struct { 158 | NumberMin uint64 `yaml:"number_min" json:"number_min"` 159 | NumberMax uint64 `yaml:"number_max" json:"number_max"` 160 | NumberLast uint64 161 | NumberDistribution string `yaml:"number_distribution" json:"number_distribution"` 162 | }{ 163 | NumberMin: 1, 164 | NumberDistribution: "constant", 165 | }, 166 | Objects: struct { 167 | SizeMin uint64 `yaml:"size_min" json:"size_min"` 168 | SizeMax uint64 `yaml:"size_max" json:"size_max"` 169 | PartSize uint64 `yaml:"part_size" json:"part_size"` 170 | SizeLast uint64 171 | SizeDistribution string `yaml:"size_distribution" json:"size_distribution"` 172 | NumberMin uint64 `yaml:"number_min" json:"number_min"` 173 | NumberMax uint64 `yaml:"number_max" json:"number_max"` 174 | NumberLast uint64 175 | NumberDistribution string 
`yaml:"number_distribution" json:"number_distribution"` 176 | Unit string `yaml:"unit" json:"unit"` 177 | }{ 178 | SizeMin: 1, 179 | SizeMax: 2, 180 | NumberMin: 3, 181 | SizeDistribution: "constant", 182 | NumberDistribution: "constant", 183 | }}}, true}, 184 | {"Wrong object unit", args{&TestCaseConfiguration{Runtime: Duration(time.Second), OpsDeadline: 10, ReadWeight: 1, 185 | Buckets: struct { 186 | NumberMin uint64 `yaml:"number_min" json:"number_min"` 187 | NumberMax uint64 `yaml:"number_max" json:"number_max"` 188 | NumberLast uint64 189 | NumberDistribution string `yaml:"number_distribution" json:"number_distribution"` 190 | }{ 191 | NumberMin: 1, 192 | NumberDistribution: "constant", 193 | }, 194 | Objects: struct { 195 | SizeMin uint64 `yaml:"size_min" json:"size_min"` 196 | SizeMax uint64 `yaml:"size_max" json:"size_max"` 197 | PartSize uint64 `yaml:"part_size" json:"part_size"` 198 | SizeLast uint64 199 | SizeDistribution string `yaml:"size_distribution" json:"size_distribution"` 200 | NumberMin uint64 `yaml:"number_min" json:"number_min"` 201 | NumberMax uint64 `yaml:"number_max" json:"number_max"` 202 | NumberLast uint64 203 | NumberDistribution string `yaml:"number_distribution" json:"number_distribution"` 204 | Unit string `yaml:"unit" json:"unit"` 205 | }{ 206 | SizeMin: 1, 207 | SizeMax: 2, 208 | NumberMin: 3, 209 | SizeDistribution: "constant", 210 | NumberDistribution: "constant", 211 | Unit: "XB", 212 | }}}, true}, 213 | {"Existing object read without bucket prefix", args{&TestCaseConfiguration{Runtime: Duration(time.Second), OpsDeadline: 10, ExistingReadWeight: 1, 214 | Buckets: struct { 215 | NumberMin uint64 `yaml:"number_min" json:"number_min"` 216 | NumberMax uint64 `yaml:"number_max" json:"number_max"` 217 | NumberLast uint64 218 | NumberDistribution string `yaml:"number_distribution" json:"number_distribution"` 219 | }{ 220 | NumberMin: 1, 221 | NumberDistribution: "constant", 222 | }, 223 | Objects: struct { 224 | SizeMin uint64 `yaml:"size_min" json:"size_min"` 225 | SizeMax uint64 `yaml:"size_max" json:"size_max"` 226 | PartSize uint64 `yaml:"part_size" json:"part_size"` 227 | SizeLast uint64 228 | SizeDistribution string `yaml:"size_distribution" json:"size_distribution"` 229 | NumberMin uint64 `yaml:"number_min" json:"number_min"` 230 | NumberMax uint64 `yaml:"number_max" json:"number_max"` 231 | NumberLast uint64 232 | NumberDistribution string `yaml:"number_distribution" json:"number_distribution"` 233 | Unit string `yaml:"unit" json:"unit"` 234 | }{ 235 | SizeMin: 1, 236 | SizeMax: 2, 237 | NumberMin: 3, 238 | SizeDistribution: "constant", 239 | NumberDistribution: "constant", 240 | Unit: "XB", 241 | }}}, true}, 242 | {"All good", args{&TestCaseConfiguration{Runtime: Duration(time.Second), OpsDeadline: 10, ReadWeight: 1, 243 | Buckets: struct { 244 | NumberMin uint64 `yaml:"number_min" json:"number_min"` 245 | NumberMax uint64 `yaml:"number_max" json:"number_max"` 246 | NumberLast uint64 247 | NumberDistribution string `yaml:"number_distribution" json:"number_distribution"` 248 | }{ 249 | NumberMin: 1, 250 | NumberDistribution: "constant", 251 | }, 252 | Objects: struct { 253 | SizeMin uint64 `yaml:"size_min" json:"size_min"` 254 | SizeMax uint64 `yaml:"size_max" json:"size_max"` 255 | PartSize uint64 `yaml:"part_size" json:"part_size"` 256 | SizeLast uint64 257 | SizeDistribution string `yaml:"size_distribution" json:"size_distribution"` 258 | NumberMin uint64 `yaml:"number_min" json:"number_min"` 259 | NumberMax uint64 `yaml:"number_max" 
json:"number_max"` 260 | NumberLast uint64 261 | NumberDistribution string `yaml:"number_distribution" json:"number_distribution"` 262 | Unit string `yaml:"unit" json:"unit"` 263 | }{ 264 | SizeMin: 1, 265 | SizeMax: 2, 266 | NumberMin: 3, 267 | SizeDistribution: "constant", 268 | NumberDistribution: "constant", 269 | Unit: "KB", 270 | }}}, false}, 271 | } 272 | for _, tt := range tests { 273 | t.Run(tt.name, func(t *testing.T) { 274 | if err := checkTestCase(tt.args.testcase); (err != nil) != tt.wantErr { 275 | t.Errorf("checkTestCase() error = %v, wantErr %v", err, tt.wantErr) 276 | } 277 | }) 278 | } 279 | } 280 | 281 | func Test_checkDistribution(t *testing.T) { 282 | type args struct { 283 | distribution string 284 | keyname string 285 | } 286 | tests := []struct { 287 | name string 288 | args args 289 | wantErr bool 290 | }{ 291 | {"constant distribution", args{"constant", "test"}, false}, 292 | {"random distribution", args{"random", "test"}, false}, 293 | {"sequential distribution", args{"sequential", "test"}, false}, 294 | {"wrong distribution", args{"wrong", "test"}, true}, 295 | } 296 | for _, tt := range tests { 297 | t.Run(tt.name, func(t *testing.T) { 298 | if err := checkDistribution(tt.args.distribution, tt.args.keyname); (err != nil) != tt.wantErr { 299 | t.Errorf("checkDistribution() error = %v, wantErr %v", err, tt.wantErr) 300 | } 301 | }) 302 | } 303 | } 304 | 305 | func TestEvaluateDistribution(t *testing.T) { 306 | type args struct { 307 | min uint64 308 | max uint64 309 | lastNumber *uint64 310 | increment uint64 311 | distribution string 312 | } 313 | lastArgumentNumber := uint64(1) 314 | tests := []struct { 315 | name string 316 | args args 317 | want uint64 318 | }{ 319 | {"constant distribution", args{5, 100, &lastArgumentNumber, 1, "constant"}, 5}, 320 | {"random distribution", args{1, 2, &lastArgumentNumber, 1, "random"}, 1}, 321 | {"sequential distribution", args{1, 10, &lastArgumentNumber, 1, "sequential"}, 2}, 322 | {"last number in sequential distribution", args{1, 10, &lastArgumentNumber, 10, "sequential"}, 10}, 323 | {"wrong distribution", args{1, 10, &lastArgumentNumber, 1, "wrong"}, 0}, 324 | } 325 | for _, tt := range tests { 326 | t.Run(tt.name, func(t *testing.T) { 327 | if got := EvaluateDistribution(tt.args.min, tt.args.max, tt.args.lastNumber, tt.args.increment, tt.args.distribution); got != tt.want { 328 | t.Errorf("EvaluateDistribution() = %v, want %v", got, tt.want) 329 | } 330 | }) 331 | } 332 | } 333 | 334 | func Test_loadConfigFromFile(t *testing.T) { 335 | read := func(content []byte) func(string) ([]byte, error) { 336 | return func(string) ([]byte, error) { 337 | return content, nil 338 | } 339 | } 340 | defer func() { 341 | ReadFile = os.ReadFile 342 | }() 343 | type args struct { 344 | configFileContent []byte 345 | } 346 | tests := []struct { 347 | name string 348 | args args 349 | want *Testconf 350 | }{ 351 | {"empty file", args{[]byte{}}, &Testconf{}}, 352 | // TODO discover how to handle log.Fatal with logrus here 353 | // https://github.com/sirupsen/logrus#fatal-handlers 354 | // {"unparsable", args{[]byte(`corrupt!`)}, common.Testconf{}}, 355 | {"S3Config", args{[]byte(`s3_config: 356 | - access_key: secretKey 357 | secret_key: secretSecret 358 | region: us-east-1 359 | endpoint: http://10.9.8.72:80 360 | skipSSLverify: true 361 | tests: 362 | - name: clean-4k 363 | delete_weight: 100 364 | objects: 365 | size_min: 4 366 | size_max: 4 367 | size_distribution: constant 368 | unit: KB 369 | number_min: 1 370 | number_max: 100000 371 
| number_distribution: sequential 372 | buckets: 373 | number_min: 1 374 | number_max: 10 375 | number_distribution: sequential 376 | bucket_prefix: gosbench-prefix- 377 | object_prefix: obj- 378 | stop_with_ops: 10 379 | stop_with_runtime: 36000s # Example with 60 seconds runtime 380 | workers: 3 381 | workers_share_buckets: False 382 | parallel_clients: 3 383 | clean_after: True 384 | `)}, &Testconf{ 385 | S3Config: []*S3Configuration{ 386 | { 387 | Endpoint: "http://10.9.8.72:80", 388 | AccessKey: "secretKey", 389 | SecretKey: "secretSecret", 390 | Region: "us-east-1", 391 | SkipSSLVerify: true, 392 | UsePathStyle: false, 393 | }, 394 | }, 395 | Tests: []*TestCaseConfiguration{ 396 | { 397 | Name: "clean-4k", 398 | DeleteWeight: 100, 399 | BucketPrefix: "gosbench-prefix-", 400 | ObjectPrefix: "obj-", 401 | Runtime: Duration(36000 * time.Second), 402 | OpsDeadline: 10, 403 | Workers: 3, 404 | WorkerShareBuckets: false, 405 | ParallelClients: 3, 406 | CleanAfter: true, 407 | Buckets: struct { 408 | NumberMin uint64 `yaml:"number_min" json:"number_min"` 409 | NumberMax uint64 `yaml:"number_max" json:"number_max"` 410 | NumberLast uint64 411 | NumberDistribution string `yaml:"number_distribution" json:"number_distribution"` 412 | }{ 413 | NumberMin: 1, 414 | NumberMax: 10, 415 | NumberDistribution: "sequential", 416 | }, 417 | Objects: struct { 418 | SizeMin uint64 `yaml:"size_min" json:"size_min"` 419 | SizeMax uint64 `yaml:"size_max" json:"size_max"` 420 | PartSize uint64 `yaml:"part_size" json:"part_size"` 421 | SizeLast uint64 422 | SizeDistribution string `yaml:"size_distribution" json:"size_distribution"` 423 | NumberMin uint64 `yaml:"number_min" json:"number_min"` 424 | NumberMax uint64 `yaml:"number_max" json:"number_max"` 425 | NumberLast uint64 426 | NumberDistribution string `yaml:"number_distribution" json:"number_distribution"` 427 | Unit string `yaml:"unit" json:"unit"` 428 | }{ 429 | SizeMin: 4, 430 | SizeMax: 4, 431 | SizeDistribution: "constant", 432 | Unit: "KB", 433 | NumberMin: 1, 434 | NumberMax: 100000, 435 | NumberDistribution: "sequential", 436 | }, 437 | }, 438 | }, 439 | }}, 440 | } 441 | for _, tt := range tests { 442 | t.Run(tt.name, func(t *testing.T) { 443 | defer func() { 444 | if r := recover(); r != nil { 445 | t.Log("Recovered in f", r) 446 | } 447 | }() 448 | ReadFile = read(tt.args.configFileContent) 449 | if got := LoadConfigFromFile("configFile.yaml"); !reflect.DeepEqual(got, tt.want) { 450 | t.Errorf("loadConfigFromFile() = %v, want %v", got, tt.want) 451 | } 452 | }) 453 | } 454 | } 455 | -------------------------------------------------------------------------------- /examples/Bandwidth.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mulbc/gosbench/f99526c96e3557355f5db38ab0f19a20ccbd795a/examples/Bandwidth.png -------------------------------------------------------------------------------- /examples/Gosbench_Dashboard.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mulbc/gosbench/f99526c96e3557355f5db38ab0f19a20ccbd795a/examples/Gosbench_Dashboard.jpg -------------------------------------------------------------------------------- /examples/Latency.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mulbc/gosbench/f99526c96e3557355f5db38ab0f19a20ccbd795a/examples/Latency.png 
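The two config files below describe the same example test, once as JSON and once as YAML. A minimal sketch of how such a file is consumed via the `common` package shown earlier (the path argument is illustrative):

```go
package main

import (
	"fmt"

	"github.com/mulbc/gosbench/common"
)

func main() {
	// LoadConfigFromFile picks the YAML or JSON parser based on the file
	// suffix and exits fatally if the file cannot be read or parsed.
	config := common.LoadConfigFromFile("examples/example_config.yaml")
	// CheckConfig validates every test case and converts the object sizes
	// from the configured unit (e.g. KB) into bytes.
	common.CheckConfig(config)
	fmt.Printf("%d test(s), first S3 endpoint: %s\n", len(config.Tests), config.S3Config[0].Endpoint)
}
```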
-------------------------------------------------------------------------------- /examples/example_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "s3_config": [ 3 | { 4 | "access_key": "abc", "secret_key": "as", "region": "eu-central-1", "endpoint": "https://my.rgw.endpoint:8080", 5 | "skipSSLverify": false, "usePathStyle": false 6 | }, 7 | { 8 | "access_key": "def", "secret_key": "as", "region": "eu-central-2", "endpoint": "https://my.rgw.endpoint:8080", 9 | "skipSSLverify": false, "usePathStyle": false 10 | }, 11 | { 12 | "access_key": "ghi", "secret_key": "as", "region": "eu-central-3", "endpoint": "https://my.rgw.endpoint:8080", 13 | "skipSSLverify": false, "usePathStyle": false 14 | } 15 | ], 16 | "grafana_config": { "endpoint": "http://grafana", "username": "admin", "password": "grafana" }, 17 | "tests": [ 18 | { "name": "My first example test", "read_weight": 20, "existing_read_weight": 0, "write_weight": 80, "delete_weight": 0, 19 | "list_weight": 0, "bucket_prefix": "1255gosbench-", "object_prefix": "obj", "stop_with_runtime": "1h30m", "stop_with_ops": 10, 20 | "workers": 2, "workers_share_buckets": true, "parallel_clients": 3, "clean_after": true, 21 | "objects": {"size_min": 5, "size_max": 100, "size_distribution": "random", "unit": "KB", 22 | "number_min": 10, "number_max": 10, "number_distribution": "constant" }, 23 | "buckets": { "number_min": 1, "number_max": 10, "number_distribution": "constant" } 24 | } 25 | ] 26 | } 27 | -------------------------------------------------------------------------------- /examples/example_config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | s3_config: 4 | - access_key: abc 5 | secret_key: as 6 | region: eu-central-1 7 | endpoint: https://my.rgw.endpoint:8080 8 | skipSSLverify: false 9 | usePathStyle: false 10 | - access_key: def 11 | secret_key: as 12 | region: eu-central-2 13 | endpoint: https://my.rgw.endpoint:8080 14 | skipSSLverify: false 15 | usePathStyle: false 16 | - access_key: ghi 17 | secret_key: as 18 | region: eu-central-3 19 | endpoint: https://my.rgw.endpoint:8080 20 | skipSSLverify: false 21 | usePathStyle: false 22 | 23 | # For generating annotations when we start/stop testcases 24 | # https://grafana.com/docs/http_api/annotations/#create-annotation 25 | grafana_config: 26 | endpoint: http://grafana 27 | username: admin 28 | password: grafana 29 | 30 | tests: 31 | - name: My first example test 32 | read_weight: 20 33 | existing_read_weight: 0 34 | write_weight: 80 35 | delete_weight: 0 36 | list_weight: 0 37 | objects: 38 | size_min: 5 39 | size_max: 100 40 | part_size: 0 41 | # distribution: constant, random, sequential 42 | size_distribution: random 43 | unit: KB 44 | number_min: 10 45 | number_max: 10 46 | # distribution: constant, random, sequential 47 | number_distribution: constant 48 | buckets: 49 | number_min: 1 50 | number_max: 10 51 | # distribution: constant, random, sequential 52 | number_distribution: constant 53 | # Name prefix for buckets and objects 54 | bucket_prefix: 1255gosbench- 55 | object_prefix: obj 56 | # End after a set amount of time 57 | # Runtime in time.Duration - do not forget the unit please 58 | # stop_with_runtime: 60s # Example with 60 seconds runtime 59 | stop_with_runtime: 60 | # End after a set amount of operations (per worker) 61 | stop_with_ops: 10 62 | # Number of s3 performance test servers to run in parallel 63 | workers: 2 64 | # Set whether workers share the same buckets or not 65 | # If set to True - bucket names will have the worker number appended 66 | workers_share_buckets: True 67 | # Number of requests processed in parallel by each worker 68 | parallel_clients: 3 69 | # Remove all generated buckets and their content after run 70 | clean_after: True 71 | 72 | ... 73 | -------------------------------------------------------------------------------- /examples/example_prom_exporter.log: -------------------------------------------------------------------------------- 1 | # HELP gosbench_downloaded_bytes Downloaded bytes from S3 store 2 | # TYPE gosbench_downloaded_bytes counter 3 | gosbench_downloaded_bytes{method="GET",testName="EverythingWorks"} 4.602024e+06 4 | # HELP gosbench_finished_ops Finished S3 operations 5 | # TYPE gosbench_finished_ops counter 6 | gosbench_finished_ops{method="GET",testName="EverythingWorks"} 97 7 | gosbench_finished_ops{method="PUT",testName="EverythingWorks"} 388 8 | # HELP gosbench_opencensus_io_http_client_completed_count Count of completed requests, by HTTP method and response status 9 | # TYPE gosbench_opencensus_io_http_client_completed_count counter 10 | gosbench_opencensus_io_http_client_completed_count{http_client_method="GET",http_client_status="206",version="0.0.1"} 97 11 | gosbench_opencensus_io_http_client_completed_count{http_client_method="PUT",http_client_status="200",version="0.0.1"} 388 12 | # HELP gosbench_opencensus_io_http_client_received_bytes Total bytes received in response bodies (not including headers but including error responses with bodies), by HTTP method and response status 13 | # TYPE gosbench_opencensus_io_http_client_received_bytes histogram 14 | gosbench_opencensus_io_http_client_received_bytes_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="1024"} 0 15 | gosbench_opencensus_io_http_client_received_bytes_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="2048"} 0 16 | gosbench_opencensus_io_http_client_received_bytes_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="4096"} 0 17 | gosbench_opencensus_io_http_client_received_bytes_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="16384"} 15 18 | gosbench_opencensus_io_http_client_received_bytes_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="65536"} 69 19 | gosbench_opencensus_io_http_client_received_bytes_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="262144"} 97 20 | gosbench_opencensus_io_http_client_received_bytes_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="1.048576e+06"} 97 21 | gosbench_opencensus_io_http_client_received_bytes_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="4.194304e+06"} 97 22 | gosbench_opencensus_io_http_client_received_bytes_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="1.6777216e+07"} 97 23 | gosbench_opencensus_io_http_client_received_bytes_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="6.7108864e+07"} 97 24 | gosbench_opencensus_io_http_client_received_bytes_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="2.68435456e+08"} 97 25 | gosbench_opencensus_io_http_client_received_bytes_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="1.073741824e+09"} 97 26 | 
gosbench_opencensus_io_http_client_received_bytes_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="4.294967296e+09"} 97 27 | gosbench_opencensus_io_http_client_received_bytes_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="+Inf"} 97 28 | gosbench_opencensus_io_http_client_received_bytes_sum{http_client_method="GET",http_client_status="206",version="0.0.1"} 4.602023999999999e+06 29 | gosbench_opencensus_io_http_client_received_bytes_count{http_client_method="GET",http_client_status="206",version="0.0.1"} 97 30 | gosbench_opencensus_io_http_client_received_bytes_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="1024"} 388 31 | gosbench_opencensus_io_http_client_received_bytes_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="2048"} 388 32 | gosbench_opencensus_io_http_client_received_bytes_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="4096"} 388 33 | gosbench_opencensus_io_http_client_received_bytes_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="16384"} 388 34 | gosbench_opencensus_io_http_client_received_bytes_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="65536"} 388 35 | gosbench_opencensus_io_http_client_received_bytes_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="262144"} 388 36 | gosbench_opencensus_io_http_client_received_bytes_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="1.048576e+06"} 388 37 | gosbench_opencensus_io_http_client_received_bytes_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="4.194304e+06"} 388 38 | gosbench_opencensus_io_http_client_received_bytes_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="1.6777216e+07"} 388 39 | gosbench_opencensus_io_http_client_received_bytes_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="6.7108864e+07"} 388 40 | gosbench_opencensus_io_http_client_received_bytes_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="2.68435456e+08"} 388 41 | gosbench_opencensus_io_http_client_received_bytes_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="1.073741824e+09"} 388 42 | gosbench_opencensus_io_http_client_received_bytes_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="4.294967296e+09"} 388 43 | gosbench_opencensus_io_http_client_received_bytes_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="+Inf"} 388 44 | gosbench_opencensus_io_http_client_received_bytes_sum{http_client_method="PUT",http_client_status="200",version="0.0.1"} 0 45 | gosbench_opencensus_io_http_client_received_bytes_count{http_client_method="PUT",http_client_status="200",version="0.0.1"} 388 46 | # HELP gosbench_opencensus_io_http_client_roundtrip_latency End-to-end latency, by HTTP method and response status 47 | # TYPE gosbench_opencensus_io_http_client_roundtrip_latency histogram 48 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="1"} 0 49 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="2"} 0 50 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="3"} 0 51 | 
gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="4"} 0 52 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="5"} 0 53 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="6"} 0 54 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="8"} 0 55 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="10"} 0 56 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="13"} 0 57 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="16"} 1 58 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="20"} 2 59 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="25"} 4 60 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="30"} 14 61 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="40"} 49 62 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="50"} 72 63 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="65"} 87 64 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="80"} 95 65 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="100"} 97 66 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="130"} 97 67 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="160"} 97 68 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="200"} 97 69 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="250"} 97 70 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="300"} 97 71 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="400"} 97 72 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="500"} 97 73 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="650"} 97 74 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="800"} 97 75 | 
gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="1000"} 97 76 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="2000"} 97 77 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="5000"} 97 78 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="10000"} 97 79 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="20000"} 97 80 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="50000"} 97 81 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="100000"} 97 82 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="+Inf"} 97 83 | gosbench_opencensus_io_http_client_roundtrip_latency_sum{http_client_method="GET",http_client_status="206",version="0.0.1"} 4194.719437999998 84 | gosbench_opencensus_io_http_client_roundtrip_latency_count{http_client_method="GET",http_client_status="206",version="0.0.1"} 97 85 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="1"} 0 86 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="2"} 0 87 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="3"} 0 88 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="4"} 0 89 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="5"} 0 90 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="6"} 0 91 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="8"} 0 92 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="10"} 0 93 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="13"} 0 94 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="16"} 0 95 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="20"} 0 96 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="25"} 0 97 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="30"} 0 98 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="40"} 0 99 | 
gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="50"} 2 100 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="65"} 73 101 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="80"} 243 102 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="100"} 353 103 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="130"} 372 104 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="160"} 375 105 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="200"} 388 106 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="250"} 388 107 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="300"} 388 108 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="400"} 388 109 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="500"} 388 110 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="650"} 388 111 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="800"} 388 112 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="1000"} 388 113 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="2000"} 388 114 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="5000"} 388 115 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="10000"} 388 116 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="20000"} 388 117 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="50000"} 388 118 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="100000"} 388 119 | gosbench_opencensus_io_http_client_roundtrip_latency_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="+Inf"} 388 120 | gosbench_opencensus_io_http_client_roundtrip_latency_sum{http_client_method="PUT",http_client_status="200",version="0.0.1"} 30971.663867999967 121 | gosbench_opencensus_io_http_client_roundtrip_latency_count{http_client_method="PUT",http_client_status="200",version="0.0.1"} 388 122 | # HELP gosbench_opencensus_io_http_client_sent_bytes Total bytes sent in request body (not including headers), by HTTP method and response status 123 | # TYPE 
gosbench_opencensus_io_http_client_sent_bytes histogram 124 | gosbench_opencensus_io_http_client_sent_bytes_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="1024"} 97 125 | gosbench_opencensus_io_http_client_sent_bytes_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="2048"} 97 126 | gosbench_opencensus_io_http_client_sent_bytes_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="4096"} 97 127 | gosbench_opencensus_io_http_client_sent_bytes_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="16384"} 97 128 | gosbench_opencensus_io_http_client_sent_bytes_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="65536"} 97 129 | gosbench_opencensus_io_http_client_sent_bytes_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="262144"} 97 130 | gosbench_opencensus_io_http_client_sent_bytes_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="1.048576e+06"} 97 131 | gosbench_opencensus_io_http_client_sent_bytes_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="4.194304e+06"} 97 132 | gosbench_opencensus_io_http_client_sent_bytes_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="1.6777216e+07"} 97 133 | gosbench_opencensus_io_http_client_sent_bytes_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="6.7108864e+07"} 97 134 | gosbench_opencensus_io_http_client_sent_bytes_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="2.68435456e+08"} 97 135 | gosbench_opencensus_io_http_client_sent_bytes_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="1.073741824e+09"} 97 136 | gosbench_opencensus_io_http_client_sent_bytes_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="4.294967296e+09"} 97 137 | gosbench_opencensus_io_http_client_sent_bytes_bucket{http_client_method="GET",http_client_status="206",version="0.0.1",le="+Inf"} 97 138 | gosbench_opencensus_io_http_client_sent_bytes_sum{http_client_method="GET",http_client_status="206",version="0.0.1"} -97 139 | gosbench_opencensus_io_http_client_sent_bytes_count{http_client_method="GET",http_client_status="206",version="0.0.1"} 97 140 | gosbench_opencensus_io_http_client_sent_bytes_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="1024"} 0 141 | gosbench_opencensus_io_http_client_sent_bytes_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="2048"} 0 142 | gosbench_opencensus_io_http_client_sent_bytes_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="4096"} 0 143 | gosbench_opencensus_io_http_client_sent_bytes_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="16384"} 44 144 | gosbench_opencensus_io_http_client_sent_bytes_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="65536"} 235 145 | gosbench_opencensus_io_http_client_sent_bytes_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="262144"} 388 146 | gosbench_opencensus_io_http_client_sent_bytes_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="1.048576e+06"} 388 147 | gosbench_opencensus_io_http_client_sent_bytes_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="4.194304e+06"} 388 148 | 
gosbench_opencensus_io_http_client_sent_bytes_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="1.6777216e+07"} 388 149 | gosbench_opencensus_io_http_client_sent_bytes_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="6.7108864e+07"} 388 150 | gosbench_opencensus_io_http_client_sent_bytes_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="2.68435456e+08"} 388 151 | gosbench_opencensus_io_http_client_sent_bytes_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="1.073741824e+09"} 388 152 | gosbench_opencensus_io_http_client_sent_bytes_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="4.294967296e+09"} 388 153 | gosbench_opencensus_io_http_client_sent_bytes_bucket{http_client_method="PUT",http_client_status="200",version="0.0.1",le="+Inf"} 388 154 | gosbench_opencensus_io_http_client_sent_bytes_sum{http_client_method="PUT",http_client_status="200",version="0.0.1"} 2.1936214999999993e+07 155 | gosbench_opencensus_io_http_client_sent_bytes_count{http_client_method="PUT",http_client_status="200",version="0.0.1"} 388 156 | # HELP gosbench_ops_latency Histogram latency in milliseconds of S3 operations 157 | # TYPE gosbench_ops_latency histogram 158 | gosbench_ops_latency_bucket{method="GET",testName="EverythingWorks",le="2"} 0 159 | gosbench_ops_latency_bucket{method="GET",testName="EverythingWorks",le="4"} 0 160 | gosbench_ops_latency_bucket{method="GET",testName="EverythingWorks",le="8"} 0 161 | gosbench_ops_latency_bucket{method="GET",testName="EverythingWorks",le="16"} 1 162 | gosbench_ops_latency_bucket{method="GET",testName="EverythingWorks",le="32"} 20 163 | gosbench_ops_latency_bucket{method="GET",testName="EverythingWorks",le="64"} 87 164 | gosbench_ops_latency_bucket{method="GET",testName="EverythingWorks",le="128"} 97 165 | gosbench_ops_latency_bucket{method="GET",testName="EverythingWorks",le="256"} 97 166 | gosbench_ops_latency_bucket{method="GET",testName="EverythingWorks",le="512"} 97 167 | gosbench_ops_latency_bucket{method="GET",testName="EverythingWorks",le="1024"} 97 168 | gosbench_ops_latency_bucket{method="GET",testName="EverythingWorks",le="2048"} 97 169 | gosbench_ops_latency_bucket{method="GET",testName="EverythingWorks",le="4096"} 97 170 | gosbench_ops_latency_bucket{method="GET",testName="EverythingWorks",le="+Inf"} 97 171 | gosbench_ops_latency_sum{method="GET",testName="EverythingWorks"} 4176 172 | gosbench_ops_latency_count{method="GET",testName="EverythingWorks"} 97 173 | gosbench_ops_latency_bucket{method="PUT",testName="EverythingWorks",le="2"} 0 174 | gosbench_ops_latency_bucket{method="PUT",testName="EverythingWorks",le="4"} 0 175 | gosbench_ops_latency_bucket{method="PUT",testName="EverythingWorks",le="8"} 0 176 | gosbench_ops_latency_bucket{method="PUT",testName="EverythingWorks",le="16"} 0 177 | gosbench_ops_latency_bucket{method="PUT",testName="EverythingWorks",le="32"} 0 178 | gosbench_ops_latency_bucket{method="PUT",testName="EverythingWorks",le="64"} 68 179 | gosbench_ops_latency_bucket{method="PUT",testName="EverythingWorks",le="128"} 372 180 | gosbench_ops_latency_bucket{method="PUT",testName="EverythingWorks",le="256"} 388 181 | gosbench_ops_latency_bucket{method="PUT",testName="EverythingWorks",le="512"} 388 182 | gosbench_ops_latency_bucket{method="PUT",testName="EverythingWorks",le="1024"} 388 183 | gosbench_ops_latency_bucket{method="PUT",testName="EverythingWorks",le="2048"} 388 184 | 
gosbench_ops_latency_bucket{method="PUT",testName="EverythingWorks",le="4096"} 388 185 | gosbench_ops_latency_bucket{method="PUT",testName="EverythingWorks",le="+Inf"} 388 186 | gosbench_ops_latency_sum{method="PUT",testName="EverythingWorks"} 31062 187 | gosbench_ops_latency_count{method="PUT",testName="EverythingWorks"} 388 188 | # HELP gosbench_test_start Determines the start time of a job for Grafana annotations 189 | # TYPE gosbench_test_start gauge 190 | gosbench_test_start{testName="EverythingWorks"} 1.592227372426e+12 191 | # HELP gosbench_uploaded_bytes Uploaded bytes to S3 store 192 | # TYPE gosbench_uploaded_bytes counter 193 | gosbench_uploaded_bytes{method="PUT",testName="EverythingWorks"} 2.1936215e+07 -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/mulbc/gosbench 2 | 3 | go 1.22 4 | 5 | toolchain go1.22.6 6 | 7 | require ( 8 | contrib.go.opencensus.io/exporter/prometheus v0.4.2 9 | github.com/aws/aws-sdk-go-v2/credentials v1.17.47 10 | github.com/prometheus/client_golang v1.20.5 11 | github.com/prometheus/client_model v0.6.1 12 | github.com/sirupsen/logrus v1.9.3 13 | go.opencensus.io v0.24.0 14 | gopkg.in/yaml.v3 v3.0.1 15 | ) 16 | 17 | require ( 18 | github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 // indirect 19 | github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 // indirect 20 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 // indirect 21 | github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 // indirect 22 | github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect 23 | github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.25 // indirect 24 | github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect 25 | github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.6 // indirect 26 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 // indirect 27 | github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.6 // indirect 28 | github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 // indirect 29 | github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 // indirect 30 | github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 // indirect 31 | github.com/aws/smithy-go v1.22.1 // indirect 32 | github.com/klauspost/compress v1.17.11 // indirect 33 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 34 | ) 35 | 36 | require ( 37 | github.com/aws/aws-sdk-go-v2 v1.32.6 38 | github.com/aws/aws-sdk-go-v2/config v1.28.6 39 | github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.42 40 | github.com/aws/aws-sdk-go-v2/service/s3 v1.70.0 41 | github.com/beorn7/perks v1.0.1 // indirect 42 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 43 | github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect 44 | github.com/prometheus/common v0.60.1 // indirect 45 | github.com/prometheus/procfs v0.15.1 // indirect 46 | github.com/prometheus/statsd_exporter v0.28.0 // indirect 47 | golang.org/x/sys v0.27.0 // indirect 48 | google.golang.org/protobuf v1.35.2 // indirect 49 | gopkg.in/yaml.v2 v2.4.0 // indirect 50 | ) 51 | -------------------------------------------------------------------------------- /k8s/Readme.md: -------------------------------------------------------------------------------- 1 | # Running Gosbench in Kubernetes and Openshift 2 | 3 | The following commands will default to using the `oc` command, but you can just as well use `kubectl` 
if you are running this on Kubernetes. 4 | 5 | ## Preparing the deployment 6 | 7 | 1. Clone this repository 8 | 1. cd into the `k8s` folder 9 | 1. Open `gosbench.yaml` in your favorite editor 10 | 11 | At the very top of the gosbench.yaml file, you will find a ConfigMap, which represents the Gosbench config that will be used for the test. Modify this to your liking. 12 | If you just want to test things out, at least change the s3_config parameters so that Gosbench knows how to connect to your S3 endpoint. 13 | 14 | ## Deploying Gosbench 15 | 16 | Setting everything up is as easy as: 17 | 18 | **NOTE**: The yaml files do not have a namespace defined, so be sure to `oc project ...` into the namespace you want to use. 19 | 20 | 1. Run: `oc apply -f monitoring.yaml` 21 | 1. Run: `oc apply -f gosbench.yaml` 22 | 1. Expose the Grafana service `oc expose svc/grafana` 23 | 1. Get the Grafana address: `oc get route grafana` 24 | 25 | **NOTE:** The Grafana credentials are `admin`:`admin`. 26 | 27 | When you now execute `oc get all` you should see something similar to this: 28 | 29 | ```bash 30 | $ oc get all 31 | NAME READY STATUS RESTARTS AGE 32 | pod/gosbench-server-74cbdfc774-2t6tv 0/1 ContainerCreating 0 16s 33 | pod/monitoring-9b98fb-nbqk6 2/2 Running 0 59s 34 | pod/worker1-55zgh 0/1 ContainerCreating 0 17s 35 | pod/worker2-vvfbs 0/1 ContainerCreating 0 17s 36 | 37 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 38 | service/gosbench-server NodePort 172.30.215.159 2000:30230/TCP 16s 39 | service/gosbench-worker1 NodePort 172.30.187.174 8888:30314/TCP 16s 40 | service/gosbench-worker2 NodePort 172.30.149.186 8888:31499/TCP 15s 41 | service/grafana NodePort 172.30.15.243 3000:30522/TCP 7m45s 42 | service/kubernetes ClusterIP 172.30.0.1 443/TCP 3d22h 43 | service/openshift ExternalName kubernetes.default.svc.cluster.local 3d22h 44 | service/prometheus NodePort 172.30.158.169 9090:30200/TCP 7m45s 45 | 46 | NAME READY UP-TO-DATE AVAILABLE AGE 47 | deployment.apps/gosbench-server 0/1 1 0 16s 48 | deployment.apps/monitoring 1/1 1 1 59s 49 | 50 | NAME DESIRED CURRENT READY AGE 51 | replicaset.apps/gosbench-server-74cbdfc774 1 1 0 17s 52 | replicaset.apps/monitoring-9b98fb 1 1 1 60s 53 | 54 | NAME COMPLETIONS DURATION AGE 55 | job.batch/worker1 0/1 18s 18s 56 | job.batch/worker2 0/1 18s 18s 57 | 58 | NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD 59 | route.route.openshift.io/grafana grafana-default.apps.[...] grafana 3000 None 60 | ``` 61 | 62 | 63 | ### Fix workers that finished too early 64 | 65 | **NOTE:** in some rare cases the workers come up before the server is ready; the workers will then exit and complain that the server is unreachable. 66 | 67 | You can check if this is the case in your environment by executing `oc get job` - if it looks like this: 68 | 69 | ```bash 70 | $ oc get job 71 | NAME COMPLETIONS DURATION AGE 72 | worker1 0/1 14m 14m 73 | worker2 1/1 59s 11m 74 | ``` 75 | 76 | Then you are most likely affected. 77 | 78 | **FIX**: To fix this, re-add the completed worker jobs: 79 | 80 | 1. Delete the worker `oc delete jobs -l app=gosbench-worker2` 81 | 1. Re-add the worker `oc apply -f gosbench.yaml` 82 | 83 | ## Evaluating Gosbench runs 84 | 85 | While the Gosbench benchmark is running, it is best to watch all gosbench logs with a tool like [stern](https://github.com/wercker/stern).
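For example, to follow the server and all worker pods at once (stern matches pods by regular expression; the query assumes the pod names shown in the `oc get all` output above, and flags may differ between stern versions):

```bash
stern 'gosbench-server|worker' --timestamps
```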
86 | If you do not have stern, you can also watch only the server log, which provides the most valuable information: `oc logs deployment.apps/gosbench-server -f` 87 | 88 | A finished run will print something similar to this in the server log: 89 | 90 | ``` 91 | time="2020-05-29T12:02:01Z" level=info msg="Ready to accept connections" 92 | time="2020-05-29T12:02:02Z" level=info msg="10.130.2.41:44678 connected to us " 93 | time="2020-05-29T12:02:02Z" level=info msg="We found worker 1 / 2 for test 0" Worker="10.130.2.41:44678" 94 | time="2020-05-29T12:05:07Z" level=info msg="10.130.2.44:59886 connected to us " 95 | time="2020-05-29T12:05:07Z" level=info msg="We found worker 2 / 2 for test 0" Worker="10.130.2.44:59886" 96 | time="2020-05-29T12:05:15Z" level=info msg="All workers have finished preparations - starting performance test" test=0 97 | time="2020-05-29T12:05:28Z" level=info msg="10.130.2.44:60004 connected to us " 98 | time="2020-05-29T12:05:31Z" level=info msg="All workers have finished the performance test - continuing with next test" test=0 99 | time="2020-05-29T12:05:31Z" level=info msg="GRAFANA: ?from=1590753915508&to=1590753931123" test=0 100 | time="2020-05-29T12:05:31Z" level=info msg="All performance tests finished" 101 | time="2020-05-29T12:05:31Z" level=info msg="10.130.2.41:45958 connected to us " 102 | ``` 103 | 104 | The GRAFANA line printed for every test is important for the evaluation, because it contains the exact timestamps between which the test was executed. 105 | 106 | Now we head over to Grafana - first fetch the URL: 107 | 108 | ```bash 109 | $ oc get route grafana 110 | NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD 111 | grafana grafana-default.apps.oc.example.com grafana 3000 None 112 | ``` 113 | 114 | In my example I would browse to http://grafana-default.apps.oc.example.com 115 | 116 | **NOTE:** The Grafana credentials are `admin`:`admin`. 117 | 118 | Once you are logged in, you will find a pre-existing Dashboard called Gosbench. Go there. 119 | 120 | Your URL will look similar to this: 121 | http://grafana-default.apps.oc.example.com/d/R67SuKSZk/gosbench?orgId=1&from=now-30m&to=now 122 | 123 | Now we insert the timestamps we got from the test results into the URL and go to 124 | 125 | http://grafana-default.apps.oc.example.com/d/R67SuKSZk/gosbench?from=1590753915508&to=1590753931123 126 | 127 | This will load the exact time window during which our test was executed, so the results are interpreted correctly.
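To grab the time window without scrolling through the whole log, filtering for the GRAFANA lines works well, for example: `oc logs deployment.apps/gosbench-server | grep GRAFANA`. Paste the resulting `from`/`to` values into the dashboard URL as shown above.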
128 | -------------------------------------------------------------------------------- /k8s/gosbench.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | data: 5 | config.yml: |- 6 | s3_config: 7 | - access_key: 8MKI7yLHF2L5Z07rabTZ 8 | secret_key: Y7quDBT3az/emhiDOiQR18hZLKzFaKUjOU4YyPCP 9 | region: eu-central-1 10 | endpoint: https://172.30.196.58:443 11 | skipSSLverify: true 12 | usePathStyle: false 13 | 14 | # For generating annotations when we start/stop testcases 15 | # https://grafana.com/docs/http_api/annotations/#create-annotation 16 | grafana_config: 17 | endpoint: http://grafana 18 | username: admin 19 | password: grafana 20 | 21 | tests: 22 | - name: EverythingWorks 23 | read_weight: 20 24 | write_weight: 80 25 | delete_weight: 30 26 | list_weight: 10 27 | objects: 28 | size_min: 5 29 | size_max: 100 30 | part_size: 0 31 | # distribution: constant, random, sequential 32 | size_distribution: random 33 | unit: KB 34 | number_min: 100 35 | number_max: 100 36 | # distribution: constant, random, sequential 37 | number_distribution: constant 38 | buckets: 39 | number_min: 1 40 | number_max: 10 41 | # distribution: constant, random, sequential 42 | number_distribution: constant 43 | # Name prefix for buckets and objects 44 | bucket_prefix: gosbench1- 45 | object_prefix: obj 46 | # End after a set amount of time 47 | # Runtime in time.Duration - do not forget the unit please 48 | # stop_with_runtime: 60s # Example with 60 seconds runtime 49 | stop_with_runtime: 50 | # End after a set amount of operations (per worker) 51 | stop_with_ops: 3000 52 | # Number of s3 performance test servers to run in parallel 53 | workers: 2 54 | # Set whether workers share the same buckets or not 55 | # If set to False - bucket names will have the worker ID prepended 56 | workers_share_buckets: True 57 | # Number of requests processed in parallel by each worker 58 | parallel_clients: 3 59 | # Remove all generated buckets and their content after run 60 | clean_after: False 61 | - name: EverythingWorksAgain 62 | read_weight: 10 63 | write_weight: 40 64 | delete_weight: 80 65 | list_weight: 10 66 | objects: 67 | size_min: 5 68 | size_max: 100 69 | part_size: 0 70 | # distribution: constant, random, sequential 71 | size_distribution: random 72 | unit: KB 73 | number_min: 100 74 | number_max: 100 75 | # distribution: constant, random, sequential 76 | number_distribution: constant 77 | buckets: 78 | number_min: 1 79 | number_max: 10 80 | # distribution: constant, random, sequential 81 | number_distribution: constant 82 | # Name prefix for buckets and objects 83 | bucket_prefix: gosbench1- 84 | object_prefix: obj 85 | # End after a set amount of time 86 | # Runtime in time.Duration - do not forget the unit please 87 | # stop_with_runtime: 60s # Example with 60 seconds runtime 88 | stop_with_runtime: 89 | # End after a set amount of operations (per worker) 90 | stop_with_ops: 3000 91 | # Number of s3 performance test servers to run in parallel 92 | workers: 2 93 | # Set whether workers share the same buckets or not 94 | # If set to False - bucket names will have the worker ID prepended 95 | workers_share_buckets: True 96 | # Number of requests processed in parallel by each worker 97 | parallel_clients: 3 98 | # Remove all generated buckets and their content after run 99 | clean_after: False 100 | metadata: 101 | name: gosbench-config 102 | labels: 103 | app: gosbench-monitoring 104 | stack: gosbench 105 | --- 106 | apiVersion: batch/v1 107 |
kind: Job 108 | metadata: 109 | name: gosbench-worker1 110 | labels: 111 | app: gosbench-worker1 112 | stack: gosbench 113 | type: worker 114 | spec: 115 | template: 116 | metadata: 117 | labels: 118 | app: gosbench-worker 119 | stack: gosbench 120 | type: worker 121 | spec: 122 | affinity: 123 | podAntiAffinity: 124 | preferredDuringSchedulingIgnoredDuringExecution: 125 | - weight: 10 126 | podAffinityTerm: 127 | labelSelector: 128 | matchExpressions: 129 | - key: type 130 | operator: In 131 | values: 132 | - worker 133 | topologyKey: kubernetes.io/hostname 134 | containers: 135 | - name: gosbench-worker 136 | image: quay.io/mulbc/gosbench-worker:latest 137 | imagePullPolicy: Always 138 | command: ['./main', '-d', '-s', 'gosbench-server:2000'] 139 | ports: 140 | - containerPort: 8888 141 | restartPolicy: Never 142 | --- 143 | apiVersion: batch/v1 144 | kind: Job 145 | metadata: 146 | name: gosbench-worker2 147 | labels: 148 | app: gosbench-worker2 149 | stack: gosbench 150 | type: worker 151 | spec: 152 | template: 153 | metadata: 154 | labels: 155 | app: gosbench-worker 156 | stack: gosbench 157 | type: worker 158 | spec: 159 | affinity: 160 | podAntiAffinity: 161 | preferredDuringSchedulingIgnoredDuringExecution: 162 | - weight: 10 163 | podAffinityTerm: 164 | labelSelector: 165 | matchExpressions: 166 | - key: type 167 | operator: In 168 | values: 169 | - worker 170 | topologyKey: kubernetes.io/hostname 171 | containers: 172 | - name: gosbench-worker 173 | image: quay.io/mulbc/gosbench-worker:latest 174 | imagePullPolicy: Always 175 | command: ['./main', '-s', 'gosbench-server:2000'] 176 | ports: 177 | - containerPort: 8888 178 | restartPolicy: Never 179 | --- 180 | apiVersion: apps/v1 181 | kind: Deployment 182 | metadata: 183 | name: gosbench-server 184 | labels: 185 | app: gosbench-server 186 | stack: gosbench 187 | type: server 188 | spec: 189 | replicas: 1 190 | selector: 191 | matchLabels: 192 | app: gosbench-server 193 | template: 194 | metadata: 195 | labels: 196 | app: gosbench-server 197 | stack: gosbench 198 | type: server 199 | spec: 200 | containers: 201 | - name: server 202 | image: quay.io/mulbc/gosbench-server:latest 203 | imagePullPolicy: Always 204 | command: ['./main', '-c', '/app/config/config.yml'] 205 | ports: 206 | - containerPort: 2000 207 | volumeMounts: 208 | - name: gosbench-config 209 | mountPath: /app/config 210 | volumes: 211 | - name: gosbench-config 212 | configMap: 213 | name: gosbench-config 214 | --- 215 | apiVersion: v1 216 | kind: Service 217 | metadata: 218 | name: gosbench-server 219 | labels: 220 | app: gosbench-server 221 | stack: gosbench 222 | spec: 223 | type: NodePort 224 | ports: 225 | - port: 2000 226 | targetPort: 2000 227 | selector: 228 | app: gosbench-server 229 | --- 230 | apiVersion: v1 231 | kind: Service 232 | metadata: 233 | name: gosbench-worker1 234 | labels: 235 | app: gosbench-worker1 236 | stack: gosbench 237 | spec: 238 | type: NodePort 239 | ports: 240 | - port: 8888 241 | targetPort: 8888 242 | selector: 243 | job-name: gosbench-worker1 244 | --- 245 | apiVersion: v1 246 | kind: Service 247 | metadata: 248 | name: gosbench-worker2 249 | labels: 250 | app: gosbench-worker2 251 | stack: gosbench 252 | spec: 253 | type: NodePort 254 | ports: 255 | - port: 8888 256 | targetPort: 8888 257 | selector: 258 | job-name: gosbench-worker2 259 | ... 
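A quick worked example of the weights in the ConfigMap above (a rough sketch, based on the weighted-credit scheduling in worker/workItems.go): operations are issued roughly in proportion to their weights, so for EverythingWorks with read/write/delete/list weights of 20/80/30/10 the expected operation mix is about 20/140 ≈ 14% reads, 80/140 ≈ 57% writes, 30/140 ≈ 21% deletes and 10/140 ≈ 7% lists.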
260 | -------------------------------------------------------------------------------- /k8s/gosbench_template.yaml.j2: -------------------------------------------------------------------------------- 1 | # This is a Jinja template file with dynamic worker count. 2 | # You can generate a valid YAML config with this for example with j2cli https://github.com/kolypto/j2cli 3 | # NUMWORKERS=3 j2 k8s/gosbench_template.yaml.j2 > /tmp/gosbench.yaml 4 | --- 5 | apiVersion: v1 6 | kind: ConfigMap 7 | data: 8 | config.yml: |- 9 | s3_config: 10 | - access_key: 8MKI7yLHF2L5Z07rabTZ 11 | secret_key: Y7quDBT3az/emhiDOiQR18hZLKzFaKUjOU4YyPCP 12 | region: eu-central-1 13 | endpoint: https://172.30.196.58:443 14 | skipSSLverify: true 15 | usePathStyle: false 16 | 17 | # For generating annotations when we start/stop testcases 18 | # https://grafana.com/docs/http_api/annotations/#create-annotation 19 | grafana_config: 20 | endpoint: http://grafana 21 | username: admin 22 | password: grafana 23 | 24 | tests: 25 | - name: EverythingWorks 26 | read_weight: 20 27 | write_weight: 80 28 | delete_weight: 30 29 | list_weight: 10 30 | objects: 31 | size_min: 5 32 | size_max: 100 33 | part_size: 0 34 | # distribution: constant, random, sequential 35 | size_distribution: random 36 | unit: KB 37 | number_min: 100 38 | number_max: 100 39 | # distribution: constant, random, sequential 40 | number_distribution: constant 41 | buckets: 42 | number_min: 1 43 | number_max: 10 44 | # distribution: constant, random, sequential 45 | number_distribution: constant 46 | # Name prefix for buckets and objects 47 | bucket_prefix: gosbench1- 48 | object_prefix: obj 49 | # End after a set amount of time 50 | # Runtime in time.Duration - do not forget the unit please 51 | # stop_with_runtime: 60s # Example with 60 seconds runtime 52 | stop_with_runtime: 53 | # End after a set amount of operations (per worker) 54 | stop_with_ops: 3000 55 | # Number of s3 performance test servers to run in parallel 56 | workers: {{ NUMWORKERS }} 57 | # Set whether workers share the same buckets or not 58 | # If set to False - bucket names will have the worker ID prepended 59 | workers_share_buckets: True 60 | # Number of requests processed in parallel by each worker 61 | parallel_clients: 3 62 | # Remove all generated buckets and their content after run 63 | clean_after: False 64 | - name: EverythingWorksAgain 65 | read_weight: 10 66 | write_weight: 40 67 | delete_weight: 80 68 | list_weight: 10 69 | objects: 70 | size_min: 5 71 | size_max: 100 72 | part_size: 0 73 | # distribution: constant, random, sequential 74 | size_distribution: random 75 | unit: KB 76 | number_min: 100 77 | number_max: 100 78 | # distribution: constant, random, sequential 79 | number_distribution: constant 80 | buckets: 81 | number_min: 1 82 | number_max: 10 83 | # distribution: constant, random, sequential 84 | number_distribution: constant 85 | # Name prefix for buckets and objects 86 | bucket_prefix: gosbench1- 87 | object_prefix: obj 88 | # End after a set amount of time 89 | # Runtime in time.Duration - do not forget the unit please 90 | # stop_with_runtime: 60s # Example with 60 seconds runtime 91 | stop_with_runtime: 92 | # End after a set amount of operations (per worker) 93 | stop_with_ops: 3000 94 | # Number of s3 performance test servers to run in parallel 95 | workers: {{ NUMWORKERS }} 96 | # Set whether workers share the same buckets or not 97 | # If set to False - bucket names will have the worker ID prepended 98 | workers_share_buckets: True 99 | # Number of requests processed in
parallel by each worker 100 | parallel_clients: 3 101 | # Remove all generated buckets and their content after run 102 | clean_after: False 103 | metadata: 104 | name: gosbench-config 105 | labels: 106 | stack: gosbench 107 | app: gosbench-monitoring 108 | --- 109 | apiVersion: apps/v1 110 | kind: Deployment 111 | metadata: 112 | name: gosbench-server 113 | labels: 114 | app: gosbench-server 115 | stack: gosbench 116 | type: server 117 | spec: 118 | replicas: 1 119 | selector: 120 | matchLabels: 121 | app: gosbench-server 122 | template: 123 | metadata: 124 | labels: 125 | app: gosbench-server 126 | stack: gosbench 127 | type: server 128 | spec: 129 | containers: 130 | - name: server 131 | image: quay.io/mulbc/gosbench-server:latest 132 | imagePullPolicy: Always 133 | command: ['./main', '-c', '/app/config/config.yml'] 134 | ports: 135 | - containerPort: 2000 136 | volumeMounts: 137 | - name: gosbench-config 138 | mountPath: /app/config 139 | volumes: 140 | - name: gosbench-config 141 | configMap: 142 | name: gosbench-config 143 | --- 144 | apiVersion: v1 145 | kind: Service 146 | metadata: 147 | name: gosbench-server 148 | labels: 149 | stack: gosbench 150 | app: gosbench-server 151 | spec: 152 | type: NodePort 153 | ports: 154 | - port: 2000 155 | targetPort: 2000 156 | selector: 157 | app: gosbench-server 158 | {% for worker in range(NUMWORKERS | int) %} 159 | --- 160 | apiVersion: batch/v1 161 | kind: Job 162 | metadata: 163 | name: gosbench-worker{{ worker+1 }} 164 | labels: 165 | app: gosbench-worker{{ worker+1 }} 166 | stack: gosbench 167 | type: worker 168 | spec: 169 | template: 170 | metadata: 171 | labels: 172 | app: gosbench-worker 173 | stack: gosbench 174 | type: worker 175 | spec: 176 | affinity: 177 | podAntiAffinity: 178 | preferredDuringSchedulingIgnoredDuringExecution: 179 | - weight: 10 180 | podAffinityTerm: 181 | labelSelector: 182 | matchExpressions: 183 | - key: type 184 | operator: In 185 | values: 186 | - worker 187 | topologyKey: kubernetes.io/hostname 188 | containers: 189 | - name: gosbench-worker 190 | image: quay.io/mulbc/gosbench-worker:latest 191 | imagePullPolicy: Always 192 | command: ['./main', '-s', 'gosbench-server:2000'] 193 | ports: 194 | - containerPort: 8888 195 | restartPolicy: Never 196 | --- 197 | apiVersion: v1 198 | kind: Service 199 | metadata: 200 | name: gosbench-worker{{ worker+1 }} 201 | labels: 202 | stack: gosbench 203 | app: gosbench-worker{{ worker+1 }} 204 | spec: 205 | type: NodePort 206 | ports: 207 | - port: 8888 208 | targetPort: 8888 209 | selector: 210 | job-name: gosbench-worker{{ worker+1 }} 211 | {% endfor %} 212 | ...
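For example, to render and deploy a five-worker variant of this template (assuming j2cli is installed): `NUMWORKERS=5 j2 k8s/gosbench_template.yaml.j2 > /tmp/gosbench.yaml && oc apply -f /tmp/gosbench.yaml`.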
213 | -------------------------------------------------------------------------------- /server/.gitignore: -------------------------------------------------------------------------------- 1 | server 2 | server.test 3 | -------------------------------------------------------------------------------- /server/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/csv" 5 | "encoding/json" 6 | "errors" 7 | "flag" 8 | "fmt" 9 | "math/rand" 10 | "net" 11 | "os" 12 | "time" 13 | 14 | "github.com/mulbc/gosbench/common" 15 | 16 | log "github.com/sirupsen/logrus" 17 | ) 18 | 19 | func init() { 20 | log.SetFormatter(&log.TextFormatter{ 21 | FullTimestamp: true, 22 | }) 23 | rand.Seed(time.Now().UnixNano()) 24 | 25 | flag.StringVar(&configFileLocation, "c", "", "Config file describing test run") 26 | flag.IntVar(&serverPort, "p", 2000, "Port on which the server will be available for clients. Default: 2000") 27 | flag.BoolVar(&debug, "d", false, "enable debug log output") 28 | flag.BoolVar(&trace, "t", false, "enable trace log output") 29 | flag.Parse() 30 | // Only demand this flag if we are not running go test 31 | if configFileLocation == "" && flag.Lookup("test.v") == nil { 32 | log.Fatal("-c is a mandatory parameter - please specify the config file") 33 | } 34 | if debug { 35 | log.SetLevel(log.DebugLevel) 36 | } else if trace { 37 | log.SetLevel(log.TraceLevel) 38 | } else { 39 | log.SetLevel(log.InfoLevel) 40 | } 41 | } 42 | 43 | var configFileLocation string 44 | var serverPort int 45 | var readyWorkers chan *net.Conn 46 | var debug, trace bool 47 | 48 | func main() { 49 | config := common.LoadConfigFromFile(configFileLocation) 50 | common.CheckConfig(config) 51 | 52 | readyWorkers = make(chan *net.Conn) 53 | defer close(readyWorkers) 54 | 55 | // Listen on TCP port 2000 on all available unicast and 56 | // anycast IP addresses of the local system. 57 | l, err := net.Listen("tcp", fmt.Sprintf(":%d", serverPort)) 58 | if err != nil { 59 | log.WithError(err).Fatal("Could not open port!") 60 | } 61 | defer l.Close() 62 | log.Info("Ready to accept connections") 63 | go scheduleTests(config) 64 | for { 65 | // Wait for a connection. 66 | conn, err := l.Accept() 67 | if err != nil { 68 | log.WithError(err).Fatal("Issue when waiting for connection of clients") 69 | } 70 | // Handle the connection in a new goroutine. 71 | // The loop then returns to accepting, so that 72 | // multiple connections may be served concurrently. 73 | go func(c *net.Conn) { 74 | log.Infof("%s connected to us ", (*c).RemoteAddr()) 75 | decoder := json.NewDecoder(*c) 76 | var message string 77 | err := decoder.Decode(&message) 78 | if err != nil { 79 | log.WithField("message", message).WithError(err).Error("Could not decode message, closing connection") 80 | (*c).Close() 81 | return 82 | } 83 | if message == "ready for work" { 84 | log.Debug("We have a new worker!") 85 | readyWorkers <- c 86 | return 87 | } 88 | }(&conn) 89 | // Shut down the connection. 
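// Note that the connection is not closed here - it is handed to scheduleTests via the readyWorkers channel and closed in executeTestOnWorker once the worker has delivered its results (or errored out).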
90 | // defer conn.Close() 91 | } 92 | } 93 | 94 | func scheduleTests(config *common.Testconf) { 95 | 96 | for testNumber, test := range config.Tests { 97 | 98 | doneChannel := make(chan bool, test.Workers) 99 | resultChannel := make(chan common.BenchmarkResult, test.Workers) 100 | continueWorkers := make(chan bool, test.Workers) 101 | defer close(doneChannel) 102 | defer close(continueWorkers) 103 | 104 | for worker := 0; worker < test.Workers; worker++ { 105 | workerConfig := &common.WorkerConf{ 106 | Test: test, 107 | S3Config: config.S3Config[worker%len(config.S3Config)], 108 | WorkerID: fmt.Sprintf("w%d", worker), 109 | } 110 | workerConnection := <-readyWorkers 111 | log.WithField("Worker", (*workerConnection).RemoteAddr()).Infof("We found worker %d / %d for test %d", worker+1, test.Workers, testNumber) 112 | go executeTestOnWorker(workerConnection, workerConfig, doneChannel, continueWorkers, resultChannel) 113 | } 114 | for worker := 0; worker < test.Workers; worker++ { 115 | // Will halt until all workers are done with preparations 116 | <-doneChannel 117 | } 118 | // Add sleep after prep phase so that drives can relax 119 | time.Sleep(5 * time.Second) 120 | log.WithField("test", test.Name).Info("All workers have finished preparations - starting performance test") 121 | startTime := time.Now().UTC() 122 | for worker := 0; worker < test.Workers; worker++ { 123 | continueWorkers <- true 124 | } 125 | var benchResults []common.BenchmarkResult 126 | for worker := 0; worker < test.Workers; worker++ { 127 | // Will halt until all workers are done with their work 128 | <-doneChannel 129 | benchResults = append(benchResults, <-resultChannel) 130 | } 131 | log.WithField("test", test.Name).Info("All workers have finished the performance test - continuing with next test") 132 | stopTime := time.Now().UTC() 133 | log.WithField("test", test.Name).Infof("GRAFANA: ?from=%d&to=%d", startTime.UnixNano()/int64(1000000), stopTime.UnixNano()/int64(1000000)) 134 | benchResult := sumBenchmarkResults(benchResults) 135 | benchResult.Duration = stopTime.Sub(startTime) 136 | log.WithField("test", test.Name). 137 | WithField("Total Operations", benchResult.Operations). 138 | WithField("Total Bytes", benchResult.Bytes). 139 | WithField("Average BW in Byte/s", benchResult.Bandwidth). 140 | WithField("Average latency in ms", benchResult.LatencyAvg). 141 | WithField("Test runtime on server", benchResult.Duration). 
142 | Infof("PERF RESULTS") 143 | writeResultToCSV(benchResult) 144 | } 145 | log.Info("All performance tests finished") 146 | for { 147 | workerConnection := <-readyWorkers 148 | shutdownWorker(workerConnection) 149 | } 150 | } 151 | 152 | func executeTestOnWorker(conn *net.Conn, config *common.WorkerConf, doneChannel chan bool, continueWorkers chan bool, resultChannel chan common.BenchmarkResult) { 153 | encoder := json.NewEncoder(*conn) 154 | decoder := json.NewDecoder(*conn) 155 | _ = encoder.Encode(common.WorkerMessage{Message: "init", Config: config}) 156 | 157 | var response common.WorkerMessage 158 | for { 159 | err := decoder.Decode(&response) 160 | if err != nil { 161 | log.WithField("worker", config.WorkerID).WithField("message", response).WithError(err).Error("Worker responded unusually - dropping") 162 | (*conn).Close() 163 | return 164 | } 165 | log.Tracef("Response: %+v", response) 166 | switch response.Message { 167 | case "preparations done": 168 | doneChannel <- true 169 | <-continueWorkers 170 | _ = encoder.Encode(common.WorkerMessage{Message: "start work"}) 171 | case "work done": 172 | doneChannel <- true 173 | resultChannel <- response.BenchResult 174 | (*conn).Close() 175 | return 176 | } 177 | } 178 | } 179 | 180 | func shutdownWorker(conn *net.Conn) { 181 | encoder := json.NewEncoder(*conn) 182 | log.WithField("Worker", (*conn).RemoteAddr()).Info("Shutting down worker") 183 | _ = encoder.Encode(common.WorkerMessage{Message: "shutdown"}) 184 | } 185 | 186 | func sumBenchmarkResults(results []common.BenchmarkResult) common.BenchmarkResult { 187 | sum := common.BenchmarkResult{} 188 | bandwidthAverages := float64(0) 189 | latencyAverages := float64(0) 190 | for _, result := range results { 191 | sum.Bytes += result.Bytes 192 | sum.Operations += result.Operations 193 | latencyAverages += result.LatencyAvg 194 | bandwidthAverages += result.Bandwidth 195 | } 196 | sum.LatencyAvg = latencyAverages / float64(len(results)) 197 | sum.TestName = results[0].TestName 198 | sum.Bandwidth = bandwidthAverages 199 | return sum 200 | } 201 | 202 | func writeResultToCSV(benchResult common.BenchmarkResult) { 203 | file, created, err := getCSVFileHandle() 204 | if err != nil { 205 | log.WithError(err).Error("Could not get a file handle for the CSV results") 206 | return 207 | } 208 | defer file.Close() 209 | 210 | csvwriter := csv.NewWriter(file) 211 | 212 | if created { 213 | err = csvwriter.Write([]string{ 214 | "testName", 215 | "Total Operations", 216 | "Total Bytes", 217 | "Average Bandwidth in Bytes/s", 218 | "Average Latency in ms", 219 | "Test duration seen by server in seconds", 220 | }) 221 | if err != nil { 222 | log.WithError(err).Error("Failed writing line to results csv") 223 | return 224 | } 225 | } 226 | 227 | err = csvwriter.Write([]string{ 228 | benchResult.TestName, 229 | fmt.Sprintf("%.0f", benchResult.Operations), 230 | fmt.Sprintf("%.0f", benchResult.Bytes), 231 | fmt.Sprintf("%f", benchResult.Bandwidth), 232 | fmt.Sprintf("%f", benchResult.LatencyAvg), 233 | fmt.Sprintf("%f", benchResult.Duration.Seconds()), 234 | }) 235 | if err != nil { 236 | log.WithError(err).Error("Failed writing line to results csv") 237 | return 238 | } 239 | 240 | csvwriter.Flush() 241 | 242 | } 243 | 244 | func getCSVFileHandle() (*os.File, bool, error) { 245 | file, err := os.OpenFile("gosbench_results.csv", os.O_APPEND|os.O_WRONLY, 0755) 246 | if err == nil { 247 | return file, false, nil 248 | } 249 | file, err = os.OpenFile("/tmp/gosbench_results.csv", os.O_APPEND|os.O_WRONLY, 0755) 
250 | if err == nil { 251 | return file, false, nil 252 | } 253 | 254 | file, err = os.OpenFile("gosbench_results.csv", os.O_WRONLY|os.O_CREATE, 0755) 255 | if err == nil { 256 | return file, true, nil 257 | } 258 | file, err = os.OpenFile("/tmp/gosbench_results.csv", os.O_WRONLY|os.O_CREATE, 0755) 259 | if err == nil { 260 | return file, true, nil 261 | } 262 | 263 | return nil, false, errors.New("Could not find previous CSV for appending and could not write new CSV file to current dir and /tmp/ giving up") 264 | 265 | } 266 | -------------------------------------------------------------------------------- /worker/.gitignore: -------------------------------------------------------------------------------- 1 | worker 2 | -------------------------------------------------------------------------------- /worker/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "errors" 6 | "flag" 7 | "fmt" 8 | "math/rand" 9 | "net" 10 | "os" 11 | "runtime" 12 | "sync" 13 | "time" 14 | 15 | "github.com/aws/aws-sdk-go-v2/service/s3/types" 16 | "github.com/mulbc/gosbench/common" 17 | log "github.com/sirupsen/logrus" 18 | ) 19 | 20 | var config common.WorkerConf 21 | var prometheusPort int 22 | var debug, trace bool 23 | 24 | func init() { 25 | runtime.GOMAXPROCS(runtime.NumCPU()) 26 | log.SetFormatter(&log.TextFormatter{ 27 | FullTimestamp: true, 28 | }) 29 | rand.Seed(time.Now().UnixNano()) 30 | } 31 | 32 | func main() { 33 | var serverAddress string 34 | flag.StringVar(&serverAddress, "s", "", "Gosbench Server IP and Port in the form '192.168.1.1:2000'") 35 | flag.IntVar(&prometheusPort, "p", 8888, "Port on which the Prometheus Exporter will be available. Default: 8888") 36 | flag.BoolVar(&debug, "d", false, "enable debug log output") 37 | flag.BoolVar(&trace, "t", false, "enable trace log output") 38 | flag.Parse() 39 | if serverAddress == "" { 40 | log.Fatal("-s is a mandatory parameter - please specify the server IP and Port") 41 | } 42 | 43 | if debug { 44 | log.SetLevel(log.DebugLevel) 45 | } else if trace { 46 | log.SetLevel(log.TraceLevel) 47 | } else { 48 | log.SetLevel(log.InfoLevel) 49 | } 50 | 51 | for { 52 | err := connectToServer(serverAddress) 53 | if err != nil { 54 | log.WithError(err).Error("Issues with server connection") 55 | time.Sleep(time.Second) 56 | } 57 | } 58 | } 59 | 60 | func connectToServer(serverAddress string) error { 61 | conn, err := net.Dial("tcp", serverAddress) 62 | if err != nil { 63 | // return errors.New("Could not establish connection to server yet") 64 | return err 65 | } 66 | encoder := json.NewEncoder(conn) 67 | decoder := json.NewDecoder(conn) 68 | 69 | _ = encoder.Encode("ready for work") 70 | 71 | var response common.WorkerMessage 72 | Workqueue := &Workqueue{ 73 | Queue: &[]WorkItem{}, 74 | } 75 | for { 76 | err := decoder.Decode(&response) 77 | if err != nil { 78 | log.WithField("message", response).WithError(err).Error("Server responded unusually - reconnecting") 79 | conn.Close() 80 | return errors.New("Issue when receiving work from server") 81 | } 82 | log.Tracef("Response: %+v", response) 83 | switch response.Message { 84 | case "init": 85 | config = *response.Config 86 | log.Info("Got config from server - starting preparations now") 87 | 88 | InitS3(*config.S3Config) 89 | fillWorkqueue(config.Test, Workqueue, config.WorkerID, config.Test.WorkerShareBuckets) 90 | 91 | for _, work := range *Workqueue.Queue { 92 | err = work.Prepare() 93 | if err != nil { 94 | 
log.WithError(err).Error("Error during work preparation - ignoring") 95 | } 96 | } 97 | log.Info("Preparations finished - waiting on server to start work") 98 | _ = encoder.Encode(common.WorkerMessage{Message: "preparations done"}) 99 | case "start work": 100 | if config == (common.WorkerConf{}) || len(*Workqueue.Queue) == 0 { 101 | log.Fatal("Was instructed to start work - but the preparation step is incomplete - exiting") 102 | return nil 103 | } 104 | log.Info("Starting to work") 105 | duration := PerfTest(config.Test, Workqueue, config.WorkerID) 106 | benchResults := getCurrentPromValues(config.Test.Name) 107 | benchResults.Duration = duration 108 | benchResults.Bandwidth = benchResults.Bytes / duration.Seconds() 109 | log.Infof("PROM VALUES %+v", benchResults) 110 | _ = encoder.Encode(common.WorkerMessage{Message: "work done", BenchResult: benchResults}) 111 | // Work is done - return to being a ready worker by reconnecting 112 | return nil 113 | case "shutdown": 114 | log.Info("Server told us to shut down - all work is done for today") 115 | os.Exit(0) 116 | } 117 | } 118 | } 119 | 120 | // PerfTest runs a performance test as configured in testConfig 121 | func PerfTest(testConfig *common.TestCaseConfiguration, Workqueue *Workqueue, workerID string) time.Duration { 122 | workChannel := make(chan WorkItem, len(*Workqueue.Queue)) 123 | notifyChan := make(chan struct{}) 124 | wg := &sync.WaitGroup{} 125 | wg.Add(testConfig.ParallelClients) 126 | 127 | startTime := time.Now().UTC() 128 | promTestStart.WithLabelValues(testConfig.Name).Set(float64(startTime.UnixNano() / int64(1000000))) 129 | // promTestGauge.WithLabelValues(testConfig.Name).Inc() 130 | for worker := 0; worker < testConfig.ParallelClients; worker++ { 131 | go DoWork(workChannel, notifyChan, wg) 132 | } 133 | log.Infof("Started %d parallel clients", testConfig.ParallelClients) 134 | if testConfig.Runtime != 0 { 135 | workUntilTimeout(Workqueue, workChannel, notifyChan, time.Duration(testConfig.Runtime)) 136 | } else { 137 | workUntilOps(Workqueue, workChannel, testConfig.OpsDeadline, testConfig.ParallelClients) 138 | } 139 | // Wait for all the goroutines to finish 140 | wg.Wait() 141 | log.Info("All clients finished") 142 | endTime := time.Now().UTC() 143 | promTestEnd.WithLabelValues(testConfig.Name).Set(float64(endTime.UnixNano() / int64(1000000))) 144 | 145 | if testConfig.CleanAfter { 146 | log.Info("Housekeeping started") 147 | for _, work := range *Workqueue.Queue { 148 | err := work.Clean() 149 | if err != nil { 150 | log.WithError(err).Error("Error during cleanup - ignoring") 151 | } 152 | } 153 | for bucket := uint64(0); bucket < testConfig.Buckets.NumberMax; bucket++ { 154 | err := deleteBucket(housekeepingSvc, fmt.Sprintf("%s%s%d", workerID, testConfig.BucketPrefix, bucket)) 155 | if err != nil { 156 | log.WithError(err).Error("Error during bucket deleting - ignoring") 157 | } 158 | } 159 | log.Info("Housekeeping finished") 160 | } 161 | // Sleep to ensure Prometheus can still scrape the last information before we restart the worker 162 | time.Sleep(10 * time.Second) 163 | return endTime.Sub(startTime) 164 | } 165 | 166 | func workUntilTimeout(Workqueue *Workqueue, workChannel chan WorkItem, notifyChan chan<- struct{}, runtime time.Duration) { 167 | timer := time.NewTimer(runtime) 168 | for { 169 | for _, work := range *Workqueue.Queue { 170 | select { 171 | case <-timer.C: 172 | log.Debug("Reached Runtime end") 173 | close(notifyChan) 174 | return 175 | case workChannel <- work: 176 | } 177 | } 178 | for
_, work := range *Workqueue.Queue { 179 | switch work.(type) { 180 | case *DeleteOperation: 181 | log.Debug("Re-Running Work preparation for delete job started") 182 | err := work.Prepare() 183 | if err != nil { 184 | log.WithError(err).Error("Error during work preparation - ignoring") 185 | } 186 | log.Debug("Delete preparation re-run finished") 187 | } 188 | } 189 | } 190 | } 191 | 192 | func workUntilOps(Workqueue *Workqueue, workChannel chan WorkItem, maxOps uint64, numberOfWorker int) { 193 | currentOps := uint64(0) 194 | for { 195 | for _, work := range *Workqueue.Queue { 196 | if currentOps >= maxOps { 197 | log.Debug("Reached OpsDeadline ... waiting for workers to finish") 198 | for worker := 0; worker < numberOfWorker; worker++ { 199 | workChannel <- &Stopper{} 200 | } 201 | return 202 | } 203 | currentOps++ 204 | workChannel <- work 205 | } 206 | for _, work := range *Workqueue.Queue { 207 | switch work.(type) { 208 | case *DeleteOperation: 209 | log.Debug("Re-Running Work preparation for delete job started") 210 | err := work.Prepare() 211 | if err != nil { 212 | log.WithError(err).Error("Error during work preparation - ignoring") 213 | } 214 | log.Debug("Delete preparation re-run finished") 215 | } 216 | } 217 | } 218 | } 219 | 220 | func fillWorkqueue(testConfig *common.TestCaseConfiguration, Workqueue *Workqueue, workerID string, shareBucketName bool) { 221 | 222 | if testConfig.ReadWeight > 0 { 223 | Workqueue.OperationValues = append(Workqueue.OperationValues, KV{Key: "read"}) 224 | } 225 | if testConfig.ExistingReadWeight > 0 { 226 | Workqueue.OperationValues = append(Workqueue.OperationValues, KV{Key: "existing_read"}) 227 | } 228 | if testConfig.WriteWeight > 0 { 229 | Workqueue.OperationValues = append(Workqueue.OperationValues, KV{Key: "write"}) 230 | } 231 | if testConfig.ListWeight > 0 { 232 | Workqueue.OperationValues = append(Workqueue.OperationValues, KV{Key: "list"}) 233 | } 234 | if testConfig.DeleteWeight > 0 { 235 | Workqueue.OperationValues = append(Workqueue.OperationValues, KV{Key: "delete"}) 236 | } 237 | 238 | bucketCount := common.EvaluateDistribution(testConfig.Buckets.NumberMin, testConfig.Buckets.NumberMax, &testConfig.Buckets.NumberLast, 1, testConfig.Buckets.NumberDistribution) 239 | for bucket := uint64(0); bucket < bucketCount; bucket++ { 240 | bucketName := fmt.Sprintf("%s%s%d", workerID, testConfig.BucketPrefix, bucket) 241 | if shareBucketName { 242 | bucketName = fmt.Sprintf("%s%d", testConfig.BucketPrefix, bucket) 243 | } 244 | err := createBucket(housekeepingSvc, bucketName) 245 | if err != nil { 246 | log.WithError(err).WithField("bucket", bucketName).Error("Error when creating bucket") 247 | } 248 | var preExistingObjects []types.Object 249 | var preExistingObjectCount uint64 250 | if testConfig.ExistingReadWeight > 0 { 251 | preExistingObjects, err = listObjects(housekeepingSvc, "", bucketName) 252 | if err != nil { 253 | log.WithError(err).Fatalf("Problems when listing contents of bucket %s", bucketName) 254 | } 255 | preExistingObjectCount = uint64(len(preExistingObjects)) 256 | log.Debugf("Found %d objects in bucket %s", preExistingObjectCount, bucketName) 257 | 258 | if preExistingObjectCount == 0 { 259 | log.Warningf("There are no objects in bucket %s - skipping it", bucketName) 260 | continue 261 | } 262 | } 263 | objectCount := common.EvaluateDistribution(testConfig.Objects.NumberMin, testConfig.Objects.NumberMax, &testConfig.Objects.NumberLast, 1, testConfig.Objects.NumberDistribution) 264 | for object := uint64(0); object < objectCount;
object++ { 265 | objectSize := common.EvaluateDistribution(testConfig.Objects.SizeMin, testConfig.Objects.SizeMax, &testConfig.Objects.SizeLast, 1, testConfig.Objects.SizeDistribution) 266 | 267 | nextOp := GetNextOperation(Workqueue) 268 | switch nextOp { 269 | case "read": 270 | err := IncreaseOperationValue(nextOp, 1/float64(testConfig.ReadWeight), Workqueue) 271 | if err != nil { 272 | log.WithError(err).Error("Could not increase operational Value - ignoring") 273 | } 274 | new := &ReadOperation{ 275 | TestName: testConfig.Name, 276 | Bucket: bucketName, 277 | ObjectName: fmt.Sprintf("%s%s%d", workerID, testConfig.ObjectPrefix, object), 278 | ObjectSize: objectSize, 279 | WorksOnPreexistingObject: false, 280 | } 281 | *Workqueue.Queue = append(*Workqueue.Queue, new) 282 | case "existing_read": 283 | err := IncreaseOperationValue(nextOp, 1/float64(testConfig.ExistingReadWeight), Workqueue) 284 | if err != nil { 285 | log.WithError(err).Error("Could not increase operational Value - ignoring") 286 | } 287 | new := &ReadOperation{ 288 | TestName: testConfig.Name, 289 | Bucket: bucketName, 290 | ObjectName: *preExistingObjects[object%preExistingObjectCount].Key, 291 | ObjectSize: uint64(*preExistingObjects[object%preExistingObjectCount].Size), 292 | WorksOnPreexistingObject: true, 293 | } 294 | *Workqueue.Queue = append(*Workqueue.Queue, new) 295 | case "write": 296 | err := IncreaseOperationValue(nextOp, 1/float64(testConfig.WriteWeight), Workqueue) 297 | if err != nil { 298 | log.WithError(err).Error("Could not increase operational Value - ignoring") 299 | } 300 | new := &WriteOperation{ 301 | TestName: testConfig.Name, 302 | Bucket: bucketName, 303 | ObjectName: fmt.Sprintf("%s%s%d", workerID, testConfig.ObjectPrefix, object), 304 | ObjectSize: objectSize, 305 | } 306 | *Workqueue.Queue = append(*Workqueue.Queue, new) 307 | case "list": 308 | err := IncreaseOperationValue(nextOp, 1/float64(testConfig.ListWeight), Workqueue) 309 | if err != nil { 310 | log.WithError(err).Error("Could not increase operational Value - ignoring") 311 | } 312 | new := &ListOperation{ 313 | TestName: testConfig.Name, 314 | Bucket: bucketName, 315 | ObjectName: fmt.Sprintf("%s%s%d", workerID, testConfig.ObjectPrefix, object), 316 | ObjectSize: objectSize, 317 | } 318 | *Workqueue.Queue = append(*Workqueue.Queue, new) 319 | case "delete": 320 | err := IncreaseOperationValue(nextOp, 1/float64(testConfig.DeleteWeight), Workqueue) 321 | if err != nil { 322 | log.WithError(err).Error("Could not increase operational Value - ignoring") 323 | } 324 | new := &DeleteOperation{ 325 | TestName: testConfig.Name, 326 | Bucket: bucketName, 327 | ObjectName: fmt.Sprintf("%s%s%d", workerID, testConfig.ObjectPrefix, object), 328 | ObjectSize: objectSize, 329 | } 330 | *Workqueue.Queue = append(*Workqueue.Queue, new) 331 | } 332 | } 333 | } 334 | } 335 | -------------------------------------------------------------------------------- /worker/prometheus.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "contrib.go.opencensus.io/exporter/prometheus" 5 | "github.com/mulbc/gosbench/common" 6 | prom "github.com/prometheus/client_golang/prometheus" 7 | promModel "github.com/prometheus/client_model/go" 8 | log "github.com/sirupsen/logrus" 9 | ) 10 | 11 | var pe *prometheus.Exporter 12 | var promRegistry = prom.NewRegistry() 13 | var promTestStart = prom.NewGaugeVec( 14 | prom.GaugeOpts{ 15 | Name: "test_start", 16 | Namespace: "gosbench", 17 | Help: "Determines the 
start time of a job for Grafana annotations", 18 | }, []string{"testName"}) 19 | var promTestEnd = prom.NewGaugeVec( 20 | prom.GaugeOpts{ 21 | Name: "test_end", 22 | Namespace: "gosbench", 23 | Help: "Determines the end time of a job for Grafana annotations", 24 | }, []string{"testName"}) 25 | var promFinishedOps = prom.NewCounterVec( 26 | prom.CounterOpts{ 27 | Name: "finished_ops", 28 | Namespace: "gosbench", 29 | Help: "Finished S3 operations", 30 | }, []string{"testName", "method"}) 31 | var promFailedOps = prom.NewCounterVec( 32 | prom.CounterOpts{ 33 | Name: "failed_ops", 34 | Namespace: "gosbench", 35 | Help: "Failed S3 operations", 36 | }, []string{"testName", "method"}) 37 | var promLatency = prom.NewHistogramVec( 38 | prom.HistogramOpts{ 39 | Name: "ops_latency", 40 | Namespace: "gosbench", 41 | Help: "Histogram latency of S3 operations", 42 | Buckets: prom.ExponentialBuckets(2, 2, 12), 43 | }, []string{"testName", "method"}) 44 | var promUploadedBytes = prom.NewCounterVec( 45 | prom.CounterOpts{ 46 | Name: "uploaded_bytes", 47 | Namespace: "gosbench", 48 | Help: "Uploaded bytes to S3 store", 49 | }, []string{"testName", "method"}) 50 | var promDownloadedBytes = prom.NewCounterVec( 51 | prom.CounterOpts{ 52 | Name: "downloaded_bytes", 53 | Namespace: "gosbench", 54 | Help: "Downloaded bytes from S3 store", 55 | }, []string{"testName", "method"}) 56 | 57 | func init() { 58 | // Then create the prometheus stat exporter 59 | var err error 60 | pe, err = prometheus.NewExporter(prometheus.Options{ 61 | Namespace: "gosbench", 62 | ConstLabels: map[string]string{ 63 | "version": "0.0.1", 64 | }, 65 | Registry: promRegistry, 66 | }) 67 | if err != nil { 68 | log.WithError(err).Fatalf("Failed to create the Prometheus exporter:") 69 | } 70 | 71 | if err = promRegistry.Register(promTestStart); err != nil { 72 | log.WithError(err).Error("Issues when adding test_start gauge to Prometheus registry") 73 | } 74 | if err = promRegistry.Register(promTestEnd); err != nil { 75 | log.WithError(err).Error("Issues when adding test_end gauge to Prometheus registry") 76 | } 77 | if err = promRegistry.Register(promFinishedOps); err != nil { 78 | log.WithError(err).Error("Issues when adding finished_ops gauge to Prometheus registry") 79 | } 80 | if err = promRegistry.Register(promFailedOps); err != nil { 81 | log.WithError(err).Error("Issues when adding failed_ops gauge to Prometheus registry") 82 | } 83 | if err = promRegistry.Register(promLatency); err != nil { 84 | log.WithError(err).Error("Issues when adding ops_latency gauge to Prometheus registry") 85 | } 86 | if err = promRegistry.Register(promUploadedBytes); err != nil { 87 | log.WithError(err).Error("Issues when adding uploaded_bytes gauge to Prometheus registry") 88 | } 89 | if err = promRegistry.Register(promDownloadedBytes); err != nil { 90 | log.WithError(err).Error("Issues when adding downloaded_bytes gauge to Prometheus registry") 91 | } 92 | } 93 | 94 | func getCurrentPromValues(testName string) common.BenchmarkResult { 95 | benchResult := common.BenchmarkResult{ 96 | TestName: testName, 97 | } 98 | result, err := promRegistry.Gather() 99 | if err != nil { 100 | log.WithError(err).Error("ERROR during PROM VALUE gathering") 101 | } 102 | resultmap := map[string][]*promModel.Metric{} 103 | for _, metric := range result { 104 | resultmap[*metric.Name] = metric.Metric 105 | } 106 | benchResult.Operations = sumCounterForTest(resultmap["gosbench_finished_ops"], testName) 107 | benchResult.Bytes = 
sumCounterForTest(resultmap["gosbench_uploaded_bytes"], testName) + sumCounterForTest(resultmap["gosbench_downloaded_bytes"], testName) 108 | benchResult.LatencyAvg = averageHistogramForTest(resultmap["gosbench_ops_latency"], testName) 109 | return benchResult 110 | } 111 | 112 | func sumCounterForTest(metrics []*promModel.Metric, testName string) float64 { 113 | sum := float64(0) 114 | for _, metric := range metrics { 115 | for _, label := range metric.Label { 116 | if *label.Name == "testName" && *label.Value == testName { 117 | sum += *metric.Counter.Value 118 | } 119 | } 120 | } 121 | return sum 122 | } 123 | 124 | func averageHistogramForTest(metrics []*promModel.Metric, testName string) float64 { 125 | sum := float64(0) 126 | count := float64(0) 127 | for _, metric := range metrics { 128 | for _, label := range metric.Label { 129 | if *label.Name == "testName" && *label.Value == testName { 130 | sum += *metric.Histogram.SampleSum 131 | count += float64(*metric.Histogram.SampleCount) 132 | } 133 | } 134 | } 135 | return sum / count 136 | } 137 | -------------------------------------------------------------------------------- /worker/s3.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "crypto/tls" 6 | "errors" 7 | "fmt" 8 | "io" 9 | "net/http" 10 | 11 | "github.com/aws/aws-sdk-go-v2/aws" 12 | s3config "github.com/aws/aws-sdk-go-v2/config" 13 | "github.com/aws/aws-sdk-go-v2/credentials" 14 | "github.com/aws/aws-sdk-go-v2/feature/s3/manager" 15 | "github.com/aws/aws-sdk-go-v2/service/s3" 16 | "github.com/aws/aws-sdk-go-v2/service/s3/types" 17 | log "github.com/sirupsen/logrus" 18 | 19 | "github.com/mulbc/gosbench/common" 20 | "go.opencensus.io/plugin/ochttp" 21 | "go.opencensus.io/stats/view" 22 | ) 23 | 24 | var svc, housekeepingSvc *s3.Client 25 | var ctx context.Context 26 | var hc *http.Client 27 | 28 | func init() { 29 | if err := view.Register([]*view.View{ 30 | ochttp.ClientSentBytesDistribution, 31 | ochttp.ClientReceivedBytesDistribution, 32 | ochttp.ClientRoundtripLatencyDistribution, 33 | ochttp.ClientCompletedCount, 34 | }...); err != nil { 35 | log.WithError(err).Fatalf("Failed to register HTTP client views:") 36 | } 37 | view.RegisterExporter(pe) 38 | go func() { 39 | mux := http.NewServeMux() 40 | mux.Handle("/metrics", pe) 41 | // http://localhost:8888/metrics 42 | log.Infof("Starting Prometheus Exporter on port %d", prometheusPort) 43 | if err := http.ListenAndServe(fmt.Sprintf(":%d", prometheusPort), mux); err != nil { 44 | log.WithError(err).Fatalf("Failed to run Prometheus /metrics endpoint:") 45 | } 46 | }() 47 | 48 | } 49 | 50 | // InitS3 initialises the S3 clients 51 | // (the Prometheus exporter is started in this file's init() on the configured port) 52 | func InitS3(config common.S3Configuration) { 53 | // All clients require a configuration (aws.Config). It provides the 54 | // clients with shared settings such as region, endpoint, and credentials. 55 | // The config should be shared where possible to take advantage of 56 | // configuration and credential caching. See the aws-sdk-go-v2 config 57 | // package for more information.
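// Two HTTP transports are built below: the measured client wraps its transport in ochttp.Transport so that its traffic shows up in the Prometheus metrics, while the housekeeping client uses the plain transport so that preparation and cleanup traffic does not distort the benchmark results.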
58 | tr := &http.Transport{ 59 | TLSClientConfig: &tls.Config{InsecureSkipVerify: config.SkipSSLVerify}, 60 | } 61 | tr2 := &ochttp.Transport{Base: tr} 62 | hc = &http.Client{ 63 | Transport: tr2, 64 | } 65 | 66 | // TODO Create a context with a timeout - we already use this context in all S3 calls 67 | // Usually this shouldn't be a problem ;) 68 | ctx = context.Background() 69 | 70 | cfg, err := s3config.LoadDefaultConfig(ctx, 71 | s3config.WithHTTPClient(hc), 72 | s3config.WithRegion(config.Region), 73 | s3config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(config.AccessKey, config.SecretKey, "")), 74 | s3config.WithRetryer(func() aws.Retryer { 75 | return aws.NopRetryer{} 76 | }), 77 | ) 78 | if err != nil { 79 | log.WithError(err).Fatal("Unable to build S3 config") 80 | } 81 | // Use this Session to do things that are hidden from the performance monitoring 82 | // Setting up the housekeeping S3 client 83 | hkhc := &http.Client{ 84 | Transport: tr, 85 | } 86 | 87 | hkCfg, err := s3config.LoadDefaultConfig(ctx, 88 | s3config.WithHTTPClient(hkhc), 89 | s3config.WithRegion(config.Region), 90 | s3config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(config.AccessKey, config.SecretKey, "")), 91 | s3config.WithRetryer(func() aws.Retryer { 92 | return aws.NopRetryer{} 93 | }), 94 | ) 95 | if err != nil { 96 | log.WithError(err).Fatal("Unable to build S3 housekeeping config") 97 | } 98 | 99 | // Create a new instance of the service's client with a Session. 100 | // Optional aws.Config values can also be provided as variadic arguments 101 | // to the New function. This option allows you to provide service 102 | // specific configuration. 103 | svc = s3.NewFromConfig(cfg, func(o *s3.Options) { 104 | o.BaseEndpoint = aws.String(config.Endpoint) 105 | o.UsePathStyle = config.UsePathStyle 106 | }) 107 | // Use this service to do things that are hidden from the performance monitoring 108 | housekeepingSvc = s3.NewFromConfig(hkCfg, func(o *s3.Options) { 109 | o.BaseEndpoint = aws.String(config.Endpoint) 110 | o.UsePathStyle = config.UsePathStyle 111 | }) 112 | 113 | log.Debug("S3 Init done") 114 | } 115 | 116 | func putObject(service *s3.Client, objectName string, objectContent io.ReadSeeker, bucket string) error { 117 | // Create an uploader with S3 client and custom options 118 | uploader := manager.NewUploader(service, func(d *manager.Uploader) { 119 | d.MaxUploadParts = 1 120 | }) 121 | 122 | _, err := uploader.Upload(ctx, &s3.PutObjectInput{ 123 | Bucket: &bucket, 124 | Key: &objectName, 125 | Body: objectContent, 126 | }) 127 | 128 | if err != nil { 129 | log.WithError(err).WithField("object", objectName).WithField("bucket", bucket).Errorf("Failed to upload object,") 130 | return err 131 | } 132 | 133 | log.WithField("bucket", bucket).WithField("key", objectName).Tracef("Upload successful") 134 | 135 | return err 136 | } 137 | 138 | // func getObjectProperties(service *s3.S3, objectName string, bucket string) { 139 | // service.ListObjects(&s3.ListObjectsInput{ 140 | // Bucket: &bucket, 141 | // }) 142 | // result, err := service.GetObjectWithContext(ctx, &s3.GetObjectInput{ 143 | // Bucket: &bucket, 144 | // Key: &objectName, 145 | // }) 146 | // if err != nil { 147 | // // Cast err to awserr.Error to handle specific error codes. 
148 | // aerr, ok := err.(awserr.Error) 149 | // if ok && aerr.Code() == s3.ErrCodeNoSuchKey { 150 | // log.WithError(aerr).Errorf("Could not find object %s in bucket %s when querying properties", objectName, bucket) 151 | // } 152 | // } 153 | 154 | // // Make sure to close the body when done with it for S3 GetObject APIs or 155 | // // will leak connections. 156 | // defer result.Body.Close() 157 | 158 | // log.Debugf("Object Properties:\n%+v", result) 159 | // } 160 | 161 | func listObjects(service *s3.Client, prefix string, bucket string) ([]types.Object, error) { 162 | var bucketContents []types.Object 163 | p := s3.NewListObjectsV2Paginator(service, &s3.ListObjectsV2Input{Bucket: aws.String(bucket), Prefix: aws.String(prefix)}) 164 | for p.HasMorePages() { 165 | // Next Page takes a new context for each page retrieval. This is where 166 | // you could add timeouts or deadlines. 167 | page, err := p.NextPage(ctx) 168 | if err != nil { 169 | log.WithError(err).WithField("prefix", prefix).WithField("bucket", bucket).Errorf("Failed to list objects") 170 | return nil, err 171 | } 172 | bucketContents = append(bucketContents, page.Contents...) 173 | } 174 | 175 | return bucketContents, nil 176 | } 177 | 178 | func getObject(service *s3.Client, objectName string, bucket string, objectSize uint64) error { 179 | // Stream the object to io.Discard instead of allocating a buffer for it 180 | result, err := service.GetObject(ctx, &s3.GetObjectInput{ 181 | Bucket: &bucket, 182 | Key: &objectName, 183 | }) 184 | if err != nil { 185 | return err 186 | } 187 | numBytes, err := io.Copy(io.Discard, result.Body) 188 | if err != nil { 189 | return err 190 | } 191 | if numBytes != int64(objectSize) { 192 | return fmt.Errorf("expected object length %d does not match actual object length %d", objectSize, numBytes) 193 | } 194 | return nil 195 | } 196 | 197 | func deleteObject(service *s3.Client, objectName string, bucket string) error { 198 | _, err := service.DeleteObject(ctx, &s3.DeleteObjectInput{ 199 | Bucket: &bucket, 200 | Key: &objectName, 201 | }) 202 | if err != nil { 203 | log.WithError(err).Errorf("Could not find object %s in bucket %s for deletion", objectName, bucket) 204 | } 205 | return err 206 | } 207 | 208 | func createBucket(service *s3.Client, bucket string) error { 209 | // Do not err when the bucket is already there... 210 | _, err := service.CreateBucket(ctx, &s3.CreateBucketInput{ 211 | Bucket: &bucket, 212 | }) 213 | if err != nil { 214 | var bne *types.BucketAlreadyExists 215 | // Ignore error if bucket already exists 216 | if errors.As(err, &bne) { 217 | return nil 218 | } 219 | log.WithError(err).Errorf("Issues when creating bucket %s", bucket) 220 | } 221 | return err 222 | } 223 | 224 | func deleteBucket(service *s3.Client, bucket string) error { 225 | // First delete all objects in the bucket 226 | input := &s3.ListObjectsV2Input{ 227 | Bucket: aws.String(bucket), 228 | } 229 | 230 | var bucketContents []types.Object 231 | isTruncated := true 232 | for isTruncated { 233 | result, err := service.ListObjectsV2(ctx, input) 234 | if err != nil { 235 | return err 236 | } 237 | bucketContents = append(bucketContents, result.Contents...)
input.ContinuationToken = result.NextContinuationToken 239 | isTruncated = *result.IsTruncated 240 | } 241 | 242 | if len(bucketContents) > 0 { 243 | var objectsToDelete []types.ObjectIdentifier 244 | for _, item := range bucketContents { 245 | objectsToDelete = append(objectsToDelete, types.ObjectIdentifier{ 246 | Key: item.Key, 247 | }) 248 | } 249 | 250 | deleteObjectsInput := &s3.DeleteObjectsInput{ 251 | Bucket: aws.String(bucket), 252 | Delete: &types.Delete{ 253 | Objects: objectsToDelete, 254 | Quiet: aws.Bool(true), 255 | }, 256 | } 257 | 258 | _, err := service.DeleteObjects(ctx, deleteObjectsInput) 259 | if err != nil { 260 | return err 261 | } 262 | } 263 | 264 | // Then delete the (now empty) bucket itself 265 | _, err := service.DeleteBucket(ctx, &s3.DeleteBucketInput{ 266 | Bucket: &bucket, 267 | }) 268 | return err 269 | } 270 | -------------------------------------------------------------------------------- /worker/workItems.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "math/rand" 7 | "sort" 8 | "sync" 9 | "time" 10 | 11 | log "github.com/sirupsen/logrus" 12 | ) 13 | 14 | // WorkItem is an interface for general work operations 15 | // They can be read, write, list, delete or a stopper 16 | type WorkItem interface { 17 | Prepare() error 18 | Do() error 19 | Clean() error 20 | } 21 | 22 | // ReadOperation stands for a read operation 23 | type ReadOperation struct { 24 | TestName string 25 | Bucket string 26 | ObjectName string 27 | ObjectSize uint64 28 | WorksOnPreexistingObject bool 29 | } 30 | 31 | // WriteOperation stands for a write operation 32 | type WriteOperation struct { 33 | TestName string 34 | Bucket string 35 | ObjectName string 36 | ObjectSize uint64 37 | } 38 | 39 | // ListOperation stands for a list operation 40 | type ListOperation struct { 41 | TestName string 42 | Bucket string 43 | ObjectName string 44 | ObjectSize uint64 45 | } 46 | 47 | // DeleteOperation stands for a delete operation 48 | type DeleteOperation struct { 49 | TestName string 50 | Bucket string 51 | ObjectName string 52 | ObjectSize uint64 53 | } 54 | 55 | // Stopper marks the end of a workqueue when using 56 | // maxOps as testCase end criterion 57 | type Stopper struct{} 58 | 59 | // KV is a simple key-value struct 60 | type KV struct { 61 | Key string 62 | Value float64 63 | } 64 | 65 | // Workqueue contains the Queue and the valid operations' 66 | // values to determine which operation should be done next 67 | // in order to satisfy the set ratios.
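// Every executed operation adds 1/weight to its entry in OperationValues, and GetNextOperation always picks the entry with the smallest accumulated value, so operations are issued roughly in proportion to their configured weights (a stride-scheduler style credit system).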
68 | type Workqueue struct { 69 | OperationValues []KV 70 | Queue *[]WorkItem 71 | } 72 | 73 | // GetNextOperation evaluates the operation values and returns which 74 | // operation should happen next 75 | func GetNextOperation(Queue *Workqueue) string { 76 | sort.Slice(Queue.OperationValues, func(i, j int) bool { 77 | return Queue.OperationValues[i].Value < Queue.OperationValues[j].Value 78 | }) 79 | return Queue.OperationValues[0].Key 80 | } 81 | 82 | // IncreaseOperationValue increases the given operation's value by the set amount 83 | func IncreaseOperationValue(operation string, value float64, Queue *Workqueue) error { 84 | for i := range Queue.OperationValues { 85 | if Queue.OperationValues[i].Key == operation { 86 | Queue.OperationValues[i].Value += value 87 | return nil 88 | } 89 | } 90 | return fmt.Errorf("Could not find requested operation %s", operation) 91 | } 92 | 93 | // Prepare prepares the execution of the ReadOperation 94 | func (op *ReadOperation) Prepare() error { 95 | log.WithField("bucket", op.Bucket).WithField("object", op.ObjectName).WithField("Preexisting?", op.WorksOnPreexistingObject).Debug("Preparing ReadOperation") 96 | if op.WorksOnPreexistingObject { 97 | return nil 98 | } 99 | return putObject(housekeepingSvc, op.ObjectName, bytes.NewReader(generateRandomBytes(op.ObjectSize)), op.Bucket) 100 | } 101 | 102 | // Prepare prepares the execution of the WriteOperation 103 | func (op *WriteOperation) Prepare() error { 104 | log.WithField("bucket", op.Bucket).WithField("object", op.ObjectName).Debug("Preparing WriteOperation") 105 | return nil 106 | } 107 | 108 | // Prepare prepares the execution of the ListOperation 109 | func (op *ListOperation) Prepare() error { 110 | log.WithField("bucket", op.Bucket).WithField("object", op.ObjectName).Debug("Preparing ListOperation") 111 | return putObject(housekeepingSvc, op.ObjectName, bytes.NewReader(generateRandomBytes(op.ObjectSize)), op.Bucket) 112 | } 113 | 114 | // Prepare prepares the execution of the DeleteOperation 115 | func (op *DeleteOperation) Prepare() error { 116 | log.WithField("bucket", op.Bucket).WithField("object", op.ObjectName).Debug("Preparing DeleteOperation") 117 | return putObject(housekeepingSvc, op.ObjectName, bytes.NewReader(generateRandomBytes(op.ObjectSize)), op.Bucket) 118 | } 119 | 120 | // Prepare does nothing here 121 | func (op *Stopper) Prepare() error { 122 | return nil 123 | } 124 | 125 | // Do executes the actual work of the ReadOperation 126 | func (op *ReadOperation) Do() error { 127 | log.WithField("bucket", op.Bucket).WithField("object", op.ObjectName).WithField("Preexisting?", op.WorksOnPreexistingObject).Debug("Doing ReadOperation") 128 | start := time.Now() 129 | err := getObject(svc, op.ObjectName, op.Bucket, op.ObjectSize) 130 | duration := time.Since(start) 131 | promLatency.WithLabelValues(op.TestName, "GET").Observe(float64(duration.Milliseconds())) 132 | if err != nil { 133 | promFailedOps.WithLabelValues(op.TestName, "GET").Inc() 134 | } else { 135 | promFinishedOps.WithLabelValues(op.TestName, "GET").Inc() 136 | } 137 | promDownloadedBytes.WithLabelValues(op.TestName, "GET").Add(float64(op.ObjectSize)) 138 | return err 139 | } 140 | 141 | // Do executes the actual work of the WriteOperation 142 | func (op *WriteOperation) Do() error { 143 | log.WithField("bucket", op.Bucket).WithField("object", op.ObjectName).Debug("Doing WriteOperation") 144 | start := time.Now() 145 | err := putObject(svc, op.ObjectName, bytes.NewReader(generateRandomBytes(op.ObjectSize)), op.Bucket) 146 
93 | // Prepare prepares the execution of the ReadOperation
94 | func (op *ReadOperation) Prepare() error {
95 |     log.WithField("bucket", op.Bucket).WithField("object", op.ObjectName).WithField("Preexisting?", op.WorksOnPreexistingObject).Debug("Preparing ReadOperation")
96 |     if op.WorksOnPreexistingObject {
97 |         return nil
98 |     }
99 |     return putObject(housekeepingSvc, op.ObjectName, bytes.NewReader(generateRandomBytes(op.ObjectSize)), op.Bucket)
100 | }
101 |
102 | // Prepare prepares the execution of the WriteOperation
103 | func (op *WriteOperation) Prepare() error {
104 |     log.WithField("bucket", op.Bucket).WithField("object", op.ObjectName).Debug("Preparing WriteOperation")
105 |     return nil
106 | }
107 |
108 | // Prepare prepares the execution of the ListOperation
109 | func (op *ListOperation) Prepare() error {
110 |     log.WithField("bucket", op.Bucket).WithField("object", op.ObjectName).Debug("Preparing ListOperation")
111 |     return putObject(housekeepingSvc, op.ObjectName, bytes.NewReader(generateRandomBytes(op.ObjectSize)), op.Bucket)
112 | }
113 |
114 | // Prepare prepares the execution of the DeleteOperation
115 | func (op *DeleteOperation) Prepare() error {
116 |     log.WithField("bucket", op.Bucket).WithField("object", op.ObjectName).Debug("Preparing DeleteOperation")
117 |     return putObject(housekeepingSvc, op.ObjectName, bytes.NewReader(generateRandomBytes(op.ObjectSize)), op.Bucket)
118 | }
119 |
120 | // Prepare is a no-op for the Stopper
121 | func (op *Stopper) Prepare() error {
122 |     return nil
123 | }
124 |
125 | // Do executes the actual work of the ReadOperation
126 | func (op *ReadOperation) Do() error {
127 |     log.WithField("bucket", op.Bucket).WithField("object", op.ObjectName).WithField("Preexisting?", op.WorksOnPreexistingObject).Debug("Doing ReadOperation")
128 |     start := time.Now()
129 |     err := getObject(svc, op.ObjectName, op.Bucket, op.ObjectSize)
130 |     duration := time.Since(start)
131 |     promLatency.WithLabelValues(op.TestName, "GET").Observe(float64(duration.Milliseconds()))
132 |     if err != nil {
133 |         promFailedOps.WithLabelValues(op.TestName, "GET").Inc()
134 |     } else {
135 |         promFinishedOps.WithLabelValues(op.TestName, "GET").Inc()
136 |     }
137 |     promDownloadedBytes.WithLabelValues(op.TestName, "GET").Add(float64(op.ObjectSize)) // counted even when the GET failed
138 |     return err
139 | }
140 |
141 | // Do executes the actual work of the WriteOperation
142 | func (op *WriteOperation) Do() error {
143 |     log.WithField("bucket", op.Bucket).WithField("object", op.ObjectName).Debug("Doing WriteOperation")
144 |     start := time.Now()
145 |     err := putObject(svc, op.ObjectName, bytes.NewReader(generateRandomBytes(op.ObjectSize)), op.Bucket)
146 |     duration := time.Since(start)
147 |     promLatency.WithLabelValues(op.TestName, "PUT").Observe(float64(duration.Milliseconds()))
148 |     if err != nil {
149 |         promFailedOps.WithLabelValues(op.TestName, "PUT").Inc()
150 |     } else {
151 |         promFinishedOps.WithLabelValues(op.TestName, "PUT").Inc()
152 |     }
153 |     promUploadedBytes.WithLabelValues(op.TestName, "PUT").Add(float64(op.ObjectSize)) // counted even when the PUT failed
154 |     return err
155 | }
156 |
157 | // Do executes the actual work of the ListOperation
158 | func (op *ListOperation) Do() error {
159 |     log.WithField("bucket", op.Bucket).WithField("object", op.ObjectName).Debug("Doing ListOperation")
160 |     start := time.Now()
161 |     _, err := listObjects(svc, op.ObjectName, op.Bucket)
162 |     duration := time.Since(start)
163 |     promLatency.WithLabelValues(op.TestName, "LIST").Observe(float64(duration.Milliseconds()))
164 |     if err != nil {
165 |         promFailedOps.WithLabelValues(op.TestName, "LIST").Inc()
166 |     } else {
167 |         promFinishedOps.WithLabelValues(op.TestName, "LIST").Inc()
168 |     }
169 |     return err
170 | }
171 |
172 | // Do executes the actual work of the DeleteOperation
173 | func (op *DeleteOperation) Do() error {
174 |     log.WithField("bucket", op.Bucket).WithField("object", op.ObjectName).Debug("Doing DeleteOperation")
175 |     start := time.Now()
176 |     err := deleteObject(svc, op.ObjectName, op.Bucket)
177 |     duration := time.Since(start)
178 |     promLatency.WithLabelValues(op.TestName, "DELETE").Observe(float64(duration.Milliseconds()))
179 |     if err != nil {
180 |         promFailedOps.WithLabelValues(op.TestName, "DELETE").Inc()
181 |     } else {
182 |         promFinishedOps.WithLabelValues(op.TestName, "DELETE").Inc()
183 |     }
184 |     return err
185 | }
186 |
187 | // Do is a no-op for the Stopper
188 | func (op *Stopper) Do() error {
189 |     return nil
190 | }
191 |
192 | // Clean removes the object that Prepare created for the ReadOperation
193 | func (op *ReadOperation) Clean() error {
194 |     if op.WorksOnPreexistingObject {
195 |         return nil
196 |     }
197 |     log.WithField("bucket", op.Bucket).WithField("object", op.ObjectName).WithField("Preexisting?", op.WorksOnPreexistingObject).Debug("Cleaning up ReadOperation")
198 |     return deleteObject(housekeepingSvc, op.ObjectName, op.Bucket)
199 | }
200 |
201 | // Clean removes the object written by the WriteOperation
202 | func (op *WriteOperation) Clean() error {
203 |     return deleteObject(housekeepingSvc, op.ObjectName, op.Bucket)
204 | }
205 |
206 | // Clean removes the object that Prepare created for the ListOperation
207 | func (op *ListOperation) Clean() error {
208 |     return deleteObject(housekeepingSvc, op.ObjectName, op.Bucket)
209 | }
210 |
211 | // Clean does nothing for the DeleteOperation - Do already removed the object
212 | func (op *DeleteOperation) Clean() error {
213 |     return nil
214 | }
215 |
216 | // Clean is a no-op for the Stopper
217 | func (op *Stopper) Clean() error {
218 |     return nil
219 | }
220 |
221 | // DoWork processes the work items in the workChannel until
222 | // either the time runs out or a Stopper is found
223 | func DoWork(workChannel <-chan WorkItem, notifyChan <-chan struct{}, wg *sync.WaitGroup) {
224 |     defer wg.Done()
225 |     for {
226 |         select {
227 |         case <-notifyChan:
228 |             log.Debug("Runtime over - got timeout from work context")
229 |             return
230 |         case work := <-workChannel:
231 |             switch work.(type) {
232 |             case *Stopper:
233 |                 log.Debug("Found the end of the work queue - stopping")
234 |                 return
235 |             }
236 |             err := work.Do()
237 |             if err != nil {
238 |                 log.WithError(err).Error("Issues when performing work - ignoring")
239 |             }
240 |         }
241 |     }
242 | }
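// DoWork is meant to run as a pool of goroutines draining one shared channel.
// Illustrative wiring only - the worker count, channel capacity and deadline
// handling below are assumptions, not taken from this file:

workChannel := make(chan WorkItem, 100)
notifyChan := make(chan struct{})
wg := &sync.WaitGroup{}
for i := 0; i < 4; i++ { // four parallel workers
    wg.Add(1)
    go DoWork(workChannel, notifyChan, wg)
}
// ...enqueue the prepared WorkItems here, then one Stopper per worker so
// that every DoWork loop sees the end of the queue and returns...
for i := 0; i < 4; i++ {
    workChannel <- &Stopper{}
}
wg.Wait() // alternatively, close(notifyChan) stops all workers at the runtime deadline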
243 |
244 | func generateRandomBytes(size uint64) []byte {
245 |     now := time.Now()
246 |     random := make([]byte, size)
247 |     n, err := rand.Read(random)
248 |     if err != nil {
249 |         log.WithError(err).Fatal("I had issues getting my random bytes initialized")
250 |     }
251 |     log.Tracef("Generated %d random bytes in %v", n, time.Since(now))
252 |     return random
253 | }
254 |
--------------------------------------------------------------------------------
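generateRandomBytes allocates and fills a fresh buffer on every call, so each prepared or written object costs a full pass of the random generator; at large object sizes this can make the worker CPU-bound before the storage backend is saturated. A hedged caller-side alternative is to fill one buffer once and re-slice it per object - note this is not what the worker does today, the names maxObjectSize and objectSizes are hypothetical, and sharing bytes across objects will skew any test where payload uniqueness (e.g. deduplication behavior) matters:

    payload := generateRandomBytes(maxObjectSize) // one allocation and fill up front
    for _, size := range objectSizes {
        body := bytes.NewReader(payload[:size]) // re-slice, no per-object fill
        _ = body                                // would be handed to putObject as usual
    }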