├── .gitignore ├── .travis.yml ├── LICENSE ├── README.md ├── pom.xml ├── save_jmh_result.py └── src ├── main ├── java │ └── org │ │ └── apache │ │ └── flink │ │ ├── benchmark │ │ ├── AsyncWaitOperatorBenchmark.java │ │ ├── BenchmarkBase.java │ │ ├── BlockingPartitionBenchmark.java │ │ ├── CollectSink.java │ │ ├── ContinuousFileReaderOperatorBenchmark.java │ │ ├── FlinkEnvironmentContext.java │ │ ├── InputBenchmark.java │ │ ├── KeyByBenchmarks.java │ │ ├── MemoryStateBackendBenchmark.java │ │ ├── RemoteChannelThroughputBenchmark.java │ │ ├── RocksStateBackendBenchmark.java │ │ ├── SerializationFrameworkMiniBenchmarks.java │ │ ├── StateBackendBenchmarkBase.java │ │ ├── TwoInputBenchmark.java │ │ ├── WindowBenchmarks.java │ │ ├── full │ │ │ ├── PojoSerializationBenchmark.java │ │ │ ├── SerializationFrameworkAllBenchmarks.java │ │ │ ├── StringSerializationBenchmark.java │ │ │ └── package-info.java │ │ ├── functions │ │ │ ├── BaseSourceWithKeyRange.java │ │ │ ├── IntLongApplications.java │ │ │ ├── IntegerLongSource.java │ │ │ ├── LongSource.java │ │ │ ├── MultiplyByTwo.java │ │ │ ├── MultiplyIntLongByTwo.java │ │ │ ├── QueuingLongSource.java │ │ │ ├── SuccessException.java │ │ │ ├── SumReduce.java │ │ │ ├── SumReduceIntLong.java │ │ │ ├── TestUtils.java │ │ │ └── ValidatingCounter.java │ │ ├── operators │ │ │ └── MultiplyByTwoCoStreamMap.java │ │ └── thrift │ │ │ ├── MyOperation.java │ │ │ └── MyPojo.java │ │ └── state │ │ └── benchmark │ │ ├── ListStateBenchmark.java │ │ ├── MapStateBenchmark.java │ │ ├── StateBenchmarkBase.java │ │ ├── StateBenchmarkConstants.java │ │ └── ValueStateBenchmark.java └── resources │ ├── avro │ └── mypojo.avsc │ ├── log4j.properties │ ├── protobuf │ └── MyPojo.proto │ └── thrift │ └── mypojo.thrift └── test ├── java └── org │ └── apache │ └── flink │ └── benchmark │ ├── DataSkewStreamNetworkThroughputBenchmarkExecutor.java │ ├── StreamNetworkBroadcastThroughputBenchmarkExecutor.java │ ├── StreamNetworkLatencyBenchmarkExecutor.java │ └── 
StreamNetworkThroughputBenchmarkExecutor.java ├── log4j.properties └── resources ├── local127.keystore └── local127.truststore /.gitignore: -------------------------------------------------------------------------------- 1 | .cache 2 | .classpath 3 | .idea 4 | .metadata 5 | .settings 6 | .project 7 | .version.properties 8 | filter.properties 9 | target 10 | tmp 11 | *.class 12 | *.iml 13 | *.swp 14 | *.jar 15 | *.log 16 | .DS_Store 17 | atlassian-ide-plugin.xml 18 | out/ 19 | *.ipr 20 | *.iws 21 | jmh-result*.csv 22 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: java 2 | install: 3 | # Invoke the "travis" profile during Maven steps; see in pom.xml 4 | # files. The "mvn install" command will run by default during the "install" 5 | # phase by Travis, without the profile flag. Here we customize the install 6 | # phase to use the relevant profile. 7 | # https.protocols is a workaround for https://bugs.openjdk.java.net/browse/JDK-8213202 8 | - mvn install -P test -DskipTests=true -Dmaven.javadoc.skip=true -Dhttps.protocols=TLSv1,TLSv1.1,TLSv1.2 -B -V 9 | script: 10 | - mvn test -P test -B 11 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 
14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # flink-benchmarks 2 | 3 | This repository was archived while it's content was moved to http://github.com/apache/flink-benchmarks . If you ended up here you almost definitely want to just go to http://github.com/apache/flink-benchmarks . 
4 | -------------------------------------------------------------------------------- /pom.xml: -------------------------------------------------------------------------------- 1 | 19 | 21 | 4.0.0 22 | 23 | org.apache.flink.benchmark 24 | flink-hackathon-benchmarks 25 | 0.1 26 | jar 27 | 28 | Flink Benchmark Job 29 | http://www.myorganization.org 30 | 31 | 32 | UTF-8 33 | 1.11-SNAPSHOT 34 | 10.0 35 | 2.0.25.Final 36 | 1.8 37 | 2.11 38 | ${java.version} 39 | ${java.version} 40 | 1.7.7 41 | 1.2.17 42 | 1.19 43 | 4.12 44 | 1.8.2 45 | 2.21.0 46 | 1.6.0 47 | 3.1 48 | 0.7.6 49 | 0.13.0 50 | 3.11.4 51 | dynamic 52 | org.apache.flink.benchmark.full.*,org.apache.flink.state.benchmark.* 53 | .* 54 | 55 | 56 | 57 | 58 | apache.snapshots 59 | Apache Development Snapshot Repository 60 | https://repository.apache.org/content/repositories/snapshots/ 61 | 62 | false 63 | 64 | 65 | true 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | org.apache.flink 74 | flink-java 75 | ${flink.version} 76 | 77 | 78 | 79 | org.apache.flink 80 | flink-streaming-java_${scala.binary.version} 81 | ${flink.version} 82 | 83 | 84 | 85 | org.apache.flink 86 | flink-clients_${scala.binary.version} 87 | ${flink.version} 88 | 89 | 90 | 91 | org.apache.flink 92 | flink-runtime_${scala.binary.version} 93 | ${flink.version} 94 | test-jar 95 | test 96 | 97 | 98 | 99 | org.apache.flink 100 | flink-streaming-java_${scala.binary.version} 101 | ${flink.version} 102 | test-jar 103 | test 104 | 105 | 106 | 107 | org.apache.flink 108 | flink-test-utils-junit 109 | ${flink.version} 110 | 111 | 112 | 114 | 115 | org.slf4j 116 | slf4j-log4j12 117 | ${slf4j.version} 118 | 119 | 120 | 121 | junit 122 | junit 123 | ${junit.version} 124 | jar 125 | 126 | 127 | 128 | log4j 129 | log4j 130 | ${log4j.version} 131 | 132 | 133 | 134 | org.openjdk.jmh 135 | jmh-core 136 | ${jmh.version} 137 | 138 | 139 | 140 | org.openjdk.jmh 141 | jmh-generator-annprocess 142 | ${jmh.version} 143 | 144 | 145 | 146 | org.apache.flink 147 | 
flink-statebackend-rocksdb_${scala.binary.version} 148 | ${flink.version} 149 | 150 | 151 | 152 | org.apache.flink 153 | flink-statebackend-rocksdb_${scala.binary.version} 154 | ${flink.version} 155 | test-jar 156 | 157 | 158 | 159 | org.apache.flink 160 | flink-avro 161 | ${flink.version} 162 | 163 | 164 | 165 | org.apache.flink 166 | flink-table-planner_${scala.binary.version} 167 | ${flink.version} 168 | test-jar 169 | 170 | 171 | 172 | org.apache.flink 173 | flink-tests 174 | ${flink.version} 175 | test-jar 176 | 177 | 178 | 179 | org.apache.flink 180 | flink-examples-batch_${scala.binary.version} 181 | ${flink.version} 182 | test-jar 183 | 184 | 185 | 186 | org.apache.flink 187 | flink-cep_${scala.binary.version} 188 | ${flink.version} 189 | 190 | 191 | 192 | org.apache.flink 193 | flink-shaded-netty-tcnative-${netty-tcnative.flavor} 194 | ${netty.tcnative.version}-${flink.shaded.version} 195 | 196 | 197 | 198 | org.mockito 199 | mockito-core 200 | ${mockito.version} 201 | jar 202 | test 203 | 204 | 205 | 206 | 207 | 208 | 209 | com.twitter 210 | chill-thrift 211 | ${chill.version} 212 | 213 | 214 | 215 | com.esotericsoftware.kryo 216 | kryo 217 | 218 | 219 | 220 | 221 | org.apache.thrift 222 | libthrift 223 | ${thrift.version} 224 | 225 | 226 | javax.servlet 227 | servlet-api 228 | 229 | 230 | org.apache.httpcomponents 231 | httpclient 232 | 233 | 234 | 235 | 236 | 237 | com.twitter 238 | chill-protobuf 239 | ${chill.version} 240 | 241 | 242 | 243 | com.esotericsoftware.kryo 244 | kryo 245 | 246 | 247 | 248 | 249 | com.google.protobuf 250 | protobuf-java 251 | ${protobuf.version} 252 | 253 | 254 | 255 | 256 | 257 | test 258 | 259 | false 260 | 261 | 262 | 263 | 264 | org.codehaus.mojo 265 | exec-maven-plugin 266 | ${maven.exec.version} 267 | 268 | 269 | test-benchmarks 270 | test 271 | 272 | exec 273 | 274 | 275 | 276 | 277 | ${skipTests} 278 | test 279 | java 280 | 281 | -Xmx6g 282 | -classpath 283 | 284 | org.openjdk.jmh.Main 285 | 286 | -foe 287 | true 288 
| 289 | -f 290 | 1 291 | -i 292 | 1 293 | -wi 294 | 0 295 | -rf 296 | csv 297 | .* 298 | 299 | 300 | 301 | 302 | 303 | 304 | 305 | 306 | benchmark 307 | 308 | true 309 | 310 | benchmarks 311 | 312 | 313 | 314 | 315 | 316 | org.codehaus.mojo 317 | exec-maven-plugin 318 | ${maven.exec.version} 319 | 320 | 321 | run-benchmarks 322 | 323 | exec 324 | 325 | 326 | 327 | 328 | test 329 | java 330 | 331 | -classpath 332 | 333 | org.openjdk.jmh.Main 334 | -e 335 | ${benchmarkExcludes} 336 | -rf 337 | csv 338 | ${benchmarks} 339 | 340 | 341 | 342 | 343 | 344 | org.apache.maven.plugins 345 | maven-shade-plugin 346 | 3.2.0 347 | 348 | 349 | package 350 | 351 | shade 352 | 353 | 354 | benchmarks 355 | 356 | 357 | org.openjdk.jmh.Main 358 | 359 | 360 | reference.conf 361 | 362 | 363 | 364 | 365 | Apache Flink 366 | 367 | 368 | 369 | 370 | 371 | 372 | 373 | 374 | 375 | 376 | custom-benchmark 377 | 378 | 379 | benchmarks 380 | 381 | 382 | 383 | "" 384 | 385 | 386 | 387 | 388 | generate-thrift 389 | 390 | 391 | generate-thrift 392 | 393 | 394 | 395 | 396 | 397 | 398 | org.apache.thrift 399 | thrift-maven-plugin 400 | 0.10.0 401 | 402 | ${project.basedir}/src/main/resources/thrift 403 | ${project.basedir}/src/main/java/ 404 | 405 | 406 | 407 | thrift-sources 408 | generate-sources 409 | 410 | compile 411 | 412 | 413 | 414 | thrift-test-sources 415 | generate-test-sources 416 | 417 | testCompile 418 | 419 | 420 | 421 | 422 | 423 | 424 | 425 | 426 | 427 | 428 | 429 | 430 | 431 | kr.motd.maven 432 | os-maven-plugin 433 | 1.6.2 434 | 435 | 436 | 437 | 438 | org.apache.maven.plugins 439 | maven-compiler-plugin 440 | ${maven.compiler.version} 441 | 442 | ${java.version} 443 | ${java.version} 444 | 445 | 446 | 447 | org.apache.rat 448 | apache-rat-plugin 449 | 0.13 450 | false 451 | 452 | 453 | verify 454 | 455 | check 456 | 457 | 458 | 459 | 460 | false 461 | 0 462 | 463 | 479 | 480 | AL2 481 | Apache License 2.0 482 | 483 | 484 | Licensed to the Apache Software Foundation (ASF) under one 
485 | 486 | 487 | 488 | 489 | 490 | Apache License 2.0 491 | 492 | 493 | 494 | 495 | **/.*/** 496 | **/*.prefs 497 | **/*.log 498 | 499 | 500 | **/README.md 501 | .github/** 502 | 503 | **/*.iml 504 | 505 | out/** 506 | **/target/** 507 | 508 | apache-maven-3.2.5/** 509 | 510 | **/.idea/** 511 | 512 | 513 | 514 | 515 | 516 | 517 | org.apache.avro 518 | avro-maven-plugin 519 | ${avro.version} 520 | 521 | 522 | generate-sources 523 | 524 | schema 525 | 526 | 527 | ${project.basedir}/src/main/resources/avro 528 | 529 | 530 | 531 | 532 | 533 | 534 | 535 | 536 | org.xolstice.maven.plugins 537 | protobuf-maven-plugin 538 | 0.6.1 539 | 540 | ${project.basedir}/src/main/resources/protobuf 541 | com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier} 542 | 543 | 544 | 545 | 546 | compile 547 | test-compile 548 | 549 | 550 | 551 | 552 | 553 | 554 | 555 | -------------------------------------------------------------------------------- /save_jmh_result.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | ################################################################################ 4 | # Licensed to the Apache Software Foundation (ASF) under one 5 | # or more contributor license agreements. See the NOTICE file 6 | # distributed with this work for additional information 7 | # regarding copyright ownership. The ASF licenses this file 8 | # to you under the Apache License, Version 2.0 (the 9 | # "License"); you may not use this file except in compliance 10 | # with the License. You may obtain a copy of the License at 11 | # 12 | # http://www.apache.org/licenses/LICENSE-2.0 13 | # 14 | # Unless required by applicable law or agreed to in writing, software 15 | # distributed under the License is distributed on an "AS IS" BASIS, 16 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
17 | # See the License for the specific language governing permissions and 18 | # limitations under the License. 19 | ################################################################################ 20 | 21 | #################################################### 22 | # Sample script that shows how to save result data # 23 | #################################################### 24 | import datetime 25 | import os 26 | import urllib 27 | import urllib2 28 | import argparse 29 | import csv 30 | import json 31 | import random 32 | 33 | # You need to enter the real URL and have the server running 34 | DEFAULT_CODESPEED_URL = 'http://localhost:8000/' 35 | 36 | current_date = datetime.datetime.today() 37 | 38 | 39 | parser = argparse.ArgumentParser(description='Upload jmh benchmark csv results') 40 | parser.add_argument('--commit', dest='commit', required=True, 41 | help='md5') 42 | parser.add_argument('--branch', dest='branch', required=True) 43 | parser.add_argument('--input', dest='input', required=False, 44 | help='input csv file') 45 | parser.add_argument('--environment', dest='environment', required=True) 46 | parser.add_argument('--dry', dest='dry', action='store_true') 47 | parser.add_argument('--codespeed', dest='codespeed', default=DEFAULT_CODESPEED_URL, 48 | help='codespeed url, default: %s' % DEFAULT_CODESPEED_URL) 49 | parser.add_argument('--project', dest='project', default="Flink") 50 | parser.add_argument('--exec', dest='executable', default="Flink") 51 | 52 | def readData(args): 53 | results = [] 54 | if args.input: 55 | path = args.input 56 | else: 57 | path = "jmh-result.csv" 58 | modificationDate = datetime.datetime.fromtimestamp(os.path.getmtime(path)) 59 | #modificationDate = datetime.date(2016, 8, int(args.commit)) 60 | 61 | with open(path) as csvFile: 62 | reader = csv.reader(csvFile, delimiter=",") 63 | lines = [line for line in reader] 64 | header = lines[0] 65 | params = sorted(filter(lambda s : s.startswith("Param"), header)) 66 | paramIndexes = 
map(lambda param : header.index(param), params) 67 | benchmarkIndex = header.index("Benchmark") 68 | scoreIndex = header.index("Score") 69 | errorIndex = scoreIndex + 1 70 | 71 | for line in lines[1:]: 72 | name = line[benchmarkIndex].split(".")[-1] 73 | if len(paramIndexes) > 0: 74 | for paramIndex in paramIndexes: 75 | if len(line[paramIndex]) > 0: 76 | name += "." + line[paramIndex] 77 | 78 | results.append({ 79 | 'commitid': args.commit, 80 | 'branch': args.branch, 81 | 'project': args.project, 82 | 'executable': args.executable, 83 | 'benchmark': name, 84 | 'environment': args.environment, 85 | 'lessisbetter': False, 86 | 'units': 'records/ms', 87 | 'result_value': float(line[scoreIndex]), 88 | 89 | 'revision_date': str(modificationDate), 90 | 'result_date': str(modificationDate), 91 | 'std_dev': line[errorIndex], # Optional. Default is blank 92 | }) 93 | return results 94 | 95 | def add(data, codespeedUrl): 96 | #params = urllib.urlencode(data) 97 | response = "None" 98 | try: 99 | f = urllib2.urlopen( 100 | codespeedUrl + 'result/add/json/', urllib.urlencode(data)) 101 | except urllib2.HTTPError as e: 102 | print str(e) 103 | print e.read() 104 | return 105 | response = f.read() 106 | f.close() 107 | print "Server (%s) response: %s\n" % (codespeedUrl, response) 108 | 109 | if __name__ == "__main__": 110 | args = parser.parse_args() 111 | 112 | data = json.dumps(readData(args), indent=4, sort_keys=True) 113 | if args.dry: 114 | print data 115 | else: 116 | add({'json': data}, args.codespeed) 117 | -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/benchmark/AsyncWaitOperatorBenchmark.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 
5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package org.apache.flink.benchmark; 19 | 20 | import org.apache.flink.benchmark.functions.LongSource; 21 | import org.apache.flink.streaming.api.datastream.AsyncDataStream; 22 | import org.apache.flink.streaming.api.datastream.DataStream; 23 | import org.apache.flink.streaming.api.datastream.DataStreamSource; 24 | import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; 25 | import org.apache.flink.streaming.api.functions.async.ResultFuture; 26 | import org.apache.flink.streaming.api.functions.async.RichAsyncFunction; 27 | import org.apache.flink.streaming.api.functions.sink.DiscardingSink; 28 | 29 | import org.openjdk.jmh.annotations.Benchmark; 30 | import org.openjdk.jmh.annotations.OperationsPerInvocation; 31 | import org.openjdk.jmh.annotations.Param; 32 | import org.openjdk.jmh.annotations.Setup; 33 | import org.openjdk.jmh.annotations.TearDown; 34 | import org.openjdk.jmh.runner.Runner; 35 | import org.openjdk.jmh.runner.RunnerException; 36 | import org.openjdk.jmh.runner.options.Options; 37 | import org.openjdk.jmh.runner.options.OptionsBuilder; 38 | import org.openjdk.jmh.runner.options.VerboseMode; 39 | 40 | import java.util.Collections; 41 | import java.util.concurrent.ExecutorService; 42 | import java.util.concurrent.Executors; 43 | import java.util.concurrent.TimeUnit; 44 | 45 | @OperationsPerInvocation(value = 
AsyncWaitOperatorBenchmark.RECORDS_PER_INVOCATION) 46 | public class AsyncWaitOperatorBenchmark extends BenchmarkBase { 47 | public static final int RECORDS_PER_INVOCATION = 1_000_000; 48 | 49 | private static final long CHECKPOINT_INTERVAL_MS = 100; 50 | 51 | private static ExecutorService executor; 52 | 53 | @Param 54 | public AsyncDataStream.OutputMode outputMode; 55 | 56 | public static void main(String[] args) 57 | throws RunnerException { 58 | Options options = new OptionsBuilder() 59 | .verbosity(VerboseMode.NORMAL) 60 | .include(".*" + AsyncWaitOperatorBenchmark.class.getCanonicalName() + ".*") 61 | .build(); 62 | 63 | new Runner(options).run(); 64 | } 65 | 66 | @Setup 67 | public void setUp() { 68 | executor = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors()); 69 | } 70 | 71 | @TearDown 72 | public void tearDown() { 73 | executor.shutdown(); 74 | } 75 | 76 | @Benchmark 77 | public void asyncWait(FlinkEnvironmentContext context) throws Exception { 78 | 79 | StreamExecutionEnvironment env = context.env; 80 | env.enableCheckpointing(CHECKPOINT_INTERVAL_MS); 81 | env.setParallelism(1); 82 | 83 | DataStreamSource source = env.addSource(new LongSource(RECORDS_PER_INVOCATION)); 84 | DataStream result = createAsyncOperator(source); 85 | result.addSink(new DiscardingSink<>()); 86 | 87 | env.execute(); 88 | } 89 | 90 | private DataStream createAsyncOperator(DataStreamSource source) { 91 | switch (outputMode) { 92 | case ORDERED: 93 | return AsyncDataStream.orderedWait( 94 | source, 95 | new BenchmarkAsyncFunctionExecutor(), 96 | 0, 97 | TimeUnit.MILLISECONDS); 98 | case UNORDERED: 99 | return AsyncDataStream.unorderedWait( 100 | source, 101 | new BenchmarkAsyncFunctionExecutor(), 102 | 0, 103 | TimeUnit.MILLISECONDS); 104 | default: 105 | throw new UnsupportedOperationException("Unknown mode"); 106 | } 107 | } 108 | 109 | private static class BenchmarkAsyncFunctionExecutor extends RichAsyncFunction { 110 | @Override 111 | public void 
asyncInvoke(Long input, ResultFuture resultFuture) { 112 | executor.execute(() -> resultFuture.complete(Collections.singleton(input * 2))); 113 | } 114 | } 115 | } 116 | -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/benchmark/BenchmarkBase.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
17 | */ 18 | 19 | package org.apache.flink.benchmark; 20 | 21 | import org.openjdk.jmh.annotations.BenchmarkMode; 22 | import org.openjdk.jmh.annotations.Fork; 23 | import org.openjdk.jmh.annotations.Measurement; 24 | import org.openjdk.jmh.annotations.OutputTimeUnit; 25 | import org.openjdk.jmh.annotations.State; 26 | import org.openjdk.jmh.annotations.Warmup; 27 | 28 | import static java.util.concurrent.TimeUnit.MILLISECONDS; 29 | import static org.openjdk.jmh.annotations.Mode.Throughput; 30 | import static org.openjdk.jmh.annotations.Scope.Thread; 31 | 32 | /* Base class holding the JMH configuration shared by every benchmark in this module: throughput mode, ms output unit, per-thread state, 3 forks with JMX monitoring flags, 10 warmup + 10 measurement iterations. */ @SuppressWarnings("MethodMayBeStatic") 33 | @State(Thread) 34 | @OutputTimeUnit(MILLISECONDS) 35 | @BenchmarkMode(Throughput) 36 | @Fork(value = 3, jvmArgsAppend = { 37 | "-Djava.rmi.server.hostname=127.0.0.1", 38 | "-Dcom.sun.management.jmxremote.authenticate=false", 39 | /* FIX(review): the list previously ended with an extra bare "-Dcom.sun.management.jmxremote.ssl" after this entry. Because a later -D definition of the same system property wins, that bare flag re-set the property to the empty string and clobbered the intended "ssl=false". The dangling duplicate has been removed. */ "-Dcom.sun.management.jmxremote.ssl=false"}) 41 | @Warmup(iterations = 10) 42 | @Measurement(iterations = 10) 43 | public class BenchmarkBase { 44 | } 45 | -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/benchmark/BlockingPartitionBenchmark.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License.
You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.flink.benchmark; 20 | 21 | import org.apache.flink.benchmark.functions.LongSource; 22 | import org.apache.flink.configuration.Configuration; 23 | import org.apache.flink.configuration.CoreOptions; 24 | import org.apache.flink.configuration.NettyShuffleEnvironmentOptions; 25 | import org.apache.flink.runtime.jobgraph.ScheduleMode; 26 | import org.apache.flink.streaming.api.datastream.DataStreamSource; 27 | import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; 28 | import org.apache.flink.streaming.api.functions.sink.DiscardingSink; 29 | import org.apache.flink.streaming.api.graph.GlobalDataExchangeMode; 30 | import org.apache.flink.streaming.api.graph.StreamGraph; 31 | import org.apache.flink.util.FileUtils; 32 | 33 | import org.openjdk.jmh.annotations.Benchmark; 34 | import org.openjdk.jmh.annotations.OperationsPerInvocation; 35 | import org.openjdk.jmh.annotations.Setup; 36 | import org.openjdk.jmh.runner.Runner; 37 | import org.openjdk.jmh.runner.RunnerException; 38 | import org.openjdk.jmh.runner.options.Options; 39 | import org.openjdk.jmh.runner.options.OptionsBuilder; 40 | import org.openjdk.jmh.runner.options.VerboseMode; 41 | 42 | import java.io.IOException; 43 | 44 | /** 45 | * JMH throughput benchmark runner.
46 | */ 47 | @OperationsPerInvocation(value = BlockingPartitionBenchmark.RECORDS_PER_INVOCATION) 48 | /* Compares blocking-partition (batch shuffle) variants: uncompressed file, compressed file, and uncompressed mmap subpartitions — one @Benchmark per JMH environment context below. */ public class BlockingPartitionBenchmark extends BenchmarkBase { 49 | 50 | public static final int RECORDS_PER_INVOCATION = 15_000_000; 51 | 52 | public static void main(String[] args) 53 | throws RunnerException { 54 | Options options = new OptionsBuilder() 55 | .verbosity(VerboseMode.NORMAL) 56 | .include(".*" + BlockingPartitionBenchmark.class.getCanonicalName() + ".*") 57 | .build(); 58 | 59 | new Runner(options).run(); 60 | } 61 | 62 | @Benchmark 63 | public void uncompressedFilePartition(UncompressedFileEnvironmentContext context) throws Exception { 64 | executeBenchmark(context.env); 65 | } 66 | 67 | @Benchmark 68 | public void compressedFilePartition(CompressedFileEnvironmentContext context) throws Exception { 69 | executeBenchmark(context.env); 70 | } 71 | 72 | @Benchmark 73 | public void uncompressedMmapPartition(UncompressedMmapEnvironmentContext context) throws Exception { 74 | executeBenchmark(context.env); 75 | } 76 | 77 | /* Wires LongSource -> DiscardingSink, then disables chaining, forces ALL_EDGES_BLOCKING data exchange and lazy-from-sources batch scheduling on the stream graph before executing. */ private void executeBenchmark(StreamExecutionEnvironment env) throws Exception { 78 | DataStreamSource source = env.addSource(new LongSource(RECORDS_PER_INVOCATION)); 79 | source.addSink(new DiscardingSink<>()); 80 | 81 | StreamGraph streamGraph = env.getStreamGraph(); 82 | streamGraph.setChaining(false); 83 | streamGraph.setGlobalDataExchangeMode(GlobalDataExchangeMode.ALL_EDGES_BLOCKING); 84 | streamGraph.setScheduleMode(ScheduleMode.LAZY_FROM_SOURCES_WITH_BATCH_SLOT_REQUEST); 85 | 86 | env.execute(streamGraph); 87 | } 88 | 89 | /** 90 | * Setup for the benchmark(s). 91 | */ 92 | public static class BlockingPartitionEnvironmentContext extends FlinkEnvironmentContext { 93 | 94 | /** 95 | * Parallelism of 1 causes the reads/writes to be always sequential and only covers the case 96 | * of one reader. More parallelism should be more suitable for finding performance regressions 97 | * of the code.
Considering that the benchmarking machine has 4 CPU cores, we set the parallelism 98 | * to 4. 99 | */ 100 | private final int parallelism = 4; 101 | 102 | @Setup 103 | public void setUp() throws IOException { 104 | super.setUp(); 105 | 106 | env.setParallelism(parallelism); 107 | /* NOTE(review): bufferTimeout(-1) — presumably disables periodic buffer flushing so buffers are emitted only when full; confirm against StreamExecutionEnvironment docs. */ env.setBufferTimeout(-1); 108 | } 109 | 110 | /* Extends the base config: toggles blocking-shuffle compression, selects the "file" vs "mmap" subpartition implementation, and points TMP_DIRS at the current working directory. */ protected Configuration createConfiguration(boolean compressionEnabled, String subpartitionType) { 111 | Configuration configuration = super.createConfiguration(); 112 | 113 | configuration.setBoolean(NettyShuffleEnvironmentOptions.BLOCKING_SHUFFLE_COMPRESSION_ENABLED, compressionEnabled); 114 | configuration.setString(NettyShuffleEnvironmentOptions.NETWORK_BLOCKING_SHUFFLE_TYPE, subpartitionType); 115 | configuration.setString(CoreOptions.TMP_DIRS, FileUtils.getCurrentWorkingDirectory().toAbsolutePath().toUri().toString()); 116 | return configuration; 117 | } 118 | } 119 | 120 | public static class UncompressedFileEnvironmentContext extends BlockingPartitionEnvironmentContext { 121 | @Override 122 | protected Configuration createConfiguration() { 123 | return createConfiguration(false, "file"); 124 | } 125 | } 126 | 127 | public static class CompressedFileEnvironmentContext extends BlockingPartitionEnvironmentContext { 128 | @Override 129 | protected Configuration createConfiguration() { 130 | return createConfiguration(true, "file"); 131 | } 132 | } 133 | 134 | public static class UncompressedMmapEnvironmentContext extends BlockingPartitionEnvironmentContext { 135 | @Override 136 | protected Configuration createConfiguration() { 137 | return createConfiguration(false, "mmap"); 138 | } 139 | } 140 | } 141 | -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/benchmark/CollectSink.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements.
See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.flink.benchmark; 20 | 21 | import org.apache.flink.streaming.api.functions.sink.SinkFunction; 22 | 23 | import java.util.ArrayList; 24 | import java.util.List; 25 | 26 | /** 27 | * Test sink that appends every incoming record to the in-memory {@code result} ArrayList. NOTE(review): generic type parameters were stripped in this dump — the body uses {@code T} while the declaration is raw, so this was presumably {@code CollectSink<T> implements SinkFunction<T>} with {@code List<T> result}. 28 | */ 29 | public class CollectSink implements SinkFunction { 30 | public final List result = new ArrayList<>(); 31 | 32 | @Override 33 | public void invoke(T value) throws Exception { 34 | result.add(value); 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/benchmark/ContinuousFileReaderOperatorBenchmark.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License.
You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package org.apache.flink.benchmark; 19 | 20 | import joptsimple.internal.Strings; 21 | import org.apache.flink.api.common.io.FileInputFormat; 22 | import org.apache.flink.api.common.restartstrategy.RestartStrategies; 23 | import org.apache.flink.api.common.typeinfo.TypeInformation; 24 | import org.apache.flink.configuration.Configuration; 25 | import org.apache.flink.core.fs.FileInputSplit; 26 | import org.apache.flink.core.fs.Path; 27 | import org.apache.flink.core.testutils.OneShotLatch; 28 | import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; 29 | import org.apache.flink.streaming.api.functions.sink.SinkFunction; 30 | import org.apache.flink.streaming.api.functions.source.ContinuousFileReaderOperatorFactory; 31 | import org.apache.flink.streaming.api.functions.source.SourceFunction; 32 | import org.apache.flink.streaming.api.functions.source.TimestampedFileInputSplit; 33 | import org.openjdk.jmh.annotations.Benchmark; 34 | import org.openjdk.jmh.annotations.Level; 35 | import org.openjdk.jmh.annotations.OperationsPerInvocation; 36 | import org.openjdk.jmh.annotations.TearDown; 37 | import org.openjdk.jmh.runner.Runner; 38 | import org.openjdk.jmh.runner.RunnerException; 39 | import org.openjdk.jmh.runner.options.Options; 40 | import org.openjdk.jmh.runner.options.OptionsBuilder; 41 | import org.openjdk.jmh.runner.options.VerboseMode; 42 | 43 | import java.util.concurrent.TimeUnit; 44 | import java.util.concurrent.TimeoutException; 45 | 46 | @OperationsPerInvocation(value =
ContinuousFileReaderOperatorBenchmark.RECORDS_PER_INVOCATION) 47 | /* Measures ContinuousFileReaderOperator throughput: a mock source emits SPLITS_PER_INVOCATION file splits, a mock input format serves LINES_PER_SPLIT constant lines per split without touching disk, and the sink trips a latch when all records have arrived so the source can exit. */ public class ContinuousFileReaderOperatorBenchmark extends BenchmarkBase { 48 | private static final int SPLITS_PER_INVOCATION = 100; 49 | private static final int LINES_PER_SPLIT = 175_000; 50 | public static final int RECORDS_PER_INVOCATION = SPLITS_PER_INVOCATION * LINES_PER_SPLIT; 51 | 52 | private static final TimestampedFileInputSplit SPLIT = new TimestampedFileInputSplit(0, 0, new Path("."), 0, 0, new String[]{}); 53 | private static final String LINE = Strings.repeat('0', 10); 54 | 55 | // Source should wait until all elements reach sink. Otherwise, END_OF_INPUT is sent once all splits are emitted. 56 | // Thus, all subsequent reads in ContinuousFileReaderOperator would be made in CLOSING state in a simple while-true loop (MailboxExecutor.isIdle is always true). 57 | private static OneShotLatch TARGET_COUNT_REACHED_LATCH = new OneShotLatch(); 58 | 59 | public static void main(String[] args) 60 | throws RunnerException { 61 | Options options = new OptionsBuilder() 62 | .verbosity(VerboseMode.NORMAL) 63 | .include(".*" + ContinuousFileReaderOperatorBenchmark.class.getCanonicalName() + ".*") 64 | .build(); 65 | 66 | new Runner(options).run(); 67 | } 68 | 69 | /* Reset the shared latch between iterations so each invocation waits for its own completion. */ @TearDown(Level.Iteration) 70 | public void tearDown() { 71 | TARGET_COUNT_REACHED_LATCH.reset(); 72 | } 73 | 74 | @Benchmark 75 | public void readFileSplit(FlinkEnvironmentContext context) throws Exception { 76 | StreamExecutionEnvironment env = context.env; 77 | env.setRestartStrategy(new RestartStrategies.NoRestartStrategyConfiguration()); 78 | env 79 | .enableCheckpointing(100) 80 | .setParallelism(1) 81 | .addSource(new MockSourceFunction()) 82 | .transform("fileReader", TypeInformation.of(String.class), 83 | new ContinuousFileReaderOperatorFactory<>(new MockInputFormat())) 84 | .addSink(new LimitedSink()); 85 | 86 | env.execute(); 87 | } 88 | 89 | /* Emits SPLITS_PER_INVOCATION copies of the shared SPLIT under the checkpoint lock, then blocks until the sink's latch fires (polling every 100 ms) or the source is cancelled. */ private static class MockSourceFunction implements SourceFunction { 90 | private
volatile boolean isRunning = true; 91 | private int count = 0; 92 | 93 | @Override 94 | public void run(SourceContext ctx) { 95 | while (isRunning && count < SPLITS_PER_INVOCATION) { 96 | count++; 97 | synchronized (ctx.getCheckpointLock()) { 98 | ctx.collect(SPLIT); 99 | } 100 | } 101 | while (isRunning) { 102 | try { 103 | TARGET_COUNT_REACHED_LATCH.await(100, TimeUnit.MILLISECONDS); 104 | return; 105 | } catch (InterruptedException e) { 106 | /* NOTE(review): the interrupt flag is only restored when cancel() has been called; an interrupt while still running is swallowed and the wait retried — confirm this is intentional. */ if (!isRunning) { 107 | Thread.currentThread().interrupt(); 108 | } 109 | } catch (TimeoutException e) { 110 | // continue waiting 111 | } 112 | } 113 | } 114 | 115 | @Override 116 | public void cancel() { 117 | isRunning = false; 118 | } 119 | } 120 | 121 | /* In-memory stand-in for a file format: returns LINE for LINES_PER_SPLIT records per split; open() and configure() are overridden so no real file or settings are required. */ private static class MockInputFormat extends FileInputFormat { 122 | private transient int count = 0; 123 | 124 | @Override 125 | public boolean reachedEnd() { 126 | return count >= ContinuousFileReaderOperatorBenchmark.LINES_PER_SPLIT; 127 | } 128 | 129 | @Override 130 | public String nextRecord(String s) { 131 | count++; 132 | return LINE; 133 | } 134 | 135 | @Override 136 | public void open(FileInputSplit fileSplit) { 137 | count = 0; 138 | // prevent super from accessing file 139 | } 140 | 141 | @Override 142 | public void configure(Configuration parameters) { 143 | // prevent super from requiring certain settings (input.file.path) 144 | } 145 | } 146 | 147 | /* Counts records and trips TARGET_COUNT_REACHED_LATCH after RECORDS_PER_INVOCATION, letting the source exit. */ private static class LimitedSink implements SinkFunction { 148 | private int count; 149 | 150 | @Override 151 | public void invoke(String value, Context context) { 152 | if (++count == RECORDS_PER_INVOCATION) { 153 | TARGET_COUNT_REACHED_LATCH.trigger(); 154 | } 155 | } 156 | } 157 | } 158 | -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/benchmark/FlinkEnvironmentContext.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more
contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.flink.benchmark; 20 | 21 | import org.apache.flink.configuration.Configuration; 22 | import org.apache.flink.configuration.NettyShuffleEnvironmentOptions; 23 | import org.apache.flink.runtime.state.memory.MemoryStateBackend; 24 | import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; 25 | 26 | import org.openjdk.jmh.annotations.Setup; 27 | import org.openjdk.jmh.annotations.State; 28 | 29 | import java.io.IOException; 30 | 31 | import static org.openjdk.jmh.annotations.Scope.Thread; 32 | 33 | /* Per-thread JMH state shared by the benchmarks: a local StreamExecutionEnvironment configured with 1000 network buffers, parallelism 1, object reuse enabled, and a MemoryStateBackend. */ @State(Thread) 34 | public class FlinkEnvironmentContext { 35 | 36 | public static final int NUM_NETWORK_BUFFERS = 1000; 37 | 38 | public final StreamExecutionEnvironment env = getStreamExecutionEnvironment(); 39 | 40 | protected final int parallelism = 1; 41 | protected final boolean objectReuse = true; 42 | 43 | @Setup 44 | public void setUp() throws IOException { 45 | // set up the execution environment 46 | env.setParallelism(parallelism); 47 | env.getConfig().disableSysoutLogging(); 48 | if (objectReuse) { 49 | env.getConfig().enableObjectReuse(); 50 | } 51 | 52 | env.setStateBackend(new MemoryStateBackend()); 53 | } 54 | 55 | public void execute() throws
Exception { 56 | env.execute(); 57 | } 58 | 59 | protected Configuration createConfiguration() { 60 | final Configuration configuration = new Configuration(); 61 | configuration.setInteger(NettyShuffleEnvironmentOptions.NETWORK_NUM_BUFFERS, NUM_NETWORK_BUFFERS); 62 | return configuration; 63 | } 64 | 65 | private StreamExecutionEnvironment getStreamExecutionEnvironment() { 66 | /* NOTE(review): hard-codes parallelism 1 here rather than reusing the parallelism field; setUp() re-applies setParallelism(parallelism) afterwards, so behavior matches, but the two should probably be unified. */ return StreamExecutionEnvironment.createLocalEnvironment(1, createConfiguration()); 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/benchmark/InputBenchmark.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License.
17 | */ 18 | 19 | package org.apache.flink.benchmark; 20 | 21 | import org.apache.flink.benchmark.functions.LongSource; 22 | import org.apache.flink.benchmark.functions.MultiplyByTwo; 23 | import org.apache.flink.streaming.api.datastream.DataStreamSource; 24 | import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; 25 | import org.apache.flink.streaming.api.functions.sink.DiscardingSink; 26 | 27 | import org.openjdk.jmh.annotations.Benchmark; 28 | import org.openjdk.jmh.annotations.OperationsPerInvocation; 29 | import org.openjdk.jmh.runner.Runner; 30 | import org.openjdk.jmh.runner.RunnerException; 31 | import org.openjdk.jmh.runner.options.Options; 32 | import org.openjdk.jmh.runner.options.OptionsBuilder; 33 | import org.openjdk.jmh.runner.options.VerboseMode; 34 | 35 | @OperationsPerInvocation(value = InputBenchmark.RECORDS_PER_INVOCATION) 36 | /* Throughput of a basic source -> map -> sink chain at parallelism 1 with 100 ms checkpointing, with and without a rebalance plus identity map inserted between the map and the sink. */ public class InputBenchmark extends BenchmarkBase { 37 | 38 | public static final int RECORDS_PER_INVOCATION = 15_000_000; 39 | private static final long CHECKPOINT_INTERVAL_MS = 100; 40 | 41 | public static void main(String[] args) 42 | throws RunnerException { 43 | Options options = new OptionsBuilder() 44 | .verbosity(VerboseMode.NORMAL) 45 | .include(".*" + InputBenchmark.class.getCanonicalName() + ".*") 46 | .build(); 47 | 48 | new Runner(options).run(); 49 | } 50 | 51 | @Benchmark 52 | public void mapSink(FlinkEnvironmentContext context) throws Exception { 53 | 54 | StreamExecutionEnvironment env = context.env; 55 | env.enableCheckpointing(CHECKPOINT_INTERVAL_MS); 56 | env.setParallelism(1); 57 | 58 | DataStreamSource source = env.addSource(new LongSource(RECORDS_PER_INVOCATION)); 59 | source 60 | .map(new MultiplyByTwo()) 61 | .addSink(new DiscardingSink<>()); 62 | 63 | env.execute(); 64 | } 65 | 66 | @Benchmark 67 | public void mapRebalanceMapSink(FlinkEnvironmentContext context) throws Exception { 68 | 69 | StreamExecutionEnvironment env = context.env; 70 |
env.enableCheckpointing(CHECKPOINT_INTERVAL_MS); 71 | env.setParallelism(1); 72 | 73 | DataStreamSource source = env.addSource(new LongSource(RECORDS_PER_INVOCATION)); 74 | source 75 | .map(new MultiplyByTwo()) 76 | .rebalance() 77 | .map((Long in) -> in) 78 | .addSink(new DiscardingSink<>()); 79 | 80 | env.execute(); 81 | } 82 | } -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/benchmark/KeyByBenchmarks.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License.
17 | */ 18 | 19 | package org.apache.flink.benchmark; 20 | 21 | import org.apache.flink.api.java.tuple.Tuple2; 22 | import org.apache.flink.benchmark.functions.BaseSourceWithKeyRange; 23 | import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; 24 | import org.apache.flink.streaming.api.functions.sink.DiscardingSink; 25 | 26 | import org.openjdk.jmh.annotations.Benchmark; 27 | import org.openjdk.jmh.annotations.OperationsPerInvocation; 28 | import org.openjdk.jmh.runner.Runner; 29 | import org.openjdk.jmh.runner.RunnerException; 30 | import org.openjdk.jmh.runner.options.Options; 31 | import org.openjdk.jmh.runner.options.OptionsBuilder; 32 | import org.openjdk.jmh.runner.options.VerboseMode; 33 | 34 | /** 35 | * Benchmark for keyBy() on tuples and arrays. 36 | */ 37 | public class KeyByBenchmarks extends BenchmarkBase { 38 | 39 | private static final int TUPLE_RECORDS_PER_INVOCATION = 15_000_000; 40 | private static final int ARRAY_RECORDS_PER_INVOCATION = 7_000_000; 41 | 42 | public static void main(String[] args) 43 | throws RunnerException { 44 | Options options = new OptionsBuilder() 45 | .verbosity(VerboseMode.NORMAL) 46 | .include(".*" + KeyByBenchmarks.class.getCanonicalName() + ".*") 47 | .build(); 48 | 49 | new Runner(options).run(); 50 | } 51 | 52 | /* keyBy on field 0 of Tuple2 records, parallelism 4, 10 distinct keys. */ @Benchmark 53 | @OperationsPerInvocation(value = KeyByBenchmarks.TUPLE_RECORDS_PER_INVOCATION) 54 | public void tupleKeyBy(FlinkEnvironmentContext context) throws Exception { 55 | StreamExecutionEnvironment env = context.env; 56 | env.setParallelism(4); 57 | 58 | env.addSource(new IncreasingTupleSource(TUPLE_RECORDS_PER_INVOCATION, 10)) 59 | .keyBy(0) 60 | .addSink(new DiscardingSink<>()); 61 | 62 | env.execute(); 63 | } 64 | 65 | /* keyBy on index 0 of int[] records, parallelism 4, 10 distinct keys. */ @Benchmark 66 | @OperationsPerInvocation(value = KeyByBenchmarks.ARRAY_RECORDS_PER_INVOCATION) 67 | public void arrayKeyBy(FlinkEnvironmentContext context) throws Exception { 68 | StreamExecutionEnvironment env = context.env; 69 | env.setParallelism(4); 70 | 71
| env.addSource(new IncreasingArraySource(ARRAY_RECORDS_PER_INVOCATION, 10)) 72 | .keyBy(0) 73 | .addSink(new DiscardingSink<>()); 74 | 75 | env.execute(); 76 | } 77 | 78 | /* NOTE(review): generic arguments were stripped in this dump — the dangling '>' shows this was presumably BaseSourceWithKeyRange<Tuple2<Integer, Integer>>; restore before compiling. */ private static class IncreasingTupleSource extends BaseSourceWithKeyRange> { 79 | private static final long serialVersionUID = 2941333602938145526L; 80 | 81 | IncreasingTupleSource(int numEvents, int numKeys) { 82 | super(numEvents, numKeys); 83 | } 84 | 85 | @Override 86 | protected Tuple2 getElement(int keyId) { 87 | return new Tuple2<>(keyId, 1); 88 | } 89 | 90 | } 91 | 92 | private static class IncreasingArraySource extends BaseSourceWithKeyRange { 93 | private static final long serialVersionUID = -7883758559005221998L; 94 | 95 | IncreasingArraySource(int numEvents, int numKeys) { 96 | super(numEvents, numKeys); 97 | } 98 | 99 | @Override 100 | protected int[] getElement(int keyId) { 101 | return new int[] {keyId, 1}; 102 | } 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/benchmark/MemoryStateBackendBenchmark.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.flink.benchmark; 20 | 21 | import org.apache.flink.benchmark.functions.IntLongApplications; 22 | import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows; 23 | import org.apache.flink.streaming.api.windowing.time.Time; 24 | 25 | import org.openjdk.jmh.annotations.Benchmark; 26 | import org.openjdk.jmh.annotations.OperationsPerInvocation; 27 | import org.openjdk.jmh.annotations.Param; 28 | import org.openjdk.jmh.annotations.Setup; 29 | import org.openjdk.jmh.annotations.State; 30 | import org.openjdk.jmh.annotations.TearDown; 31 | import org.openjdk.jmh.runner.Runner; 32 | import org.openjdk.jmh.runner.RunnerException; 33 | import org.openjdk.jmh.runner.options.Options; 34 | import org.openjdk.jmh.runner.options.OptionsBuilder; 35 | import org.openjdk.jmh.runner.options.VerboseMode; 36 | 37 | import java.io.IOException; 38 | 39 | import static org.openjdk.jmh.annotations.Scope.Thread; 40 | 41 | @OperationsPerInvocation(value = MemoryStateBackendBenchmark.RECORDS_PER_INVOCATION) 42 | /* Windowed reduce (10,000 s tumbling event-time windows) over the heap-based state backends MEMORY, FS and FS_ASYNC, parameterized via MemoryStateBackendContext. */ public class MemoryStateBackendBenchmark extends StateBackendBenchmarkBase { 43 | public static final int RECORDS_PER_INVOCATION = 7_000_000; 44 | 45 | public static void main(String[] args) 46 | throws RunnerException { 47 | Options options = new OptionsBuilder() 48 | .verbosity(VerboseMode.NORMAL) 49 | .include(".*" + MemoryStateBackendBenchmark.class.getCanonicalName() + ".*") 50 | .build(); 51 | 52 | new Runner(options).run(); 53 | } 54 | 55 | @Benchmark 56 | public void stateBackends(MemoryStateBackendContext context) throws Exception { 57 | IntLongApplications.reduceWithWindow(context.source, TumblingEventTimeWindows.of(Time.seconds(10_000))); 58 | context.execute(); 59 | } 60 | 61 | @State(Thread) 62 | public static class MemoryStateBackendContext extends StateBackendContext { 63 | @Param({"MEMORY", "FS",
"FS_ASYNC"}) 64 | public StateBackend stateBackend = StateBackend.MEMORY; 65 | 66 | @Setup 67 | public void setUp() throws IOException { 68 | super.setUp(stateBackend, RECORDS_PER_INVOCATION); 69 | } 70 | 71 | @TearDown 72 | public void tearDown() throws IOException { 73 | super.tearDown(); 74 | } 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/benchmark/RemoteChannelThroughputBenchmark.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License.
17 | */ 18 | 19 | package org.apache.flink.benchmark; 20 | 21 | import org.apache.flink.api.common.functions.MapFunction; 22 | import org.apache.flink.benchmark.functions.LongSource; 23 | import org.apache.flink.runtime.minicluster.MiniCluster; 24 | import org.apache.flink.runtime.minicluster.MiniClusterConfiguration; 25 | import org.apache.flink.streaming.api.datastream.DataStreamSource; 26 | import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; 27 | import org.apache.flink.streaming.api.functions.sink.DiscardingSink; 28 | import org.apache.flink.streaming.api.graph.StreamingJobGraphGenerator; 29 | 30 | import org.openjdk.jmh.annotations.Benchmark; 31 | import org.openjdk.jmh.annotations.OperationsPerInvocation; 32 | import org.openjdk.jmh.annotations.Setup; 33 | import org.openjdk.jmh.annotations.TearDown; 34 | import org.openjdk.jmh.runner.Runner; 35 | import org.openjdk.jmh.runner.RunnerException; 36 | import org.openjdk.jmh.runner.options.Options; 37 | import org.openjdk.jmh.runner.options.OptionsBuilder; 38 | import org.openjdk.jmh.runner.options.VerboseMode; 39 | 40 | @OperationsPerInvocation(value = RemoteChannelThroughputBenchmark.RECORDS_PER_INVOCATION) 41 | /* Runs source -> map -> sink in three separate slot sharing groups on a MiniCluster with one slot per TaskManager (NUM_VERTICES * PARALLELISM TaskManagers in total). NOTE(review): presumably this placement forces every exchange onto remote (network) input channels, matching the class name — confirm against MiniCluster slot allocation. */ public class RemoteChannelThroughputBenchmark extends BenchmarkBase { 42 | public static final int NUM_VERTICES = 3; 43 | public static final int PARALLELISM = 4; 44 | public static final int RECORDS_PER_SUBTASK = 10_000_000; 45 | public static final int RECORDS_PER_INVOCATION = RECORDS_PER_SUBTASK * PARALLELISM; 46 | 47 | private static final long CHECKPOINT_INTERVAL_MS = 100; 48 | 49 | private MiniCluster miniCluster; 50 | 51 | public static void main(String[] args) 52 | throws RunnerException { 53 | Options options = new OptionsBuilder() 54 | .verbosity(VerboseMode.NORMAL) 55 | .include(RemoteChannelThroughputBenchmark.class.getCanonicalName()) 56 | .build(); 57 | 58 | new Runner(options).run(); 59 | } 60 | 61 | @Setup 62 | public void setUp() throws Exception { 63 |
MiniClusterConfiguration miniClusterConfiguration = new MiniClusterConfiguration.Builder() 64 | .setNumTaskManagers(NUM_VERTICES * PARALLELISM) 65 | .setNumSlotsPerTaskManager(1) 66 | .build(); 67 | miniCluster = new MiniCluster(miniClusterConfiguration); 68 | miniCluster.start(); 69 | } 70 | 71 | @TearDown 72 | public void tearDown() throws Exception { 73 | if (miniCluster != null) { 74 | miniCluster.close(); 75 | } 76 | } 77 | 78 | @Benchmark 79 | public void remoteRebalance(FlinkEnvironmentContext context) throws Exception { 80 | StreamExecutionEnvironment env = context.env; 81 | env.enableCheckpointing(CHECKPOINT_INTERVAL_MS); 82 | env.setParallelism(PARALLELISM); 83 | 84 | DataStreamSource source = env.addSource(new LongSource(RECORDS_PER_SUBTASK)); 85 | source 86 | .slotSharingGroup("source").rebalance() 87 | .map((MapFunction) value -> value).slotSharingGroup("map").rebalance() 88 | .addSink(new DiscardingSink<>()).slotSharingGroup("sink"); 89 | 90 | /* Submits directly to the MiniCluster started in setUp() rather than via env.execute(), blocking until the job finishes. */ miniCluster.executeJobBlocking(StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph())); 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/benchmark/RocksStateBackendBenchmark.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License.
You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.flink.benchmark; 20 | 21 | import org.apache.flink.benchmark.functions.IntLongApplications; 22 | import org.apache.flink.configuration.Configuration; 23 | import org.apache.flink.configuration.MemorySize; 24 | import org.apache.flink.contrib.streaming.state.RocksDBOptions; 25 | import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows; 26 | import org.apache.flink.streaming.api.windowing.time.Time; 27 | 28 | import org.openjdk.jmh.annotations.Benchmark; 29 | import org.openjdk.jmh.annotations.OperationsPerInvocation; 30 | import org.openjdk.jmh.annotations.Param; 31 | import org.openjdk.jmh.annotations.Setup; 32 | import org.openjdk.jmh.annotations.State; 33 | import org.openjdk.jmh.annotations.TearDown; 34 | import org.openjdk.jmh.runner.Runner; 35 | import org.openjdk.jmh.runner.RunnerException; 36 | import org.openjdk.jmh.runner.options.Options; 37 | import org.openjdk.jmh.runner.options.OptionsBuilder; 38 | import org.openjdk.jmh.runner.options.VerboseMode; 39 | 40 | import java.io.IOException; 41 | 42 | import static org.openjdk.jmh.annotations.Scope.Thread; 43 | 44 | @OperationsPerInvocation(value = RocksStateBackendBenchmark.RECORDS_PER_INVOCATION) 45 | public class RocksStateBackendBenchmark extends StateBackendBenchmarkBase { 46 | public static final int RECORDS_PER_INVOCATION = 2_000_000; 47 | 48 | public static void main(String[] args) 49 | throws RunnerException { 50 | Options options = new OptionsBuilder() 51 | .verbosity(VerboseMode.NORMAL) 52 | 
.include(".*" + RocksStateBackendBenchmark.class.getCanonicalName() + ".*") 53 | .build(); 54 | 55 | new Runner(options).run(); 56 | } 57 | 58 | @Benchmark 59 | public void stateBackends(RocksStateBackendContext context) throws Exception { 60 | IntLongApplications.reduceWithWindow(context.source, TumblingEventTimeWindows.of(Time.seconds(10_000))); 61 | context.execute(); 62 | } 63 | 64 | @State(Thread) 65 | public static class RocksStateBackendContext extends StateBackendContext { 66 | @Param({"ROCKS", "ROCKS_INC"}) 67 | public StateBackend stateBackend = StateBackend.MEMORY; 68 | 69 | @Setup 70 | public void setUp() throws IOException { 71 | super.setUp(stateBackend, RECORDS_PER_INVOCATION); 72 | } 73 | 74 | @TearDown 75 | public void tearDown() throws IOException { 76 | super.tearDown(); 77 | } 78 | 79 | @Override 80 | protected Configuration createConfiguration() { 81 | Configuration configuration = super.createConfiguration(); 82 | // explicit set the managed memory as 322122552 bytes, which is the default managed memory of 1GB TM with 1 slot. 83 | configuration.set(RocksDBOptions.FIX_PER_SLOT_MEMORY_SIZE, MemorySize.parse("322122552b")); 84 | return configuration; 85 | } 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/benchmark/SerializationFrameworkMiniBenchmarks.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. 
You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.flink.benchmark; 20 | 21 | import org.apache.flink.api.common.ExecutionConfig; 22 | import org.apache.flink.api.common.typeinfo.TypeInformation; 23 | import org.apache.flink.api.common.typeinfo.Types; 24 | import org.apache.flink.api.java.tuple.Tuple2; 25 | import org.apache.flink.api.java.tuple.Tuple8; 26 | import org.apache.flink.api.java.typeutils.ResultTypeQueryable; 27 | import org.apache.flink.benchmark.full.StringSerializationBenchmark; 28 | import org.apache.flink.benchmark.functions.BaseSourceWithKeyRange; 29 | import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; 30 | import org.apache.flink.streaming.api.functions.sink.DiscardingSink; 31 | import org.apache.flink.types.Row; 32 | 33 | import org.openjdk.jmh.annotations.Benchmark; 34 | import org.openjdk.jmh.annotations.OperationsPerInvocation; 35 | import org.openjdk.jmh.runner.Runner; 36 | import org.openjdk.jmh.runner.RunnerException; 37 | import org.openjdk.jmh.runner.options.Options; 38 | import org.openjdk.jmh.runner.options.OptionsBuilder; 39 | import org.openjdk.jmh.runner.options.VerboseMode; 40 | 41 | import java.util.Arrays; 42 | import java.util.Random; 43 | import java.util.concurrent.ThreadLocalRandom; 44 | 45 | /** 46 | * Benchmark for serializing POJOs and Tuples with different serialization frameworks. 
47 | */ 48 | public class SerializationFrameworkMiniBenchmarks extends BenchmarkBase { 49 | 50 | protected static final int RECORDS_PER_INVOCATION = 300_000; 51 | 52 | public static void main(String[] args) throws RunnerException { 53 | Options options = new OptionsBuilder() 54 | .verbosity(VerboseMode.NORMAL) 55 | .include(".*" + SerializationFrameworkMiniBenchmarks.class.getCanonicalName() + ".*") 56 | .build(); 57 | 58 | new Runner(options).run(); 59 | } 60 | 61 | @Benchmark 62 | @OperationsPerInvocation(value = SerializationFrameworkMiniBenchmarks.RECORDS_PER_INVOCATION) 63 | public void serializerPojo(FlinkEnvironmentContext context) throws Exception { 64 | StreamExecutionEnvironment env = context.env; 65 | env.setParallelism(4); 66 | ExecutionConfig executionConfig = env.getConfig(); 67 | executionConfig.registerPojoType(MyPojo.class); 68 | executionConfig.registerPojoType(MyOperation.class); 69 | 70 | env.addSource(new PojoSource(RECORDS_PER_INVOCATION, 10)) 71 | .rebalance() 72 | .addSink(new DiscardingSink<>()); 73 | 74 | env.execute(); 75 | } 76 | 77 | @Benchmark 78 | @OperationsPerInvocation(value = SerializationFrameworkMiniBenchmarks.RECORDS_PER_INVOCATION) 79 | public void serializerHeavyString(FlinkEnvironmentContext context) throws Exception { 80 | StreamExecutionEnvironment env = context.env; 81 | env.setParallelism(1); 82 | ExecutionConfig executionConfig = env.getConfig(); 83 | executionConfig.registerPojoType(MyPojo.class); 84 | executionConfig.registerPojoType(MyOperation.class); 85 | 86 | env.addSource(new LongStringSource(RECORDS_PER_INVOCATION, 12)) 87 | .rebalance() 88 | .addSink(new DiscardingSink<>()); 89 | 90 | env.execute(); 91 | } 92 | 93 | @Benchmark 94 | @OperationsPerInvocation(value = SerializationFrameworkMiniBenchmarks.RECORDS_PER_INVOCATION) 95 | public void serializerTuple(FlinkEnvironmentContext context) throws Exception { 96 | StreamExecutionEnvironment env = context.env; 97 | env.setParallelism(4); 98 | 99 | 
env.addSource(new TupleSource(RECORDS_PER_INVOCATION, 10)) 100 | .rebalance() 101 | .addSink(new DiscardingSink<>()); 102 | 103 | env.execute(); 104 | } 105 | 106 | @Benchmark 107 | @OperationsPerInvocation(value = SerializationFrameworkMiniBenchmarks.RECORDS_PER_INVOCATION) 108 | public void serializerKryo(FlinkEnvironmentContext context) throws Exception { 109 | StreamExecutionEnvironment env = context.env; 110 | env.setParallelism(4); 111 | ExecutionConfig executionConfig = env.getConfig(); 112 | executionConfig.enableForceKryo(); 113 | executionConfig.registerKryoType(MyPojo.class); 114 | executionConfig.registerKryoType(MyOperation.class); 115 | 116 | env.addSource(new PojoSource(RECORDS_PER_INVOCATION, 10)) 117 | .rebalance() 118 | .addSink(new DiscardingSink<>()); 119 | 120 | env.execute(); 121 | } 122 | 123 | @Benchmark 124 | @OperationsPerInvocation(value = SerializationFrameworkMiniBenchmarks.RECORDS_PER_INVOCATION) 125 | public void serializerAvro(FlinkEnvironmentContext context) throws Exception { 126 | StreamExecutionEnvironment env = context.env; 127 | env.setParallelism(4); 128 | 129 | env.addSource(new AvroPojoSource(RECORDS_PER_INVOCATION, 10)) 130 | .rebalance() 131 | .addSink(new DiscardingSink<>()); 132 | 133 | env.execute(); 134 | } 135 | 136 | @Benchmark 137 | @OperationsPerInvocation(value = SerializationFrameworkMiniBenchmarks.RECORDS_PER_INVOCATION) 138 | public void serializerRow(FlinkEnvironmentContext context) throws Exception { 139 | StreamExecutionEnvironment env = context.env; 140 | env.setParallelism(4); 141 | 142 | env.addSource(new RowSource(RECORDS_PER_INVOCATION, 10)) 143 | .rebalance() 144 | .addSink(new DiscardingSink<>()); 145 | 146 | env.execute(); 147 | } 148 | 149 | /** 150 | * Source emitting a long String. 
151 | */ 152 | public static class LongStringSource extends BaseSourceWithKeyRange { 153 | private static final long serialVersionUID = 3746240885982877398L; 154 | private String[] templates; 155 | 156 | public LongStringSource(int numEvents, int numKeys) { 157 | super(numEvents, numKeys); 158 | } 159 | 160 | @Override 161 | protected void init() { 162 | super.init(); 163 | templates = new String[] { 164 | makeString(StringSerializationBenchmark.asciiChars, 1024), 165 | makeString(StringSerializationBenchmark.russianChars, 1024), 166 | makeString(StringSerializationBenchmark.chineseChars, 1024) 167 | }; 168 | } 169 | 170 | private String makeString(char[] symbols, int length) { 171 | char[] buffer = new char[length]; 172 | Random random = ThreadLocalRandom.current(); 173 | Arrays.fill(buffer, symbols[random.nextInt(symbols.length)]); 174 | return new String(buffer); 175 | } 176 | 177 | @Override 178 | protected String getElement(int keyId) { 179 | return templates[keyId % templates.length]; 180 | } 181 | } 182 | 183 | /** 184 | * Source emitting a simple {@link MyPojo POJO}. 
185 | */ 186 | public static class PojoSource extends BaseSourceWithKeyRange { 187 | private static final long serialVersionUID = 2941333602938145526L; 188 | 189 | private transient MyPojo template; 190 | 191 | public PojoSource(int numEvents, int numKeys) { 192 | super(numEvents, numKeys); 193 | } 194 | 195 | @Override 196 | protected void init() { 197 | super.init(); 198 | template = new MyPojo( 199 | 0, 200 | "myName", 201 | new String[] {"op1", "op2", "op3", "op4"}, 202 | new MyOperation[] { 203 | new MyOperation(1, "op1"), 204 | new MyOperation(2, "op2"), 205 | new MyOperation(3, "op3")}, 206 | 1, 207 | 2, 208 | 3, 209 | "null"); 210 | } 211 | 212 | @Override 213 | protected MyPojo getElement(int keyId) { 214 | template.setId(keyId); 215 | return template; 216 | } 217 | } 218 | 219 | /** 220 | * Source emitting a {@link org.apache.flink.benchmark.avro.MyPojo POJO} generated by an Avro schema. 221 | */ 222 | public static class AvroPojoSource extends BaseSourceWithKeyRange { 223 | private static final long serialVersionUID = 2941333602938145526L; 224 | 225 | private transient org.apache.flink.benchmark.avro.MyPojo template; 226 | 227 | public AvroPojoSource(int numEvents, int numKeys) { 228 | super(numEvents, numKeys); 229 | } 230 | 231 | @Override 232 | protected void init() { 233 | super.init(); 234 | template = new org.apache.flink.benchmark.avro.MyPojo( 235 | 0, 236 | "myName", 237 | Arrays.asList("op1", "op2", "op3", "op4"), 238 | Arrays.asList( 239 | new org.apache.flink.benchmark.avro.MyOperation(1, "op1"), 240 | new org.apache.flink.benchmark.avro.MyOperation(2, "op2"), 241 | new org.apache.flink.benchmark.avro.MyOperation(3, "op3")), 242 | 1, 243 | 2, 244 | 3, 245 | "null"); 246 | } 247 | 248 | @Override 249 | protected org.apache.flink.benchmark.avro.MyPojo getElement(int keyId) { 250 | template.setId(keyId); 251 | return template; 252 | } 253 | } 254 | 255 | /** 256 | * Source emitting a Tuple based on {@link MyPojo}. 
257 | */ 258 | public static class TupleSource extends BaseSourceWithKeyRange[], Integer, Integer, Integer, Object>> { 259 | private static final long serialVersionUID = 2941333602938145526L; 260 | 261 | private transient Tuple8 template; 262 | 263 | public TupleSource(int numEvents, int numKeys) { 264 | super(numEvents, numKeys); 265 | } 266 | 267 | @SuppressWarnings("unchecked") 268 | @Override 269 | protected void init() { 270 | super.init(); 271 | template = MyPojo.createTuple( 272 | 0, 273 | "myName", 274 | new String[] {"op1", "op2", "op3", "op4"}, 275 | new Tuple2[] { 276 | MyOperation.createTuple(1, "op1"), 277 | MyOperation.createTuple(2, "op2"), 278 | MyOperation.createTuple(3, "op3")}, 279 | 1, 280 | 2, 281 | 3, 282 | "null"); 283 | } 284 | 285 | @Override 286 | protected Tuple8[], Integer, Integer, Integer, Object> getElement(int keyId) { 287 | template.setField(keyId, 0); 288 | return template; 289 | } 290 | } 291 | 292 | /** 293 | * Source emitting a {@link Row} based on {@link MyPojo}. 
294 | */ 295 | public static class RowSource extends BaseSourceWithKeyRange implements ResultTypeQueryable { 296 | private static final long serialVersionUID = 2941333602938145526L; 297 | 298 | private transient Row template; 299 | 300 | public RowSource(int numEvents, int numKeys) { 301 | super(numEvents, numKeys); 302 | } 303 | 304 | @SuppressWarnings("unchecked") 305 | @Override 306 | protected void init() { 307 | super.init(); 308 | template = MyPojo.createRow( 309 | 0, 310 | "myName", 311 | new String[] {"op1", "op2", "op3", "op4"}, 312 | new Row[] { 313 | MyOperation.createRow(1, "op1"), 314 | MyOperation.createRow(2, "op2"), 315 | MyOperation.createRow(3, "op3")}, 316 | 1, 317 | 2, 318 | 3, 319 | "null"); 320 | } 321 | 322 | @Override 323 | protected Row getElement(int keyId) { 324 | template.setField(0, keyId); 325 | return template; 326 | } 327 | 328 | @Override 329 | public TypeInformation getProducedType() { 330 | return MyPojo.getProducedRowType(); 331 | } 332 | } 333 | 334 | /** 335 | * Not so simple POJO. 
336 | */ 337 | @SuppressWarnings({"WeakerAccess", "unused"}) 338 | public static class MyPojo { 339 | public int id; 340 | private String name; 341 | private String[] operationNames; 342 | private MyOperation[] operations; 343 | private int otherId1; 344 | private int otherId2; 345 | private int otherId3; 346 | private Object someObject; 347 | 348 | public MyPojo() { 349 | } 350 | 351 | public MyPojo( 352 | int id, 353 | String name, 354 | String[] operationNames, 355 | MyOperation[] operations, 356 | int otherId1, 357 | int otherId2, 358 | int otherId3, 359 | Object someObject) { 360 | this.id = id; 361 | this.name = name; 362 | this.operationNames = operationNames; 363 | this.operations = operations; 364 | this.otherId1 = otherId1; 365 | this.otherId2 = otherId2; 366 | this.otherId3 = otherId3; 367 | this.someObject = someObject; 368 | } 369 | 370 | public int getId() { 371 | return id; 372 | } 373 | 374 | public void setId(int id) { 375 | this.id = id; 376 | } 377 | 378 | public String getName() { 379 | return name; 380 | } 381 | 382 | public void setName(String name) { 383 | this.name = name; 384 | } 385 | 386 | public String[] getOperationNames() { 387 | return operationNames; 388 | } 389 | 390 | public void setOperationNames(String[] operationNames) { 391 | this.operationNames = operationNames; 392 | } 393 | 394 | public MyOperation[] getOperations() { 395 | return operations; 396 | } 397 | 398 | public void setOperations( 399 | MyOperation[] operations) { 400 | this.operations = operations; 401 | } 402 | 403 | public int getOtherId1() { 404 | return otherId1; 405 | } 406 | 407 | public void setOtherId1(int otherId1) { 408 | this.otherId1 = otherId1; 409 | } 410 | 411 | public int getOtherId2() { 412 | return otherId2; 413 | } 414 | 415 | public void setOtherId2(int otherId2) { 416 | this.otherId2 = otherId2; 417 | } 418 | 419 | public int getOtherId3() { 420 | return otherId3; 421 | } 422 | 423 | public void setOtherId3(int otherId3) { 424 | this.otherId3 = 
otherId3; 425 | } 426 | 427 | public Object getSomeObject() { 428 | return someObject; 429 | } 430 | 431 | public void setSomeObject(Object someObject) { 432 | this.someObject = someObject; 433 | } 434 | 435 | public static Tuple8[], Integer, Integer, Integer, Object> createTuple( 436 | int id, 437 | String name, 438 | String[] operationNames, 439 | Tuple2[] operations, 440 | int otherId1, 441 | int otherId2, 442 | int otherId3, 443 | Object someObject) { 444 | return Tuple8.of(id, name, operationNames, operations, otherId1, otherId2, otherId3, someObject); 445 | } 446 | 447 | public static Row createRow( 448 | int id, 449 | String name, 450 | String[] operationNames, 451 | Row[] operations, 452 | int otherId1, 453 | int otherId2, 454 | int otherId3, 455 | Object someObject) { 456 | return Row.of(id, name, operationNames, operations, otherId1, otherId2, otherId3, someObject); 457 | } 458 | 459 | public static TypeInformation getProducedRowType() { 460 | return Types.ROW( 461 | Types.INT, 462 | Types.STRING, 463 | Types.OBJECT_ARRAY(Types.STRING), 464 | Types.OBJECT_ARRAY(Types.ROW(Types.INT, Types.STRING)), 465 | Types.INT, 466 | Types.INT, 467 | Types.INT, 468 | Types.GENERIC(Object.class) 469 | ); 470 | } 471 | } 472 | 473 | /** 474 | * Another POJO. 
475 | */ 476 | @SuppressWarnings({"WeakerAccess", "unused"}) 477 | public static class MyOperation { 478 | int id; 479 | protected String name; 480 | 481 | public MyOperation() { 482 | } 483 | 484 | public MyOperation(int id, String name) { 485 | this.id = id; 486 | this.name = name; 487 | } 488 | 489 | public int getId() { 490 | return id; 491 | } 492 | 493 | public void setId(int id) { 494 | this.id = id; 495 | } 496 | 497 | public String getName() { 498 | return name; 499 | } 500 | 501 | public void setName(String name) { 502 | this.name = name; 503 | } 504 | 505 | public static Tuple2 createTuple(int id, String name) { 506 | return Tuple2.of(id, name); 507 | } 508 | 509 | public static Row createRow(int id, String name) { 510 | return Row.of(id, name); 511 | } 512 | } 513 | } 514 | -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/benchmark/StateBackendBenchmarkBase.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
17 | */ 18 | 19 | package org.apache.flink.benchmark; 20 | 21 | import org.apache.flink.benchmark.functions.IntegerLongSource; 22 | import org.apache.flink.contrib.streaming.state.RocksDBStateBackend; 23 | import org.apache.flink.runtime.state.AbstractStateBackend; 24 | import org.apache.flink.runtime.state.filesystem.FsStateBackend; 25 | import org.apache.flink.runtime.state.memory.MemoryStateBackend; 26 | import org.apache.flink.streaming.api.TimeCharacteristic; 27 | import org.apache.flink.streaming.api.datastream.DataStreamSource; 28 | import org.apache.flink.util.FileUtils; 29 | 30 | import java.io.File; 31 | import java.io.IOException; 32 | import java.nio.file.Files; 33 | 34 | public class StateBackendBenchmarkBase extends BenchmarkBase { 35 | public enum StateBackend { 36 | MEMORY, 37 | FS, 38 | FS_ASYNC, 39 | ROCKS, 40 | ROCKS_INC 41 | } 42 | 43 | public static class StateBackendContext extends FlinkEnvironmentContext { 44 | 45 | public final File checkpointDir; 46 | 47 | public final int numberOfElements = 1000; 48 | 49 | public DataStreamSource source; 50 | 51 | public StateBackendContext() { 52 | try { 53 | checkpointDir = Files.createTempDirectory("bench-").toFile(); 54 | } catch (IOException e) { 55 | throw new RuntimeException(e); 56 | } 57 | } 58 | 59 | public void setUp(StateBackend stateBackend, long recordsPerInvocation) throws IOException { 60 | super.setUp(); 61 | 62 | final AbstractStateBackend backend; 63 | String checkpointDataUri = "file://" + checkpointDir.getAbsolutePath(); 64 | switch (stateBackend) { 65 | case MEMORY: 66 | backend = new MemoryStateBackend(); 67 | break; 68 | case FS: 69 | backend = new FsStateBackend(checkpointDataUri, false); 70 | break; 71 | case FS_ASYNC: 72 | backend = new FsStateBackend(checkpointDataUri, true); 73 | break; 74 | case ROCKS: 75 | backend = new RocksDBStateBackend(checkpointDataUri, false); 76 | break; 77 | case ROCKS_INC: 78 | backend = new RocksDBStateBackend(checkpointDataUri, true); 79 | break; 
80 | default: 81 | throw new UnsupportedOperationException("Unknown state backend: " + stateBackend); 82 | } 83 | 84 | env.setStateBackend(backend); 85 | 86 | env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime); 87 | source = env.addSource(new IntegerLongSource(numberOfElements, recordsPerInvocation)); 88 | } 89 | 90 | public void tearDown() throws IOException { 91 | FileUtils.deleteDirectory(checkpointDir); 92 | } 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/benchmark/TwoInputBenchmark.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
17 | */ 18 | 19 | package org.apache.flink.benchmark; 20 | 21 | import org.apache.flink.api.common.typeinfo.TypeInformation; 22 | import org.apache.flink.benchmark.functions.LongSource; 23 | import org.apache.flink.benchmark.functions.QueuingLongSource; 24 | import org.apache.flink.benchmark.operators.MultiplyByTwoCoStreamMap; 25 | import org.apache.flink.streaming.api.datastream.DataStreamSource; 26 | import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; 27 | import org.apache.flink.streaming.api.functions.sink.DiscardingSink; 28 | 29 | import org.openjdk.jmh.annotations.Benchmark; 30 | import org.openjdk.jmh.annotations.OperationsPerInvocation; 31 | import org.openjdk.jmh.runner.Runner; 32 | import org.openjdk.jmh.runner.RunnerException; 33 | import org.openjdk.jmh.runner.options.Options; 34 | import org.openjdk.jmh.runner.options.OptionsBuilder; 35 | import org.openjdk.jmh.runner.options.VerboseMode; 36 | 37 | public class TwoInputBenchmark extends BenchmarkBase { 38 | 39 | public static final int RECORDS_PER_INVOCATION = 25_000_000; 40 | 41 | public static final int ONE_IDLE_RECORDS_PER_INVOCATION = 15_000_000; 42 | 43 | private static final long CHECKPOINT_INTERVAL_MS = 100; 44 | 45 | public static void main(String[] args) 46 | throws RunnerException { 47 | Options options = new OptionsBuilder() 48 | .verbosity(VerboseMode.NORMAL) 49 | .include(".*" + TwoInputBenchmark.class.getCanonicalName() + ".*") 50 | .build(); 51 | 52 | new Runner(options).run(); 53 | } 54 | 55 | @Benchmark 56 | @OperationsPerInvocation(value = TwoInputBenchmark.RECORDS_PER_INVOCATION) 57 | public void twoInputMapSink(FlinkEnvironmentContext context) throws Exception { 58 | 59 | StreamExecutionEnvironment env = context.env; 60 | 61 | env.enableCheckpointing(CHECKPOINT_INTERVAL_MS); 62 | env.setParallelism(1); 63 | 64 | // Setting buffer timeout to 1 is an attempt to improve twoInputMapSink benchmark stability. 
65 | // Without 1ms buffer timeout, some JVM forks are much slower then others, making results 66 | // unstable and unreliable. 67 | env.setBufferTimeout(1); 68 | 69 | long numRecordsPerInput = RECORDS_PER_INVOCATION / 2; 70 | DataStreamSource source1 = env.addSource(new LongSource(numRecordsPerInput)); 71 | DataStreamSource source2 = env.addSource(new LongSource(numRecordsPerInput)); 72 | 73 | source1 74 | .connect(source2) 75 | .transform("custom operator", TypeInformation.of(Long.class), new MultiplyByTwoCoStreamMap()) 76 | .addSink(new DiscardingSink<>()); 77 | 78 | env.execute(); 79 | } 80 | 81 | @Benchmark 82 | @OperationsPerInvocation(value = TwoInputBenchmark.ONE_IDLE_RECORDS_PER_INVOCATION) 83 | public void twoInputOneIdleMapSink(FlinkEnvironmentContext context) throws Exception { 84 | 85 | StreamExecutionEnvironment env = context.env; 86 | env.enableCheckpointing(CHECKPOINT_INTERVAL_MS); 87 | env.setParallelism(1); 88 | 89 | QueuingLongSource.reset(); 90 | DataStreamSource source1 = env.addSource(new QueuingLongSource(1, ONE_IDLE_RECORDS_PER_INVOCATION - 1)); 91 | DataStreamSource source2 = env.addSource(new QueuingLongSource(2, 1)); 92 | 93 | source1 94 | .connect(source2) 95 | .transform("custom operator", TypeInformation.of(Long.class), new MultiplyByTwoCoStreamMap()) 96 | .addSink(new DiscardingSink<>()); 97 | 98 | env.execute(); 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/benchmark/WindowBenchmarks.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. 
The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.flink.benchmark; 20 | 21 | import org.apache.flink.benchmark.functions.IntLongApplications; 22 | import org.apache.flink.benchmark.functions.IntegerLongSource; 23 | import org.apache.flink.streaming.api.TimeCharacteristic; 24 | import org.apache.flink.streaming.api.datastream.DataStreamSource; 25 | import org.apache.flink.streaming.api.windowing.assigners.EventTimeSessionWindows; 26 | import org.apache.flink.streaming.api.windowing.assigners.GlobalWindows; 27 | import org.apache.flink.streaming.api.windowing.assigners.SlidingEventTimeWindows; 28 | import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows; 29 | import org.apache.flink.streaming.api.windowing.time.Time; 30 | 31 | import org.openjdk.jmh.annotations.Benchmark; 32 | import org.openjdk.jmh.annotations.OperationsPerInvocation; 33 | import org.openjdk.jmh.annotations.Setup; 34 | import org.openjdk.jmh.runner.Runner; 35 | import org.openjdk.jmh.runner.RunnerException; 36 | import org.openjdk.jmh.runner.options.Options; 37 | import org.openjdk.jmh.runner.options.OptionsBuilder; 38 | import org.openjdk.jmh.runner.options.VerboseMode; 39 | 40 | import java.io.IOException; 41 | 42 | @OperationsPerInvocation(value = WindowBenchmarks.RECORDS_PER_INVOCATION) 43 | public class WindowBenchmarks extends BenchmarkBase { 44 | 45 | public static final int 
RECORDS_PER_INVOCATION = 7_000_000; 46 | 47 | public static void main(String[] args) 48 | throws RunnerException { 49 | Options options = new OptionsBuilder() 50 | .verbosity(VerboseMode.NORMAL) 51 | .include(".*" + WindowBenchmarks.class.getCanonicalName() + ".*") 52 | .build(); 53 | 54 | new Runner(options).run(); 55 | } 56 | 57 | @Benchmark 58 | public void globalWindow(TimeWindowContext context) throws Exception { 59 | IntLongApplications.reduceWithWindow(context.source, GlobalWindows.create()); 60 | context.execute(); 61 | } 62 | 63 | @Benchmark 64 | public void tumblingWindow(TimeWindowContext context) throws Exception { 65 | IntLongApplications.reduceWithWindow(context.source, TumblingEventTimeWindows.of(Time.seconds(10_000))); 66 | context.execute(); 67 | } 68 | 69 | @Benchmark 70 | public void slidingWindow(TimeWindowContext context) throws Exception { 71 | IntLongApplications.reduceWithWindow(context.source, SlidingEventTimeWindows.of(Time.seconds(10_000), Time.seconds(1000))); 72 | context.execute(); 73 | } 74 | 75 | @Benchmark 76 | public void sessionWindow(TimeWindowContext context) throws Exception { 77 | IntLongApplications.reduceWithWindow(context.source, EventTimeSessionWindows.withGap(Time.seconds(500))); 78 | context.execute(); 79 | } 80 | 81 | public static class TimeWindowContext extends FlinkEnvironmentContext { 82 | public final int numberOfElements = 1000; 83 | 84 | public DataStreamSource source; 85 | 86 | @Setup 87 | @Override 88 | public void setUp() throws IOException { 89 | super.setUp(); 90 | 91 | env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime); 92 | source = env.addSource(new IntegerLongSource(numberOfElements, RECORDS_PER_INVOCATION)); 93 | } 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/benchmark/full/PojoSerializationBenchmark.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed 
to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.flink.benchmark.full; 20 | import org.apache.flink.api.common.ExecutionConfig; 21 | import org.apache.flink.api.common.typeinfo.TypeInformation; 22 | import org.apache.flink.api.common.typeutils.TypeSerializer; 23 | import org.apache.flink.api.java.typeutils.runtime.kryo.KryoSerializer; 24 | import org.apache.flink.benchmark.BenchmarkBase; 25 | import org.apache.flink.benchmark.SerializationFrameworkMiniBenchmarks; 26 | import org.apache.flink.core.memory.DataInputView; 27 | import org.apache.flink.core.memory.DataInputViewStreamWrapper; 28 | import org.apache.flink.core.memory.DataOutputView; 29 | import org.apache.flink.core.memory.DataOutputViewStreamWrapper; 30 | import org.apache.flink.formats.avro.typeutils.AvroSerializer; 31 | import org.openjdk.jmh.runner.Runner; 32 | import org.openjdk.jmh.runner.RunnerException; 33 | import org.openjdk.jmh.runner.options.Options; 34 | import org.openjdk.jmh.runner.options.OptionsBuilder; 35 | import org.openjdk.jmh.runner.options.VerboseMode; 36 | import org.openjdk.jmh.annotations.*; 37 | 38 | import java.io.ByteArrayInputStream; 39 | import java.io.ByteArrayOutputStream; 
40 | import java.io.IOException; 41 | import java.util.Arrays; 42 | import java.util.concurrent.TimeUnit; 43 | 44 | @State(Scope.Benchmark) 45 | @BenchmarkMode({Mode.Throughput}) 46 | @OutputTimeUnit(TimeUnit.MILLISECONDS) 47 | public class PojoSerializationBenchmark extends BenchmarkBase { 48 | 49 | SerializationFrameworkMiniBenchmarks.MyPojo pojo; 50 | org.apache.flink.benchmark.avro.MyPojo avroPojo; 51 | 52 | ExecutionConfig config = new ExecutionConfig(); 53 | TypeSerializer pojoSerializer = 54 | TypeInformation.of(SerializationFrameworkMiniBenchmarks.MyPojo.class).createSerializer(config); 55 | TypeSerializer kryoSerializer = 56 | new KryoSerializer<>(SerializationFrameworkMiniBenchmarks.MyPojo.class, config); 57 | TypeSerializer avroSerializer = 58 | new AvroSerializer<>(org.apache.flink.benchmark.avro.MyPojo.class); 59 | 60 | ByteArrayInputStream pojoBuffer; 61 | ByteArrayInputStream avroBuffer; 62 | ByteArrayInputStream kryoBuffer; 63 | 64 | 65 | @Setup 66 | public void setup() throws IOException { 67 | pojo = new SerializationFrameworkMiniBenchmarks.MyPojo( 68 | 0, 69 | "myName", 70 | new String[] {"op1", "op2", "op3", "op4"}, 71 | new SerializationFrameworkMiniBenchmarks.MyOperation[] { 72 | new SerializationFrameworkMiniBenchmarks.MyOperation(1, "op1"), 73 | new SerializationFrameworkMiniBenchmarks.MyOperation(2, "op2"), 74 | new SerializationFrameworkMiniBenchmarks.MyOperation(3, "op3")}, 75 | 1, 76 | 2, 77 | 3, 78 | "null"); 79 | avroPojo = new org.apache.flink.benchmark.avro.MyPojo( 80 | 0, 81 | "myName", 82 | Arrays.asList("op1", "op2", "op3", "op4"), 83 | Arrays.asList( 84 | new org.apache.flink.benchmark.avro.MyOperation(1, "op1"), 85 | new org.apache.flink.benchmark.avro.MyOperation(2, "op2"), 86 | new org.apache.flink.benchmark.avro.MyOperation(3, "op3")), 87 | 1, 88 | 2, 89 | 3, 90 | "null"); 91 | pojoBuffer = new ByteArrayInputStream(write(pojoSerializer, pojo)); 92 | avroBuffer = new ByteArrayInputStream(write(avroSerializer, avroPojo)); 93 | 
kryoBuffer = new ByteArrayInputStream(write(kryoSerializer, pojo)); 94 | } 95 | 96 | public static void main(String[] args) 97 | throws RunnerException { 98 | Options options = new OptionsBuilder() 99 | .verbosity(VerboseMode.NORMAL) 100 | .include(".*" + PojoSerializationBenchmark.class.getCanonicalName() + ".*") 101 | .build(); 102 | 103 | new Runner(options).run(); 104 | } 105 | 106 | @Benchmark 107 | public byte[] writePojo() throws IOException { 108 | return write(pojoSerializer, pojo); 109 | } 110 | 111 | @Benchmark 112 | public byte[] writeAvro() throws IOException { 113 | return write(avroSerializer, avroPojo); 114 | } 115 | 116 | @Benchmark 117 | public byte[] writeKryo() throws IOException { 118 | return write(kryoSerializer, pojo); 119 | } 120 | 121 | @Benchmark 122 | public SerializationFrameworkMiniBenchmarks.MyPojo readPojo() throws IOException { 123 | pojoBuffer.reset(); 124 | return pojoSerializer.deserialize(new DataInputViewStreamWrapper(pojoBuffer)); 125 | } 126 | 127 | @Benchmark 128 | public SerializationFrameworkMiniBenchmarks.MyPojo readKryo() throws IOException { 129 | kryoBuffer.reset(); 130 | return kryoSerializer.deserialize(new DataInputViewStreamWrapper(kryoBuffer)); 131 | } 132 | 133 | @Benchmark 134 | public org.apache.flink.benchmark.avro.MyPojo readAvro() throws IOException { 135 | avroBuffer.reset(); 136 | return avroSerializer.deserialize(new DataInputViewStreamWrapper(avroBuffer)); 137 | } 138 | 139 | private byte[] write(TypeSerializer serializer, T value) throws IOException { 140 | ByteArrayOutputStream buffer = new ByteArrayOutputStream(); 141 | DataOutputView out = new DataOutputViewStreamWrapper(buffer); 142 | serializer.serialize(value, out); 143 | return buffer.toByteArray(); 144 | } 145 | } 146 | -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/benchmark/full/SerializationFrameworkAllBenchmarks.java: 
-------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.flink.benchmark.full; 20 | 21 | import org.apache.flink.api.common.ExecutionConfig; 22 | import org.apache.flink.api.common.typeinfo.TypeInformation; 23 | import org.apache.flink.api.java.typeutils.ResultTypeQueryable; 24 | import org.apache.flink.benchmark.FlinkEnvironmentContext; 25 | import org.apache.flink.benchmark.SerializationFrameworkMiniBenchmarks; 26 | import org.apache.flink.benchmark.functions.BaseSourceWithKeyRange; 27 | import org.apache.flink.formats.avro.typeutils.GenericRecordAvroTypeInfo; 28 | import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; 29 | import org.apache.flink.streaming.api.functions.sink.DiscardingSink; 30 | 31 | import com.twitter.chill.protobuf.ProtobufSerializer; 32 | import com.twitter.chill.thrift.TBaseSerializer; 33 | import org.apache.avro.Schema; 34 | import org.apache.avro.generic.GenericData; 35 | import org.apache.avro.generic.GenericRecord; 36 | import org.openjdk.jmh.annotations.Benchmark; 37 | import 
org.openjdk.jmh.annotations.OperationsPerInvocation; 38 | import org.openjdk.jmh.runner.Runner; 39 | import org.openjdk.jmh.runner.RunnerException; 40 | import org.openjdk.jmh.runner.options.Options; 41 | import org.openjdk.jmh.runner.options.OptionsBuilder; 42 | import org.openjdk.jmh.runner.options.VerboseMode; 43 | 44 | import java.io.FileNotFoundException; 45 | import java.io.IOException; 46 | import java.io.InputStream; 47 | import java.util.Arrays; 48 | 49 | /** 50 | * Benchmark for serializing POJOs and Tuples with different serialization frameworks. 51 | */ 52 | public class SerializationFrameworkAllBenchmarks extends SerializationFrameworkMiniBenchmarks { 53 | 54 | public static void main(String[] args) throws RunnerException { 55 | Options options = new OptionsBuilder() 56 | .verbosity(VerboseMode.NORMAL) 57 | .include(".*" + SerializationFrameworkAllBenchmarks.class.getCanonicalName() + ".*") 58 | .build(); 59 | 60 | new Runner(options).run(); 61 | } 62 | 63 | @Benchmark 64 | @OperationsPerInvocation(value = RECORDS_PER_INVOCATION) 65 | public void serializerPojoWithoutRegistration(FlinkEnvironmentContext context) throws Exception { 66 | StreamExecutionEnvironment env = context.env; 67 | env.setParallelism(4); 68 | 69 | env.addSource(new PojoSource(RECORDS_PER_INVOCATION, 10)) 70 | .rebalance() 71 | .addSink(new DiscardingSink<>()); 72 | 73 | env.execute(); 74 | } 75 | 76 | @Benchmark 77 | @OperationsPerInvocation(value = RECORDS_PER_INVOCATION) 78 | public void serializerKryoWithoutRegistration(FlinkEnvironmentContext context) throws Exception { 79 | StreamExecutionEnvironment env = context.env; 80 | env.setParallelism(4); 81 | env.getConfig().enableForceKryo(); 82 | 83 | env.addSource(new PojoSource(RECORDS_PER_INVOCATION, 10)) 84 | .rebalance() 85 | .addSink(new DiscardingSink<>()); 86 | 87 | env.execute(); 88 | } 89 | 90 | @Benchmark 91 | @OperationsPerInvocation(value = RECORDS_PER_INVOCATION) 92 | public void 
serializerAvroReflect(FlinkEnvironmentContext context) throws Exception { 93 | StreamExecutionEnvironment env = context.env; 94 | env.setParallelism(4); 95 | env.getConfig().enableForceAvro(); 96 | 97 | env.addSource(new PojoSource(RECORDS_PER_INVOCATION, 10)) 98 | .rebalance() 99 | .addSink(new DiscardingSink<>()); 100 | 101 | env.execute(); 102 | } 103 | 104 | @Benchmark 105 | @OperationsPerInvocation(value = RECORDS_PER_INVOCATION) 106 | public void serializerAvroGeneric(FlinkEnvironmentContext context) throws Exception { 107 | StreamExecutionEnvironment env = context.env; 108 | env.setParallelism(4); 109 | 110 | Schema schema = AvroGenericRecordSource.loadSchema(); 111 | env.addSource(new AvroGenericRecordSource(RECORDS_PER_INVOCATION, 10, schema)) 112 | .rebalance() 113 | .addSink(new DiscardingSink<>()); 114 | 115 | env.execute(); 116 | } 117 | 118 | @Benchmark 119 | @OperationsPerInvocation(value = SerializationFrameworkMiniBenchmarks.RECORDS_PER_INVOCATION) 120 | public void serializerKryoThrift(FlinkEnvironmentContext context) throws Exception { 121 | StreamExecutionEnvironment env = context.env; 122 | env.setParallelism(4); 123 | ExecutionConfig executionConfig = env.getConfig(); 124 | executionConfig.enableForceKryo(); 125 | executionConfig.addDefaultKryoSerializer(org.apache.flink.benchmark.thrift.MyPojo.class, TBaseSerializer.class); 126 | executionConfig.addDefaultKryoSerializer(org.apache.flink.benchmark.thrift.MyOperation.class, TBaseSerializer.class); 127 | 128 | env.addSource(new ThriftPojoSource(RECORDS_PER_INVOCATION, 10)) 129 | .rebalance() 130 | .addSink(new DiscardingSink<>()); 131 | 132 | env.execute(); 133 | } 134 | 135 | @Benchmark 136 | @OperationsPerInvocation(value = SerializationFrameworkMiniBenchmarks.RECORDS_PER_INVOCATION) 137 | public void serializerKryoProtobuf(FlinkEnvironmentContext context) throws Exception { 138 | StreamExecutionEnvironment env = context.env; 139 | env.setParallelism(4); 140 | ExecutionConfig executionConfig = 
env.getConfig(); 141 | executionConfig.enableForceKryo(); 142 | executionConfig.registerTypeWithKryoSerializer(org.apache.flink.benchmark.protobuf.MyPojoOuterClass.MyPojo.class, ProtobufSerializer.class); 143 | executionConfig.registerTypeWithKryoSerializer(org.apache.flink.benchmark.protobuf.MyPojoOuterClass.MyOperation.class, ProtobufSerializer.class); 144 | 145 | env.addSource(new ProtobufPojoSource(RECORDS_PER_INVOCATION, 10)) 146 | .rebalance() 147 | .addSink(new DiscardingSink<>()); 148 | 149 | env.execute(); 150 | } 151 | 152 | /** 153 | * Source emitting an Avro GenericRecord. 154 | */ 155 | public static class AvroGenericRecordSource extends BaseSourceWithKeyRange implements 156 | ResultTypeQueryable { 157 | private static final long serialVersionUID = 2941333602938145526L; 158 | 159 | private final GenericRecordAvroTypeInfo producedType; 160 | private transient Schema myPojoSchema; 161 | private final String schemaString; 162 | 163 | private transient GenericRecord template; 164 | 165 | public AvroGenericRecordSource(int numEvents, int numKeys, Schema schema) { 166 | super(numEvents, numKeys); 167 | this.producedType = new GenericRecordAvroTypeInfo(schema); 168 | this.myPojoSchema = schema; 169 | this.schemaString = schema.toString(); 170 | } 171 | 172 | private static Schema loadSchema() throws IOException { 173 | ClassLoader classLoader = ClassLoader.getSystemClassLoader(); 174 | try (InputStream is = classLoader.getResourceAsStream("avro/mypojo.avsc")) { 175 | if (is == null) { 176 | throw new FileNotFoundException("File 'mypojo.avsc' not found"); 177 | } 178 | return new Schema.Parser().parse(is); 179 | } 180 | } 181 | 182 | @Override 183 | protected void init() { 184 | super.init(); 185 | 186 | if (myPojoSchema == null) { 187 | this.myPojoSchema = new Schema.Parser().parse(schemaString); 188 | } 189 | Schema myOperationSchema = myPojoSchema.getField("operations").schema().getElementType(); 190 | 191 | template = new GenericData.Record(myPojoSchema); 
192 | template.put("id", 0); 193 | template.put("name", "myName"); 194 | template.put("operationName", Arrays.asList("op1", "op2", "op3", "op4")); 195 | 196 | GenericData.Record op1 = new GenericData.Record(myOperationSchema); 197 | op1.put("id", 1); 198 | op1.put("name", "op1"); 199 | GenericData.Record op2 = new GenericData.Record(myOperationSchema); 200 | op2.put("id", 2); 201 | op2.put("name", "op2"); 202 | GenericData.Record op3 = new GenericData.Record(myOperationSchema); 203 | op3.put("id", 3); 204 | op3.put("name", "op3"); 205 | template.put("operations", Arrays.asList(op1, op2, op3)); 206 | 207 | template.put("otherId1", 1); 208 | template.put("otherId2", 2); 209 | template.put("otherId3", 3); 210 | template.put("nullable", "null"); 211 | } 212 | 213 | @Override 214 | protected GenericRecord getElement(int keyId) { 215 | template.put("id", keyId); 216 | return template; 217 | } 218 | 219 | @Override 220 | public TypeInformation getProducedType() { 221 | return producedType; 222 | } 223 | } 224 | 225 | /** 226 | * Source emitting a {@link org.apache.flink.benchmark.thrift.MyPojo POJO} generated by an Apache Thrift schema. 
227 | */ 228 | public static class ThriftPojoSource extends BaseSourceWithKeyRange { 229 | private static final long serialVersionUID = 2941333602938145526L; 230 | 231 | private transient org.apache.flink.benchmark.thrift.MyPojo template; 232 | 233 | public ThriftPojoSource(int numEvents, int numKeys) { 234 | super(numEvents, numKeys); 235 | } 236 | 237 | @Override 238 | protected void init() { 239 | super.init(); 240 | template = new org.apache.flink.benchmark.thrift.MyPojo( 241 | 0, 242 | "myName", 243 | Arrays.asList("op1", "op2", "op3", "op4"), 244 | Arrays.asList( 245 | new org.apache.flink.benchmark.thrift.MyOperation(1, "op1"), 246 | new org.apache.flink.benchmark.thrift.MyOperation(2, "op2"), 247 | new org.apache.flink.benchmark.thrift.MyOperation(3, "op3")), 248 | 1, 249 | 2, 250 | 3); 251 | template.setSomeObject("null"); 252 | } 253 | 254 | @Override 255 | protected org.apache.flink.benchmark.thrift.MyPojo getElement(int keyId) { 256 | template.setId(keyId); 257 | return template; 258 | } 259 | } 260 | 261 | /** 262 | * Source emitting a {@link org.apache.flink.benchmark.protobuf.MyPojoOuterClass.MyPojo POJO} generated by a Protobuf schema. 
263 | */ 264 | public static class ProtobufPojoSource extends BaseSourceWithKeyRange { 265 | private static final long serialVersionUID = 2941333602938145526L; 266 | 267 | private transient org.apache.flink.benchmark.protobuf.MyPojoOuterClass.MyPojo template; 268 | 269 | public ProtobufPojoSource(int numEvents, int numKeys) { 270 | super(numEvents, numKeys); 271 | } 272 | 273 | @Override 274 | protected void init() { 275 | super.init(); 276 | template = org.apache.flink.benchmark.protobuf.MyPojoOuterClass.MyPojo.newBuilder() 277 | .setId(0) 278 | .setName("myName") 279 | .addAllOperationName(Arrays.asList("op1", "op2", "op3", "op4")) 280 | .addOperations(org.apache.flink.benchmark.protobuf.MyPojoOuterClass.MyOperation.newBuilder() 281 | .setId(1) 282 | .setName("op1")) 283 | .addOperations(org.apache.flink.benchmark.protobuf.MyPojoOuterClass.MyOperation.newBuilder() 284 | .setId(2) 285 | .setName("op2")) 286 | .addOperations(org.apache.flink.benchmark.protobuf.MyPojoOuterClass.MyOperation.newBuilder() 287 | .setId(3) 288 | .setName("op3")) 289 | .setOtherId1(1) 290 | .setOtherId2(2) 291 | .setOtherId3(3) 292 | .setSomeObject("null") 293 | .build(); 294 | } 295 | 296 | @Override 297 | protected org.apache.flink.benchmark.protobuf.MyPojoOuterClass.MyPojo getElement(int keyId) { 298 | return org.apache.flink.benchmark.protobuf.MyPojoOuterClass.MyPojo.newBuilder(template) 299 | .setId(keyId) 300 | .build(); 301 | } 302 | } 303 | } 304 | -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/benchmark/full/StringSerializationBenchmark.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. 
The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.flink.benchmark.full; 20 | 21 | import org.apache.flink.api.common.ExecutionConfig; 22 | import org.apache.flink.api.common.typeinfo.TypeInformation; 23 | import org.apache.flink.api.common.typeutils.TypeSerializer; 24 | import org.apache.flink.benchmark.BenchmarkBase; 25 | import org.apache.flink.core.memory.DataInputView; 26 | import org.apache.flink.core.memory.DataInputViewStreamWrapper; 27 | import org.apache.flink.core.memory.DataOutputView; 28 | import org.apache.flink.core.memory.DataOutputViewStreamWrapper; 29 | import org.openjdk.jmh.runner.Runner; 30 | import org.openjdk.jmh.runner.RunnerException; 31 | import org.openjdk.jmh.runner.options.Options; 32 | import org.openjdk.jmh.runner.options.OptionsBuilder; 33 | import org.openjdk.jmh.runner.options.VerboseMode; 34 | import org.openjdk.jmh.annotations.*; 35 | 36 | import java.io.ByteArrayInputStream; 37 | import java.io.ByteArrayOutputStream; 38 | import java.io.IOException; 39 | import java.util.Random; 40 | import java.util.concurrent.TimeUnit; 41 | 42 | @State(Scope.Benchmark) 43 | @BenchmarkMode({Mode.Throughput}) 44 | @OutputTimeUnit(TimeUnit.MILLISECONDS) 45 | public class StringSerializationBenchmark extends BenchmarkBase { 46 | 47 | public static void main(String[] args) 48 | throws RunnerException { 49 | Options options = new OptionsBuilder() 50 | 
.verbosity(VerboseMode.NORMAL) 51 | .include(".*" + StringSerializationBenchmark.class.getCanonicalName() + ".*") 52 | .build(); 53 | 54 | new Runner(options).run(); 55 | } 56 | 57 | @Param({"ascii", "russian", "chinese"}) 58 | public String type; 59 | 60 | @Param({"4", "128", "16384"}) 61 | public String lengthStr; 62 | 63 | int length; 64 | String input; 65 | public static final char[] asciiChars = "qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM1234567890".toCharArray(); 66 | public static final char[] russianChars = "йцукенгшщзхъфывапролджэячсмитьбюЙЦУКЕНГШЩЗХЪФЫВАПРОЛДЖЭЯЧСМИТЬБЮ".toCharArray(); 67 | public static final char[] chineseChars = "的是不了人我在有他这为之大来以个中上们到国说和地也子要时道出而于就下得可你年生".toCharArray(); 68 | 69 | ExecutionConfig config = new ExecutionConfig(); 70 | TypeSerializer serializer = TypeInformation.of(String.class).createSerializer(config); 71 | ByteArrayInputStream serializedBuffer; 72 | DataInputView serializedStream; 73 | 74 | @Setup 75 | public void setup() throws IOException { 76 | length = Integer.parseInt(lengthStr); 77 | switch (type) { 78 | case "ascii": 79 | input = generate(asciiChars, length); 80 | break; 81 | case "russian": 82 | input = generate(russianChars, length); 83 | break; 84 | case "chinese": 85 | input = generate(chineseChars, length); 86 | break; 87 | default: 88 | throw new IllegalArgumentException(type + "charset is not supported"); 89 | } 90 | byte[] stringBytes = stringWrite(); 91 | serializedBuffer = new ByteArrayInputStream(stringBytes); 92 | serializedStream = new DataInputViewStreamWrapper(serializedBuffer); 93 | } 94 | 95 | @Benchmark 96 | public byte[] stringWrite() throws IOException { 97 | ByteArrayOutputStream buffer = new ByteArrayOutputStream(); 98 | DataOutputView out = new DataOutputViewStreamWrapper(buffer); 99 | serializer.serialize(input, out); 100 | return buffer.toByteArray(); 101 | } 102 | 103 | @Benchmark 104 | public String stringRead() throws IOException { 105 | serializedBuffer.reset(); 106 | return 
serializer.deserialize(serializedStream); 107 | } 108 | 109 | private String generate(char[] charset, int length) { 110 | char[] buffer = new char[length]; 111 | Random random = new Random(); 112 | for (int i=0; iPlease consider moving benchmarks here to keep the amount of regression benchmarks small. 24 | */ 25 | package org.apache.flink.benchmark.full; 26 | -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/benchmark/functions/BaseSourceWithKeyRange.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.flink.benchmark.functions; 20 | 21 | import org.apache.flink.api.java.tuple.Tuple2; 22 | import org.apache.flink.streaming.api.functions.source.ParallelSourceFunction; 23 | 24 | /** 25 | * Abstract base class for sources with a defined number of events and a fixed key range. 
26 | */ 27 | abstract public class BaseSourceWithKeyRange implements ParallelSourceFunction { 28 | private static final long serialVersionUID = 8318018060123048234L; 29 | 30 | protected final int numKeys; 31 | protected int remainingEvents; 32 | 33 | public BaseSourceWithKeyRange(int numEvents, int numKeys) { 34 | this.remainingEvents = numEvents; 35 | this.numKeys = numKeys; 36 | } 37 | 38 | protected void init() { 39 | } 40 | 41 | protected abstract T getElement(int keyId); 42 | 43 | @Override 44 | public void run(SourceContext out) { 45 | init(); 46 | 47 | int keyId = 0; 48 | while (--remainingEvents >= 0) { 49 | T element = getElement(keyId); 50 | synchronized (out.getCheckpointLock()) { 51 | out.collect(element); 52 | } 53 | ++keyId; 54 | if (keyId >= numKeys) { 55 | keyId = 0; 56 | } 57 | } 58 | } 59 | 60 | @Override 61 | public void cancel() { 62 | this.remainingEvents = 0; 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/benchmark/functions/IntLongApplications.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.flink.benchmark.functions; 20 | 21 | import org.apache.flink.benchmark.CollectSink; 22 | import org.apache.flink.streaming.api.datastream.DataStreamSource; 23 | import org.apache.flink.streaming.api.windowing.assigners.WindowAssigner; 24 | import org.apache.flink.streaming.api.windowing.windows.Window; 25 | 26 | public class IntLongApplications { 27 | public static void reduceWithWindow( 28 | DataStreamSource source, 29 | WindowAssigner windowAssigner) { 30 | source 31 | .map(new MultiplyIntLongByTwo()) 32 | .keyBy(record -> record.key) 33 | .window(windowAssigner) 34 | .reduce(new SumReduceIntLong()) 35 | .addSink(new CollectSink()); 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/benchmark/functions/IntegerLongSource.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
17 | */ 18 | 19 | package org.apache.flink.benchmark.functions; 20 | 21 | import org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction; 22 | 23 | public class IntegerLongSource extends RichParallelSourceFunction { 24 | public static final class Record { 25 | public final int key; 26 | public final long value; 27 | 28 | public Record() { 29 | this(0, 0); 30 | } 31 | 32 | public Record(int key, long value) { 33 | this.key = key; 34 | this.value = value; 35 | } 36 | 37 | public static Record of(int key, long value) { 38 | return new Record(key, value); 39 | } 40 | 41 | public int getKey() { 42 | return key; 43 | } 44 | 45 | @Override 46 | public String toString() { 47 | return String.format("(%s, %s)", key, value); 48 | } 49 | } 50 | 51 | private volatile boolean running = true; 52 | private int numberOfKeys; 53 | private long numberOfElements; 54 | 55 | public IntegerLongSource(int numberOfKeys, long numberOfElements) { 56 | this.numberOfKeys = numberOfKeys; 57 | this.numberOfElements = numberOfElements; 58 | } 59 | 60 | @Override 61 | public void run(SourceContext ctx) throws Exception { 62 | long counter = 0; 63 | 64 | while (running && counter < numberOfElements) { 65 | synchronized (ctx.getCheckpointLock()) { 66 | ctx.collectWithTimestamp(Record.of((int) (counter % numberOfKeys), counter), counter); 67 | counter++; 68 | } 69 | } 70 | running = false; 71 | } 72 | 73 | @Override 74 | public void cancel() { 75 | running = false; 76 | } 77 | } -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/benchmark/functions/LongSource.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. 
The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.flink.benchmark.functions; 20 | 21 | import org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction; 22 | 23 | public class LongSource extends RichParallelSourceFunction { 24 | 25 | private volatile boolean running = true; 26 | private long maxValue; 27 | 28 | public LongSource(long maxValue) { 29 | this.maxValue = maxValue; 30 | } 31 | 32 | @Override 33 | public void run(SourceContext ctx) throws Exception { 34 | long counter = 0; 35 | 36 | while (running) { 37 | synchronized (ctx.getCheckpointLock()) { 38 | ctx.collect(counter); 39 | counter++; 40 | if (counter >= maxValue) { 41 | cancel(); 42 | } 43 | } 44 | } 45 | } 46 | 47 | @Override 48 | public void cancel() { 49 | running = false; 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/benchmark/functions/MultiplyByTwo.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. 
The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.flink.benchmark.functions; 20 | 21 | import org.apache.flink.api.common.functions.MapFunction; 22 | 23 | public class MultiplyByTwo implements MapFunction { 24 | @Override 25 | public Long map(Long value) throws Exception { 26 | return value * 2; 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/benchmark/functions/MultiplyIntLongByTwo.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.flink.benchmark.functions; 20 | 21 | import org.apache.flink.api.common.functions.MapFunction; 22 | 23 | public class MultiplyIntLongByTwo implements MapFunction { 24 | @Override 25 | public IntegerLongSource.Record map(IntegerLongSource.Record record) throws Exception { 26 | return IntegerLongSource.Record.of(record.key, record.value * 2); 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/benchmark/functions/QueuingLongSource.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
17 | */ 18 | 19 | package org.apache.flink.benchmark.functions; 20 | 21 | public class QueuingLongSource extends LongSource { 22 | 23 | private static Object lock = new Object(); 24 | 25 | private static int currentRank = 1; 26 | 27 | private final int rank; 28 | 29 | public QueuingLongSource(int rank, long maxValue) { 30 | super(maxValue); 31 | this.rank = rank; 32 | } 33 | 34 | @Override 35 | public void run(SourceContext ctx) throws Exception { 36 | synchronized (lock) { 37 | while (currentRank != rank) { 38 | lock.wait(); 39 | } 40 | } 41 | 42 | super.run(ctx); 43 | 44 | synchronized (lock) { 45 | currentRank++; 46 | lock.notifyAll(); 47 | } 48 | } 49 | 50 | public static void reset() { 51 | currentRank = 1; 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/benchmark/functions/SuccessException.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
package org.apache.flink.benchmark.functions;

/**
 * Marker exception thrown to signal that a benchmark job reached its expected
 * state; callers treat it as successful completion (see {@code TestUtils}).
 */
public class SuccessException extends Exception {
}

package org.apache.flink.benchmark.functions;

import org.apache.flink.api.common.functions.ReduceFunction;

/**
 * Reduce function that sums two {@code Long} values.
 */
public class SumReduce implements ReduceFunction<Long> {
    @Override
    public Long reduce(Long left, Long right) throws Exception {
        return left + right;
    }
}
See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.flink.benchmark.functions; 20 | 21 | import org.apache.flink.api.common.functions.ReduceFunction; 22 | 23 | public class SumReduceIntLong implements ReduceFunction { 24 | @Override 25 | public IntegerLongSource.Record reduce(IntegerLongSource.Record var1, IntegerLongSource.Record var2) throws Exception { 26 | return IntegerLongSource.Record.of(var1.key, var1.value + var2.value); 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/benchmark/functions/TestUtils.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. 
You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.flink.benchmark.functions; 20 | 21 | import org.apache.flink.api.common.JobExecutionResult; 22 | import org.apache.flink.client.program.ProgramInvocationException; 23 | import org.apache.flink.runtime.client.JobExecutionException; 24 | import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; 25 | 26 | import static org.junit.Assert.fail; 27 | 28 | /** 29 | * Test utilities. 30 | */ 31 | public class TestUtils { 32 | 33 | public static JobExecutionResult tryExecute(StreamExecutionEnvironment see, String name) throws Exception { 34 | try { 35 | return see.execute(name); 36 | } 37 | catch (ProgramInvocationException | JobExecutionException root) { 38 | Throwable cause = root.getCause(); 39 | 40 | // search for nested SuccessExceptions 41 | int depth = 0; 42 | while (!(cause instanceof SuccessException)) { 43 | if (cause == null || depth++ == 20) { 44 | root.printStackTrace(); 45 | fail("Test failed: " + root.getMessage()); 46 | } 47 | else { 48 | cause = cause.getCause(); 49 | } 50 | } 51 | } 52 | 53 | return null; 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/benchmark/functions/ValidatingCounter.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. 
See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.flink.benchmark.functions; 20 | 21 | import org.apache.flink.api.common.functions.ReduceFunction; 22 | 23 | public class ValidatingCounter implements ReduceFunction { 24 | private long expectedCount; 25 | private long count = 0; 26 | 27 | public ValidatingCounter(long expectedCount) { 28 | this.expectedCount = expectedCount; 29 | } 30 | 31 | @Override 32 | public T reduce(T value1, T value2) throws Exception { 33 | count++; 34 | if (count >= expectedCount) { 35 | throw new SuccessException(); 36 | } 37 | return value1; 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/benchmark/operators/MultiplyByTwoCoStreamMap.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. 
You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.flink.benchmark.operators; 20 | 21 | import org.apache.flink.streaming.api.operators.AbstractStreamOperator; 22 | import org.apache.flink.streaming.api.operators.TwoInputStreamOperator; 23 | import org.apache.flink.streaming.runtime.streamrecord.StreamRecord; 24 | 25 | public class MultiplyByTwoCoStreamMap 26 | extends AbstractStreamOperator 27 | implements TwoInputStreamOperator { 28 | 29 | @Override 30 | public void processElement1(StreamRecord element) { 31 | output.collect(element.replace(element.getValue() * 2)); 32 | } 33 | 34 | @Override 35 | public void processElement2(StreamRecord element) { 36 | output.collect(element.replace(element.getValue() * 2)); 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/benchmark/thrift/MyOperation.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Autogenerated by Thrift Compiler (0.13.0) 3 | * 4 | * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING 5 | * @generated 6 | */ 7 | package org.apache.flink.benchmark.thrift; 8 | 9 | @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked", "unused"}) 10 | @javax.annotation.Generated(value = "Autogenerated by Thrift Compiler (0.13.0)", date = "2020-03-06") 11 | public class MyOperation implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { 12 | private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("MyOperation"); 13 | 14 | private static final org.apache.thrift.protocol.TField ID_FIELD_DESC = new org.apache.thrift.protocol.TField("id", org.apache.thrift.protocol.TType.I32, (short)1); 15 | private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)2); 16 | 17 | private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new MyOperationStandardSchemeFactory(); 18 | private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new MyOperationTupleSchemeFactory(); 19 | 20 | public int id; // required 21 | public @org.apache.thrift.annotation.Nullable java.lang.String name; // required 22 | 23 | /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ 24 | public enum _Fields implements org.apache.thrift.TFieldIdEnum { 25 | ID((short)1, "id"), 26 | NAME((short)2, "name"); 27 | 28 | private static final java.util.Map byName = new java.util.HashMap(); 29 | 30 | static { 31 | for (_Fields field : java.util.EnumSet.allOf(_Fields.class)) { 32 | byName.put(field.getFieldName(), field); 33 | } 34 | } 35 | 36 | /** 37 | * Find the _Fields constant that matches fieldId, or null if its not found. 38 | */ 39 | @org.apache.thrift.annotation.Nullable 40 | public static _Fields findByThriftId(int fieldId) { 41 | switch(fieldId) { 42 | case 1: // ID 43 | return ID; 44 | case 2: // NAME 45 | return NAME; 46 | default: 47 | return null; 48 | } 49 | } 50 | 51 | /** 52 | * Find the _Fields constant that matches fieldId, throwing an exception 53 | * if it is not found. 
54 | */ 55 | public static _Fields findByThriftIdOrThrow(int fieldId) { 56 | _Fields fields = findByThriftId(fieldId); 57 | if (fields == null) throw new java.lang.IllegalArgumentException("Field " + fieldId + " doesn't exist!"); 58 | return fields; 59 | } 60 | 61 | /** 62 | * Find the _Fields constant that matches name, or null if its not found. 63 | */ 64 | @org.apache.thrift.annotation.Nullable 65 | public static _Fields findByName(java.lang.String name) { 66 | return byName.get(name); 67 | } 68 | 69 | private final short _thriftId; 70 | private final java.lang.String _fieldName; 71 | 72 | _Fields(short thriftId, java.lang.String fieldName) { 73 | _thriftId = thriftId; 74 | _fieldName = fieldName; 75 | } 76 | 77 | public short getThriftFieldId() { 78 | return _thriftId; 79 | } 80 | 81 | public java.lang.String getFieldName() { 82 | return _fieldName; 83 | } 84 | } 85 | 86 | // isset id assignments 87 | private static final int __ID_ISSET_ID = 0; 88 | private byte __isset_bitfield = 0; 89 | public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; 90 | static { 91 | java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); 92 | tmpMap.put(_Fields.ID, new org.apache.thrift.meta_data.FieldMetaData("id", org.apache.thrift.TFieldRequirementType.DEFAULT, 93 | new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32 , "int"))); 94 | tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, 95 | new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); 96 | metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); 97 | org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(MyOperation.class, metaDataMap); 98 | } 99 | 100 | public MyOperation() { 101 | } 102 | 103 | public 
MyOperation( 104 | int id, 105 | java.lang.String name) 106 | { 107 | this(); 108 | this.id = id; 109 | setIdIsSet(true); 110 | this.name = name; 111 | } 112 | 113 | /** 114 | * Performs a deep copy on other. 115 | */ 116 | public MyOperation(MyOperation other) { 117 | __isset_bitfield = other.__isset_bitfield; 118 | this.id = other.id; 119 | if (other.isSetName()) { 120 | this.name = other.name; 121 | } 122 | } 123 | 124 | public MyOperation deepCopy() { 125 | return new MyOperation(this); 126 | } 127 | 128 | @Override 129 | public void clear() { 130 | setIdIsSet(false); 131 | this.id = 0; 132 | this.name = null; 133 | } 134 | 135 | public int getId() { 136 | return this.id; 137 | } 138 | 139 | public MyOperation setId(int id) { 140 | this.id = id; 141 | setIdIsSet(true); 142 | return this; 143 | } 144 | 145 | public void unsetId() { 146 | __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __ID_ISSET_ID); 147 | } 148 | 149 | /** Returns true if field id is set (has been assigned a value) and false otherwise */ 150 | public boolean isSetId() { 151 | return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __ID_ISSET_ID); 152 | } 153 | 154 | public void setIdIsSet(boolean value) { 155 | __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __ID_ISSET_ID, value); 156 | } 157 | 158 | @org.apache.thrift.annotation.Nullable 159 | public java.lang.String getName() { 160 | return this.name; 161 | } 162 | 163 | public MyOperation setName(@org.apache.thrift.annotation.Nullable java.lang.String name) { 164 | this.name = name; 165 | return this; 166 | } 167 | 168 | public void unsetName() { 169 | this.name = null; 170 | } 171 | 172 | /** Returns true if field name is set (has been assigned a value) and false otherwise */ 173 | public boolean isSetName() { 174 | return this.name != null; 175 | } 176 | 177 | public void setNameIsSet(boolean value) { 178 | if (!value) { 179 | this.name = null; 180 | } 181 | } 182 | 183 | 
public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { 184 | switch (field) { 185 | case ID: 186 | if (value == null) { 187 | unsetId(); 188 | } else { 189 | setId((java.lang.Integer)value); 190 | } 191 | break; 192 | 193 | case NAME: 194 | if (value == null) { 195 | unsetName(); 196 | } else { 197 | setName((java.lang.String)value); 198 | } 199 | break; 200 | 201 | } 202 | } 203 | 204 | @org.apache.thrift.annotation.Nullable 205 | public java.lang.Object getFieldValue(_Fields field) { 206 | switch (field) { 207 | case ID: 208 | return getId(); 209 | 210 | case NAME: 211 | return getName(); 212 | 213 | } 214 | throw new java.lang.IllegalStateException(); 215 | } 216 | 217 | /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ 218 | public boolean isSet(_Fields field) { 219 | if (field == null) { 220 | throw new java.lang.IllegalArgumentException(); 221 | } 222 | 223 | switch (field) { 224 | case ID: 225 | return isSetId(); 226 | case NAME: 227 | return isSetName(); 228 | } 229 | throw new java.lang.IllegalStateException(); 230 | } 231 | 232 | @Override 233 | public boolean equals(java.lang.Object that) { 234 | if (that == null) 235 | return false; 236 | if (that instanceof MyOperation) 237 | return this.equals((MyOperation)that); 238 | return false; 239 | } 240 | 241 | public boolean equals(MyOperation that) { 242 | if (that == null) 243 | return false; 244 | if (this == that) 245 | return true; 246 | 247 | boolean this_present_id = true; 248 | boolean that_present_id = true; 249 | if (this_present_id || that_present_id) { 250 | if (!(this_present_id && that_present_id)) 251 | return false; 252 | if (this.id != that.id) 253 | return false; 254 | } 255 | 256 | boolean this_present_name = true && this.isSetName(); 257 | boolean that_present_name = true && that.isSetName(); 258 | if (this_present_name || that_present_name) { 259 | if (!(this_present_name && 
that_present_name)) 260 | return false; 261 | if (!this.name.equals(that.name)) 262 | return false; 263 | } 264 | 265 | return true; 266 | } 267 | 268 | @Override 269 | public int hashCode() { 270 | int hashCode = 1; 271 | 272 | hashCode = hashCode * 8191 + id; 273 | 274 | hashCode = hashCode * 8191 + ((isSetName()) ? 131071 : 524287); 275 | if (isSetName()) 276 | hashCode = hashCode * 8191 + name.hashCode(); 277 | 278 | return hashCode; 279 | } 280 | 281 | @Override 282 | public int compareTo(MyOperation other) { 283 | if (!getClass().equals(other.getClass())) { 284 | return getClass().getName().compareTo(other.getClass().getName()); 285 | } 286 | 287 | int lastComparison = 0; 288 | 289 | lastComparison = java.lang.Boolean.valueOf(isSetId()).compareTo(other.isSetId()); 290 | if (lastComparison != 0) { 291 | return lastComparison; 292 | } 293 | if (isSetId()) { 294 | lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.id, other.id); 295 | if (lastComparison != 0) { 296 | return lastComparison; 297 | } 298 | } 299 | lastComparison = java.lang.Boolean.valueOf(isSetName()).compareTo(other.isSetName()); 300 | if (lastComparison != 0) { 301 | return lastComparison; 302 | } 303 | if (isSetName()) { 304 | lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name); 305 | if (lastComparison != 0) { 306 | return lastComparison; 307 | } 308 | } 309 | return 0; 310 | } 311 | 312 | @org.apache.thrift.annotation.Nullable 313 | public _Fields fieldForId(int fieldId) { 314 | return _Fields.findByThriftId(fieldId); 315 | } 316 | 317 | public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { 318 | scheme(iprot).read(iprot, this); 319 | } 320 | 321 | public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { 322 | scheme(oprot).write(oprot, this); 323 | } 324 | 325 | @Override 326 | public java.lang.String toString() { 327 | java.lang.StringBuilder sb = new 
java.lang.StringBuilder("MyOperation("); 328 | boolean first = true; 329 | 330 | sb.append("id:"); 331 | sb.append(this.id); 332 | first = false; 333 | if (!first) sb.append(", "); 334 | sb.append("name:"); 335 | if (this.name == null) { 336 | sb.append("null"); 337 | } else { 338 | sb.append(this.name); 339 | } 340 | first = false; 341 | sb.append(")"); 342 | return sb.toString(); 343 | } 344 | 345 | public void validate() throws org.apache.thrift.TException { 346 | // check for required fields 347 | // check for sub-struct validity 348 | } 349 | 350 | private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { 351 | try { 352 | write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); 353 | } catch (org.apache.thrift.TException te) { 354 | throw new java.io.IOException(te); 355 | } 356 | } 357 | 358 | private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException { 359 | try { 360 | // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
361 | __isset_bitfield = 0; 362 | read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); 363 | } catch (org.apache.thrift.TException te) { 364 | throw new java.io.IOException(te); 365 | } 366 | } 367 | 368 | private static class MyOperationStandardSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { 369 | public MyOperationStandardScheme getScheme() { 370 | return new MyOperationStandardScheme(); 371 | } 372 | } 373 | 374 | private static class MyOperationStandardScheme extends org.apache.thrift.scheme.StandardScheme { 375 | 376 | public void read(org.apache.thrift.protocol.TProtocol iprot, MyOperation struct) throws org.apache.thrift.TException { 377 | org.apache.thrift.protocol.TField schemeField; 378 | iprot.readStructBegin(); 379 | while (true) 380 | { 381 | schemeField = iprot.readFieldBegin(); 382 | if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 383 | break; 384 | } 385 | switch (schemeField.id) { 386 | case 1: // ID 387 | if (schemeField.type == org.apache.thrift.protocol.TType.I32) { 388 | struct.id = iprot.readI32(); 389 | struct.setIdIsSet(true); 390 | } else { 391 | org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); 392 | } 393 | break; 394 | case 2: // NAME 395 | if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { 396 | struct.name = iprot.readString(); 397 | struct.setNameIsSet(true); 398 | } else { 399 | org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); 400 | } 401 | break; 402 | default: 403 | org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); 404 | } 405 | iprot.readFieldEnd(); 406 | } 407 | iprot.readStructEnd(); 408 | 409 | // check for required fields of primitive type, which can't be checked in the validate method 410 | struct.validate(); 411 | } 412 | 413 | public void write(org.apache.thrift.protocol.TProtocol oprot, MyOperation struct) throws org.apache.thrift.TException { 414 | 
struct.validate(); 415 | 416 | oprot.writeStructBegin(STRUCT_DESC); 417 | oprot.writeFieldBegin(ID_FIELD_DESC); 418 | oprot.writeI32(struct.id); 419 | oprot.writeFieldEnd(); 420 | if (struct.name != null) { 421 | oprot.writeFieldBegin(NAME_FIELD_DESC); 422 | oprot.writeString(struct.name); 423 | oprot.writeFieldEnd(); 424 | } 425 | oprot.writeFieldStop(); 426 | oprot.writeStructEnd(); 427 | } 428 | 429 | } 430 | 431 | private static class MyOperationTupleSchemeFactory implements org.apache.thrift.scheme.SchemeFactory { 432 | public MyOperationTupleScheme getScheme() { 433 | return new MyOperationTupleScheme(); 434 | } 435 | } 436 | 437 | private static class MyOperationTupleScheme extends org.apache.thrift.scheme.TupleScheme { 438 | 439 | @Override 440 | public void write(org.apache.thrift.protocol.TProtocol prot, MyOperation struct) throws org.apache.thrift.TException { 441 | org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; 442 | java.util.BitSet optionals = new java.util.BitSet(); 443 | if (struct.isSetId()) { 444 | optionals.set(0); 445 | } 446 | if (struct.isSetName()) { 447 | optionals.set(1); 448 | } 449 | oprot.writeBitSet(optionals, 2); 450 | if (struct.isSetId()) { 451 | oprot.writeI32(struct.id); 452 | } 453 | if (struct.isSetName()) { 454 | oprot.writeString(struct.name); 455 | } 456 | } 457 | 458 | @Override 459 | public void read(org.apache.thrift.protocol.TProtocol prot, MyOperation struct) throws org.apache.thrift.TException { 460 | org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; 461 | java.util.BitSet incoming = iprot.readBitSet(2); 462 | if (incoming.get(0)) { 463 | struct.id = iprot.readI32(); 464 | struct.setIdIsSet(true); 465 | } 466 | if (incoming.get(1)) { 467 | struct.name = iprot.readString(); 468 | struct.setNameIsSet(true); 469 | } 470 | } 471 | } 472 | 473 | private static S scheme(org.apache.thrift.protocol.TProtocol proto) { 474 | 
return (org.apache.thrift.scheme.StandardScheme.class.equals(proto.getScheme()) ? STANDARD_SCHEME_FACTORY : TUPLE_SCHEME_FACTORY).getScheme(); 475 | } 476 | } 477 | 478 | -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/state/benchmark/ListStateBenchmark.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
package org.apache.flink.state.benchmark;

import org.apache.flink.api.common.state.ListState;
import org.apache.flink.api.common.state.ListStateDescriptor;
import org.apache.flink.contrib.streaming.state.RocksDBKeyedStateBackend;

import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.infra.Blackhole;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

import static org.apache.flink.contrib.streaming.state.benchmark.StateBackendBenchmarkUtils.applyToAllKeys;
import static org.apache.flink.contrib.streaming.state.benchmark.StateBackendBenchmarkUtils.compactState;
import static org.apache.flink.contrib.streaming.state.benchmark.StateBackendBenchmarkUtils.createKeyedStateBackend;
import static org.apache.flink.contrib.streaming.state.benchmark.StateBackendBenchmarkUtils.getListState;
import static org.apache.flink.state.benchmark.StateBenchmarkConstants.listValueCount;
import static org.apache.flink.state.benchmark.StateBenchmarkConstants.setupKeyCount;

/**
 * Implementation for list state benchmark testing.
 */
public class ListStateBenchmark extends StateBenchmarkBase {
    // Constants, not per-instance state: made static final (the original declared
    // them as instance fields despite the constant naming convention).
    private static final String STATE_NAME = "listState";
    private static final ListStateDescriptor<Long> STATE_DESC =
            new ListStateDescriptor<>(STATE_NAME, Long.class);

    private ListState<Long> listState;
    // A single pre-generated list of listValueCount random values, reused by
    // listAddAll (renamed from the misleading "dummyLists" — it is one list of
    // values, not a list of lists).
    private List<Long> dummyListValue;

    public static void main(String[] args) throws RunnerException {
        Options opt = new OptionsBuilder()
                .verbosity(VerboseMode.NORMAL)
                .include(".*" + ListStateBenchmark.class.getCanonicalName() + ".*")
                .build();

        new Runner(opt).run();
    }

    /** One-time setup: create the backend, the state handle, and the reusable payload list. */
    @Setup
    public void setUp() throws Exception {
        keyedStateBackend = createKeyedStateBackend(backendType);
        listState = getListState(keyedStateBackend, STATE_DESC);
        dummyListValue = new ArrayList<>(listValueCount);
        for (int i = 0; i < listValueCount; ++i) {
            dummyListValue.add(random.nextLong());
        }
        keyIndex = new AtomicInteger();
    }

    /** Per-iteration setup: seed one list element for each of the setup keys. */
    @Setup(Level.Iteration)
    public void setUpPerIteration() throws Exception {
        for (int i = 0; i < setupKeyCount; ++i) {
            keyedStateBackend.setCurrentKey((long) i);
            listState.add(random.nextLong());
        }
        // make sure only one sst file left, so all get invocation will access this single file,
        // to prevent the spike caused by different key distribution in multiple sst files,
        // the more access to the older sst file, the lower throughput will be.
        if (keyedStateBackend instanceof RocksDBKeyedStateBackend) {
            RocksDBKeyedStateBackend<Long> rocksDBKeyedStateBackend =
                    (RocksDBKeyedStateBackend<Long>) keyedStateBackend;
            compactState(rocksDBKeyedStateBackend, STATE_DESC);
        }
    }

    /** Per-iteration teardown: clear all state so iterations do not accumulate data. */
    @TearDown(Level.Iteration)
    public void tearDownPerIteration() throws Exception {
        applyToAllKeys(
                keyedStateBackend,
                STATE_DESC,
                (k, state) -> {
                    keyedStateBackend.setCurrentKey(k);
                    state.clear();
                });
        // make the clearance effective, trigger compaction for RocksDB, and GC for heap.
        if (keyedStateBackend instanceof RocksDBKeyedStateBackend) {
            RocksDBKeyedStateBackend<Long> rocksDBKeyedStateBackend =
                    (RocksDBKeyedStateBackend<Long>) keyedStateBackend;
            compactState(rocksDBKeyedStateBackend, STATE_DESC);
        } else {
            System.gc();
        }
        // wait a while for the clearance to take effect.
        Thread.sleep(1000);
    }

    /** Measures overwriting the whole list under a key that already holds state. */
    @Benchmark
    public void listUpdate(KeyValue keyValue) throws Exception {
        keyedStateBackend.setCurrentKey(keyValue.setUpKey);
        listState.update(keyValue.listValue);
    }

    /** Measures writing a whole list under a previously unused key (first insert). */
    @Benchmark
    public void listAdd(KeyValue keyValue) throws Exception {
        keyedStateBackend.setCurrentKey(keyValue.newKey);
        listState.update(keyValue.listValue);
    }

    /** Measures appending a single element to an existing list. */
    @Benchmark
    public void listAppend(KeyValue keyValue) throws Exception {
        keyedStateBackend.setCurrentKey(keyValue.setUpKey);
        listState.add(keyValue.value);
    }

    /** Measures fetching the list handle without iterating it. */
    @Benchmark
    public Iterable<Long> listGet(KeyValue keyValue) throws Exception {
        keyedStateBackend.setCurrentKey(keyValue.setUpKey);
        return listState.get();
    }

    /** Measures fetching the list and fully iterating its elements. */
    @Benchmark
    public void listGetAndIterate(KeyValue keyValue, Blackhole bh) throws Exception {
        keyedStateBackend.setCurrentKey(keyValue.setUpKey);
        Iterable<Long> iterable = listState.get();
        for (Long value : iterable) {
            bh.consume(value);
        }
    }

    /** Measures bulk-appending listValueCount elements to an existing list. */
    @Benchmark
    public void listAddAll(KeyValue keyValue) throws Exception {
        keyedStateBackend.setCurrentKey(keyValue.setUpKey);
        listState.addAll(dummyListValue);
    }
}
17 | */ 18 | 19 | package org.apache.flink.state.benchmark; 20 | 21 | import org.apache.flink.api.common.state.MapState; 22 | import org.apache.flink.api.common.state.MapStateDescriptor; 23 | 24 | import org.openjdk.jmh.annotations.Benchmark; 25 | import org.openjdk.jmh.annotations.OperationsPerInvocation; 26 | import org.openjdk.jmh.annotations.Setup; 27 | import org.openjdk.jmh.infra.Blackhole; 28 | import org.openjdk.jmh.runner.Runner; 29 | import org.openjdk.jmh.runner.RunnerException; 30 | import org.openjdk.jmh.runner.options.Options; 31 | import org.openjdk.jmh.runner.options.OptionsBuilder; 32 | import org.openjdk.jmh.runner.options.VerboseMode; 33 | 34 | import java.util.HashMap; 35 | import java.util.Iterator; 36 | import java.util.Map; 37 | import java.util.concurrent.atomic.AtomicInteger; 38 | 39 | import static org.apache.flink.contrib.streaming.state.benchmark.StateBackendBenchmarkUtils.createKeyedStateBackend; 40 | import static org.apache.flink.contrib.streaming.state.benchmark.StateBackendBenchmarkUtils.getMapState; 41 | import static org.apache.flink.state.benchmark.StateBenchmarkConstants.mapKeyCount; 42 | import static org.apache.flink.state.benchmark.StateBenchmarkConstants.mapKeys; 43 | import static org.apache.flink.state.benchmark.StateBenchmarkConstants.setupKeyCount; 44 | 45 | /** 46 | * Implementation for map state benchmark testing. 
47 | */ 48 | public class MapStateBenchmark extends StateBenchmarkBase { 49 | private MapState mapState; 50 | private Map dummyMaps; 51 | 52 | public static void main(String[] args) throws RunnerException { 53 | Options opt = new OptionsBuilder() 54 | .verbosity(VerboseMode.NORMAL) 55 | .include(".*" + MapStateBenchmark.class.getCanonicalName() + ".*") 56 | .build(); 57 | 58 | new Runner(opt).run(); 59 | } 60 | 61 | @Setup 62 | public void setUp() throws Exception { 63 | keyedStateBackend = createKeyedStateBackend(backendType); 64 | mapState = getMapState( 65 | keyedStateBackend, 66 | new MapStateDescriptor<>("mapState", Long.class, Double.class)); 67 | dummyMaps = new HashMap<>(mapKeyCount); 68 | for (int i = 0; i < mapKeyCount; ++i) { 69 | dummyMaps.put(mapKeys.get(i), random.nextDouble()); 70 | } 71 | for (int i = 0; i < setupKeyCount; ++i) { 72 | keyedStateBackend.setCurrentKey((long) i); 73 | for (int j = 0; j < mapKeyCount; j++) { 74 | mapState.put(mapKeys.get(j), random.nextDouble()); 75 | } 76 | } 77 | keyIndex = new AtomicInteger(); 78 | } 79 | 80 | @Benchmark 81 | public void mapUpdate(KeyValue keyValue) throws Exception { 82 | keyedStateBackend.setCurrentKey(keyValue.setUpKey); 83 | mapState.put(keyValue.mapKey, keyValue.mapValue); 84 | } 85 | 86 | @Benchmark 87 | public void mapAdd(KeyValue keyValue) throws Exception { 88 | keyedStateBackend.setCurrentKey(keyValue.newKey); 89 | mapState.put(keyValue.mapKey, keyValue.mapValue); 90 | } 91 | 92 | @Benchmark 93 | public Double mapGet(KeyValue keyValue) throws Exception { 94 | keyedStateBackend.setCurrentKey(keyValue.setUpKey); 95 | return mapState.get(keyValue.mapKey); 96 | } 97 | 98 | @Benchmark 99 | public boolean mapContains(KeyValue keyValue) throws Exception { 100 | keyedStateBackend.setCurrentKey(keyValue.setUpKey); 101 | return mapState.contains(keyValue.mapKey << 1); 102 | } 103 | 104 | @Benchmark 105 | public boolean mapIsEmpty(KeyValue keyValue) throws Exception { 106 | 
keyedStateBackend.setCurrentKey(keyValue.setUpKey); 107 | return mapState.isEmpty(); 108 | } 109 | 110 | @Benchmark 111 | @OperationsPerInvocation(mapKeyCount) 112 | public void mapKeys(KeyValue keyValue, Blackhole bh) throws Exception { 113 | keyedStateBackend.setCurrentKey(keyValue.setUpKey); 114 | for (Long key : mapState.keys()) { 115 | bh.consume(key); 116 | } 117 | } 118 | 119 | @Benchmark 120 | @OperationsPerInvocation(mapKeyCount) 121 | public void mapValues(KeyValue keyValue, Blackhole bh) throws Exception { 122 | keyedStateBackend.setCurrentKey(keyValue.setUpKey); 123 | for (Double value : mapState.values()) { 124 | bh.consume(value); 125 | } 126 | } 127 | 128 | @Benchmark 129 | @OperationsPerInvocation(mapKeyCount) 130 | public void mapEntries(KeyValue keyValue, Blackhole bh) throws Exception { 131 | keyedStateBackend.setCurrentKey(keyValue.setUpKey); 132 | Iterable> iterable = mapState.entries(); 133 | if (iterable != null) { 134 | for (Map.Entry entry : mapState.entries()) { 135 | bh.consume(entry.getKey()); 136 | bh.consume(entry.getValue()); 137 | } 138 | } 139 | } 140 | 141 | @Benchmark 142 | @OperationsPerInvocation(mapKeyCount) 143 | public void mapIterator(KeyValue keyValue, Blackhole bh) throws Exception { 144 | keyedStateBackend.setCurrentKey(keyValue.setUpKey); 145 | Iterator> iterator = mapState.iterator(); 146 | while (iterator.hasNext()) { 147 | Map.Entry entry = iterator.next(); 148 | bh.consume(entry.getKey()); 149 | bh.consume(entry.getValue()); 150 | } 151 | } 152 | 153 | @Benchmark 154 | public void mapRemove(KeyValue keyValue) throws Exception { 155 | keyedStateBackend.setCurrentKey(keyValue.setUpKey); 156 | mapState.remove(keyValue.mapKey); 157 | } 158 | 159 | @Benchmark 160 | public void mapPutAll(KeyValue keyValue) throws Exception { 161 | keyedStateBackend.setCurrentKey(keyValue.setUpKey); 162 | mapState.putAll(dummyMaps); 163 | } 164 | } 165 | -------------------------------------------------------------------------------- 
/src/main/java/org/apache/flink/state/benchmark/StateBenchmarkBase.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
17 | */ 18 | package org.apache.flink.state.benchmark; 19 | 20 | import org.apache.flink.benchmark.BenchmarkBase; 21 | import org.apache.flink.contrib.streaming.state.benchmark.StateBackendBenchmarkUtils; 22 | import org.apache.flink.runtime.state.KeyedStateBackend; 23 | 24 | import org.openjdk.jmh.annotations.Level; 25 | import org.openjdk.jmh.annotations.Param; 26 | import org.openjdk.jmh.annotations.Scope; 27 | import org.openjdk.jmh.annotations.Setup; 28 | import org.openjdk.jmh.annotations.State; 29 | import org.openjdk.jmh.annotations.TearDown; 30 | 31 | import java.io.IOException; 32 | import java.util.Collections; 33 | import java.util.List; 34 | import java.util.concurrent.ThreadLocalRandom; 35 | import java.util.concurrent.atomic.AtomicInteger; 36 | 37 | import static org.apache.flink.contrib.streaming.state.benchmark.StateBackendBenchmarkUtils.cleanUp; 38 | import static org.apache.flink.state.benchmark.StateBenchmarkConstants.mapKeyCount; 39 | import static org.apache.flink.state.benchmark.StateBenchmarkConstants.mapKeys; 40 | import static org.apache.flink.state.benchmark.StateBenchmarkConstants.mapValues; 41 | import static org.apache.flink.state.benchmark.StateBenchmarkConstants.newKeyCount; 42 | import static org.apache.flink.state.benchmark.StateBenchmarkConstants.newKeys; 43 | import static org.apache.flink.state.benchmark.StateBenchmarkConstants.randomValueCount; 44 | import static org.apache.flink.state.benchmark.StateBenchmarkConstants.randomValues; 45 | import static org.apache.flink.state.benchmark.StateBenchmarkConstants.setupKeyCount; 46 | import static org.apache.flink.state.benchmark.StateBenchmarkConstants.setupKeys; 47 | 48 | /** 49 | * Base implementation of the state benchmarks. 
50 | */ 51 | public class StateBenchmarkBase extends BenchmarkBase { 52 | KeyedStateBackend keyedStateBackend; 53 | 54 | @Param({"HEAP", "ROCKSDB"}) 55 | protected StateBackendBenchmarkUtils.StateBackendType backendType; 56 | 57 | final ThreadLocalRandom random = ThreadLocalRandom.current(); 58 | 59 | @TearDown 60 | public void tearDown() throws IOException { 61 | cleanUp(keyedStateBackend); 62 | } 63 | 64 | static AtomicInteger keyIndex; 65 | 66 | private static int getCurrentIndex() { 67 | int currentIndex = keyIndex.getAndIncrement(); 68 | if (currentIndex == Integer.MAX_VALUE) { 69 | keyIndex.set(0); 70 | } 71 | return currentIndex; 72 | } 73 | 74 | @State(Scope.Thread) 75 | public static class KeyValue { 76 | @Setup(Level.Invocation) 77 | public void kvSetup() { 78 | int currentIndex = getCurrentIndex(); 79 | setUpKey = setupKeys.get(currentIndex % setupKeyCount); 80 | newKey = newKeys.get(currentIndex % newKeyCount); 81 | mapKey = mapKeys.get(currentIndex % mapKeyCount); 82 | mapValue = mapValues.get(currentIndex % mapKeyCount); 83 | value = randomValues.get(currentIndex % randomValueCount); 84 | listValue = Collections.singletonList(randomValues.get(currentIndex % randomValueCount)); 85 | } 86 | 87 | @TearDown(Level.Invocation) 88 | public void kvTearDown() { 89 | listValue = null; 90 | } 91 | 92 | long newKey; 93 | long setUpKey; 94 | long mapKey; 95 | double mapValue; 96 | long value; 97 | List listValue; 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/state/benchmark/StateBenchmarkConstants.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. 
The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.flink.state.benchmark; 20 | 21 | import java.util.ArrayList; 22 | import java.util.Collections; 23 | import java.util.Random; 24 | 25 | /** 26 | * Constants for state benchmark tests. Also generates random keys/values in advance to avoid 27 | * possible affect of using {@link Random#nextLong()} 28 | */ 29 | class StateBenchmarkConstants { 30 | static final int mapKeyCount = 10; 31 | static final int listValueCount = 100; 32 | static final int setupKeyCount = 500_000; 33 | static final String rootDirName = "benchmark"; 34 | static final String recoveryDirName = "localRecovery"; 35 | static final String dbDirName = "dbPath"; 36 | 37 | static final ArrayList mapKeys = new ArrayList<>(mapKeyCount); 38 | 39 | static { 40 | for (int i = 0; i < mapKeyCount; i++) { 41 | mapKeys.add((long) i); 42 | } 43 | Collections.shuffle(mapKeys); 44 | } 45 | 46 | static final ArrayList mapValues = new ArrayList<>(mapKeyCount); 47 | 48 | static { 49 | Random random = new Random(); 50 | for (int i = 0; i < mapKeyCount; i++) { 51 | mapValues.add(random.nextDouble()); 52 | } 53 | Collections.shuffle(mapValues); 54 | } 55 | 56 | static final ArrayList setupKeys = new ArrayList<>(setupKeyCount); 57 | 58 | static { 59 | for (long i = 0; i < setupKeyCount; i++) { 60 | setupKeys.add(i); 61 | } 62 | Collections.shuffle(setupKeys); 63 | } 64 | 65 | static 
final int newKeyCount = 500_000; 66 | static final ArrayList newKeys = new ArrayList<>(newKeyCount); 67 | 68 | static { 69 | for (long i = 0; i < newKeyCount; i++) { 70 | newKeys.add(i + setupKeyCount); 71 | } 72 | Collections.shuffle(newKeys); 73 | } 74 | 75 | static final int randomValueCount = 1_000_000; 76 | static final ArrayList randomValues = new ArrayList<>(randomValueCount); 77 | 78 | static { 79 | for (long i = 0; i < randomValueCount; i++) { 80 | randomValues.add(i); 81 | } 82 | Collections.shuffle(randomValues); 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /src/main/java/org/apache/flink/state/benchmark/ValueStateBenchmark.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
 */

package org.apache.flink.state.benchmark;

import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;

import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;

import static org.apache.flink.contrib.streaming.state.benchmark.StateBackendBenchmarkUtils.createKeyedStateBackend;
import static org.apache.flink.contrib.streaming.state.benchmark.StateBackendBenchmarkUtils.getValueState;
import static org.apache.flink.state.benchmark.StateBenchmarkConstants.setupKeyCount;

/**
 * Implementation for value state benchmark testing.
41 | */ 42 | public class ValueStateBenchmark extends StateBenchmarkBase { 43 | private ValueState valueState; 44 | 45 | public static void main(String[] args) throws RunnerException { 46 | Options opt = new OptionsBuilder() 47 | .verbosity(VerboseMode.NORMAL) 48 | .include(".*" + ValueStateBenchmark.class.getCanonicalName() + ".*") 49 | .build(); 50 | 51 | new Runner(opt).run(); 52 | } 53 | 54 | @Setup 55 | public void setUp() throws Exception { 56 | keyedStateBackend = createKeyedStateBackend(backendType); 57 | valueState = getValueState( 58 | keyedStateBackend, 59 | new ValueStateDescriptor<>("kvState", Long.class)); 60 | for (int i = 0; i < setupKeyCount; ++i) { 61 | keyedStateBackend.setCurrentKey((long) i); 62 | valueState.update(random.nextLong()); 63 | } 64 | keyIndex = new AtomicInteger(); 65 | } 66 | 67 | @Benchmark 68 | public void valueUpdate(KeyValue keyValue) throws IOException { 69 | keyedStateBackend.setCurrentKey(keyValue.setUpKey); 70 | valueState.update(keyValue.value); 71 | } 72 | 73 | @Benchmark 74 | public void valueAdd(KeyValue keyValue) throws IOException { 75 | keyedStateBackend.setCurrentKey(keyValue.newKey); 76 | valueState.update(keyValue.value); 77 | } 78 | 79 | @Benchmark 80 | public Long valueGet(KeyValue keyValue) throws IOException { 81 | keyedStateBackend.setCurrentKey(keyValue.setUpKey); 82 | return valueState.value(); 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /src/main/resources/avro/mypojo.avsc: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. 
The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | {"namespace": "org.apache.flink.benchmark.avro", 20 | "type": "record", 21 | "name": "MyPojo", 22 | "fields": [ 23 | { 24 | "name": "id", 25 | "type": "int" 26 | }, 27 | { 28 | "name": "name", 29 | "type": "string" 30 | }, 31 | { 32 | "name": "operationName", 33 | "type": { 34 | "type":"array", 35 | "items": "string" 36 | } 37 | }, 38 | { 39 | "name": "operations", 40 | "type": { 41 | "type": "array", 42 | "items": { 43 | "type": "record", 44 | "name": "MyOperation", 45 | "fields": [ 46 | { 47 | "name": "id", 48 | "type": "int" 49 | }, 50 | { 51 | "name": "name", 52 | "type": "string" 53 | } 54 | ] 55 | 56 | } 57 | } 58 | }, 59 | { 60 | "name": "otherId1", 61 | "type": "int" 62 | }, 63 | { 64 | "name": "otherId2", 65 | "type": "int" 66 | }, 67 | { 68 | "name": "otherId3", 69 | "type": "int" 70 | }, 71 | { 72 | "name": "nullable", 73 | "type": ["null", "string"] 74 | } 75 | ] 76 | } -------------------------------------------------------------------------------- /src/main/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Licensed to the Apache Software Foundation (ASF) under one 3 | # or more contributor license agreements. 
See the NOTICE file 4 | # distributed with this work for additional information 5 | # regarding copyright ownership. The ASF licenses this file 6 | # to you under the Apache License, Version 2.0 (the 7 | # "License"); you may not use this file except in compliance 8 | # with the License. You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | ################################################################################ 18 | 19 | log4j.rootLogger=ERROR, console 20 | 21 | log4j.appender.console=org.apache.log4j.ConsoleAppender 22 | log4j.appender.console.layout=org.apache.log4j.PatternLayout 23 | log4j.appender.console.layout.ConversionPattern=%d{HH:mm:ss,SSS} %-5p %-60c %x - %m%n 24 | -------------------------------------------------------------------------------- /src/main/resources/protobuf/MyPojo.proto: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. 
You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | syntax = "proto3"; 20 | package org.apache.flink.benchmark.protobuf; 21 | 22 | message MyPojo { 23 | int32 id = 1; 24 | string name = 2; 25 | repeated string operationName = 3; 26 | repeated MyOperation operations = 4; 27 | int32 otherId1 = 5; 28 | int32 otherId2 = 6; 29 | int32 otherId3 = 7; 30 | string someObject = 8; 31 | } 32 | 33 | message MyOperation { 34 | int32 id = 1; 35 | string name = 2; 36 | } 37 | -------------------------------------------------------------------------------- /src/main/resources/thrift/mypojo.thrift: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
17 | */ 18 | 19 | /* 20 | * If you want to create updated Java Thrift classes, you need to install the 21 | * thrift binary with a matching version to the dependency inside the pom.xml or 22 | * override it and generate classes via 23 | * 24 | * > mvn generate-sources -Pgenerate-thrift -Dthrift.version=0.13.0 25 | * 26 | * or more dynamically: 27 | * 28 | * > mvn generate-sources -Pgenerate-thrift -Dthrift.version=$(thrift --version | rev | cut -d' ' -f 1 | rev) 29 | * 30 | * Be sure to use the same thrift version when compiling and running 31 | * tests/benchmarks as well to avoid potential conflicts. 32 | */ 33 | 34 | namespace java org.apache.flink.benchmark.thrift 35 | 36 | typedef i32 int 37 | 38 | struct MyPojo { 39 | 1: int id; 40 | 2: string name; 41 | 3: list operationName; 42 | 4: list operations; 43 | 5: int otherId1; 44 | 6: int otherId2; 45 | 7: int otherId3; 46 | 8: optional string someObject; 47 | } 48 | 49 | struct MyOperation { 50 | 1: int id; 51 | 2: string name; 52 | } 53 | -------------------------------------------------------------------------------- /src/test/java/org/apache/flink/benchmark/DataSkewStreamNetworkThroughputBenchmarkExecutor.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. 
You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.benchmark;

import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.runtime.io.benchmark.DataSkewStreamNetworkThroughputBenchmark;

import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.OperationsPerInvocation;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.VerboseMode;

import static org.openjdk.jmh.annotations.Scope.Thread;

/**
 * JMH throughput benchmark runner for data skew scenario.
38 | */ 39 | @OperationsPerInvocation(value = DataSkewStreamNetworkThroughputBenchmarkExecutor.RECORDS_PER_INVOCATION) 40 | public class DataSkewStreamNetworkThroughputBenchmarkExecutor extends BenchmarkBase { 41 | 42 | static final int RECORDS_PER_INVOCATION = 5_000_000; 43 | 44 | public static void main(String[] args) 45 | throws RunnerException { 46 | Options options = new OptionsBuilder() 47 | .verbosity(VerboseMode.NORMAL) 48 | .include(".*" + DataSkewStreamNetworkThroughputBenchmarkExecutor.class.getCanonicalName() + ".*") 49 | .build(); 50 | 51 | new Runner(options).run(); 52 | } 53 | 54 | @Benchmark 55 | public void networkSkewedThroughput(MultiEnvironment context) throws Exception { 56 | context.executeBenchmark(RECORDS_PER_INVOCATION); 57 | } 58 | 59 | /** 60 | * Setup for the benchmark(s). 61 | */ 62 | @State(Thread) 63 | public static class MultiEnvironment extends DataSkewStreamNetworkThroughputBenchmark { 64 | // 1ms buffer timeout 65 | private final int flushTimeout = 1; 66 | 67 | // 1000 num of channels (subpartitions) 68 | private final int channels = 1000; 69 | 70 | // 10 writer threads, to increase the load on the machine 71 | private final int writers = 10; 72 | 73 | @Setup 74 | public void setUp() throws Exception { 75 | setUp(writers, channels, flushTimeout, false, false, -1, -1, new Configuration()); 76 | } 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /src/test/java/org/apache/flink/benchmark/StreamNetworkBroadcastThroughputBenchmarkExecutor.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. 
The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.flink.benchmark; 20 | 21 | import org.apache.flink.streaming.runtime.io.benchmark.StreamNetworkBroadcastThroughputBenchmark; 22 | 23 | import org.openjdk.jmh.annotations.Benchmark; 24 | import org.openjdk.jmh.annotations.OperationsPerInvocation; 25 | import org.openjdk.jmh.annotations.Setup; 26 | import org.openjdk.jmh.annotations.State; 27 | import org.openjdk.jmh.annotations.TearDown; 28 | import org.openjdk.jmh.runner.Runner; 29 | import org.openjdk.jmh.runner.RunnerException; 30 | import org.openjdk.jmh.runner.options.Options; 31 | import org.openjdk.jmh.runner.options.OptionsBuilder; 32 | import org.openjdk.jmh.runner.options.VerboseMode; 33 | 34 | import static org.openjdk.jmh.annotations.Scope.Thread; 35 | 36 | /** 37 | * JMH throughput benchmark runner. 
38 | */ 39 | @OperationsPerInvocation(value = StreamNetworkBroadcastThroughputBenchmarkExecutor.RECORDS_PER_INVOCATION) 40 | public class StreamNetworkBroadcastThroughputBenchmarkExecutor extends BenchmarkBase { 41 | 42 | static final int RECORDS_PER_INVOCATION = 500_000; 43 | 44 | public static void main(String[] args) 45 | throws RunnerException { 46 | Options options = new OptionsBuilder() 47 | .verbosity(VerboseMode.NORMAL) 48 | .include(".*" + StreamNetworkBroadcastThroughputBenchmarkExecutor.class.getCanonicalName() + ".*") 49 | .build(); 50 | 51 | new Runner(options).run(); 52 | } 53 | 54 | @Benchmark 55 | public void networkBroadcastThroughput(MultiEnvironment context) throws Exception { 56 | context.executeBenchmark(RECORDS_PER_INVOCATION); 57 | } 58 | 59 | /** 60 | * Setup for the benchmark(s). 61 | */ 62 | @State(Thread) 63 | public static class MultiEnvironment extends StreamNetworkBroadcastThroughputBenchmark { 64 | 65 | @Setup 66 | public void setUp() throws Exception { 67 | super.setUp(4, 100, 100); 68 | } 69 | 70 | @TearDown 71 | public void tearDown() throws Exception { 72 | super.tearDown(); 73 | } 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /src/test/java/org/apache/flink/benchmark/StreamNetworkLatencyBenchmarkExecutor.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. 
You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.flink.benchmark; 20 | 21 | import org.apache.flink.streaming.runtime.io.benchmark.StreamNetworkPointToPointBenchmark; 22 | 23 | import org.openjdk.jmh.annotations.Benchmark; 24 | import org.openjdk.jmh.annotations.BenchmarkMode; 25 | import org.openjdk.jmh.annotations.OutputTimeUnit; 26 | import org.openjdk.jmh.annotations.Setup; 27 | import org.openjdk.jmh.annotations.State; 28 | import org.openjdk.jmh.annotations.TearDown; 29 | import org.openjdk.jmh.runner.Runner; 30 | import org.openjdk.jmh.runner.RunnerException; 31 | import org.openjdk.jmh.runner.options.Options; 32 | import org.openjdk.jmh.runner.options.OptionsBuilder; 33 | import org.openjdk.jmh.runner.options.VerboseMode; 34 | 35 | import static java.util.concurrent.TimeUnit.MILLISECONDS; 36 | import static org.openjdk.jmh.annotations.Mode.AverageTime; 37 | import static org.openjdk.jmh.annotations.Scope.Thread; 38 | 39 | /** 40 | * JMH latency benchmark runner. 
41 | */ /* JMH executor measuring point-to-point network latency; AverageTime mode reports milliseconds per invocation. */ 42 | @OutputTimeUnit(MILLISECONDS) 43 | @BenchmarkMode(AverageTime) 44 | public class StreamNetworkLatencyBenchmarkExecutor extends BenchmarkBase { 45 | 46 | private static final int RECORDS_PER_INVOCATION = 100; /* small batch: this benchmark reports average time per invocation (latency), not throughput */ 47 | /* Standalone entry point: runs only this benchmark class through the JMH runner. */ 48 | public static void main(String[] args) 49 | throws RunnerException { 50 | Options options = new OptionsBuilder() 51 | .verbosity(VerboseMode.NORMAL) 52 | .include(".*" + StreamNetworkLatencyBenchmarkExecutor.class.getCanonicalName() + ".*") 53 | .build(); 54 | 55 | new Runner(options).run(); 56 | } 57 | 58 | @Benchmark 59 | public void networkLatency1to1(Environment context) throws Exception { 60 | context.executeBenchmark(RECORDS_PER_INVOCATION, false); /* NOTE(review): meaning of the boolean flag is not visible here — confirm against StreamNetworkPointToPointBenchmark#executeBenchmark */ 61 | } 62 | 63 | /** 64 | * Setup for the benchmark(s). 65 | */ 66 | @State(Thread) 67 | public static class Environment extends StreamNetworkPointToPointBenchmark { 68 | @Setup 69 | public void setUp() throws Exception { 70 | super.setUp(10); /* NOTE(review): presumably a flush timeout in ms — confirm against StreamNetworkPointToPointBenchmark#setUp */ 71 | } 72 | 73 | @TearDown 74 | public void tearDown() { 75 | super.tearDown(); 76 | } 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /src/test/java/org/apache/flink/benchmark/StreamNetworkThroughputBenchmarkExecutor.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. 
You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.flink.benchmark; 20 | 21 | import org.apache.flink.configuration.Configuration; 22 | import org.apache.flink.runtime.net.SSLUtilsTest; 23 | import org.apache.flink.streaming.runtime.io.benchmark.StreamNetworkThroughputBenchmark; 24 | 25 | import org.openjdk.jmh.annotations.Benchmark; 26 | import org.openjdk.jmh.annotations.OperationsPerInvocation; 27 | import org.openjdk.jmh.annotations.Param; 28 | import org.openjdk.jmh.annotations.Setup; 29 | import org.openjdk.jmh.annotations.State; 30 | import org.openjdk.jmh.annotations.TearDown; 31 | import org.openjdk.jmh.runner.Runner; 32 | import org.openjdk.jmh.runner.RunnerException; 33 | import org.openjdk.jmh.runner.options.Options; 34 | import org.openjdk.jmh.runner.options.OptionsBuilder; 35 | import org.openjdk.jmh.runner.options.VerboseMode; 36 | 37 | import java.util.Arrays; 38 | 39 | import static org.apache.flink.util.Preconditions.checkArgument; 40 | import static org.openjdk.jmh.annotations.Scope.Thread; 41 | 42 | /** 43 | * JMH throughput benchmark runner. 
44 | */ /* JMH executor for the network throughput benchmark; scores are normalized per record via @OperationsPerInvocation. */ 45 | @OperationsPerInvocation(value = StreamNetworkThroughputBenchmarkExecutor.RECORDS_PER_INVOCATION) 46 | public class StreamNetworkThroughputBenchmarkExecutor extends BenchmarkBase { 47 | 48 | static final int RECORDS_PER_INVOCATION = 5_000_000; /* records emitted per invocation; referenced by @OperationsPerInvocation above */ 49 | /* Standalone entry point: runs only this benchmark class through the JMH runner. */ 50 | public static void main(String[] args) 51 | throws RunnerException { 52 | Options options = new OptionsBuilder() 53 | .verbosity(VerboseMode.NORMAL) 54 | .include(".*" + StreamNetworkThroughputBenchmarkExecutor.class.getCanonicalName() + ".*") 55 | .build(); 56 | 57 | new Runner(options).run(); 58 | } 59 | 60 | @Benchmark 61 | public void networkThroughput(MultiEnvironment context) throws Exception { 62 | context.executeBenchmark(RECORDS_PER_INVOCATION); 63 | } 64 | 65 | /** 66 | * Setup for the benchmark(s). 67 | */ 68 | @State(Thread) 69 | public static class MultiEnvironment extends StreamNetworkThroughputBenchmark { 70 | /* Encodes "channels,flushTimeout[,SSL|OpenSSL]" — parsed by the helper methods below. */ 71 | @Param({"100,100ms", "100,100ms,SSL", "1000,1ms", "1000,100ms", "1000,100ms,SSL", "1000,100ms,OpenSSL"}) 72 | public String channelsFlushTimeout = "100,100ms"; 73 | 74 | //Do not spam continuous benchmarking with number of writers parameter. 75 | //@Param({"1", "4"}) 76 | public int writers = 4; 77 | 78 | @Setup 79 | public void setUp() throws Exception { 80 | int channels = parseChannels(channelsFlushTimeout); 81 | int flushTimeout = parseFlushTimeout(channelsFlushTimeout); 82 | String sslProvider = parseEnableSSL(channelsFlushTimeout); 83 | 84 | setUp( 85 | writers, 86 | channels, 87 | flushTimeout, 88 | false, 89 | false, 90 | -1, 91 | -1, /* NOTE(review): the two booleans and -1/-1 are passed straight to StreamNetworkThroughputBenchmark.setUp; their semantics are not visible here — confirm */ 92 | sslProvider != null ? 
SSLUtilsTest.createInternalSslConfigWithKeyAndTrustStores( 93 | sslProvider) : new Configuration() 94 | ); 95 | } 96 | /* Maps an "SSL"/"OpenSSL" token in the param string to an SSL provider name; null means SSL disabled. */ 97 | private static String parseEnableSSL(String channelsFlushTimeout) { 98 | String[] parameters = channelsFlushTimeout.split(","); 99 | if (Arrays.asList(parameters).contains("SSL")) { 100 | return "JDK"; 101 | } else if (Arrays.asList(parameters).contains("OpenSSL")) { 102 | return "OPENSSL"; 103 | } else { 104 | return null; 105 | } 106 | } 107 | /* Extracts the flush timeout from the second comma-separated token, which must end in "ms" (e.g. "100ms" -> 100). */ 108 | private static int parseFlushTimeout(String channelsFlushTimeout) { 109 | String[] parameters = channelsFlushTimeout.split(","); 110 | checkArgument(parameters.length >= 2); 111 | String flushTimeout = parameters[1]; 112 | 113 | checkArgument(flushTimeout.endsWith("ms")); 114 | return Integer.parseInt(flushTimeout.substring(0, flushTimeout.length() - 2)); 115 | } 116 | /* Channel count is the first comma-separated token of the param string. */ 117 | private static int parseChannels(String channelsFlushTimeout) { 118 | String[] parameters = channelsFlushTimeout.split(","); 119 | checkArgument(parameters.length >= 1); 120 | return Integer.parseInt(parameters[0]); 121 | } 122 | 123 | @TearDown 124 | public void tearDown() throws Exception { 125 | super.tearDown(); 126 | } 127 | } 128 | } 129 | -------------------------------------------------------------------------------- /src/test/log4j.properties: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Licensed to the Apache Software Foundation (ASF) under one 3 | # or more contributor license agreements. See the NOTICE file 4 | # distributed with this work for additional information 5 | # regarding copyright ownership. The ASF licenses this file 6 | # to you under the Apache License, Version 2.0 (the 7 | # "License"); you may not use this file except in compliance 8 | # with the License. 
You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | ################################################################################ 18 | 19 | log4j.rootLogger=WARN, console 20 | 21 | log4j.appender.console=org.apache.log4j.ConsoleAppender 22 | log4j.appender.console.layout=org.apache.log4j.PatternLayout 23 | log4j.appender.console.layout.ConversionPattern=%d{HH:mm:ss,SSS} %-5p %-60c %x - %m%n 24 | -------------------------------------------------------------------------------- /src/test/resources/local127.keystore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dataArtisans/flink-benchmarks/bb09363da6eb1decc6abd27ccba29f9999a14669/src/test/resources/local127.keystore -------------------------------------------------------------------------------- /src/test/resources/local127.truststore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dataArtisans/flink-benchmarks/bb09363da6eb1decc6abd27ccba29f9999a14669/src/test/resources/local127.truststore --------------------------------------------------------------------------------