├── .git-blame-ignore-revs
├── .github
│   └── workflows
│       ├── ci.yml
│       └── release.yml
├── .gitignore
├── .scalafmt.conf
├── LICENSE
├── README.md
├── build.sbt
├── project
│   ├── build.properties
│   └── plugins.sbt
└── src
    ├── main
    │   └── scala
    │       └── io
    │           └── github
    │               └── jchapuis
    │                   └── fs2
    │                       └── kafka
    │                           └── mock
    │                               ├── MockKafkaConsumer.scala
    │                               ├── MockKafkaProducer.scala
    │                               └── impl
    │                                   ├── NativeMockKafkaConsumer.scala
    │                                   └── NativeMockKafkaProducer.scala
    └── test
        └── scala
            └── io
                └── github
                    └── jchapuis
                        └── fs2
                            └── kafka
                                └── mock
                                    ├── MockKafkaConsumerSuite.scala
                                    └── MockKafkaProducerSuite.scala
/.git-blame-ignore-revs: -------------------------------------------------------------------------------- 1 | # Scala Steward: Reformat with scalafmt 3.8.3 2 | f43917a1e5546bd9d8fd63aa96e05cdab9b515ea 3 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | on: 3 | pull_request: 4 | merge_group: 5 | jobs: 6 | build: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - uses: actions/checkout@v3 10 | with: 11 | fetch-depth: 0 12 | - uses: coursier/setup-action@v1.3.3 13 | with: 14 | jvm: temurin:17 15 | apps: sbt 16 | - run: sbt + compile coverage test coverageReport coverageAggregate versionPolicyCheck 17 | - uses: codecov/codecov-action@v3 18 | 19 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | on: 3 | push: 4 | tags: 5 | - 'v*' 6 | workflow_dispatch: 7 | 8 | jobs: 9 | coverage: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v3 13 | - uses: coursier/setup-action@v1.3.3 14 | with: 15 | jvm: temurin:17 16 | apps: sbt 17 | - name: Test and compute coverage 18 | run: sbt + coverage test coverageReport coverageAggregate 19 | - name: Codecov 20 | env: 21 | CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} 22 | uses: codecov/codecov-action@v3 23 | 24 | publish: 25 | runs-on: ubuntu-latest 26 | steps: 27 | - uses: actions/checkout@v3 28 | with: 29 | fetch-depth: 0 30 | - uses: coursier/setup-action@v1.3.3 31 | with: 32 | jvm: temurin:17 33 | apps: sbt 34 | - run: sbt + versionCheck ci-release 35 | env: 36 | PGP_PASSPHRASE: ${{ secrets.PGP_PASSPHRASE }} 37 | PGP_SECRET: ${{ secrets.PGP_SECRET }} 38 | SONATYPE_PASSWORD: ${{ secrets.SONATYPE_PASSWORD }} 39 | SONATYPE_USERNAME: ${{ secrets.SONATYPE_USERNAME }} 40 | - uses: codecov/codecov-action@v3 -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.class 2 | *.log 3 | 4 | # sbt specific 5 | .DS_Store/ 6 | .cache/ 7 | .history/ 8 | .metals/ 9 | .lib/ 10 | .bloop/ 11 | .vscode/ 12 | dist/* 13 | target/ 14 | lib_managed/ 15 | src_managed/ 16 | project/boot/ 17 | project/plugins/project/ 18 | .bsp/ 19 | .DS_Store 20 | 21 | # Scala-IDE specific 22 | .scala_dependencies 23 | .worksheet 24 | .idea 25 | -------------------------------------------------------------------------------- /.scalafmt.conf: -------------------------------------------------------------------------------- 1 | version = 3.8.3 2 | runner.dialect = scala213source3 3 | maxColumn = 120 -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 
Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 
179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # fs2-kafka-mock 2 | [![Release](https://github.com/jchapuis/fs2-kafka-mock/actions/workflows/release.yml/badge.svg)](https://github.com/jchapuis/fs2-kafka-mock/actions/workflows/release.yml) 3 | [![Maven Central](https://maven-badges.herokuapp.com/maven-central/io.github.jchapuis/fs2-kafka-mock_2.13/badge.svg)](https://maven-badges.herokuapp.com/maven-central/io.github.jchapuis/fs2-kafka-mock_2.13) 4 | [![codecov](https://codecov.io/gh/jchapuis/fs2-kafka-mock/branch/master/graph/badge.svg?token=BOAOIFC7BF)](https://codecov.io/gh/jchapuis/fs2-kafka-mock) 5 | Cats friendly 6 | 7 | Mocks for fs2-kafka consumers and producers wrapping the native mocks built into the Apache Kafka clients library. This allows for testing applications without the need for a real Kafka broker, such as one provided by Testcontainers or embedded Kafka. 8 | 9 | ## Usage 10 | 11 | Add the following dependency to your `build.sbt`: 12 | 13 | ```scala 14 | libraryDependencies += "io.github.jchapuis" %% "fs2-kafka-mock" % "{latest version}" 15 | ``` 16 | 17 | ## Mock consumer 18 | The [mock consumer](src/main/scala/io/github/jchapuis/fs2/kafka/mock/MockKafkaConsumer.scala) allows testing code that consumes with fs2-kafka's `KafkaConsumer`. Injection of the mock is done via the implicit `MkConsumer` parameter (which fs2-kafka had the foresight to allow). Methods on the mock allow for publishing and redacting messages, as if they were being published to a real Kafka broker. 19 | 20 | Internally, the mock consumer tracks published records in an array. Note, however, that there are some limitations imposed by the native Kafka mock: 21 | - single partition support 22 | - consumers must subscribe for publication to succeed 23 | 24 | ### Example 25 | Here's a short example taken from the project's munit test suite: we mock publication of a message, consume it using an fs2-kafka consumer configured with the mock, and verify that the consumer was able to read the record. 
26 | 27 | ```scala 28 | test("mock kafka consumer can read a published message") { 29 | MockKafkaConsumer("topic").use { mockConsumer => 30 | for { 31 | _ <- mockConsumer 32 | .publish("topic", "key", "value") 33 | .start // this call semantically blocks until we can publish to the consumer 34 | // hence the need to run it in a separate fiber 35 | record <- { 36 | implicit val mkConsumer: MkConsumer[IO] = mockConsumer.mkConsumer 37 | KafkaConsumer 38 | .stream( 39 | ConsumerSettings[IO, String, String] 40 | .withGroupId("test") 41 | ) 42 | .subscribeTo("topic") 43 | .records 44 | .map(_.record) 45 | .map(record => (record.topic, record.key, record.value)) 46 | .take(1) 47 | .compile 48 | .toList 49 | .map(_.head) 50 | } 51 | } yield assertEquals(record, ("topic", "key", "value")) 52 | } 53 | } 54 | ``` 55 | 56 | ## Mock producer 57 | The [mock producer](src/main/scala/io/github/jchapuis/fs2/kafka/mock/MockKafkaProducer.scala) allows testing code that produces with fs2-kafka's `KafkaProducer`. Injection of the mock is done via the implicit `MkProducer` parameter, similarly to the consumer. Accessor methods on the mock allow for retrieving published records and iteratively checking for newer messages. 58 | 59 | ```scala 60 | test("mock kafka producer returns next message and allows for checking full history") { 61 | MockKafkaProducer() 62 | .flatMap { mock => 63 | implicit val mkProducer: MkProducer[IO] = mock.mkProducer 64 | val producer = KafkaProducer.resource(ProducerSettings[IO, String, String]) 65 | producer.map((mock, _)) 66 | } 67 | .use { case (mock, producer) => 68 | for { 69 | _ <- producer.produce(ProducerRecords.one(ProducerRecord[String, String]("topic", "key", "value"))).flatten 70 | _ <- mock 71 | .nextMessageFor[String, String]("topic") 72 | .map(maybeKeyValue => assertEquals(maybeKeyValue, Some(("key", "value")))) 73 | _ <- mock 74 | .historyFor[String, String]("topic") 75 | .map(history => assertEquals(history, List(("key", "value")))) 76 | } yield () 77 | } 78 | } 79 | ``` 80 | 81 | -------------------------------------------------------------------------------- /build.sbt: -------------------------------------------------------------------------------- 1 | import sbtversionpolicy.Compatibility.BinaryAndSourceCompatible 2 | 3 | val scala213 = "2.13.12" 4 | 5 | val scala3 = "3.5.1" 6 | 7 | scalaVersion := scala213 8 | 9 | crossScalaVersions := Seq(scala213, scala3) 10 | 11 | Compile / scalacOptions ++= { 12 | CrossVersion.partialVersion(scalaVersion.value) match { 13 | case Some((2, _)) => Seq("-Xsource:3") 14 | case _ => Nil 15 | } 16 | } 17 | 18 | organization := "io.github.jchapuis" 19 | 20 | name := "fs2-kafka-mock" 21 | 22 | licenses := List("Apache License, Version 2.0" -> url("https://opensource.org/license/apache-2-0/")) 23 | 24 | developers := List( 25 | Developer( 26 | "jchapuis", 27 | "Jonas Chapuis", 28 | "me@jonaschapuis.com", 29 | url("https://jonaschapuis.com") 30 | ) 31 | ) 32 | 33 | sonatypeCredentialHost := "s01.oss.sonatype.org" 34 | 35 | sonatypeProjectHosting := Some(xerial.sbt.Sonatype.GitHubHosting("jchapuis", "fs2-kafka-mock", "me@jonaschapuis.com")) 36 | 37 | Global / onChangedBuildSource := ReloadOnSourceChanges 38 | 39 | versionPolicyIntention := Compatibility.None // TODO back to Compatibility.BinaryCompatible 40 | 41 | versionScheme := Some("early-semver") 42 | 43 | versionPolicyIgnoredInternalDependencyVersions := Some( 44 | "^\\d+\\.\\d+\\.\\d+\\+\\d+".r 45 | ) // Support for versions generated by sbt-dynver 46 | 47 | 
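/* fs2-kafka is the only main-scope dependency; the munit and cats-effect entries below are test-scoped and only used by the library's own test suite */ 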
libraryDependencies ++= Seq( 48 | "com.github.fd4s" %% "fs2-kafka" % "3.5.1", 49 | "org.scalameta" %% "munit" % "1.0.2" % Test, 50 | "org.typelevel" %% "munit-cats-effect" % "2.0.0" % Test, 51 | "org.typelevel" %% "cats-effect-testkit" % "3.5.4" % Test 52 | ) 53 | -------------------------------------------------------------------------------- /project/build.properties: -------------------------------------------------------------------------------- 1 | sbt.version=1.9.9 2 | -------------------------------------------------------------------------------- /project/plugins.sbt: -------------------------------------------------------------------------------- 1 | addSbtPlugin("org.wartremover" % "sbt-wartremover" % "3.2.2") 2 | 3 | addSbtPlugin("org.scoverage" % "sbt-scoverage" % "2.1.1") 4 | 5 | addSbtPlugin("ch.epfl.scala" % "sbt-version-policy" % "3.2.1") 6 | 7 | addSbtPlugin("com.github.sbt" % "sbt-ci-release" % "1.7.0") 8 | 9 | addSbtPlugin("org.scalameta" % "sbt-scalafmt" % "2.5.2") 10 | -------------------------------------------------------------------------------- /src/main/scala/io/github/jchapuis/fs2/kafka/mock/MockKafkaConsumer.scala: -------------------------------------------------------------------------------- 1 | package io.github.jchapuis.fs2.kafka.mock 2 | 3 | import cats.effect.kernel.Ref 4 | import cats.effect.std.Mutex 5 | import cats.effect.unsafe.IORuntime 6 | import cats.effect.{IO, Resource} 7 | import fs2.kafka.* 8 | import fs2.kafka.consumer.MkConsumer 9 | import io.github.jchapuis.fs2.kafka.mock.impl.NativeMockKafkaConsumer 10 | import org.apache.kafka.clients.consumer.* 11 | import org.apache.kafka.common.TopicPartition 12 | 13 | import java.time.Instant 14 | import scala.jdk.CollectionConverters.* 15 | 16 | /** Defines methods for a mock kafka consumer: allows for publishing test messages and provides a MkConsumer instance 17 | */ 18 | trait MockKafkaConsumer { 19 | 20 | /** Publish a message to the topic. 21 | * 22 | * Semantically blocks until a consumer subscribes to the topic: internally, it polls for assignments, as the 23 | * native kafka mock doesn't support upfront publication but requires an assignment to be made before publishing. 24 | * @param topic 25 | * the topic to publish to 26 | * @param key 27 | * the key to publish 28 | * @param value 29 | * the value to publish 30 | * @param timestamp 31 | * optional timestamp of the message 32 | * @param keySerializer 33 | * the key serializer 34 | * @param valueSerializer 35 | * the value serializer 36 | * @tparam K 37 | * the key type 38 | * @tparam V 39 | * the value type 40 | * @return 41 | * once published, a unit 42 | */ 43 | def publish[K, V](topic: String, key: K, value: V, timestamp: Option[Instant] = None)(implicit 44 | keySerializer: KeySerializer[IO, K], 45 | valueSerializer: ValueSerializer[IO, V] 46 | ): IO[Unit] 47 | 48 | /** Redact a message from the topic, i.e. publish a tombstone. 49 | * 50 | * @param topic 51 | * the topic to redact from 52 | * @param key 53 | * the key to redact 54 | * @param keySerializer 55 | * the key serializer 56 | * @tparam K 57 | * the key type 58 | * @return 59 | * once redacted, a unit 60 | */ 61 | def redact[K](topic: String, key: K)(implicit keySerializer: KeySerializer[IO, K]): IO[Unit] 62 | 63 | /** MkConsumer instance providing the mock consumer. Including this instance in implicit scope where the kafka 64 | * consumer resource is created will feed it with the mock consumer instance instead of the default, real one. 
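A minimal usage sketch (assuming a `mockConsumer: MockKafkaConsumer` in scope, as in the test suite): {{{ implicit val mkConsumer: MkConsumer[IO] = mockConsumer.mkConsumer; KafkaConsumer.stream(ConsumerSettings[IO, String, String].withGroupId("test")).subscribeTo("topic").records }}} 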
65 | */ 66 | implicit def mkConsumer: MkConsumer[IO] 67 | } 68 | 69 | object MockKafkaConsumer { 70 | 71 | /** Creates a mock kafka consumer for the given topics. Backed by the mock consumer built into the kafka client 72 | * library. 73 | * 74 | * @param topics 75 | * the topics to create the consumer for 76 | * @param IORuntime 77 | * the implicit IORuntime 78 | * @return 79 | * a resource containing the mock kafka consumer 80 | */ 81 | def apply(topics: String*)(implicit IORuntime: IORuntime): Resource[IO, MockKafkaConsumer] = 82 | Resource.eval(Ref.of[IO, Map[String, Long]](topics.map(_ -> 0L).toMap)).flatMap { currentOffsets => 83 | val mockConsumer = new MockConsumer[Array[Byte], Array[Byte]](OffsetResetStrategy.EARLIEST) 84 | Resource.make(Mutex[IO].map { mutex => 85 | val partitions = topics.map(topic => new TopicPartition(topic, 0)) 86 | val beginningOffsets = partitions.map(_ -> (0L: java.lang.Long)).toMap 87 | mockConsumer.updateBeginningOffsets(beginningOffsets.asJava) 88 | new NativeMockKafkaConsumer(mockConsumer, currentOffsets, mutex) 89 | })(_ => IO(mockConsumer.close())) 90 | } 91 | 92 | } 93 | -------------------------------------------------------------------------------- /src/main/scala/io/github/jchapuis/fs2/kafka/mock/MockKafkaProducer.scala: -------------------------------------------------------------------------------- 1 | package io.github.jchapuis.fs2.kafka.mock 2 | 3 | import cats.Eq 4 | import cats.effect.IO 5 | import cats.effect.kernel.{Ref, Resource} 6 | import cats.effect.std.Mutex 7 | import cats.syntax.eq.* 8 | import cats.syntax.traverse.* 9 | import MockKafkaProducer.Patience 10 | import fs2.kafka.producer.MkProducer 11 | import fs2.kafka.* 12 | import io.github.jchapuis.fs2.kafka.mock.impl.NativeMockKafkaProducer 13 | import org.apache.kafka.clients.producer.{MockProducer, ProducerRecord} 14 | import org.apache.kafka.common.serialization.ByteArraySerializer 15 | 16 | import scala.concurrent.duration.* 17 | import scala.jdk.CollectionConverters.* 18 | 19 | /** Defines methods for a mock kafka producer: allows for checking published test messages and provides a MkProducer instance. 20 | * Keeps an internal offset for each topic so that messages can be consumed one by one with convenience methods. 21 | */ 22 | trait MockKafkaProducer { 23 | 24 | /** Returns the list of published messages for the given topic. 25 | * @param topic 26 | * the topic to get the history for 27 | * @param keyDeserializer 28 | * the key deserializer 29 | * @param valueDeserializer 30 | * the value deserializer 31 | * @tparam K 32 | * the key type 33 | * @tparam V 34 | * the value type 35 | * @return 36 | * the list of published messages thus far 37 | */ 38 | def historyFor[K, V](topic: String)(implicit 39 | keyDeserializer: KeyDeserializer[IO, K], 40 | valueDeserializer: ValueDeserializer[IO, V] 41 | ): IO[List[(K, V)]] 42 | 43 | /** Returns the list of published messages for the given topic and key. 
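Redacted entries (tombstones, i.e. records with a null value) are excluded from the returned values. 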
44 | * @param topic 45 | * the topic to get the history for 46 | * @param key 47 | * the key to get the history for 48 | * @param keyDeserializer 49 | * the key deserializer 50 | * @param valueDeserializer 51 | * the value deserializer 52 | * @tparam K 53 | * the key type 54 | * @tparam V 55 | * the value type 56 | * @return 57 | * the list of published messages thus far 58 | */ 59 | def historyFor[K: Eq, V](topic: String, key: K)(implicit 60 | keyDeserializer: KeyDeserializer[IO, K], 61 | valueDeserializer: ValueDeserializer[IO, V] 62 | ): IO[List[V]] 63 | 64 | /** Returns the next message for the given topic, if any. Increments the internal offset for the topic. 65 | * @param topic 66 | * the topic to get the next message for 67 | * @param keyDeserializer 68 | * the key deserializer 69 | * @param valueDeserializer 70 | * the value deserializer 71 | * @tparam K 72 | * the key type 73 | * @tparam V 74 | * the value type 75 | * @return 76 | * the next message for the given topic, if any 77 | */ 78 | def nextMessageFor[K, V](topic: String)(implicit 79 | keyDeserializer: KeyDeserializer[IO, K], 80 | valueDeserializer: ValueDeserializer[IO, V] 81 | ): IO[Option[(K, V)]] 82 | 83 | /** Returns the next message for the given topic. Semantically blocks, with polling intervals and timeout specified 84 | * with the patience implicit parameter. 85 | * @param topic 86 | * the topic to get the next message for 87 | * @param patience 88 | * the patience to use for polling for the next message 89 | * @param keyDeserializer 90 | * the key deserializer 91 | * @param valueDeserializer 92 | * the value deserializer 93 | * @throws `NoSuchElementException` 94 | * if no message is available before the timeout 95 | */ 96 | def nextEventualMessageFor[K, V](topic: String)(implicit 97 | patience: Patience, 98 | keyDeserializer: KeyDeserializer[IO, K], 99 | valueDeserializer: ValueDeserializer[IO, V] 100 | ): IO[(K, V)] 101 | 102 | /** Returns the next value for the given topic and key, if any. Increments the internal offset for the topic. 103 | * @param topic 104 | * the topic to get the next message for 105 | * @param key 106 | * the key to get the next message for 107 | * @param keyDeserializer 108 | * the key deserializer 109 | * @param valueDeserializer 110 | * the value deserializer 111 | * @tparam K 112 | * the key type 113 | * @tparam V 114 | * the value type 115 | * @return 116 | * the next value for the given topic and key, if any 117 | */ 118 | def nextValueFor[K: Eq, V](topic: String, key: K)(implicit 119 | keyDeserializer: KeyDeserializer[IO, K], 120 | valueDeserializer: ValueDeserializer[IO, V] 121 | ): IO[Option[V]] 122 | 123 | /** Returns the next value for the given topic and key. 
Semantically blocks, with polling intervals and timeout 124 | * specified with the patience implicit parameter. 125 | * @param topic 126 | * the topic to get the next message for 127 | * @param key 128 | * the key to get the next message for 129 | * @param patience 130 | * the patience to use for polling for the next message 131 | * @param keyDeserializer 132 | * the key deserializer 133 | * @param valueDeserializer 134 | * the value deserializer 135 | * @throws `NoSuchElementException` 136 | * if no message is available before the timeout 137 | */ 138 | def nextEventualValueFor[K: Eq, V](topic: String, key: K)(implicit 139 | patience: Patience, 140 | keyDeserializer: KeyDeserializer[IO, K], 141 | valueDeserializer: ValueDeserializer[IO, V] 142 | ): IO[V] 143 | 144 | /** Returns the next value for the given topic and key wrapped in Some, or None if a redaction was received. 145 | * Semantically blocks, with polling intervals and timeout specified with the patience implicit parameter. 146 | * @param topic 147 | * the topic to get the next message for 148 | * @param key 149 | * the key to get the next message for 150 | * @param patience 151 | * the patience to use for polling for the next message 152 | * @param keyDeserializer 153 | * the key deserializer 154 | * @param valueDeserializer 155 | * the value deserializer 156 | * @throws `NoSuchElementException` 157 | * if no message is available before the timeout 158 | */ 159 | def nextEventualValueOrRedactionFor[K: Eq, V](topic: String, key: K)(implicit 160 | patience: Patience, 161 | keyDeserializer: KeyDeserializer[IO, K], 162 | valueDeserializer: ValueDeserializer[IO, V] 163 | ): IO[Option[V]] 164 | 165 | /** MkProducer instance providing the mock producer. Including this instance in implicit scope where the kafka 166 | * producer is created will feed it with the mock producer instance instead of the real one. 167 | */ 168 | implicit def mkProducer: MkProducer[IO] 169 | } 170 | 171 | object MockKafkaProducer { 172 | final case class Patience(timeout: FiniteDuration, interval: FiniteDuration) 173 | object Patience { 174 | implicit val default: Patience = Patience(150.millis, 15.millis) 175 | } 176 | 177 | /** Creates a mock kafka producer, backed by the mock producer built into the kafka client library. 
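A minimal usage sketch (the topic/key/value literals are illustrative): {{{ MockKafkaProducer().use { mock => implicit val mkProducer: MkProducer[IO] = mock.mkProducer; KafkaProducer.resource(ProducerSettings[IO, String, String]).use { producer => producer.produce(ProducerRecords.one(ProducerRecord("topic", "key", "value"))).flatten >> mock.historyFor[String, String]("topic") } } }}} 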
178 | * @return 179 | * a resource containing the mock kafka producer 180 | */ 181 | def apply(): Resource[IO, MockKafkaProducer] = 182 | Resource 183 | .eval(Ref.of[IO, Map[String, Int]](Map.empty)) 184 | .flatMap(currentOffsets => 185 | Resource 186 | .eval(Mutex[IO]) 187 | .map(mutex => 188 | new NativeMockKafkaProducer( 189 | new MockProducer[Array[Byte], Array[Byte]](true, new ByteArraySerializer, new ByteArraySerializer), 190 | currentOffsets, 191 | mutex 192 | ) 193 | ) 194 | ) 195 | } 196 | -------------------------------------------------------------------------------- /src/main/scala/io/github/jchapuis/fs2/kafka/mock/impl/NativeMockKafkaConsumer.scala: -------------------------------------------------------------------------------- 1 | package io.github.jchapuis.fs2.kafka.mock.impl 2 | 3 | import cats.effect.IO 4 | import cats.effect.kernel.Ref 5 | import cats.effect.std.Mutex 6 | import cats.effect.unsafe.IORuntime 7 | import fs2.kafka.consumer.MkConsumer 8 | import fs2.kafka.* 9 | import io.github.jchapuis.fs2.kafka.mock.MockKafkaConsumer 10 | import org.apache.kafka.clients.consumer.* 11 | import org.apache.kafka.common.header.internals.RecordHeaders 12 | import org.apache.kafka.common.record.TimestampType 13 | import org.apache.kafka.common.{Metric, MetricName, Node, PartitionInfo, TopicPartition, Uuid} 14 | 15 | import java.time.{Duration, Instant} 16 | import java.util.{Optional, OptionalLong} 17 | import java.util.regex.Pattern 18 | import java.{lang, util} 19 | import scala.annotation.nowarn 20 | import scala.concurrent.duration.DurationInt 21 | import scala.jdk.CollectionConverters.* 22 | 23 | private[mock] class NativeMockKafkaConsumer( 24 | val mockConsumer: MockConsumer[Array[Byte], Array[Byte]], 25 | currentOffsets: Ref[IO, Map[String, Long]], 26 | mutex: Mutex[IO] 27 | )(implicit IORuntime: IORuntime) 28 | extends MockKafkaConsumer { 29 | private val singlePartition = 0 30 | 31 | private def incrementOffset(topic: String): IO[Long] = 32 | for { 33 | offsets <- currentOffsets.get 34 | updatedOffsets <- currentOffsets.updateAndGet(_.updated(topic, offsets(topic) + 1)) 35 | } yield updatedOffsets(topic) 36 | 37 | def publish[K, V](topic: String, key: K, value: V, timestamp: Option[Instant])(implicit 38 | keySerializer: KeySerializer[IO, K], 39 | valueSerializer: ValueSerializer[IO, V] 40 | ): IO[Unit] = for { 41 | key <- keySerializer.serialize(topic, Headers.empty, key) 42 | value <- valueSerializer.serialize(topic, Headers.empty, value) 43 | _ <- addRecord(topic, key, Option(value), timestamp) 44 | } yield () 45 | 46 | private def waitForConsumerToBeAssignedTo(topic: String): IO[Unit] = 47 | IO(mockConsumer.assignment().asScala.map(_.topic()).toSet).flatMap { assignedTopics => 48 | if (assignedTopics.contains(topic)) IO.unit 49 | else IO.sleep(100.millis) >> waitForConsumerToBeAssignedTo(topic) 50 | } 51 | 52 | private def addRecord( 53 | topic: String, 54 | key: Array[Byte], 55 | value: Option[Array[Byte]], 56 | maybeTimestamp: Option[Instant] 57 | ) = 58 | waitForConsumerToBeAssignedTo(topic) >> 59 | mutex.lock.surround { 60 | IO.uncancelable(_ => 61 | for { 62 | offset <- incrementOffset(topic) 63 | timestamp <- maybeTimestamp.map(IO.pure).getOrElse(IO.realTimeInstant).map(_.toEpochMilli) 64 | record = new org.apache.kafka.clients.consumer.ConsumerRecord[Array[Byte], Array[Byte]]( 65 | topic, 66 | singlePartition, 67 | offset, 68 | timestamp, 69 | maybeTimestamp.map(_ => TimestampType.CREATE_TIME).getOrElse(TimestampType.LOG_APPEND_TIME), 70 | key.length, 71 | 
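/* serialized value size: redacted records (tombstones) carry a null value, so 0 is reported */ 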
value.map(_.length).getOrElse(0), 72 | key, 73 | value.orNull, 74 | new RecordHeaders, 75 | Optional.empty[Integer] 76 | ) 77 | _ <- IO(mockConsumer.addRecord(record)) 78 | } yield () 79 | ) 80 | } 81 | 82 | def redact[K](topic: String, key: K)(implicit keySerializer: KeySerializer[IO, K]): IO[Unit] = 83 | for { 84 | key <- keySerializer.serialize(topic, Headers.empty, key) 85 | _ <- addRecord(topic, key, None, None) 86 | } yield () 87 | 88 | private def withMutex[T](f: => T): T = mutex.lock.surround(IO(f)).unsafeRunSync() 89 | 90 | @nowarn("cat=deprecation") 91 | implicit lazy val mkConsumer: MkConsumer[IO] = new MkConsumer[IO] { 92 | private val mockFacadeWithPresetSubscriptions = new KafkaByteConsumer { 93 | def assignment(): util.Set[TopicPartition] = withMutex(mockConsumer.assignment()) 94 | 95 | def subscription(): util.Set[String] = withMutex(mockConsumer.subscription()) 96 | 97 | def subscribe(topics: util.Collection[String]): Unit = withMutex { 98 | mockConsumer.subscribe(topics) 99 | ensureConsumerAssignedTo(topics.asScala.toList) 100 | } 101 | 102 | def subscribe(topics: util.Collection[String], callback: ConsumerRebalanceListener): Unit = withMutex { 103 | mockConsumer.subscribe(topics, callback) 104 | ensureConsumerAssignedTo(topics.asScala.toList) 105 | } 106 | 107 | def assign(partitions: util.Collection[TopicPartition]): Unit = withMutex(mockConsumer.assign(partitions)) 108 | 109 | def subscribe(pattern: Pattern, callback: ConsumerRebalanceListener): Unit = 110 | withMutex(mockConsumer.subscribe(pattern, callback)) 111 | 112 | def subscribe(pattern: Pattern): Unit = withMutex(mockConsumer.subscribe(pattern)) 113 | 114 | def unsubscribe(): Unit = withMutex(mockConsumer.unsubscribe()) 115 | 116 | def poll(timeout: Long): ConsumerRecords[Array[Byte], Array[Byte]] = withMutex(mockConsumer.poll(timeout)) 117 | 118 | def poll(timeout: Duration): ConsumerRecords[Array[Byte], Array[Byte]] = withMutex( 119 | mockConsumer.poll(timeout) 120 | ) 121 | 122 | def commitSync(): Unit = withMutex(mockConsumer.commitSync()) 123 | 124 | def commitSync(timeout: Duration): Unit = withMutex(mockConsumer.commitSync(timeout)) 125 | 126 | def commitSync(offsets: util.Map[TopicPartition, OffsetAndMetadata]): Unit = withMutex( 127 | mockConsumer.commitSync(offsets) 128 | ) 129 | 130 | def commitSync(offsets: util.Map[TopicPartition, OffsetAndMetadata], timeout: Duration): Unit = 131 | withMutex(mockConsumer.commitSync(offsets, timeout)) 132 | 133 | def commitAsync(): Unit = withMutex(mockConsumer.commitAsync()) 134 | 135 | def commitAsync(callback: OffsetCommitCallback): Unit = withMutex(mockConsumer.commitAsync(callback)) 136 | 137 | def commitAsync(offsets: util.Map[TopicPartition, OffsetAndMetadata], callback: OffsetCommitCallback): Unit = 138 | withMutex(mockConsumer.commitAsync(offsets, callback)) 139 | 140 | def seek(partition: TopicPartition, offset: Long): Unit = withMutex(mockConsumer.seek(partition, offset)) 141 | 142 | def seek(partition: TopicPartition, offsetAndMetadata: OffsetAndMetadata): Unit = 143 | withMutex(mockConsumer.seek(partition, offsetAndMetadata)) 144 | 145 | def seekToBeginning(partitions: util.Collection[TopicPartition]): Unit = 146 | withMutex(mockConsumer.seekToBeginning(partitions)) 147 | 148 | def seekToEnd(partitions: util.Collection[TopicPartition]): Unit = withMutex( 149 | mockConsumer.seekToEnd(partitions) 150 | ) 151 | 152 | def position(partition: TopicPartition): Long = withMutex(mockConsumer.position(partition)) 153 | 154 | def position(partition: 
TopicPartition, timeout: Duration): Long = withMutex( 155 | mockConsumer.position(partition, timeout) 156 | ) 157 | 158 | def committed(partition: TopicPartition): OffsetAndMetadata = withMutex(mockConsumer.committed(partition)) 159 | 160 | def committed(partition: TopicPartition, timeout: Duration): OffsetAndMetadata = 161 | withMutex(mockConsumer.committed(partition, timeout)) 162 | 163 | def committed(partitions: util.Set[TopicPartition]): util.Map[TopicPartition, OffsetAndMetadata] = 164 | withMutex(mockConsumer.committed(partitions)) 165 | 166 | def committed( 167 | partitions: util.Set[TopicPartition], 168 | timeout: Duration 169 | ): util.Map[TopicPartition, OffsetAndMetadata] = withMutex(mockConsumer.committed(partitions, timeout)) 170 | 171 | def metrics(): util.Map[MetricName, _ <: Metric] = withMutex(mockConsumer.metrics()) 172 | 173 | def partitionsFor(topic: String): util.List[PartitionInfo] = 174 | withMutex { 175 | if (mockConsumer.assignment().isEmpty) { 176 | util.List.of(new PartitionInfo(topic, singlePartition, null, null, null)) 177 | } else { 178 | mockConsumer.partitionsFor(topic) 179 | } 180 | } 181 | 182 | def partitionsFor(topic: String, timeout: Duration): util.List[PartitionInfo] = partitionsFor(topic) 183 | 184 | def listTopics(): util.Map[String, util.List[PartitionInfo]] = withMutex(mockConsumer.listTopics()) 185 | 186 | def listTopics(timeout: Duration): util.Map[String, util.List[PartitionInfo]] = withMutex( 187 | mockConsumer.listTopics(timeout) 188 | ) 189 | 190 | def paused(): util.Set[TopicPartition] = withMutex(mockConsumer.paused()) 191 | 192 | def pause(partitions: util.Collection[TopicPartition]): Unit = withMutex(mockConsumer.pause(partitions)) 193 | 194 | def resume(partitions: util.Collection[TopicPartition]): Unit = withMutex(mockConsumer.resume(partitions)) 195 | 196 | def offsetsForTimes( 197 | timestampsToSearch: util.Map[TopicPartition, lang.Long] 198 | ): util.Map[TopicPartition, OffsetAndTimestamp] = withMutex { 199 | val partitions = timestampsToSearch.keySet().asScala.toList 200 | mockConsumer 201 | .beginningOffsets( 202 | partitions.asJava 203 | ) // dummy implementation as it's not supported, just returns beginning offsets 204 | .asScala 205 | .map { case (partition, offset) => 206 | partition -> new OffsetAndTimestamp(offset, timestampsToSearch.get(partition)) 207 | } 208 | .asJava 209 | } 210 | 211 | def offsetsForTimes( 212 | timestampsToSearch: util.Map[TopicPartition, lang.Long], 213 | timeout: Duration 214 | ): util.Map[TopicPartition, OffsetAndTimestamp] = offsetsForTimes(timestampsToSearch) 215 | 216 | def beginningOffsets(partitions: util.Collection[TopicPartition]): util.Map[TopicPartition, lang.Long] = 217 | withMutex(mockConsumer.beginningOffsets(partitions)) 218 | 219 | def beginningOffsets( 220 | partitions: util.Collection[TopicPartition], 221 | timeout: Duration 222 | ): util.Map[TopicPartition, lang.Long] = withMutex(mockConsumer.beginningOffsets(partitions, timeout)) 223 | 224 | def endOffsets(partitions: util.Collection[TopicPartition]): util.Map[TopicPartition, lang.Long] = 225 | withMutex(mockConsumer.endOffsets(partitions)) 226 | 227 | def endOffsets( 228 | partitions: util.Collection[TopicPartition], 229 | timeout: Duration 230 | ): util.Map[TopicPartition, lang.Long] = withMutex(mockConsumer.endOffsets(partitions, timeout)) 231 | 232 | def currentLag(topicPartition: TopicPartition): OptionalLong = withMutex( 233 | mockConsumer.currentLag(topicPartition) 234 | ) 235 | 236 | def groupMetadata(): 
ConsumerGroupMetadata = withMutex(mockConsumer.groupMetadata()) 237 | 238 | def enforceRebalance(): Unit = withMutex(mockConsumer.enforceRebalance()) 239 | 240 | def enforceRebalance(reason: String): Unit = withMutex(mockConsumer.enforceRebalance(reason)) 241 | 242 | def close(): Unit = withMutex(mockConsumer.close()) 243 | 244 | def close(timeout: Duration): Unit = withMutex(mockConsumer.close(timeout)) 245 | 246 | def wakeup(): Unit = withMutex(mockConsumer.wakeup()) 247 | 248 | def clientInstanceId(timeout: Duration): Uuid = Uuid.randomUuid() 249 | } 250 | 251 | private def ensureConsumerAssignedTo(topics: List[String]): Unit = 252 | mockConsumer.rebalance(topics.map(new TopicPartition(_, singlePartition)).asJava) 253 | 254 | def apply[G[_]](settings: ConsumerSettings[G, ?, ?]): IO[KafkaByteConsumer] = 255 | IO.pure(mockFacadeWithPresetSubscriptions) 256 | } 257 | } 258 | -------------------------------------------------------------------------------- /src/main/scala/io/github/jchapuis/fs2/kafka/mock/impl/NativeMockKafkaProducer.scala: -------------------------------------------------------------------------------- 1 | package io.github.jchapuis.fs2.kafka.mock.impl 2 | 3 | import cats.Eq 4 | import cats.effect.IO 5 | import cats.effect.kernel.Ref 6 | import cats.effect.std.Mutex 7 | import cats.syntax.all.* 8 | import fs2.kafka.* 9 | import fs2.kafka.producer.MkProducer 10 | import io.github.jchapuis.fs2.kafka.mock.MockKafkaProducer 11 | import io.github.jchapuis.fs2.kafka.mock.MockKafkaProducer.Patience 12 | import org.apache.kafka.clients.producer.{MockProducer, ProducerRecord} 13 | 14 | import scala.jdk.CollectionConverters.* 15 | 16 | private[mock] class NativeMockKafkaProducer( 17 | val mockProducer: MockProducer[Array[Byte], Array[Byte]], 18 | currentOffsets: Ref[IO, Map[String, Int]], 19 | mutex: Mutex[IO] 20 | ) extends MockKafkaProducer { 21 | 22 | def nextMessageFor[K, V](topic: String)(implicit 23 | keyDeserializer: KeyDeserializer[IO, K], 24 | valueDeserializer: ValueDeserializer[IO, V] 25 | ): IO[Option[(K, V)]] = 26 | nextSelectedRecord(topic, record => IO(record.topic === topic)).map(_.collect { case (k, Some(v)) => (k, v) }) 27 | 28 | def nextValueFor[K: Eq, V](topic: String, key: K)(implicit 29 | keyDeserializer: KeyDeserializer[IO, K], 30 | valueDeserializer: ValueDeserializer[IO, V] 31 | ): IO[Option[V]] = 32 | nextSelectedRecord( 33 | topic, 34 | record => 35 | if (record.topic === topic) keyDeserializer.deserialize(topic, Headers.empty, record.key).map(_ === key) 36 | else IO.pure(false) 37 | ).map(_.collect { case (_, Some(value)) => value }) 38 | 39 | private def nextSelectedRecord[K, V]( 40 | topic: String, 41 | recordSelector: ProducerRecord[Array[Byte], Array[Byte]] => IO[Boolean] 42 | )(implicit 43 | keyDeserializer: KeyDeserializer[IO, K], 44 | valueDeserializer: ValueDeserializer[IO, V] 45 | ): IO[Option[(K, Option[V])]] = 46 | mutex.lock.surround { 47 | for { 48 | currentOffset <- currentOffsets.get.map(_.getOrElse(topic, -1)) 49 | messages <- selectedHistory[K, V](topic, recordSelector) 50 | maybeNextRecord = messages 51 | .drop(messages.indexWhere { case (index, _, _) => index === currentOffset } + 1) 52 | .headOption 53 | _ <- IO.whenA(maybeNextRecord.isDefined)( 54 | currentOffsets.update(_.updated(topic, maybeNextRecord.map { case (index, _, _) => index }.get)) 55 | ) 56 | } yield maybeNextRecord.map { case (_, key, value) => (key, value) } 57 | } 58 | 59 | def nextEventualMessageFor[K, V]( 60 | topic: String 61 | )(implicit 62 | patience: Patience, 63 
| keyDeserializer: KeyDeserializer[IO, K], 64 | valueDeserializer: ValueDeserializer[IO, V] 65 | ): IO[(K, V)] = nextEventualRecordFor[K, V](topic, record => IO(record.topic === topic)) 66 | 67 | def nextEventualValueFor[K: Eq, V](topic: String, key: K)(implicit 68 | patience: Patience, 69 | keyDeserializer: KeyDeserializer[IO, K], 70 | valueDeserializer: ValueDeserializer[IO, V] 71 | ): IO[V] = nextEventualRecordFor[K, V]( 72 | topic, 73 | record => 74 | if (record.topic === topic) keyDeserializer.deserialize(topic, Headers.empty, record.key).map(_ === key) 75 | else IO.pure(false) 76 | ).map { case (_, value) => value } 77 | 78 | def nextEventualValueOrRedactionFor[K: Eq, V](topic: String, key: K)(implicit 79 | patience: Patience, 80 | keyDeserializer: KeyDeserializer[IO, K], 81 | valueDeserializer: ValueDeserializer[IO, V] 82 | ): IO[Option[V]] = nextEventualRecordOrRedactedFor[K, V]( 83 | topic, 84 | record => 85 | if (record.topic === topic) keyDeserializer.deserialize(topic, Headers.empty, record.key).map(_ === key) 86 | else IO.pure(false) 87 | ).map { case (_, value) => value } 88 | 89 | // note that this is not tail-recursive 90 | private def nextEventualRecordFor[K, V]( 91 | topic: String, 92 | recordSelector: ProducerRecord[Array[Byte], Array[Byte]] => IO[Boolean] 93 | )(implicit 94 | patience: Patience, 95 | keyDeserializer: KeyDeserializer[IO, K], 96 | valueDeserializer: ValueDeserializer[IO, V] 97 | ): IO[(K, V)] = nextSelectedRecord[K, V](topic, recordSelector).flatMap { 98 | case Some((k, Some(v))) => IO.pure((k, v)) 99 | case _ if patience.timeout.toNanos > 0 => 100 | IO.sleep(patience.interval) *> { 101 | val nextPatience: Patience = patience.copy(timeout = patience.timeout - patience.interval) 102 | nextEventualRecordFor[K, V](topic, recordSelector)(nextPatience, implicitly, implicitly) 103 | } 104 | case _ => IO.raiseError(new NoSuchElementException(s"no message found for topic $topic")) 105 | } 106 | 107 | // note that this is not tail-recursive 108 | private def nextEventualRecordOrRedactedFor[K, V]( 109 | topic: String, 110 | recordSelector: ProducerRecord[Array[Byte], Array[Byte]] => IO[Boolean] 111 | )(implicit 112 | patience: Patience, 113 | keyDeserializer: KeyDeserializer[IO, K], 114 | valueDeserializer: ValueDeserializer[IO, V] 115 | ): IO[(K, Option[V])] = nextSelectedRecord[K, V](topic, recordSelector).flatMap { 116 | case Some(record) => IO.pure(record) 117 | case None if patience.timeout.toNanos > 0 => 118 | IO.sleep(patience.interval) *> { 119 | val nextPatience: Patience = patience.copy(timeout = patience.timeout - patience.interval) 120 | nextEventualRecordOrRedactedFor[K, V](topic, recordSelector)(nextPatience, implicitly, implicitly) 121 | } 122 | case None => IO.raiseError(new NoSuchElementException(s"no message found for topic $topic")) 123 | } 124 | def historyFor[K, V]( 125 | topic: String 126 | )(implicit keyDeserializer: KeyDeserializer[IO, K], valueDeserializer: ValueDeserializer[IO, V]): IO[List[(K, V)]] = 127 | selectedHistory(topic, record => IO(record.topic === topic)).map(_.collect { case (_, key, Some(value)) => 128 | (key, value) 129 | }) 130 | 131 | def historyFor[K: Eq, V](topic: String, key: K)(implicit 132 | keyDeserializer: KeyDeserializer[IO, K], 133 | valueDeserializer: ValueDeserializer[IO, V] 134 | ): IO[List[V]] = selectedHistory[K, V]( 135 | topic, 136 | record => 137 | if (record.topic === topic) keyDeserializer.deserialize(topic, Headers.empty, record.key).map(_ === key) 138 | else IO.pure(false) 139 | ).map(_.collect { case 
(_, _, Some(value)) => value }) 140 | 141 | private def selectedHistory[K, V]( 142 | topic: String, 143 | recordSelector: ProducerRecord[Array[Byte], Array[Byte]] => IO[Boolean] 144 | )(implicit 145 | keyDeserializer: KeyDeserializer[IO, K], 146 | valueDeserializer: ValueDeserializer[IO, V] 147 | ): IO[List[(Int, K, Option[V])]] = mockProducer.history.asScala.zipWithIndex.toList 148 | .flatTraverse { case (record, index) => 149 | recordSelector(record) 150 | .ifM( 151 | ( 152 | IO.pure(index), 153 | keyDeserializer.deserialize(topic, Headers.empty, record.key), 154 | Option(record.value).traverse(valueDeserializer.deserialize(topic, Headers.empty, _)) 155 | ).parTupled.map(List(_)), // outer flatTraverse will flatten the List(_) 156 | IO(Nil) // drop this record 157 | ) 158 | } 159 | 160 | implicit lazy val mkProducer: MkProducer[IO] = new MkProducer[IO] { 161 | def apply[G[_]](settings: ProducerSettings[G, ?, ?]): IO[KafkaByteProducer] = IO(mockProducer) 162 | } 163 | } 164 | -------------------------------------------------------------------------------- /src/test/scala/io/github/jchapuis/fs2/kafka/mock/MockKafkaConsumerSuite.scala: -------------------------------------------------------------------------------- 1 | package io.github.jchapuis.fs2.kafka.mock 2 | 3 | import cats.effect.IO 4 | import cats.syntax.traverse.* 5 | import cats.syntax.eq.* 6 | import fs2.kafka.{ConsumerSettings, KafkaConsumer, Timestamp} 7 | import fs2.kafka.consumer.MkConsumer 8 | import fs2.Stream 9 | import munit.CatsEffectSuite 10 | 11 | class MockKafkaConsumerSuite extends CatsEffectSuite { 12 | val topicA = "test-topic-a" 13 | val keyA = "test-key-a" 14 | val valueA = "test-value-a" 15 | val topicB = "test-topic-b" 16 | val keyB = "test-key-b" 17 | val valueB = "test-value-b" 18 | 19 | test("mock kafka consumer publishes messages that can be read") { 20 | MockKafkaConsumer(topicA, topicB).use { mockConsumer => 21 | for { 22 | _ <- (mockConsumer.publish(topicA, keyA, valueA) >> 23 | mockConsumer.publish(topicB, keyB, valueB) >> 24 | mockConsumer.publish(topicA, keyB, valueB)).start 25 | records <- { 26 | implicit val mkConsumer: MkConsumer[IO] = mockConsumer.mkConsumer 27 | KafkaConsumer 28 | .stream(ConsumerSettings[IO, String, String].withGroupId("test")) 29 | .subscribeTo(topicA, topicB) 30 | .records 31 | .map(_.record) 32 | .map(record => (record.topic, record.key, record.value)) 33 | .take(3) 34 | .compile 35 | .toList 36 | } 37 | _ <- IO(records.contains((topicA, keyA, valueA))).assert 38 | _ <- IO(records.contains((topicA, keyB, valueB))).assert 39 | _ <- IO(records.contains((topicB, keyB, valueB))).assert 40 | } yield () 41 | } 42 | } 43 | 44 | test("mock kafka consumer supports specifying record timestamp") { 45 | MockKafkaConsumer(topicA).use { mockConsumer => 46 | for { 47 | timestamp2 <- IO.realTimeInstant 48 | timestamp3 <- IO.realTimeInstant 49 | timestamp1 <- IO.realTimeInstant 50 | _ <- (mockConsumer.publish(topicA, keyA, valueA, Some(timestamp1)) >> 51 | mockConsumer.publish(topicA, keyA, valueA, Some(timestamp2)) >> 52 | mockConsumer.publish(topicA, keyA, valueA, Some(timestamp3))).start 53 | assertions <- { 54 | implicit val mkConsumer: MkConsumer[IO] = mockConsumer.mkConsumer 55 | KafkaConsumer 56 | .stream(ConsumerSettings[IO, String, String].withGroupId("test")) 57 | .subscribeTo(topicA) 58 | .records 59 | .map(_.record.timestamp) 60 | .take(3) 61 | .zipWith(Stream(timestamp1, timestamp2, timestamp3)) { case (obtained, expected) => 62 | IO(obtained === 
Timestamp.createTime(expected.toEpochMilli)).assert 63 | } 64 | .compile 65 | .toList 66 | } 67 | _ <- assertions.sequence 68 | } yield () 69 | } 70 | } 71 | 72 | test("mock kafka consumer supports redacting entries") { 73 | MockKafkaConsumer(topicA).use { mockConsumer => 74 | for { 75 | _ <- (mockConsumer.publish(topicA, keyA, valueA) >> mockConsumer.redact(topicA, keyA)).start 76 | records <- { 77 | implicit val mkConsumer: MkConsumer[IO] = mockConsumer.mkConsumer 78 | KafkaConsumer 79 | .stream(ConsumerSettings[IO, String, Option[String]].withGroupId("test")) 80 | .subscribeTo(topicA) 81 | .records 82 | .map(_.record) 83 | .take(2) 84 | .compile 85 | .toList 86 | } 87 | _ <- IO(records.headOption.flatMap(_.value)).assertEquals(Some(valueA)) 88 | _ <- IO(records.lastOption.flatMap(_.value)).assertEquals(None) 89 | } yield () 90 | } 91 | } 92 | 93 | test("mock kafka consumer handles Option values correctly") { 94 | MockKafkaConsumer(topicA).use { mockConsumer => 95 | for { 96 | _ <- mockConsumer.publish[String, Option[String]](topicA, keyA, None).start 97 | records <- { 98 | implicit val mkConsumer: MkConsumer[IO] = mockConsumer.mkConsumer 99 | KafkaConsumer 100 | .stream(ConsumerSettings[IO, String, Option[String]].withGroupId("test")) 101 | .subscribeTo(topicA) 102 | .records 103 | .map(_.record) 104 | .take(1) 105 | .compile 106 | .toList 107 | } 108 | _ <- IO(records.headOption.flatMap(_.value)).assertEquals(None) 109 | } yield () 110 | } 111 | } 112 | 113 | // README.md example 114 | test("mock kafka consumer can read a published message") { 115 | MockKafkaConsumer("topic").use { mockConsumer => 116 | for { 117 | _ <- mockConsumer 118 | .publish("topic", "key", "value") 119 | .start // this call semantically blocks until we can publish to the consumer, so launch it into a fiber 120 | record <- { 121 | implicit val mkConsumer: MkConsumer[IO] = mockConsumer.mkConsumer 122 | KafkaConsumer 123 | .stream( 124 | ConsumerSettings[IO, String, String] 125 | .withGroupId("test") 126 | ) 127 | .subscribeTo("topic") 128 | .records 129 | .map(_.record) 130 | .map(record => (record.topic, record.key, record.value)) 131 | .take(1) 132 | .compile 133 | .toList 134 | .map(_.head) 135 | } 136 | } yield assertEquals(record, ("topic", "key", "value")) 137 | } 138 | } 139 | } 140 | -------------------------------------------------------------------------------- /src/test/scala/io/github/jchapuis/fs2/kafka/mock/MockKafkaProducerSuite.scala: -------------------------------------------------------------------------------- 1 | package io.github.jchapuis.fs2.kafka.mock 2 | 3 | import cats.effect.IO 4 | import cats.effect.testkit.TestControl 5 | import cats.syntax.eq.* 6 | import fs2.Chunk 7 | import fs2.kafka.{KafkaProducer, ProducerRecord, ProducerRecords, ProducerSettings} 8 | import fs2.kafka.producer.MkProducer 9 | import munit.CatsEffectSuite 10 | 11 | import scala.concurrent.duration.* 12 | 13 | class MockKafkaProducerSuite extends CatsEffectSuite { 14 | val topic = "test-topic" 15 | val key = "test-key" 16 | val value = "test-value" 17 | val record = ProducerRecord[String, String](topic, key, value) 18 | 19 | test("mock kafka producer returns the history of published messages") { 20 | createMockProducer.use { case (mock, producer) => 21 | for { 22 | _ <- producer.produce(ProducerRecords(Chunk(record, record, record))).flatten 23 | topicHistory <- mock.historyFor[String, String](topic) 24 | _ <- IO(topicHistory.size == 3).assert 25 | _ <- IO(topicHistory.forall { case (k, v) => k === key && 
v === value }).assert 26 | keyHistory <- mock.historyFor[String, String](topic, key) 27 | _ <- IO(keyHistory.size == 3).assert 28 | _ <- IO(keyHistory.forall(_ === value)).assert 29 | } yield () 30 | } 31 | } 32 | 33 | test("mock kafka producer returns next messages iteratively") { 34 | createMockProducer.use { case (mock, producer) => 35 | for { 36 | _ <- producer.produce(ProducerRecords.one(record)).flatten 37 | _ <- mock 38 | .nextMessageFor[String, String](topic) 39 | .map(maybeKeyValue => assertEquals(maybeKeyValue, Some((key, value)))) 40 | _ <- producer 41 | .produce(ProducerRecords(Chunk(ProducerRecord(topic, key, "foo"), ProducerRecord(topic, key, "bar")))) 42 | .flatten 43 | _ <- mock.nextValueFor[String, String](topic, key).map(assertEquals(_, Some("foo"))) 44 | _ <- mock.nextValueFor[String, String](topic, key).map(assertEquals(_, Some("bar"))) 45 | } yield () 46 | } 47 | } 48 | 49 | test("mock kafka producer supports detecting redaction") { 50 | createMockProducer.use { case (mock, producer) => 51 | createMockRedactor(mock.mkProducer).use { redactor => 52 | for { 53 | _ <- producer.produce(ProducerRecords.one(record)).flatten 54 | _ <- mock 55 | .nextMessageFor[String, String](topic) 56 | .map(maybeKeyValue => assertEquals(maybeKeyValue, Some((key, value)))) 57 | _ <- redactor.produceOne(topic, key, ()) 58 | _ <- mock.nextEventualValueOrRedactionFor[String, String](topic, key).map(assertEquals(_, None)) // redaction 59 | _ <- producer.produce(ProducerRecords.one(ProducerRecord(topic, key, "foo"))).flatten 60 | _ <- mock.nextEventualValueOrRedactionFor[String, String](topic, key).map(assertEquals(_, Some("foo"))) 61 | _ <- mock.nextValueFor[String, String](topic, key).map(assertEquals(_, None)) 62 | 63 | } yield () 64 | } 65 | } 66 | } 67 | 68 | test("mock kafka producer returns eventual message") { 69 | implicit val patience: MockKafkaProducer.Patience = MockKafkaProducer.Patience(timeout = 2.seconds, interval = 100.millis) 70 | val test = createMockProducer.use { case (mock, producer) => 71 | for { 72 | _ <- { 73 | IO.sleep(1.second) >> 74 | producer.produce(ProducerRecords.one(record)).flatten >> 75 | IO.sleep(1.second) >> 76 | producer.produce(ProducerRecords.one(record)).flatten 77 | }.start 78 | _ <- mock 79 | .nextEventualMessageFor[String, String](topic) 80 | .map(record => assertEquals(record, (key, value))) 81 | _ <- mock.nextEventualValueFor[String, String](topic, key).map(assertEquals(_, value)) 82 | } yield () 83 | } 84 | TestControl.executeEmbed(test) 85 | } 86 | 87 | test("mock kafka producer raises exception when no eventual message") { 88 | implicit val patience: MockKafkaProducer.Patience = MockKafkaProducer.Patience(timeout = 1.seconds, interval = 100.millis) 89 | val test = createMockProducer.use { case (mock, producer) => 90 | for { 91 | _ <- { IO.sleep(2.second) >> producer.produce(ProducerRecords.one(record)).flatten }.start 92 | _ <- mock 93 | .nextEventualMessageFor[String, String](topic) 94 | .map(assertEquals(_, (key, value))) 95 | } yield () 96 | } 97 | TestControl.executeEmbed(test).intercept[NoSuchElementException] 98 | } 99 | 100 | private def createMockProducer = { 101 | MockKafkaProducer() 102 | .flatMap { mock => 103 | implicit val mkProducer: MkProducer[IO] = mock.mkProducer 104 | val producer = KafkaProducer.resource(ProducerSettings[IO, String, String]) 105 | producer.map((mock, _)) 106 | } 107 | } 108 | 109 | private def createMockRedactor(implicit mkProducer: MkProducer[IO]) = 110 | KafkaProducer.resource(ProducerSettings[IO, String, Unit]) 111 | 112 | // README.md example 113 | test("mock 
kafka producer returns next message and allows for checking full history") { 114 | MockKafkaProducer() 115 | .flatMap { mock => 116 | implicit val mkProducer: MkProducer[IO] = mock.mkProducer 117 | val producer = KafkaProducer.resource(ProducerSettings[IO, String, String]) 118 | producer.map((mock, _)) 119 | } 120 | .use { case (mock, producer) => 121 | for { 122 | _ <- producer.produce(ProducerRecords.one(ProducerRecord[String, String]("topic", "key", "value"))).flatten 123 | _ <- mock 124 | .nextMessageFor[String, String]("topic") 125 | .map(maybeKeyValue => assertEquals(maybeKeyValue, Some(("key", "value")))) 126 | _ <- mock 127 | .historyFor[String, String]("topic") 128 | .map(history => assertEquals(history, List(("key", "value")))) 129 | } yield () 130 | } 131 | } 132 | } 133 | --------------------------------------------------------------------------------