├── .gitignore
├── .travis.yml
├── LICENSE
├── README.md
├── RdKafka.sln
├── appveyor.yml
├── doc
│   ├── docfx.json
│   ├── index.md
│   ├── template
│   │   ├── partials
│   │   │   └── navbar.tmpl.partial
│   │   └── styles
│   │       └── main.css
│   └── toc.yml
├── examples
│   ├── AdvancedConsumer
│   │   ├── AdvancedConsumer.xproj
│   │   ├── Program.cs
│   │   └── project.json
│   ├── AdvancedProducer
│   │   ├── AdvancedProducer.xproj
│   │   ├── Program.cs
│   │   └── project.json
│   ├── Benchmark
│   │   ├── Benchmark.xproj
│   │   ├── Program.cs
│   │   └── project.json
│   ├── Misc
│   │   ├── Misc.xproj
│   │   ├── Program.cs
│   │   └── project.json
│   ├── SimpleConsumer
│   │   ├── Program.cs
│   │   ├── SimpleConsumer.xproj
│   │   └── project.json
│   └── SimpleProducer
│       ├── Program.cs
│       ├── SimpleProducer.xproj
│       └── project.json
├── global.json
├── src
│   └── RdKafka
│       ├── Config.cs
│       ├── Consumer.cs
│       ├── ErrorCode.cs
│       ├── EventConsumer.cs
│       ├── Handle.cs
│       ├── IDeliveryHandler.cs
│       ├── Internal
│       │   ├── LibRdKafka.cs
│       │   ├── Metadata.cs
│       │   ├── SafeConfigHandle.cs
│       │   ├── SafeHandleZeroIsInvalid.cs
│       │   ├── SafeKafkaHandle.cs
│       │   ├── SafeTopicConfigHandle.cs
│       │   └── SafeTopicHandle.cs
│       ├── Library.cs
│       ├── Message.cs
│       ├── Metadata.cs
│       ├── Offset.cs
│       ├── Producer.cs
│       ├── RdKafka.xproj
│       ├── RdKafkaException.cs
│       ├── Topic.cs
│       ├── TopicConfig.cs
│       └── project.json
└── test
    └── RdKafka.Tests
        ├── ConfigTests.cs
        ├── RdKafka.Tests.xproj
        └── project.json
/.gitignore:
--------------------------------------------------------------------------------
1 | project.lock.json
2 | bin/
3 | obj/
4 | *.so
5 | *.dylib
6 | *.csproj.user
7 | *.xproj.user
8 | .vs
9 | todo.txt
10 | .cache
11 | src/RdKafka/runtimes
12 | Properties
13 | packages/
14 | doc/api/
15 | doc/_site/
16 | doc/src
17 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | # Origin: https://github.com/ah-/rdkafka-dotnet
2 |
3 | language: csharp
4 | sudo: required
5 | dist: trusty
6 |
7 | os:
8 | - osx
9 | - linux
10 |
11 | env:
12 | - PATH
13 |
14 | install:
15 | - if [ "$TRAVIS_OS_NAME" == "linux" ]; then sudo apt-get install libunwind8; fi
16 | - wget https://raw.githubusercontent.com/dotnet/cli/rel/1.0.0-preview2/scripts/obtain/dotnet-install.sh
17 | - sudo bash dotnet-install.sh --version 1.0.0-preview2-003121
18 | - export PATH=$HOME/.dotnet:$PATH
19 | - dotnet restore
20 |
21 | script:
22 | - dotnet build ./src/RdKafka/project.json
23 | - dotnet pack ./src/RdKafka/project.json
24 | - dotnet test ./test/RdKafka.Tests/
25 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | rdkafka-dotnet - C# Apache Kafka client library
2 |
3 | Copyright (c) 2015-2016, Andreas Heider
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | 1. Redistributions of source code must retain the above copyright notice,
10 | this list of conditions and the following disclaimer.
11 | 2. Redistributions in binary form must reproduce the above copyright notice,
12 | this list of conditions and the following disclaimer in the documentation
13 | and/or other materials provided with the distribution.
14 |
15 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 | POSSIBILITY OF SUCH DAMAGE.
26 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # This project has moved to https://github.com/confluentinc/confluent-kafka-dotnet and is now being maintained by Confluent
2 |
3 | Please update to [Confluent.Kafka](https://www.nuget.org/packages/Confluent.Kafka/).
4 |
5 |
6 |
7 |
8 |
9 |
10 | rdkafka-dotnet - C# Apache Kafka client
11 | =======================================
12 |
13 | [Travis CI build status](https://travis-ci.org/ah-/rdkafka-dotnet)
14 | [AppVeyor build status](https://ci.appveyor.com/project/ah-/rdkafka-dotnet)
15 | [Gitter chat for librdkafka](https://gitter.im/edenhill/librdkafka)
16 |
17 | Copyright (c) 2015-2016, [Andreas Heider](mailto:andreas@heider.io)
18 |
19 | **rdkafka-dotnet** is a C# client for [Apache Kafka](http://kafka.apache.org/) based on [librdkafka](https://github.com/edenhill/librdkafka).
20 |
21 | **rdkafka-dotnet** is licensed under the 2-clause BSD license.
22 |
23 | ## Usage
24 |
25 | Just reference the [RdKafka NuGet package](https://www.nuget.org/packages/RdKafka)
26 |
27 | ## Examples
28 |
29 | ### Producing messages
30 |
31 | ```cs
32 | using (Producer producer = new Producer("127.0.0.1:9092"))
33 | using (Topic topic = producer.Topic("testtopic"))
34 | {
35 | byte[] data = Encoding.UTF8.GetBytes("Hello RdKafka");
36 | DeliveryReport deliveryReport = await topic.Produce(data);
37 | Console.WriteLine($"Produced to Partition: {deliveryReport.Partition}, Offset: {deliveryReport.Offset}");
38 | }
39 |
40 | ```
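`Produce` also takes an optional message key, which the partitioner uses to pick a partition (by default, equal keys map to the same partition). A minimal sketch, inside the same `using` block as above and with a made-up key:

```cs
byte[] payload = Encoding.UTF8.GetBytes("Hello again");
byte[] key = Encoding.UTF8.GetBytes("user-42"); // hypothetical key
// Equal keys are routed to the same partition by the partitioner.
DeliveryReport keyedReport = await topic.Produce(payload, key);
Console.WriteLine($"Keyed message went to partition {keyedReport.Partition}");
```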
41 |
42 | ### Consuming messages
43 |
44 | ```cs
45 | var config = new Config() { GroupId = "example-csharp-consumer" };
46 | using (var consumer = new EventConsumer(config, "127.0.0.1:9092"))
47 | {
48 | consumer.OnMessage += (obj, msg) =>
49 | {
50 | string text = Encoding.UTF8.GetString(msg.Payload, 0, msg.Payload.Length);
51 | Console.WriteLine($"Topic: {msg.Topic} Partition: {msg.Partition} Offset: {msg.Offset} {text}");
52 | };
53 |
54 | consumer.Subscribe(new []{"testtopic"});
55 | consumer.Start();
56 |
57 | Console.WriteLine("Started consumer, press enter to stop consuming");
58 | Console.ReadLine();
59 | }
60 | ```
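`EventConsumer` is a thin layer over `Consumer`; when you want to drive the poll loop yourself, call `Consumer.Consume(TimeSpan)` directly. A minimal sketch of such a loop (see `src/RdKafka/Consumer.cs` for the underlying API):

```cs
var config = new Config() { GroupId = "example-csharp-consumer" };
using (var consumer = new Consumer(config, "127.0.0.1:9092"))
{
    consumer.Subscribe(new List<string> { "testtopic" });

    while (true)
    {
        // Returns null on timeout, otherwise a message or an error event.
        MessageAndError? mae = consumer.Consume(TimeSpan.FromSeconds(1));
        if (mae.HasValue && mae.Value.Error == ErrorCode.NO_ERROR)
        {
            Console.WriteLine($"Received message at offset {mae.Value.Message.Offset}");
        }
    }
}
```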
61 |
62 | ### More
63 |
64 | See `examples/`
65 |
66 | ## Documentation
67 |
68 | [Read the API Documentation here](https://ah-.github.io/rdkafka-dotnet/api/RdKafka.html)
69 |
70 | [Read the FAQ for answers to common questions](https://github.com/ah-/rdkafka-dotnet/wiki/Faq)
71 |
72 | ## Supported Platforms and .NET Releases
73 |
74 | Requires .NET 4.5 or later. Tested with .NET Core on Linux, OS X and Windows, and classic .NET 4.5 on Windows.
75 |
--------------------------------------------------------------------------------
/RdKafka.sln:
--------------------------------------------------------------------------------
1 |
2 | Microsoft Visual Studio Solution File, Format Version 12.00
3 | # Visual Studio 14
4 | VisualStudioVersion = 14.0.25420.1
5 | MinimumVisualStudioVersion = 10.0.40219.1
6 | Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "RdKafka", "src\RdKafka\RdKafka.xproj", "{B2DDB635-4423-45D7-B3DC-F701E6010868}"
7 | EndProject
8 | Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "RdKafka.Tests", "test\RdKafka.Tests\RdKafka.Tests.xproj", "{33151BE2-C10B-41BC-8C5E-E55211A1722D}"
9 | EndProject
10 | Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "SimpleProducer", "examples\SimpleProducer\SimpleProducer.xproj", "{A7BF0A75-D3E7-4024-8597-5FCCC567D372}"
11 | EndProject
12 | Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "SimpleConsumer", "examples\SimpleConsumer\SimpleConsumer.xproj", "{85F08EF0-9E30-49D3-B86F-DBA36267843C}"
13 | EndProject
14 | Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "src", "src", "{56321620-6DE2-44DC-942B-E3701C01EBFE}"
15 | EndProject
16 | Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "test", "test", "{03D93818-5A54-4913-A5CD-CA0C373CEE05}"
17 | EndProject
18 | Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "examples", "examples", "{5E1F754C-FCE9-4B80-B13D-5F9ACE13305A}"
19 | EndProject
20 | Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "Misc", "examples\Misc\Misc.xproj", "{2E6FC09E-2BED-4765-8298-9584E172B6CF}"
21 | EndProject
22 | Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "AdvancedConsumer", "examples\AdvancedConsumer\AdvancedConsumer.xproj", "{0A3DD19C-3A80-47DB-83B0-A1EF6078C9EE}"
23 | EndProject
24 | Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "Benchmark", "examples\Benchmark\Benchmark.xproj", "{9EA27B93-4714-453C-9319-1920945CEB78}"
25 | EndProject
26 | Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "AdvancedProducer", "examples\AdvancedProducer\AdvancedProducer.xproj", "{DDCE9F6B-6A9D-4B05-BAD0-E77D6359B1FB}"
27 | EndProject
28 | Global
29 | GlobalSection(SolutionConfigurationPlatforms) = preSolution
30 | Debug|Any CPU = Debug|Any CPU
31 | Release|Any CPU = Release|Any CPU
32 | EndGlobalSection
33 | GlobalSection(ProjectConfigurationPlatforms) = postSolution
34 | {B2DDB635-4423-45D7-B3DC-F701E6010868}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
35 | {B2DDB635-4423-45D7-B3DC-F701E6010868}.Debug|Any CPU.Build.0 = Debug|Any CPU
36 | {B2DDB635-4423-45D7-B3DC-F701E6010868}.Release|Any CPU.ActiveCfg = Release|Any CPU
37 | {B2DDB635-4423-45D7-B3DC-F701E6010868}.Release|Any CPU.Build.0 = Release|Any CPU
38 | {33151BE2-C10B-41BC-8C5E-E55211A1722D}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
39 | {33151BE2-C10B-41BC-8C5E-E55211A1722D}.Debug|Any CPU.Build.0 = Debug|Any CPU
40 | {33151BE2-C10B-41BC-8C5E-E55211A1722D}.Release|Any CPU.ActiveCfg = Release|Any CPU
41 | {33151BE2-C10B-41BC-8C5E-E55211A1722D}.Release|Any CPU.Build.0 = Release|Any CPU
42 | {A7BF0A75-D3E7-4024-8597-5FCCC567D372}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
43 | {A7BF0A75-D3E7-4024-8597-5FCCC567D372}.Debug|Any CPU.Build.0 = Debug|Any CPU
44 | {A7BF0A75-D3E7-4024-8597-5FCCC567D372}.Release|Any CPU.ActiveCfg = Release|Any CPU
45 | {A7BF0A75-D3E7-4024-8597-5FCCC567D372}.Release|Any CPU.Build.0 = Release|Any CPU
46 | {85F08EF0-9E30-49D3-B86F-DBA36267843C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
47 | {85F08EF0-9E30-49D3-B86F-DBA36267843C}.Debug|Any CPU.Build.0 = Debug|Any CPU
48 | {85F08EF0-9E30-49D3-B86F-DBA36267843C}.Release|Any CPU.ActiveCfg = Release|Any CPU
49 | {85F08EF0-9E30-49D3-B86F-DBA36267843C}.Release|Any CPU.Build.0 = Release|Any CPU
50 | {2E6FC09E-2BED-4765-8298-9584E172B6CF}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
51 | {2E6FC09E-2BED-4765-8298-9584E172B6CF}.Debug|Any CPU.Build.0 = Debug|Any CPU
52 | {2E6FC09E-2BED-4765-8298-9584E172B6CF}.Release|Any CPU.ActiveCfg = Release|Any CPU
53 | {2E6FC09E-2BED-4765-8298-9584E172B6CF}.Release|Any CPU.Build.0 = Release|Any CPU
54 | {0A3DD19C-3A80-47DB-83B0-A1EF6078C9EE}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
55 | {0A3DD19C-3A80-47DB-83B0-A1EF6078C9EE}.Debug|Any CPU.Build.0 = Debug|Any CPU
56 | {0A3DD19C-3A80-47DB-83B0-A1EF6078C9EE}.Release|Any CPU.ActiveCfg = Release|Any CPU
57 | {0A3DD19C-3A80-47DB-83B0-A1EF6078C9EE}.Release|Any CPU.Build.0 = Release|Any CPU
58 | {9EA27B93-4714-453C-9319-1920945CEB78}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
59 | {9EA27B93-4714-453C-9319-1920945CEB78}.Debug|Any CPU.Build.0 = Debug|Any CPU
60 | {9EA27B93-4714-453C-9319-1920945CEB78}.Release|Any CPU.ActiveCfg = Release|Any CPU
61 | {9EA27B93-4714-453C-9319-1920945CEB78}.Release|Any CPU.Build.0 = Release|Any CPU
62 | {DDCE9F6B-6A9D-4B05-BAD0-E77D6359B1FB}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
63 | {DDCE9F6B-6A9D-4B05-BAD0-E77D6359B1FB}.Debug|Any CPU.Build.0 = Debug|Any CPU
64 | {DDCE9F6B-6A9D-4B05-BAD0-E77D6359B1FB}.Release|Any CPU.ActiveCfg = Release|Any CPU
65 | {DDCE9F6B-6A9D-4B05-BAD0-E77D6359B1FB}.Release|Any CPU.Build.0 = Release|Any CPU
66 | EndGlobalSection
67 | GlobalSection(SolutionProperties) = preSolution
68 | HideSolutionNode = FALSE
69 | EndGlobalSection
70 | GlobalSection(NestedProjects) = preSolution
71 | {B2DDB635-4423-45D7-B3DC-F701E6010868} = {56321620-6DE2-44DC-942B-E3701C01EBFE}
72 | {33151BE2-C10B-41BC-8C5E-E55211A1722D} = {03D93818-5A54-4913-A5CD-CA0C373CEE05}
73 | {A7BF0A75-D3E7-4024-8597-5FCCC567D372} = {5E1F754C-FCE9-4B80-B13D-5F9ACE13305A}
74 | {85F08EF0-9E30-49D3-B86F-DBA36267843C} = {5E1F754C-FCE9-4B80-B13D-5F9ACE13305A}
75 | {2E6FC09E-2BED-4765-8298-9584E172B6CF} = {5E1F754C-FCE9-4B80-B13D-5F9ACE13305A}
76 | {0A3DD19C-3A80-47DB-83B0-A1EF6078C9EE} = {5E1F754C-FCE9-4B80-B13D-5F9ACE13305A}
77 | {9EA27B93-4714-453C-9319-1920945CEB78} = {5E1F754C-FCE9-4B80-B13D-5F9ACE13305A}
78 | {DDCE9F6B-6A9D-4B05-BAD0-E77D6359B1FB} = {5E1F754C-FCE9-4B80-B13D-5F9ACE13305A}
79 | EndGlobalSection
80 | EndGlobal
81 |
--------------------------------------------------------------------------------
/appveyor.yml:
--------------------------------------------------------------------------------
1 | os: Visual Studio 2015
2 |
3 | configuration:
4 | - Release
5 |
6 | install:
7 | - ps: Start-FileDownload 'https://download.microsoft.com/download/A/3/8/A38489F3-9777-41DD-83F8-2CBDFAB2520C/packages/DotNetCore.1.0.0-SDK.Preview2-x64.exe'
8 | - cmd: DotNetCore.1.0.0-SDK.Preview2-x64.exe /quiet
9 |
10 | environment:
11 | PATH: $(PATH);$(PROGRAMFILES)\dotnet\
12 |
13 | build_script:
14 | - appveyor-retry dotnet restore -v Minimal
15 | - dotnet build **/project.json -c %CONFIGURATION%
16 | - cmd: IF "%APPVEYOR_REPO_TAG%" == "true" (dotnet pack src/RdKafka/project.json -c %CONFIGURATION%)
17 | - cmd: IF NOT "%APPVEYOR_REPO_TAG%" == "true" (dotnet pack src/RdKafka/project.json -c %CONFIGURATION% --version-suffix ci-%APPVEYOR_BUILD_NUMBER%)
18 | - dotnet test test/RdKafka.Tests
19 |
20 | test: off
21 |
22 | artifacts:
23 | - path: ./src/RdKafka/bin/Release/*.nupkg
24 |
25 | deploy:
26 | provider: NuGet
27 | api_key:
28 | secure: qbl3uvq1riFSNeRw6/MHnaDbJ0Ft9evEcz3nQp061pKEhXn0ex4eI56dwUOO2fWF
29 | skip_symbols: true
30 | artifact: /.*\.nupkg/
31 |
--------------------------------------------------------------------------------
/doc/docfx.json:
--------------------------------------------------------------------------------
1 | {
2 | "metadata": [
3 | {
4 | "src": [
5 | {
6 | "files": [
7 | "src/RdKafka/*.cs"
8 | ],
9 | "cwd": ".."
10 | }
11 | ],
12 | "dest": "api"
13 | }
14 | ],
15 | "build": {
16 | "content": [
17 | {
18 | "files": [
19 | "api/**.yml",
20 | "toc.yml",
21 | "*.md"
22 | ]
23 | }
24 | ],
25 | "globalMetadata": {
26 | "_appTitle": "RdKafka",
27 | "_disableContribution": true,
28 | "_disableFooter": true
29 | },
30 | "dest": "_site",
31 | "template": ["default", "template"]
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/doc/index.md:
--------------------------------------------------------------------------------
1 | rdkafka-dotnet - C# Apache Kafka client
2 | =======================================
3 |
4 | **rdkafka-dotnet** is a C# client for [Apache Kafka](http://kafka.apache.org/) based on [librdkafka](https://github.com/edenhill/librdkafka).
5 |
6 | ## Usage
7 |
8 | Just reference the [RdKafka NuGet package](https://www.nuget.org/packages/RdKafka)
9 |
10 | ## Api Reference
11 |
12 | [Read the Api Documentation here](api/RdKafka.html)
13 |
14 | ## Examples
15 |
16 | ### Producing messages
17 |
18 | ```cs
19 | using (Producer producer = new Producer("127.0.0.1:9092"))
20 | using (Topic topic = producer.Topic("testtopic"))
21 | {
22 | byte[] data = Encoding.UTF8.GetBytes("Hello RdKafka");
23 | DeliveryReport deliveryReport = await topic.Produce(data);
24 | Console.WriteLine($"Produced to Partition: {deliveryReport.Partition}, Offset: {deliveryReport.Offset}");
25 | }
26 |
27 | ```
28 |
29 | ### Consuming messages
30 |
31 | ```cs
32 | var config = new Config() { GroupId = "example-csharp-consumer" };
33 | using (var consumer = new EventConsumer(config, "127.0.0.1:9092"))
34 | {
35 | consumer.OnMessage += (obj, msg) =>
36 | {
37 | string text = Encoding.UTF8.GetString(msg.Payload, 0, msg.Payload.Length);
38 | Console.WriteLine($"Topic: {msg.Topic} Partition: {msg.Partition} Offset: {msg.Offset} {text}");
39 | };
40 |
41 | consumer.Subscribe(new []{"testtopic"});
42 | consumer.Start();
43 |
44 | Console.WriteLine("Started consumer, press enter to stop consuming");
45 | Console.ReadLine();
46 | }
47 | ```
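
Any librdkafka configuration property can also be set through the `Config` indexer, alongside typed helpers such as `GroupId` (see librdkafka's CONFIGURATION.md for the full property list). A minimal sketch:

```cs
var config = new Config() { GroupId = "example-csharp-consumer" };
config["statistics.interval.ms"] = "60000"; // emit OnStatistics once per minute
config["enable.auto.commit"] = "false";
```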
48 |
--------------------------------------------------------------------------------
/doc/template/partials/navbar.tmpl.partial:
--------------------------------------------------------------------------------
1 |
16 |
--------------------------------------------------------------------------------
/doc/template/styles/main.css:
--------------------------------------------------------------------------------
1 | @import url(//fonts.googleapis.com/css?family=Roboto+Condensed:700);
2 | @import url(//fonts.googleapis.com/css?family=Open+Sans);
3 |
4 | /* Main styles */
5 | body {
6 | font-family: "Open Sans", "Segoe UI", sans-serif;
7 | font-size: 15px;
8 | padding-top: 50px;
9 | }
10 | ul {
11 | list-style-image: url("../../images/core/list-bullet.png");
12 | }
13 | nav {
14 | font-size: 14px;
15 | }
16 | .navbar-nav > li > a.nav-active, .navbar-nav > li > a.nav-active:hover {
17 | background-color: #333;
18 | color: #fff;
19 | }
20 |
21 | h1, h2, h3, h4, h5 {
22 | font-family: "Roboto Condensed", "Segoe UI", sans-serif;
23 | font-weight: bold;
24 | }
25 |
26 |
27 | footer {
28 | text-align: center;
29 | width: 100%;
30 | margin-top: 50px;
31 | color: #c0c0c0;
32 | }
33 | footer > .inner-footer a {
34 | color: #c0c0c0;
35 | text-decoration: none;
36 | }
37 | footer > .inner-footer a:hover {
38 | color: #32145a;
39 | text-decoration: none;
40 | }
41 | .content a {
42 | /*color: #A979B3;*/
43 | color: #A356B3;
44 | text-decoration: none;
45 | outline: 0;
46 | }
47 | .content a:hover {
48 | /*transition: color .15s cubic-bezier(.33, .66, .66, 1);*/
49 | text-decoration: none;
50 | color: #682079;
51 | }
52 |
53 |
54 | /* End of main styles */
55 |
56 | /* Index page styles */
57 | .btn-hero-core {
58 | padding: 15px 25px;
59 | background-color: #32145a;
60 | color: #d89ae4;
61 | display: inline-block;
62 | font-family: "Open Sans", sans-serif;
63 | font-size: 20px;
64 | font-weight: bold;
65 | margin-left: 20px;
66 | -webkit-box-shadow: 2px 2px 3px 0px #2C0D33; /* Safari 3-4, iOS 4.0.2 - 4.2, Android 2.3+ */
67 | -moz-box-shadow: 2px 2px 3px 0px #2C0D33; /* Firefox 3.5 - 3.6 */
68 | box-shadow: 2px 2px 3px 0px #2C0D33; /* Opera 10.5, IE 9, Firefox 4+, Chrome 6+, iOS 5 */
69 | }
70 | .btn-hero-core:hover {
71 | color: #d89ae4;
72 | text-decoration: none;
73 | }
74 | .hero {
75 | background-color: #682079;
76 | width: inherit;
77 | color: #fff;
78 | }
79 | .starter-template {
80 | padding: 40px 15px;
81 | text-align: center;
82 | }
83 | .dotnet {
84 | color: #fff;
85 | }
86 | #rest-vps {
87 | display: none;
88 | }
89 | .value-prop-heading {
90 | margin-top: 0px;
91 | }
92 | .value-props {
93 | margin-top: 40px;
94 | margin-bottom: 40px;
95 | }
96 |
97 | .intro-image {
98 | text-align: center;
99 | }
100 | .intro-image > img {
101 | margin-top: 20px;
102 | }
103 |
104 | /* End of index page styles */
105 |
106 | /* Getting started page styles */
107 | .getting-started-intro {
108 | text-align: center;
109 | margin-top: 40px;
110 | margin-bottom: 40px;
111 | }
112 | .getting-started-intro > h2, h4 {
113 | margin-bottom: 30px;
114 | }
115 | .btn-gs {
116 | width: 150px;
117 | }
118 | .btn-gs:hover, .btn-gs:active, .btn-gs:focus, .jquery-active {
119 | color: #fff;
120 | background-color: #682079;
121 | outline: 0 !important;
122 | }
123 |
124 |
125 | .step {
126 | width: 100%;
127 | margin: 50px auto;
128 | padding: 20px 0px;
129 | text-align: center;
130 | font-size: 16px;
131 | border: solid 1px #c0c0c0;
132 | min-height: 300px;
133 | background-color: #fff;
134 | border-radius: 10px;
135 | }
136 | .step-block {
137 | display: block;
138 | }
139 | .step-none {
140 | display: none;
141 | }
142 | .step-number {
143 | position: relative;
144 | top: -40px;
145 | background-color: #32145a;
146 | color: #fff;
147 | font-weight: bold;
148 | font-size: 24px;
149 | z-index: 999;
150 | margin-left: auto;
151 | margin-right: auto;
152 | width: 80px;
153 | padding: 10px;
154 | border: solid 1px #c0c0c0;
155 | border-radius: 10px;
156 | }
157 |
158 | .step > h3 {
159 | margin: 0;
160 | margin-bottom: 30px;
161 | font-size: 30px;
162 | }
163 | .step > p {
164 | margin-top: 10px;
165 | margin-bottom: 20px;
166 | width: 70%;
167 | text-align: center;
168 | margin-left: auto;
169 | margin-right: auto;
170 | }
171 | .code-sample {
172 | white-space: pre;
173 | }
174 |
175 |
176 | /* Terminal backgrounds */
177 | .terminal {
178 | display: block;
179 | width: 850px;
180 | margin-left: auto;
181 | margin-right: auto;
182 | }
183 | .terminal-titlebar {
184 | background-color: #c0c0c0;
185 | height: 30px;
186 | border-top-left-radius: 5px;
187 | border-top-right-radius: 5px;
188 | }
189 |
190 | .terminal-body {
191 | background-color: #000;
192 | color: #fff;
193 | font-family: "Consolas", "Monaco", monospace;
194 | font-size: 16px;
195 | font-weight: bold;
196 | padding: 15px;
197 | text-align: left;
198 | height: auto;
199 | overflow: auto;
200 | word-wrap: break-word;
201 | border-bottom-left-radius: 5px;
202 | border-bottom-right-radius: 5px;
203 | }
204 | .prompt {
205 | -webkit-touch-callout: none;
206 | -webkit-user-select: none;
207 | -khtml-user-select: none;
208 | -moz-user-select: none;
209 | -ms-user-select: none;
210 | user-select: none;
211 | color: #c0c0c0;
212 | }
213 | .windows-prompt:after {
214 | content: 'PS > ';
215 | }
216 | .unix-prompt:after {
217 | content: '~$ ';
218 | }
219 |
220 | @media (max-device-width: 480px) and (orientation: portrait), (max-device-width: 700px) and (orientation: landscape){
221 | /* Index page overrides */
222 | .btn-hero-core {
223 | padding: 10px 15px;
224 | margin-left: 0px;
225 | font-size: 16px;
226 | }
227 | .intro-image > img {
228 | display: none;
229 | }
230 |
231 | /* Overview overrides */
232 | img[src*="10kft_view"] {
233 | width: 100%;
234 | height: 100%;
235 | }
236 |
237 | /* Getting started overrides */
238 | .btn-gs {
239 | width: auto;
240 | }
241 |
242 | .btn-gs:hover, .btn-gs:active, .btn-gs:focus, .jquery-active {
243 | width: auto;
244 | }
245 |
246 | .step {
247 | width: 90%;
248 | font-size: 14px;
249 | }
250 | .step > h3 {
251 | font-size: 24px;
252 | }
253 | .step-number {
254 | width: 40px;
255 | font-size: 18px;
256 | padding: 5px;
257 | }
258 | .terminal {
259 | width: 95%;
260 | }
261 | .terminal-titlebar {
262 | height: 20px;
263 | }
264 | .terminal-body {
265 | font-size: 12px;
266 | padding: 5px;
267 | }
268 | }
269 |
270 | body {
271 | font-family: "Open Sans", "Segoe UI", sans-serif;
272 | padding-top: 0px;
273 | }
274 | footer {
275 | z-index: 0;
276 | }
277 |
278 | .navbar-brand {
279 | font-size: 18px;
280 | padding: 15px;
281 | }
282 | .toc .level3 {
283 | font-weight: normal;
284 | margin-top: 5px;
285 | margin-left: 10px;
286 | }
287 | @media only screen and (max-width: 768px) {
288 | .toc .level3 > li {
289 | display: inline-block;
290 | }
291 | .toc .level3 > li:after {
292 | margin-left: -3px;
293 | margin-right: 5px;
294 | content: ", ";
295 | color: #666666;
296 | }
297 | }
298 | @media (max-width: 260px) {
299 | .toc .level3 > li {
300 | display: block;
301 | }
302 |
303 | .toc .level3 > li:after {
304 | display: none;
305 | }
306 | }
307 |
--------------------------------------------------------------------------------
/doc/toc.yml:
--------------------------------------------------------------------------------
1 | - name: Api reference
2 | href: api/RdKafka.html
3 | homepage: api/RdKafka.yml
4 |
--------------------------------------------------------------------------------
/examples/AdvancedConsumer/AdvancedConsumer.xproj:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <Project ToolsVersion="14.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
3 |   <PropertyGroup>
4 |     <VisualStudioVersion Condition="'$(VisualStudioVersion)' == ''">14.0</VisualStudioVersion>
5 |     <VSToolsPath Condition="'$(VSToolsPath)' == ''">$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)</VSToolsPath>
6 |   </PropertyGroup>
7 |   <Import Project="$(VSToolsPath)\DotNet\Microsoft.DotNet.Props" Condition="'$(VSToolsPath)' != ''" />
8 |   <PropertyGroup Label="Globals">
9 |     <ProjectGuid>0a3dd19c-3a80-47db-83b0-a1ef6078c9ee</ProjectGuid>
10 |     <RootNamespace>AdvancedConsumer</RootNamespace>
11 |     <BaseIntermediateOutputPath>..\..\artifacts\obj\$(MSBuildProjectName)</BaseIntermediateOutputPath>
12 |     <OutputPath>.\bin\</OutputPath>
13 |   </PropertyGroup>
14 |   <PropertyGroup>
15 |     <SchemaVersion>2.0</SchemaVersion>
16 |   </PropertyGroup>
17 |   <Import Project="$(VSToolsPath)\DotNet\Microsoft.DotNet.targets" Condition="'$(VSToolsPath)' != ''" />
18 | </Project>
--------------------------------------------------------------------------------
/examples/AdvancedConsumer/Program.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Linq;
4 | using System.Text;
5 | using RdKafka;
6 |
7 | namespace AdvancedConsumer
8 | {
9 | public class Program
10 | {
11 | public static void Run(string brokerList, List<string> topics)
12 | {
13 | bool enableAutoCommit = false;
14 |
15 | var config = new Config()
16 | {
17 | GroupId = "advanced-csharp-consumer",
18 | EnableAutoCommit = enableAutoCommit,
19 | StatisticsInterval = TimeSpan.FromSeconds(60)
20 | };
21 |
22 | using (var consumer = new EventConsumer(config, brokerList))
23 | {
24 | consumer.OnMessage += (obj, msg) => {
25 | string text = Encoding.UTF8.GetString(msg.Payload, 0, msg.Payload.Length);
26 | Console.WriteLine($"Topic: {msg.Topic} Partition: {msg.Partition} Offset: {msg.Offset} {text}");
27 |
28 | if (!enableAutoCommit && msg.Offset % 10 == 0)
29 | {
30 | Console.WriteLine($"Committing offset");
31 | consumer.Commit(msg).Wait();
32 | Console.WriteLine($"Committed offset");
33 | }
34 | };
35 |
36 | consumer.OnConsumerError += (obj, errorCode) =>
37 | {
38 | Console.WriteLine($"Consumer Error: {errorCode}");
39 | };
40 |
41 | consumer.OnEndReached += (obj, end) => {
42 | Console.WriteLine($"Reached end of topic {end.Topic} partition {end.Partition}, next message will be at offset {end.Offset}");
43 | };
44 |
45 | consumer.OnError += (obj, error) => {
46 | Console.WriteLine($"Error: {error.ErrorCode} {error.Reason}");
47 | };
48 |
49 | if (enableAutoCommit)
50 | {
51 | consumer.OnOffsetCommit += (obj, commit) => {
52 | if (commit.Error != ErrorCode.NO_ERROR)
53 | {
54 | Console.WriteLine($"Failed to commit offsets: {commit.Error}");
55 | }
56 | Console.WriteLine($"Successfully committed offsets: [{string.Join(", ", commit.Offsets)}]");
57 | };
58 | }
59 |
60 | consumer.OnPartitionsAssigned += (obj, partitions) => {
61 | Console.WriteLine($"Assigned partitions: [{string.Join(", ", partitions)}], member id: {consumer.MemberId}");
62 | consumer.Assign(partitions);
63 | };
64 |
65 | consumer.OnPartitionsRevoked += (obj, partitions) => {
66 | Console.WriteLine($"Revoked partitions: [{string.Join(", ", partitions)}]");
67 | consumer.Unassign();
68 | };
69 |
70 | consumer.OnStatistics += (obj, json) => {
71 | Console.WriteLine($"Statistics: {json}");
72 | };
73 |
74 | consumer.Subscribe(topics);
75 | consumer.Start();
76 |
77 | Console.WriteLine($"Assigned to: [{string.Join(", ", consumer.Assignment)}]");
78 | Console.WriteLine($"Subscribed to: [{string.Join(", ", consumer.Subscription)}]");
79 |
80 | Console.WriteLine($"Started consumer, press enter to stop consuming");
81 | Console.ReadLine();
82 | }
83 | }
84 |
85 | public static void Main(string[] args)
86 | {
87 | Run(args[0], args.Skip(1).ToList());
88 | }
89 | }
90 | }
91 |
--------------------------------------------------------------------------------
/examples/AdvancedConsumer/project.json:
--------------------------------------------------------------------------------
1 | {
2 | "version": "1.0.0",
3 | "authors": ["Andreas Heider"],
4 |
5 | "buildOptions": {
6 | "emitEntryPoint": true
7 | },
8 |
9 | "dependencies": {
10 | "RdKafka": {
11 | "target": "project"
12 | }
13 | },
14 |
15 | "frameworks": {
16 | "netcoreapp1.0": {
17 | "dependencies": {
18 | "Microsoft.NETCore.App": {
19 | "type": "platform",
20 | "version": "1.0.0"
21 | }
22 | }
23 | }
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/examples/AdvancedProducer/AdvancedProducer.xproj:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <Project ToolsVersion="14.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
3 |   <PropertyGroup>
4 |     <VisualStudioVersion Condition="'$(VisualStudioVersion)' == ''">14.0</VisualStudioVersion>
5 |     <VSToolsPath Condition="'$(VSToolsPath)' == ''">$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)</VSToolsPath>
6 |   </PropertyGroup>
7 |   <Import Project="$(VSToolsPath)\DotNet\Microsoft.DotNet.Props" Condition="'$(VSToolsPath)' != ''" />
8 |   <PropertyGroup Label="Globals">
9 |     <ProjectGuid>ddce9f6b-6a9d-4b05-bad0-e77d6359b1fb</ProjectGuid>
10 |     <RootNamespace>AdvancedProducer</RootNamespace>
11 |     <BaseIntermediateOutputPath>..\..\artifacts\obj\$(MSBuildProjectName)</BaseIntermediateOutputPath>
12 |     <OutputPath>.\bin\</OutputPath>
13 |   </PropertyGroup>
14 |   <PropertyGroup>
15 |     <SchemaVersion>2.0</SchemaVersion>
16 |   </PropertyGroup>
17 |   <Import Project="$(VSToolsPath)\DotNet\Microsoft.DotNet.targets" Condition="'$(VSToolsPath)' != ''" />
18 | </Project>
--------------------------------------------------------------------------------
/examples/AdvancedProducer/Program.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Text;
3 | using System.Threading.Tasks;
4 | using RdKafka;
5 |
6 | namespace AdvancedProducer
7 | {
8 | public class Program
9 | {
10 | public static void Main(string[] args)
11 | {
12 | string brokerList = args[0];
13 | string topicName = args[1];
14 |
15 | var topicConfig = new TopicConfig
16 | {
17 | CustomPartitioner = (top, key, cnt) =>
18 | {
19 | var kt = (key != null) ? Encoding.UTF8.GetString(key, 0, key.Length) : "(null)";
20 | int partition = (key?.Length ?? 0) % cnt;
21 | bool available = top.PartitionAvailable(partition);
22 | Console.WriteLine($"Partitioner topic: {top.Name} key: {kt} partition count: {cnt} -> {partition} {available}");
23 | return partition;
24 | }
25 | };
26 |
27 | using (Producer producer = new Producer(brokerList))
28 | using (Topic topic = producer.Topic(topicName, topicConfig))
29 | {
30 | Console.WriteLine($"{producer.Name} producing on {topic.Name}. q to exit.");
31 |
32 | string text;
33 | while ((text = Console.ReadLine()) != "q")
34 | {
35 | byte[] data = Encoding.UTF8.GetBytes(text);
36 | byte[] key = null;
37 | // Use the first word as the key
38 | int index = text.IndexOf(" ");
39 | if (index != -1)
40 | {
41 | key = Encoding.UTF8.GetBytes(text.Substring(0, index));
42 | }
43 |
44 | Task<DeliveryReport> deliveryReport = topic.Produce(data, key);
45 | var unused = deliveryReport.ContinueWith(task =>
46 | {
47 | Console.WriteLine($"Partition: {task.Result.Partition}, Offset: {task.Result.Offset}");
48 | });
49 | }
50 | }
51 | }
52 | }
53 | }
54 |
--------------------------------------------------------------------------------
/examples/AdvancedProducer/project.json:
--------------------------------------------------------------------------------
1 | {
2 | "version": "1.0.0",
3 | "authors": ["Andreas Heider"],
4 |
5 | "buildOptions": {
6 | "emitEntryPoint": true
7 | },
8 |
9 | "dependencies": {
10 | "RdKafka": {
11 | "target": "project"
12 | }
13 | },
14 |
15 | "frameworks": {
16 | "netcoreapp1.0": {
17 | "dependencies": {
18 | "Microsoft.NETCore.App": {
19 | "type": "platform",
20 | "version": "1.0.0"
21 | }
22 | }
23 | }
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/examples/Benchmark/Benchmark.xproj:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <Project ToolsVersion="14.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
3 |   <PropertyGroup>
4 |     <VisualStudioVersion Condition="'$(VisualStudioVersion)' == ''">14.0</VisualStudioVersion>
5 |     <VSToolsPath Condition="'$(VSToolsPath)' == ''">$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)</VSToolsPath>
6 |   </PropertyGroup>
7 |   <Import Project="$(VSToolsPath)\DotNet\Microsoft.DotNet.Props" Condition="'$(VSToolsPath)' != ''" />
8 |   <PropertyGroup Label="Globals">
9 |     <ProjectGuid>9ea27b93-4714-453c-9319-1920945ceb78</ProjectGuid>
10 |     <RootNamespace>Benchmark</RootNamespace>
11 |     <BaseIntermediateOutputPath>..\..\artifacts\obj\$(MSBuildProjectName)</BaseIntermediateOutputPath>
12 |     <OutputPath>.\bin\</OutputPath>
13 |   </PropertyGroup>
14 |   <PropertyGroup>
15 |     <SchemaVersion>2.0</SchemaVersion>
16 |   </PropertyGroup>
17 |   <Import Project="$(VSToolsPath)\DotNet\Microsoft.DotNet.targets" Condition="'$(VSToolsPath)' != ''" />
18 | </Project>
--------------------------------------------------------------------------------
/examples/Benchmark/Program.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Diagnostics;
4 | using System.Linq;
5 | using System.Text;
6 | using System.Threading;
7 | using System.Threading.Tasks;
8 | using RdKafka;
9 |
10 | namespace Benchmark
11 | {
12 | public class Program
13 | {
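// Implementing IDeliveryHandler reports delivery via callbacks instead of
// allocating a Task<DeliveryReport> per message, which keeps the produce
// hot path cheap at benchmark volumes.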
14 | public class DeliveryHandler : IDeliveryHandler
15 | {
16 | public void SetException(Exception exception)
17 | {
18 | throw exception;
19 | }
20 |
21 | public void SetResult(DeliveryReport deliveryReport)
22 | {
23 | }
24 | }
25 |
26 | public static void Produce(string broker, string topicName, long numMessages)
27 | {
28 | var deliveryHandler = new DeliveryHandler();
29 |
30 | using (var producer = new Producer(broker))
31 | using (Topic topic = producer.Topic(topicName))
32 | {
33 | Console.WriteLine($"{producer.Name} producing on {topic.Name}");
34 | for (int i = 0; i < numMessages; i++)
35 | {
36 | byte[] data = Encoding.UTF8.GetBytes(i.ToString());
37 | topic.Produce(data, deliveryHandler);
38 | }
39 |
40 | Console.WriteLine("Shutting down");
41 | }
42 | }
43 |
44 | public static async Task<long> Consume(string broker, string topic)
45 | {
46 | long n = 0;
47 |
48 | var topicConfig = new TopicConfig();
49 | topicConfig["auto.offset.reset"] = "smallest";
50 | var config = new Config()
51 | {
52 | GroupId = "benchmark-consumer",
53 | DefaultTopicConfig = topicConfig
54 | };
55 | using (var consumer = new EventConsumer(config, broker))
56 | {
57 | var signal = new SemaphoreSlim(0, 1);
58 |
59 | consumer.OnMessage += (obj, msg) =>
60 | {
61 | n += 1;
62 | };
63 |
64 | consumer.OnEndReached += (obj, end) =>
65 | {
66 | Console.WriteLine($"End reached");
67 | signal.Release();
68 | };
69 |
70 | consumer.Subscribe(new List<string>{topic});
71 | consumer.Start();
72 |
73 | await signal.WaitAsync();
74 | Console.WriteLine($"Shutting down");
75 | }
76 |
77 | return n;
78 | }
79 |
80 | public static void Main(string[] args)
81 | {
82 | string brokerList = args[0];
83 | string topic = args[1];
84 |
85 | long numMessages = 10000000;
86 | var stopwatch = new Stopwatch();
87 |
88 | stopwatch.Start();
89 | Produce(brokerList, topic, numMessages);
90 | stopwatch.Stop();
91 |
92 | Console.WriteLine($"Sent {numMessages} messages in {stopwatch.Elapsed}");
93 | Console.WriteLine($"{numMessages / stopwatch.Elapsed.TotalSeconds:F0} messages/second");
94 |
95 | stopwatch.Restart();
96 | long n = Consume(brokerList, topic).Result;
97 | stopwatch.Stop();
98 |
99 | Console.WriteLine($"Received {n} messages in {stopwatch.Elapsed}");
100 | Console.WriteLine($"{n / stopwatch.Elapsed.TotalSeconds:F0} messages/second");
101 | }
102 | }
103 | }
104 |
--------------------------------------------------------------------------------
/examples/Benchmark/project.json:
--------------------------------------------------------------------------------
1 | {
2 | "version": "1.0.0",
3 | "authors": ["Andreas Heider"],
4 |
5 | "buildOptions": {
6 | "emitEntryPoint": true
7 | },
8 |
9 | "dependencies": {
10 | "RdKafka": {
11 | "target": "project"
12 | }
13 | },
14 |
15 | "frameworks": {
16 | "netcoreapp1.0": {
17 | "dependencies": {
18 | "Microsoft.NETCore.App": {
19 | "type": "platform",
20 | "version": "1.0.0"
21 | }
22 | }
23 | }
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/examples/Misc/Misc.xproj:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <Project ToolsVersion="14.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
3 |   <PropertyGroup>
4 |     <VisualStudioVersion Condition="'$(VisualStudioVersion)' == ''">14.0</VisualStudioVersion>
5 |     <VSToolsPath Condition="'$(VSToolsPath)' == ''">$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)</VSToolsPath>
6 |   </PropertyGroup>
7 |   <Import Project="$(VSToolsPath)\DotNet\Microsoft.DotNet.Props" Condition="'$(VSToolsPath)' != ''" />
8 |   <PropertyGroup Label="Globals">
9 |     <ProjectGuid>2e6fc09e-2bed-4765-8298-9584e172b6cf</ProjectGuid>
10 |     <RootNamespace>Misc</RootNamespace>
11 |     <BaseIntermediateOutputPath>..\..\artifacts\obj\$(MSBuildProjectName)</BaseIntermediateOutputPath>
12 |     <OutputPath>.\bin\</OutputPath>
13 |   </PropertyGroup>
14 |   <PropertyGroup>
15 |     <SchemaVersion>2.0</SchemaVersion>
16 |   </PropertyGroup>
17 |   <Import Project="$(VSToolsPath)\DotNet\Microsoft.DotNet.targets" Condition="'$(VSToolsPath)' != ''" />
18 | </Project>
--------------------------------------------------------------------------------
/examples/Misc/Program.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Linq;
3 | using System.Threading.Tasks;
4 | using RdKafka;
5 |
6 | namespace Misc
7 | {
8 | public class Program
9 | {
10 | static string ToString(int[] array) => $"[{string.Join(", ", array)}]";
11 |
12 | static async Task ListGroups(string brokerList)
13 | {
14 | using (var producer = new Producer(brokerList))
15 | {
16 | var groups = await producer.ListGroups(TimeSpan.FromSeconds(10));
17 | Console.WriteLine($"Consumer Groups:");
18 | foreach (var g in groups)
19 | {
20 | Console.WriteLine($" Group: {g.Group} {g.Error} {g.State}");
21 | Console.WriteLine($" Broker: {g.Broker.BrokerId} {g.Broker.Host}:{g.Broker.Port}");
22 | Console.WriteLine($" Protocol: {g.ProtocolType} {g.Protocol}");
23 | Console.WriteLine($" Members:");
24 | foreach (var m in g.Members)
25 | {
26 | Console.WriteLine($" {m.MemberId} {m.ClientId} {m.ClientHost}");
27 | Console.WriteLine($" Metadata: {m.MemberMetadata.Length} bytes");
28 | //Console.WriteLine(System.Text.Encoding.UTF8.GetString(m.MemberMetadata));
29 | Console.WriteLine($" Assignment: {m.MemberAssignment.Length} bytes");
30 | //Console.WriteLine(System.Text.Encoding.UTF8.GetString(m.MemberAssignment));
31 | }
32 | }
33 | }
34 | }
35 |
36 | static async Task PrintMetadata(string brokerList)
37 | {
38 | using (var producer = new Producer(brokerList))
39 | {
40 | var meta = await producer.Metadata();
41 | Console.WriteLine($"{meta.OriginatingBrokerId} {meta.OriginatingBrokerName}");
42 | meta.Brokers.ForEach(broker =>
43 | Console.WriteLine($"Broker: {broker.BrokerId} {broker.Host}:{broker.Port}"));
44 |
45 | meta.Topics.ForEach(topic =>
46 | {
47 | Console.WriteLine($"Topic: {topic.Topic} {topic.Error}");
48 | topic.Partitions.ForEach(partition =>
49 | {
50 | Console.WriteLine($" Partition: {partition.PartitionId}");
51 | Console.WriteLine($" Replicas: {ToString(partition.Replicas)}");
52 | Console.WriteLine($" InSyncReplicas: {ToString(partition.InSyncReplicas)}");
53 | });
54 | });
55 | }
56 | }
57 |
58 | public static void Main(string[] args)
59 | {
60 | Console.WriteLine($"Hello RdKafka!");
61 | Console.WriteLine($"{Library.Version:X}");
62 | Console.WriteLine($"{Library.VersionString}");
63 | Console.WriteLine($"{string.Join(", ", Library.DebugContexts)}");
64 |
65 | if (args.Contains("--list-groups"))
66 | {
67 | ListGroups(args[0]).Wait();
68 | }
69 |
70 | if (args.Contains("--metadata"))
71 | {
72 | PrintMetadata(args[0]).Wait();
73 | }
74 |
75 | if (args.Contains("--dump-config"))
76 | {
77 | foreach (var kv in new Config().Dump())
78 | {
79 | Console.WriteLine($"\"{kv.Key}\": \"{kv.Value}\"");
80 | }
81 | }
82 | }
83 | }
84 | }
85 |
--------------------------------------------------------------------------------
/examples/Misc/project.json:
--------------------------------------------------------------------------------
1 | {
2 | "version": "1.0.0",
3 | "authors": ["Andreas Heider"],
4 |
5 | "buildOptions": {
6 | "emitEntryPoint": true
7 | },
8 |
9 | "dependencies": {
10 | "RdKafka": {
11 | "target": "project"
12 | }
13 | },
14 |
15 | "frameworks": {
16 | "netcoreapp1.0": {
17 | "dependencies": {
18 | "Microsoft.NETCore.App": {
19 | "type": "platform",
20 | "version": "1.0.0"
21 | }
22 | }
23 | }
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/examples/SimpleConsumer/Program.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Linq;
4 | using System.Text;
5 | using RdKafka;
6 |
7 | namespace SimpleConsumer
8 | {
9 | public class Program
10 | {
11 | public static void Main(string[] args)
12 | {
13 | string brokerList = args[0];
14 | var topics = args.Skip(1).ToList();
15 |
16 | var config = new Config() { GroupId = "simple-csharp-consumer" };
17 | using (var consumer = new EventConsumer(config, brokerList))
18 | {
19 | consumer.OnMessage += (obj, msg) =>
20 | {
21 | string text = Encoding.UTF8.GetString(msg.Payload, 0, msg.Payload.Length);
22 | Console.WriteLine($"Topic: {msg.Topic} Partition: {msg.Partition} Offset: {msg.Offset} {text}");
23 | };
24 |
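// No consumer group subscription here: explicitly assign partition 0
// of the first topic, starting at offset 5.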
25 | consumer.Assign(new List<TopicPartitionOffset> {new TopicPartitionOffset(topics.First(), 0, 5)});
26 | consumer.Start();
27 |
28 | Console.WriteLine("Started consumer, press enter to stop consuming");
29 | Console.ReadLine();
30 | }
31 | }
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/examples/SimpleConsumer/SimpleConsumer.xproj:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <Project ToolsVersion="14.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
3 |   <PropertyGroup>
4 |     <VisualStudioVersion Condition="'$(VisualStudioVersion)' == ''">14.0</VisualStudioVersion>
5 |     <VSToolsPath Condition="'$(VSToolsPath)' == ''">$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)</VSToolsPath>
6 |   </PropertyGroup>
7 |   <Import Project="$(VSToolsPath)\DotNet\Microsoft.DotNet.Props" Condition="'$(VSToolsPath)' != ''" />
8 |   <PropertyGroup Label="Globals">
9 |     <ProjectGuid>85f08ef0-9e30-49d3-b86f-dba36267843c</ProjectGuid>
10 |     <RootNamespace>SimpleConsumer</RootNamespace>
11 |     <BaseIntermediateOutputPath>..\..\artifacts\obj\$(MSBuildProjectName)</BaseIntermediateOutputPath>
12 |     <OutputPath>.\bin\</OutputPath>
13 |   </PropertyGroup>
14 |   <PropertyGroup>
15 |     <SchemaVersion>2.0</SchemaVersion>
16 |   </PropertyGroup>
17 |   <Import Project="$(VSToolsPath)\DotNet\Microsoft.DotNet.targets" Condition="'$(VSToolsPath)' != ''" />
18 | </Project>
--------------------------------------------------------------------------------
/examples/SimpleConsumer/project.json:
--------------------------------------------------------------------------------
1 | {
2 | "version": "1.0.0",
3 | "authors": ["Andreas Heider"],
4 |
5 | "buildOptions": {
6 | "emitEntryPoint": true
7 | },
8 |
9 | "dependencies": {
10 | "RdKafka": {
11 | "target": "project"
12 | }
13 | },
14 |
15 | "frameworks": {
16 | "netcoreapp1.0": {
17 | "dependencies": {
18 | "Microsoft.NETCore.App": {
19 | "type": "platform",
20 | "version": "1.0.0"
21 | }
22 | }
23 | }
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/examples/SimpleProducer/Program.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Text;
3 | using System.Threading.Tasks;
4 | using RdKafka;
5 |
6 | namespace SimpleProducer
7 | {
8 | public class Program
9 | {
10 | public static void Main(string[] args)
11 | {
12 | string brokerList = args[0];
13 | string topicName = args[1];
14 |
15 | using (Producer producer = new Producer(brokerList))
16 | using (Topic topic = producer.Topic(topicName))
17 | {
18 | Console.WriteLine($"{producer.Name} producing on {topic.Name}. q to exit.");
19 |
20 | string text;
21 | while ((text = Console.ReadLine()) != "q")
22 | {
23 | byte[] data = Encoding.UTF8.GetBytes(text);
24 | Task<DeliveryReport> deliveryReport = topic.Produce(data);
25 | var unused = deliveryReport.ContinueWith(task =>
26 | {
27 | Console.WriteLine($"Partition: {task.Result.Partition}, Offset: {task.Result.Offset}");
28 | });
29 | }
30 | }
31 | }
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/examples/SimpleProducer/SimpleProducer.xproj:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <Project ToolsVersion="14.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
3 |   <PropertyGroup>
4 |     <VisualStudioVersion Condition="'$(VisualStudioVersion)' == ''">14.0</VisualStudioVersion>
5 |     <VSToolsPath Condition="'$(VSToolsPath)' == ''">$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)</VSToolsPath>
6 |   </PropertyGroup>
7 |   <Import Project="$(VSToolsPath)\DotNet\Microsoft.DotNet.Props" Condition="'$(VSToolsPath)' != ''" />
8 |   <PropertyGroup Label="Globals">
9 |     <ProjectGuid>a7bf0a75-d3e7-4024-8597-5fccc567d372</ProjectGuid>
10 |     <RootNamespace>SimpleProducer</RootNamespace>
11 |     <BaseIntermediateOutputPath>..\..\artifacts\obj\$(MSBuildProjectName)</BaseIntermediateOutputPath>
12 |     <OutputPath>.\bin\</OutputPath>
13 |   </PropertyGroup>
14 |   <PropertyGroup>
15 |     <SchemaVersion>2.0</SchemaVersion>
16 |   </PropertyGroup>
17 |   <Import Project="$(VSToolsPath)\DotNet\Microsoft.DotNet.targets" Condition="'$(VSToolsPath)' != ''" />
18 | </Project>
--------------------------------------------------------------------------------
/examples/SimpleProducer/project.json:
--------------------------------------------------------------------------------
1 | {
2 | "version": "1.0.0",
3 | "authors": ["Andreas Heider"],
4 |
5 | "buildOptions": {
6 | "emitEntryPoint": true
7 | },
8 |
9 | "dependencies": {
10 | "RdKafka": {
11 | "target": "project"
12 | }
13 | },
14 |
15 | "frameworks": {
16 | "netcoreapp1.0": {
17 | "dependencies": {
18 | "Microsoft.NETCore.App": {
19 | "type": "platform",
20 | "version": "1.0.0"
21 | }
22 | }
23 | }
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/global.json:
--------------------------------------------------------------------------------
1 | {
2 | "projects": [
3 | "src",
4 | "test",
5 | "examples"
6 | ]
7 | }
8 |
--------------------------------------------------------------------------------
/src/RdKafka/Config.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using RdKafka.Internal;
4 |
5 | namespace RdKafka
6 | {
7 | /// <summary>
8 | /// Global configuration that is passed to
9 | /// Consumer or Producer constructors.
10 | /// </summary>
11 | public class Config
12 | {
13 | internal readonly SafeConfigHandle handle;
14 |
15 | public Config()
16 | {
17 | handle = SafeConfigHandle.Create();
18 | }
19 |
20 | /// <summary>
21 | /// Dump all configuration names and values into a dictionary.
22 | /// </summary>
23 | public Dictionary<string, string> Dump() => handle.Dump();
24 |
25 | /// <summary>
26 | /// Get or set a configuration value directly.
27 | ///
28 | /// See CONFIGURATION.md for the full list of supported properties.
29 | /// </summary>
30 | /// <param name="name">The configuration property name.</param>
31 | /// <returns>The configuration property value.</returns>
32 | /// <exception cref="ArgumentException"><paramref name="value" /> is invalid.</exception>
33 | /// <exception cref="InvalidOperationException">Configuration property does not exist.</exception>
34 | public string this[string name]
35 | {
36 | set
37 | {
38 | handle.Set(name, value);
39 | }
40 | get
41 | {
42 | return handle.Get(name);
43 | }
44 | }
45 |
46 | /// <summary>
47 | /// Client group id string.
48 | ///
49 | /// All clients sharing the same group.id belong to the same group.
50 | /// </summary>
51 | public string GroupId
52 | {
53 | set { this["group.id"] = value; }
54 | get { return this["group.id"]; }
55 | }
56 |
57 | /// <summary>
58 | /// Automatically and periodically commit offsets in the background.
59 | /// </summary>
60 | public bool EnableAutoCommit
61 | {
62 | set { this["enable.auto.commit"] = value ? "true" : "false"; }
63 | get { return this["enable.auto.commit"] == "true"; }
64 | }
65 |
66 | public delegate void LogCallback(string handle, int level, string fac, string buf);
67 | /// <summary>
68 | /// Set custom logger callback.
69 | ///
70 | /// By default RdKafka logs using Console.WriteLine.
71 | /// </summary>
72 | public LogCallback Logger { get; set; }
73 |
74 | /// <summary>
75 | /// Statistics emit interval for OnStatistics.
76 | /// </summary>
77 | public TimeSpan StatisticsInterval
78 | {
79 | set { this["statistics.interval.ms"] = ((int) value.TotalMilliseconds).ToString(); }
80 | get { return TimeSpan.FromMilliseconds(int.Parse(this["statistics.interval.ms"])); }
81 | }
82 |
83 | /// <summary>
84 | /// Sets the default topic configuration to use for automatically
85 | /// subscribed topics (e.g., through pattern-matched topics).
86 | /// </summary>
87 | public TopicConfig DefaultTopicConfig { get; set; }
88 | }
89 | }
90 |
--------------------------------------------------------------------------------
/src/RdKafka/Consumer.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Threading.Tasks;
4 | using RdKafka.Internal;
5 |
6 | namespace RdKafka
7 | {
8 | /// <summary>
9 | /// High-level Kafka Consumer, receives messages from a Kafka cluster.
10 | ///
11 | /// Requires Kafka >= 0.9.0.0.
12 | /// </summary>
13 | public class Consumer : Handle
14 | {
15 | public Consumer(Config config, string brokerList = null)
16 | {
17 | RebalanceDelegate = RebalanceCallback;
18 | CommitDelegate = CommitCallback;
19 |
20 | IntPtr cfgPtr = config.handle.Dup();
21 | LibRdKafka.conf_set_rebalance_cb(cfgPtr, RebalanceDelegate);
22 | LibRdKafka.conf_set_offset_commit_cb(cfgPtr, CommitDelegate);
23 | if (config.DefaultTopicConfig != null)
24 | {
25 | LibRdKafka.conf_set_default_topic_conf(cfgPtr,
26 | config.DefaultTopicConfig.handle.Dup());
27 | }
28 | Init(RdKafkaType.Consumer, cfgPtr, config.Logger);
29 |
30 | if (brokerList != null)
31 | {
32 | handle.AddBrokers(brokerList);
33 | }
34 | }
35 |
36 | /// <summary>
37 | /// Returns the current partition assignment as set by Assign.
38 | /// </summary>
39 | public List<TopicPartitionOffset> Assignment => handle.GetAssignment();
40 |
41 | /// <summary>
42 | /// Returns the current topic subscription as set by Subscribe.
43 | /// </summary>
44 | public List<string> Subscription => handle.GetSubscription();
45 |
46 | /// <summary>
47 | /// Update the subscription set to topics.
48 | ///
49 | /// Any previous subscription will be unassigned and unsubscribed first.
50 | ///
51 | /// The subscription set denotes the desired topics to consume and this
52 | /// set is provided to the partition assignor (one of the elected group
53 | /// members) for all clients which then uses the configured
54 | /// partition.assignment.strategy to assign the subscription set's
55 | /// topics' partitions to the consumers, depending on their subscription.
56 | /// </summary>
57 | public void Subscribe(ICollection<string> topics)
58 | {
59 | handle.Subscribe(topics);
60 | }
61 |
62 | /// <summary>
63 | /// Unsubscribe from the current subscription set.
64 | /// </summary>
65 | public void Unsubscribe()
66 | {
67 | handle.Unsubscribe();
68 | }
69 |
70 | /// <summary>
71 | /// Update the assignment set to partitions.
72 | ///
73 | /// The assignment set is the set of partitions actually being consumed
74 | /// by the KafkaConsumer.
75 | /// </summary>
76 | public void Assign(ICollection<TopicPartitionOffset> partitions)
77 | {
78 | handle.Assign(partitions);
79 | }
80 |
81 | /// <summary>
82 | /// Stop consumption and remove the current assignment.
83 | /// </summary>
84 | public void Unassign()
85 | {
86 | handle.Assign(null);
87 | }
88 |
89 | /// <summary>
90 | /// Manually consume a message or get an error; triggers events.
91 | ///
92 | /// Will invoke events for OnPartitionsAssigned/Revoked,
93 | /// OnOffsetCommit, etc. on the calling thread.
94 | ///
95 | /// Returns one of:
96 | /// - a proper message (ErrorCode is NO_ERROR)
97 | /// - an error event (ErrorCode is != NO_ERROR)
98 | /// - null, on timeout with no message or event
99 | /// </summary>
100 | public MessageAndError? Consume(TimeSpan timeout) => handle.ConsumerPoll((IntPtr)timeout.TotalMilliseconds);
101 |
102 | /// <summary>
103 | /// Commit offsets for the current assignment.
104 | /// </summary>
105 | public Task Commit()
106 | {
107 | handle.Commit();
108 | return Task.FromResult(false);
109 | }
110 |
111 | /// <summary>
112 | /// Commit offset for a single topic+partition based on message.
113 | /// </summary>
114 | public Task Commit(Message message)
115 | {
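// Kafka convention: commit Offset + 1, i.e. the offset of the next
// message to consume, not the message just processed.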
116 | var tpo = message.TopicPartitionOffset;
117 | Commit(new List<TopicPartitionOffset>()
118 | {
119 | new TopicPartitionOffset(tpo.Topic, tpo.Partition, tpo.Offset + 1)
120 | });
121 | return Task.FromResult(false);
122 | }
123 |
124 | /// <summary>
125 | /// Commit explicit list of offsets.
126 | /// </summary>
127 | public Task Commit(ICollection<TopicPartitionOffset> offsets)
128 | {
129 | handle.Commit(offsets);
130 | return Task.FromResult(false);
131 | }
132 |
133 | /// <summary>
134 | /// Retrieve committed offsets for topics+partitions.
135 | /// </summary>
136 | public Task<List<TopicPartitionOffset>> Committed(ICollection<TopicPartition> partitions, TimeSpan timeout)
137 | {
138 | var result = handle.Committed(partitions, (IntPtr) timeout.TotalMilliseconds);
139 | return Task.FromResult(result);
140 | }
141 |
142 | /// <summary>
143 | /// Retrieve current positions (offsets) for topics+partitions.
144 | ///
145 | /// The offset field of each requested partition will be set to the offset
146 | /// of the last consumed message + 1, or RD_KAFKA_OFFSET_INVALID in case there was
147 | /// no previous message.
148 | /// </summary>
149 | public List<TopicPartitionOffset> Position(ICollection<TopicPartition> partitions) => handle.Position(partitions);
150 |
151 | /// <summary>
152 | /// Get last known low (oldest/beginning) and high (newest/end) offsets for partition.
153 | ///
154 | /// The low offset is updated periodically (if statistics.interval.ms is set)
155 | /// while the high offset is updated on each fetched message set from the broker.
156 | ///
157 | /// If there is no cached offset (either low or high, or both) then
158 | /// RD_KAFKA_OFFSET_INVALID will be returned for the respective offset.
159 | /// </summary>
160 | public Offsets GetWatermarkOffsets(TopicPartition topicPartition)
161 | => handle.GetWatermarkOffsets(topicPartition.Topic, topicPartition.Partition);
162 |
163 | // Rebalance callbacks
164 | public event EventHandler<List<TopicPartitionOffset>> OnPartitionsAssigned;
165 | public event EventHandler<List<TopicPartitionOffset>> OnPartitionsRevoked;
166 |
167 | // Explicitly keep reference to delegate so it stays alive
168 | LibRdKafka.RebalanceCallback RebalanceDelegate;
169 | void RebalanceCallback(IntPtr rk, ErrorCode err,
170 | /* rd_kafka_topic_partition_list_t * */ IntPtr partitions,
171 | IntPtr opaque)
172 | {
173 | var partitionList = SafeKafkaHandle.GetTopicPartitionOffsetList(partitions);
174 | if (err == ErrorCode._ASSIGN_PARTITIONS)
175 | {
176 | var handler = OnPartitionsAssigned;
177 | if (handler != null && handler.GetInvocationList().Length > 0)
178 | {
179 | handler(this, partitionList);
180 | }
181 | else
182 | {
183 | Assign(partitionList);
184 | }
185 | }
186 | if (err == ErrorCode._REVOKE_PARTITIONS)
187 | {
188 | var handler = OnPartitionsRevoked;
189 | if (handler != null && handler.GetInvocationList().Length > 0)
190 | {
191 | handler(this, partitionList);
192 | }
193 | else
194 | {
195 | Unassign();
196 | }
197 | }
198 | }
199 |
200 | public struct OffsetCommitArgs
201 | {
202 | public ErrorCode Error { get; set; }
203 | public IList<TopicPartitionOffset> Offsets { get; set; }
204 | }
205 | public event EventHandler OnOffsetCommit;
206 |
207 | // Explicitly keep reference to delegate so it stays alive
208 | LibRdKafka.CommitCallback CommitDelegate;
209 | internal void CommitCallback(IntPtr rk,
210 | ErrorCode err,
211 | /* rd_kafka_topic_partition_list_t * */ IntPtr offsets,
212 | IntPtr opaque)
213 | {
214 | OnOffsetCommit?.Invoke(this, new OffsetCommitArgs()
215 | {
216 | Error = err,
217 | Offsets = SafeKafkaHandle.GetTopicPartitionOffsetList(offsets)
218 | });
219 | }
220 |
221 | protected override void Dispose(bool disposing)
222 | {
223 | if (disposing)
224 | {
225 | handle.ConsumerClose();
226 | }
227 |
228 | base.Dispose(disposing);
229 | }
230 | }
231 | }
232 |
--------------------------------------------------------------------------------
/src/RdKafka/ErrorCode.cs:
--------------------------------------------------------------------------------
1 | namespace RdKafka
2 | {
3 | /// Internal errors to rdkafka are prefixed with _
4 | public enum ErrorCode
5 | {
6 | /// Begin internal error codes
7 | _BEGIN = -200,
8 | /// Received message is incorrect
9 | _BAD_MSG = -199,
10 | /// Bad/unknown compression
11 | _BAD_COMPRESSION = -198,
12 | /// Broker is going away
13 | _DESTROY = -197,
14 | /// Generic failure
15 | _FAIL = -196,
16 | /// Broker transport failure
17 | _TRANSPORT = -195,
18 | /// Critical system resource
19 | _CRIT_SYS_RESOURCE = -194,
20 | /// Failed to resolve broker
21 | _RESOLVE = -193,
22 | /// Produced message timed out
23 | _MSG_TIMED_OUT = -192,
24 | /// Reached the end of the topic+partition queue on the broker. Not really an error.
25 | _PARTITION_EOF = -191,
26 | /// Permanent: Partition does not exist in cluster.
27 | _UNKNOWN_PARTITION = -190,
28 | /// File or filesystem error
29 | _FS = -189,
30 | /// Permanent: Topic does not exist in cluster.
31 | _UNKNOWN_TOPIC = -188,
32 | /// All broker connections are down.
33 | _ALL_BROKERS_DOWN = -187,
34 | /// Invalid argument, or invalid configuration
35 | _INVALID_ARG = -186,
36 | /// Operation timed out
37 | _TIMED_OUT = -185,
38 | /// Queue is full
39 | _QUEUE_FULL = -184,
40 | /// ISR count < required.acks
41 | _ISR_INSUFF = -183,
42 | /// Broker node update
43 | _NODE_UPDATE = -182,
44 | /// SSL error
45 | _SSL = -181,
46 | /// Waiting for coordinator to become available.
47 | _WAIT_COORD = -180,
48 | /// Unknown client group
49 | _UNKNOWN_GROUP = -179,
50 | /// Operation in progress
51 | _IN_PROGRESS = -178,
52 | /// Previous operation in progress, wait for it to finish.
53 | _PREV_IN_PROGRESS = -177,
54 | /// This operation would interfere with an existing subscription
55 | _EXISTING_SUBSCRIPTION = -176,
56 | /// Assigned partitions (rebalance_cb)
57 | _ASSIGN_PARTITIONS = -175,
58 | /// Revoked partitions (rebalance_cb)
59 | _REVOKE_PARTITIONS = -174,
60 | /// Conflicting use
61 | _CONFLICT = -173,
62 | /// Wrong state
63 | _STATE = -172,
64 | /// Unknown protocol
65 | _UNKNOWN_PROTOCOL = -171,
66 | /// Not implemented
67 | _NOT_IMPLEMENTED = -170,
68 | /// Authentication failure
69 | _AUTHENTICATION = -169,
70 | /// No stored offset
71 | _NO_OFFSET = -168,
72 | /// Outdated
73 | _OUTDATED = -167,
74 | /// Timed out in queue
75 | _TIMED_OUT_QUEUE = -166,
76 |
77 | /// End internal error codes
78 | _END = -100,
79 |
80 | // Kafka broker errors:
81 | /// Unknown broker error
82 | UNKNOWN = -1,
83 | /// Success
84 | NO_ERROR = 0,
85 | /// Offset out of range
86 | OFFSET_OUT_OF_RANGE = 1,
87 | /// Invalid message
88 | INVALID_MSG = 2,
89 | /// Unknown topic or partition
90 | UNKNOWN_TOPIC_OR_PART = 3,
91 | /// Invalid message size
92 | INVALID_MSG_SIZE = 4,
93 | /// Leader not available
94 | LEADER_NOT_AVAILABLE = 5,
95 | /// Not leader for partition
96 | NOT_LEADER_FOR_PARTITION = 6,
97 | /// Request timed out
98 | REQUEST_TIMED_OUT = 7,
99 | /// Broker not available
100 | BROKER_NOT_AVAILABLE = 8,
101 | /// Replica not available
102 | REPLICA_NOT_AVAILABLE = 9,
103 | /// Message size too large
104 | MSG_SIZE_TOO_LARGE = 10,
105 | /// StaleControllerEpochCode
106 | STALE_CTRL_EPOCH = 11,
107 | /// Offset metadata string too large
108 | OFFSET_METADATA_TOO_LARGE = 12,
109 | /// Broker disconnected before response received
110 | NETWORK_EXCEPTION = 13,
111 | /// Group coordinator load in progress
112 | GROUP_LOAD_IN_PROGRESS = 14,
113 | /// Group coordinator not available
114 | GROUP_COORDINATOR_NOT_AVAILABLE = 15,
115 | /// Not coordinator for group
116 | NOT_COORDINATOR_FOR_GROUP = 16,
117 | /// Invalid topic
118 | TOPIC_EXCEPTION = 17,
119 | /// Message batch larger than configured server segment size
120 | RECORD_LIST_TOO_LARGE = 18,
121 | /// Not enough in-sync replicas
122 | NOT_ENOUGH_REPLICAS = 19,
123 | /// Message(s) written to insufficient number of in-sync replicas
124 | NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
125 | /// Invalid required acks value
126 | INVALID_REQUIRED_ACKS = 21,
127 | /// Specified group generation id is not valid
128 | ILLEGAL_GENERATION = 22,
129 | /// Inconsistent group protocol
130 | INCONSISTENT_GROUP_PROTOCOL = 23,
131 | /// Invalid group.id
132 | INVALID_GROUP_ID = 24,
133 | /// Unknown member
134 | UNKNOWN_MEMBER_ID = 25,
135 | /// Invalid session timeout
136 | INVALID_SESSION_TIMEOUT = 26,
137 | /// Group rebalance in progress
138 | REBALANCE_IN_PROGRESS = 27,
139 | /// Commit offset data size is not valid
140 | INVALID_COMMIT_OFFSET_SIZE = 28,
141 | /// Topic authorization failed
142 | TOPIC_AUTHORIZATION_FAILED = 29,
143 | /// Group authorization failed
144 | GROUP_AUTHORIZATION_FAILED = 30,
145 | /// Cluster authorization failed
146 | CLUSTER_AUTHORIZATION_FAILED = 31,
147 | /// Invalid timestamp
148 | INVALID_TIMESTAMP = 32,
149 | /// Unsupported SASL mechanism
150 | UNSUPPORTED_SASL_MECHANISM = 33,
151 | /// Illegal SASL state
152 | ILLEGAL_SASL_STATE = 34,
153 | /// Unsupported version
154 | UNSUPPORTED_VERSION = 35,
155 | };
156 | }
157 |
--------------------------------------------------------------------------------
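
Not part of the repository sources: a small helper sketch showing how the split encoded above can be tested. Client-internal codes (the `_`-prefixed values) all sit below `_END`, while broker-reported codes start at `UNKNOWN`. The `ErrorCodeHelpers` class is hypothetical.

using RdKafka;

static class ErrorCodeHelpers
{
    // Internal librdkafka codes occupy the range below _END (-100).
    public static bool IsClientSideError(ErrorCode err) => err < ErrorCode._END;

    // _PARTITION_EOF is client-side and usually benign: the consumer
    // simply reached the current end of a topic+partition.
    public static bool IsBenign(ErrorCode err) =>
        err == ErrorCode.NO_ERROR || err == ErrorCode._PARTITION_EOF;
}
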
/src/RdKafka/EventConsumer.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Threading;
3 | using System.Threading.Tasks;
4 |
5 | namespace RdKafka
6 | {
7 | /// <summary>
8 | /// Kafka Consumer that forwards received messages as events to the application.
9 | ///
10 | /// Thin abstraction on top of <see cref="Consumer" />.
11 | /// </summary>
12 | public class EventConsumer : Consumer
13 | {
14 | Task consumerTask;
15 | CancellationTokenSource consumerCts;
16 |
17 | public event EventHandler<Message> OnMessage;
18 | public event EventHandler<ErrorCode> OnConsumerError;
19 | public event EventHandler<TopicPartitionOffset> OnEndReached;
20 |
21 | public EventConsumer(Config config, string brokerList = null)
22 | : base(config, brokerList)
23 | {}
24 |
25 | /// <summary>
26 | /// Start automatically consuming messages and trigger events.
27 | ///
28 | /// Will invoke OnMessage, OnEndReached and OnConsumerError events.
29 | /// </summary>
30 | public void Start()
31 | {
32 | if (consumerTask != null)
33 | {
34 | throw new InvalidOperationException("Consumer task already running");
35 | }
36 |
37 | consumerCts = new CancellationTokenSource();
38 | var ct = consumerCts.Token;
39 | consumerTask = Task.Factory.StartNew(() =>
40 | {
41 | while (!ct.IsCancellationRequested)
42 | {
43 | var messageAndError = Consume(TimeSpan.FromSeconds(1));
44 | if (messageAndError.HasValue)
45 | {
46 | var mae = messageAndError.Value;
47 | if (mae.Error == ErrorCode.NO_ERROR)
48 | {
49 | OnMessage?.Invoke(this, mae.Message);
50 | }
51 | else if (mae.Error == ErrorCode._PARTITION_EOF)
52 | {
53 | OnEndReached?.Invoke(this,
54 | new TopicPartitionOffset()
55 | {
56 | Topic = mae.Message.Topic,
57 | Partition = mae.Message.Partition,
58 | Offset = mae.Message.Offset,
59 | });
60 | }
61 | else
62 | {
63 | OnConsumerError?.Invoke(this, mae.Error);
64 | }
65 | }
66 | }
67 | }, ct, TaskCreationOptions.LongRunning, TaskScheduler.Default);
68 | }
69 |
70 | public async Task Stop()
71 | {
72 | consumerCts.Cancel();
73 | try
74 | {
75 | await consumerTask;
76 | }
77 | finally
78 | {
79 | consumerTask = null;
80 | consumerCts = null;
81 | }
82 | }
83 |
84 |
85 | protected override void Dispose(bool disposing)
86 | {
87 | if (disposing)
88 | {
89 | if (consumerTask != null)
90 | {
91 | Stop().Wait();
92 | }
93 | }
94 |
95 | base.Dispose(disposing);
96 | }
97 | }
98 | }
99 |
--------------------------------------------------------------------------------
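
A minimal consume loop against the EventConsumer above; a sketch, not from the repository sources. It assumes a broker on 127.0.0.1:9092, that `Config` exposes a `GroupId` property, and a `Subscribe(List<string>)` method on the `Consumer` base class, as used in the repository's example projects.

using System;
using System.Collections.Generic;
using System.Text;
using System.Threading.Tasks;
using RdKafka;

class EventConsumerExample
{
    public static async Task Run()
    {
        var config = new Config() { GroupId = "example-group" };
        using (var consumer = new EventConsumer(config, "127.0.0.1:9092"))
        {
            // OnMessage fires for every successfully consumed message.
            consumer.OnMessage += (_, msg) => Console.WriteLine(
                $"{msg.Topic} [{msg.Partition}] @{msg.Offset}: " +
                Encoding.UTF8.GetString(msg.Payload, 0, msg.Payload.Length));

            // _PARTITION_EOF is not treated as an error; it is surfaced here.
            consumer.OnEndReached += (_, tpo) => Console.WriteLine(
                $"Reached end of {tpo.Topic} [{tpo.Partition}] at {tpo.Offset}");

            consumer.OnConsumerError += (_, err) => Console.WriteLine($"Consume error: {err}");

            consumer.Subscribe(new List<string> { "testtopic" });
            consumer.Start();

            await Task.Delay(TimeSpan.FromSeconds(30));
            await consumer.Stop();   // cancels and awaits the internal poll task
        }
    }
}
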
/src/RdKafka/Handle.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Linq;
4 | using System.Runtime.InteropServices;
5 | using System.Threading.Tasks;
6 | using System.Threading;
7 | using RdKafka.Internal;
8 |
9 | namespace RdKafka
10 | {
11 | /// <summary>
12 | /// Shared base of <see cref="Producer" /> and <see cref="Consumer" />.
13 | /// </summary>
14 | public class Handle : IDisposable
15 | {
16 | internal SafeKafkaHandle handle;
17 | LibRdKafka.ErrorCallback ErrorDelegate;
18 | LibRdKafka.LogCallback LogDelegate;
19 | LibRdKafka.StatsCallback StatsDelegate;
20 | Task callbackTask;
21 | CancellationTokenSource callbackCts;
22 |
23 | ~Handle()
24 | {
25 | Dispose(false);
26 | }
27 |
28 | internal void Init(RdKafkaType type, IntPtr config, Config.LogCallback logger)
29 | {
30 | ErrorDelegate = (IntPtr rk, ErrorCode err, string reason, IntPtr opaque) =>
31 | {
32 | OnError?.Invoke(this, new ErrorArgs()
33 | {
34 | ErrorCode = err,
35 | Reason = reason
36 | });
37 | };
38 | LibRdKafka.conf_set_error_cb(config, ErrorDelegate);
39 |
40 | if (logger == null)
41 | {
42 | logger = ((string handle, int level, string fac, string buf) =>
43 | {
44 | var now = DateTime.Now.ToString("yyyy-MM-dd HH:mm:ss.fff");
45 | Console.WriteLine($"{level}|{now}|{handle}|{fac}| {buf}");
46 | });
47 | }
48 |
49 | LogDelegate = (IntPtr rk, int level, string fac, string buf) =>
50 | {
51 | // The log_cb is called very early during construction, before
52 | // SafeKafkaHandle or any of the C# wrappers are ready.
53 | // So we can't really pass rk on, just pass the rk name instead.
54 | var name = Marshal.PtrToStringAnsi(LibRdKafka.name(rk));
55 | logger(name, level, fac, buf);
56 | };
57 | LibRdKafka.conf_set_log_cb(config, LogDelegate);
58 |
59 | StatsDelegate = (IntPtr rk, IntPtr json, UIntPtr json_len, IntPtr opaque) =>
60 | {
61 | OnStatistics?.Invoke(this, Marshal.PtrToStringAnsi(json));
62 | return 0;
63 | };
64 | LibRdKafka.conf_set_stats_cb(config, StatsDelegate);
65 |
66 | handle = SafeKafkaHandle.Create(type, config);
67 |
68 | callbackCts = new CancellationTokenSource();
69 | callbackTask = StartCallbackTask(callbackCts.Token);
70 | }
71 |
72 | public void Dispose()
73 | {
74 | Dispose(true);
75 | GC.SuppressFinalize(this);
76 | }
77 |
78 | protected virtual void Dispose(bool disposing)
79 | {
80 | callbackCts.Cancel();
81 | callbackTask.Wait();
82 |
83 | if (disposing)
84 | {
85 | // Wait until all outstanding sends have completed
86 | while (OutQueueLength > 0)
87 | {
88 | handle.Poll((IntPtr) 100);
89 | }
90 |
91 | handle.Dispose();
92 | }
93 | }
94 |
95 | /// <summary>
96 | /// The name of the handle
97 | /// </summary>
98 | public string Name => handle.GetName();
99 |
100 | /// <summary>
101 | /// The client's broker-assigned group member id
102 | ///
103 | /// Last assigned member id, or empty string if not currently
104 | /// a group member.
105 | /// </summary>
106 | public string MemberId => handle.MemberId();
107 |
108 | /// <summary>
109 | /// The current out queue length
110 | ///
111 | /// The out queue contains messages and requests waiting to be sent to,
112 | /// or acknowledged by, the broker.
113 | /// </summary>
114 | public long OutQueueLength => handle.GetOutQueueLength();
115 |
116 | public int LogLevel
117 | {
118 | set {
119 | handle.SetLogLevel(value);
120 | }
121 | }
122 |
123 | /// <summary>
124 | /// Request Metadata from broker.
125 | ///
126 | /// Parameters:
127 | /// allTopics - if true: request info about all topics in cluster,
128 | /// if false: only request info about locally known topics.
129 | /// onlyForTopic - only request info about this topic
130 | /// includeInternal - include internal topics prefixed with __
131 | /// timeout - maximum response time before failing.
132 | /// </summary>
133 | public Task<Metadata> Metadata (bool allTopics=true, Topic onlyForTopic=null,
134 | bool includeInternal=false, TimeSpan timeout=default(TimeSpan))
135 | => Task.FromResult(handle.Metadata(allTopics, onlyForTopic?.handle, includeInternal, timeout));
136 |
137 | /// <summary>
138 | /// Request lowest and highest offsets for a topic partition from broker.
139 | /// </summary>
140 | public Task<Offsets> QueryWatermarkOffsets(TopicPartition topicPartition, TimeSpan timeout=default(TimeSpan))
141 | => Task.FromResult(handle.QueryWatermarkOffsets(topicPartition.Topic, topicPartition.Partition, timeout));
142 |
143 | public struct ErrorArgs
144 | {
145 | public ErrorCode ErrorCode { get; set; }
146 | public string Reason { get; set; }
147 | }
148 |
149 | /// <summary>
150 | /// Fires on critical errors, e.g. connection failures or all brokers being down.
151 | /// </summary>
152 | public event EventHandler<ErrorArgs> OnError;
153 |
154 | public event EventHandler<string> OnStatistics;
155 |
156 | Task StartCallbackTask(CancellationToken ct)
157 | => Task.Factory.StartNew(() =>
158 | {
159 | while (!ct.IsCancellationRequested)
160 | {
161 | handle.Poll((IntPtr) 1000);
162 | }
163 | }, ct, TaskCreationOptions.LongRunning, TaskScheduler.Default);
164 |
165 | public Task<List<GroupInfo>> ListGroups(TimeSpan timeout)
166 | => Task.FromResult(handle.ListGroups(null, (IntPtr) timeout.TotalMilliseconds));
167 |
168 | public Task<GroupInfo> ListGroup(string group, TimeSpan timeout)
169 | => Task.FromResult(handle.ListGroups(group, (IntPtr) timeout.TotalMilliseconds).Single());
170 | }
171 | }
172 |
--------------------------------------------------------------------------------
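
A sketch of exercising the `Metadata` and `QueryWatermarkOffsets` wrappers above from a `Producer` (any `Handle` subclass works). Not from the sources; it assumes a `Producer(string brokerList)` constructor as in the example projects, and that `TopicPartition` can be initialized with `Topic`/`Partition`.

using System;
using System.Threading.Tasks;
using RdKafka;

class MetadataExample
{
    public static async Task Run()
    {
        using (var producer = new Producer("127.0.0.1:9092"))
        {
            Metadata meta = await producer.Metadata(allTopics: true);
            Console.WriteLine($"Originating broker: {meta.OriginatingBrokerName}");
            foreach (var broker in meta.Brokers)
                Console.WriteLine($"  broker {broker.BrokerId}: {broker.Host}:{broker.Port}");
            foreach (var topic in meta.Topics)
                Console.WriteLine($"  topic {topic.Topic}: {topic.Partitions.Count} partition(s)");

            // Lowest and highest offsets currently on the broker for one partition.
            var offsets = await producer.QueryWatermarkOffsets(
                new TopicPartition { Topic = "testtopic", Partition = 0 });
            Console.WriteLine($"Watermarks: low={offsets.Low} high={offsets.High}");
        }
    }
}
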
/src/RdKafka/IDeliveryHandler.cs:
--------------------------------------------------------------------------------
1 | using System;
2 |
3 | namespace RdKafka
4 | {
5 | /// <summary>
6 | /// Used by the topics of the producer client to notify on produce request progress.
7 | /// </summary>
8 | /// <remarks>Methods of this interface will be executed in an RdKafka-internal thread and will block other operations - consider this when implementing.</remarks>
9 | public interface IDeliveryHandler
10 | {
11 | /// <summary>
12 | /// Invoked if an exception happens for the given produce request.
13 | /// </summary>
14 | /// <param name="exception"></param>
15 | void SetException(Exception exception);
16 |
17 | /// <summary>
18 | /// Invoked when the produce request successfully completes.
19 | /// </summary>
20 | /// <param name="deliveryReport"></param>
21 | void SetResult(DeliveryReport deliveryReport);
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
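
A sketch of a conforming implementation, not from the sources. Per the remarks above, both methods run on an RdKafka-internal thread, so this handler only bumps counters and returns immediately.

using System;
using System.Threading;
using RdKafka;

// Counts delivery outcomes without blocking the librdkafka callback thread.
class CountingDeliveryHandler : IDeliveryHandler
{
    int delivered;
    int failed;

    public int Delivered => Volatile.Read(ref delivered);
    public int Failed => Volatile.Read(ref failed);

    public void SetResult(DeliveryReport deliveryReport)
    {
        Interlocked.Increment(ref delivered);
    }

    public void SetException(Exception exception)
    {
        Interlocked.Increment(ref failed);
    }
}
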
/src/RdKafka/Internal/LibRdKafka.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.IO;
3 | using System.Text;
4 | using System.Runtime.InteropServices;
5 | #if NET451
6 | using System.Reflection;
7 | #endif
8 |
9 | namespace RdKafka.Internal
10 | {
11 | internal static class LibRdKafka
12 | {
13 | const long minVersion = 0x000901ff;
14 |
15 | #if NET451
16 | [DllImport("kernel32", SetLastError = true)]
17 | private static extern IntPtr LoadLibrary(string lpFileName);
18 | #endif
19 |
20 | static LibRdKafka()
21 | {
22 | #if NET451
23 | var is64 = IntPtr.Size == 8;
24 | try {
25 | var baseUri = new Uri(Assembly.GetExecutingAssembly().GetName().CodeBase);
26 | var baseDirectory = Path.GetDirectoryName(baseUri.LocalPath);
27 |
28 | LoadLibrary(Path.Combine(baseDirectory, is64 ? "x64/zlib.dll" : "x86/zlib.dll"));
29 | LoadLibrary(Path.Combine(baseDirectory, is64 ? "x64/librdkafka.dll" : "x86/librdkafka.dll"));
30 | }
31 | catch (Exception) { }
32 | #endif
33 |
34 | _version = NativeMethods.rd_kafka_version;
35 | _version_str = NativeMethods.rd_kafka_version_str;
36 | _get_debug_contexts = NativeMethods.rd_kafka_get_debug_contexts;
37 | _err2str = NativeMethods.rd_kafka_err2str;
38 | _last_error = NativeMethods.rd_kafka_last_error;
39 | _topic_partition_list_new = NativeMethods.rd_kafka_topic_partition_list_new;
40 | _topic_partition_list_destroy = NativeMethods.rd_kafka_topic_partition_list_destroy;
41 | _topic_partition_list_add = NativeMethods.rd_kafka_topic_partition_list_add;
42 | _message_destroy = NativeMethods.rd_kafka_message_destroy;
43 | _conf_new = NativeMethods.rd_kafka_conf_new;
44 | _conf_destroy = NativeMethods.rd_kafka_conf_destroy;
45 | _conf_dup = NativeMethods.rd_kafka_conf_dup;
46 | _conf_set = NativeMethods.rd_kafka_conf_set;
47 | _conf_set_dr_msg_cb = NativeMethods.rd_kafka_conf_set_dr_msg_cb;
48 | _conf_set_rebalance_cb = NativeMethods.rd_kafka_conf_set_rebalance_cb;
49 | _conf_set_error_cb = NativeMethods.rd_kafka_conf_set_error_cb;
50 | _conf_set_offset_commit_cb = NativeMethods.rd_kafka_conf_set_offset_commit_cb;
51 | _conf_set_log_cb = NativeMethods.rd_kafka_conf_set_log_cb;
52 | _conf_set_stats_cb = NativeMethods.rd_kafka_conf_set_stats_cb;
53 | _conf_set_default_topic_conf = NativeMethods.rd_kafka_conf_set_default_topic_conf;
54 | _conf_get = NativeMethods.rd_kafka_conf_get;
55 | _topic_conf_get = NativeMethods.rd_kafka_topic_conf_get;
56 | _conf_dump = NativeMethods.rd_kafka_conf_dump;
57 | _topic_conf_dump = NativeMethods.rd_kafka_topic_conf_dump;
58 | _conf_dump_free = NativeMethods.rd_kafka_conf_dump_free;
59 | _topic_conf_new = NativeMethods.rd_kafka_topic_conf_new;
60 | _topic_conf_dup = NativeMethods.rd_kafka_topic_conf_dup;
61 | _topic_conf_destroy = NativeMethods.rd_kafka_topic_conf_destroy;
62 | _topic_conf_set = NativeMethods.rd_kafka_topic_conf_set;
63 | _topic_conf_set_partitioner_cb = NativeMethods.rd_kafka_topic_conf_set_partitioner_cb;
64 | _topic_partition_available = NativeMethods.rd_kafka_topic_partition_available;
65 | _new = NativeMethods.rd_kafka_new;
66 | _destroy = NativeMethods.rd_kafka_destroy;
67 | _name = NativeMethods.rd_kafka_name;
68 | _memberid = NativeMethods.rd_kafka_memberid;
69 | _topic_new = NativeMethods.rd_kafka_topic_new;
70 | _topic_destroy = NativeMethods.rd_kafka_topic_destroy;
71 | _topic_name = NativeMethods.rd_kafka_topic_name;
72 | _poll = NativeMethods.rd_kafka_poll;
73 | _query_watermark_offsets = NativeMethods.rd_kafka_query_watermark_offsets;
74 | _get_watermark_offsets = NativeMethods.rd_kafka_get_watermark_offsets;
75 | _mem_free = NativeMethods.rd_kafka_mem_free;
76 | _subscribe = NativeMethods.rd_kafka_subscribe;
77 | _unsubscribe = NativeMethods.rd_kafka_unsubscribe;
78 | _subscription = NativeMethods.rd_kafka_subscription;
79 | _consumer_poll = NativeMethods.rd_kafka_consumer_poll;
80 | _consumer_close = NativeMethods.rd_kafka_consumer_close;
81 | _assign = NativeMethods.rd_kafka_assign;
82 | _assignment = NativeMethods.rd_kafka_assignment;
83 | _commit = NativeMethods.rd_kafka_commit;
84 | _committed = NativeMethods.rd_kafka_committed;
85 | _position = NativeMethods.rd_kafka_position;
86 | _produce = NativeMethods.rd_kafka_produce;
87 | _metadata = NativeMethods.rd_kafka_metadata;
88 | _metadata_destroy = NativeMethods.rd_kafka_metadata_destroy;
89 | _list_groups = NativeMethods.rd_kafka_list_groups;
90 | _group_list_destroy = NativeMethods.rd_kafka_group_list_destroy;
91 | _brokers_add = NativeMethods.rd_kafka_brokers_add;
92 | _set_log_level = NativeMethods.rd_kafka_set_log_level;
93 | _outq_len = NativeMethods.rd_kafka_outq_len;
94 | _wait_destroyed = NativeMethods.rd_kafka_wait_destroyed;
95 |
96 | if ((long) version() < minVersion) {
97 | throw new FileLoadException($"Invalid librdkafka version {(long)version():x}, expected at least {minVersion:x}");
98 | }
99 | }
100 |
101 | [UnmanagedFunctionPointer(callingConvention: CallingConvention.Cdecl)]
102 | internal delegate void DeliveryReportCallback(
103 | IntPtr rk,
104 | /* const rd_kafka_message_t * */ ref rd_kafka_message rkmessage,
105 | IntPtr opaque);
106 |
107 | [UnmanagedFunctionPointer(callingConvention: CallingConvention.Cdecl)]
108 | internal delegate void CommitCallback(IntPtr rk,
109 | ErrorCode err,
110 | /* rd_kafka_topic_partition_list_t * */ IntPtr offsets,
111 | IntPtr opaque);
112 |
113 | [UnmanagedFunctionPointer(callingConvention: CallingConvention.Cdecl)]
114 | internal delegate void ErrorCallback(IntPtr rk,
115 | ErrorCode err, string reason, IntPtr opaque);
116 |
117 | [UnmanagedFunctionPointer(callingConvention: CallingConvention.Cdecl)]
118 | internal delegate void RebalanceCallback(IntPtr rk,
119 | ErrorCode err,
120 | /* rd_kafka_topic_partition_list_t * */ IntPtr partitions,
121 | IntPtr opaque);
122 |
123 | [UnmanagedFunctionPointer(callingConvention: CallingConvention.Cdecl)]
124 | internal delegate void LogCallback(IntPtr rk, int level, string fac, string buf);
125 |
126 | [UnmanagedFunctionPointer(callingConvention: CallingConvention.Cdecl)]
127 | internal delegate int StatsCallback(IntPtr rk, IntPtr json, UIntPtr json_len, IntPtr opaque);
128 |
129 | [UnmanagedFunctionPointer(callingConvention: CallingConvention.Cdecl)]
130 | internal delegate int PartitionerCallback(
131 | /* const rd_kafka_topic_t * */ IntPtr rkt,
132 | IntPtr keydata,
133 | UIntPtr keylen,
134 | int partition_cnt,
135 | IntPtr rkt_opaque,
136 | IntPtr msg_opaque);
137 |
138 |
139 | private static Func<IntPtr> _version;
140 | internal static IntPtr version() => _version();
141 |
142 | private static Func<IntPtr> _version_str;
143 | internal static IntPtr version_str() => _version_str();
144 |
145 | private static Func<IntPtr> _get_debug_contexts;
146 | internal static IntPtr get_debug_contexts() => _get_debug_contexts();
147 |
148 | private static Func<ErrorCode, IntPtr> _err2str;
149 | internal static IntPtr err2str(ErrorCode err) => _err2str(err);
150 |
151 | private static Func<IntPtr, IntPtr> _topic_partition_list_new;
152 | internal static IntPtr topic_partition_list_new(IntPtr size)
153 | => _topic_partition_list_new(size);
154 |
155 | private static Action<IntPtr> _topic_partition_list_destroy;
156 | internal static void topic_partition_list_destroy(IntPtr rkparlist)
157 | => _topic_partition_list_destroy(rkparlist);
158 |
159 | private static Func<IntPtr, string, int, IntPtr> _topic_partition_list_add;
160 | internal static IntPtr topic_partition_list_add(IntPtr rktparlist,
161 | string topic, int partition)
162 | => _topic_partition_list_add(rktparlist, topic, partition);
163 |
164 | private static Func<ErrorCode> _last_error;
165 | internal static ErrorCode last_error() => _last_error();
166 |
167 | private static Action<IntPtr> _message_destroy;
168 | internal static void message_destroy(IntPtr rkmessage) => _message_destroy(rkmessage);
169 |
170 | private static Func<SafeConfigHandle> _conf_new;
171 | internal static SafeConfigHandle conf_new() => _conf_new();
172 |
173 | private static Action<IntPtr> _conf_destroy;
174 | internal static void conf_destroy(IntPtr conf) => _conf_destroy(conf);
175 |
176 | private static Func<IntPtr, IntPtr> _conf_dup;
177 | internal static IntPtr conf_dup(IntPtr conf) => _conf_dup(conf);
178 |
179 | private static Func<IntPtr, string, string, StringBuilder, UIntPtr, ConfRes> _conf_set;
180 | internal static ConfRes conf_set(IntPtr conf, string name,
181 | string value, StringBuilder errstr, UIntPtr errstr_size)
182 | => _conf_set(conf, name, value, errstr, errstr_size);
183 |
184 | private static Action<IntPtr, DeliveryReportCallback> _conf_set_dr_msg_cb;
185 | internal static void conf_set_dr_msg_cb(IntPtr conf, DeliveryReportCallback dr_msg_cb)
186 | => _conf_set_dr_msg_cb(conf, dr_msg_cb);
187 |
188 | private static Action<IntPtr, RebalanceCallback> _conf_set_rebalance_cb;
189 | internal static void conf_set_rebalance_cb(IntPtr conf, RebalanceCallback rebalance_cb)
190 | => _conf_set_rebalance_cb(conf, rebalance_cb);
191 |
192 | private static Action<IntPtr, CommitCallback> _conf_set_offset_commit_cb;
193 | internal static void conf_set_offset_commit_cb(IntPtr conf, CommitCallback commit_cb)
194 | => _conf_set_offset_commit_cb(conf, commit_cb);
195 |
196 | private static Action<IntPtr, ErrorCallback> _conf_set_error_cb;
197 | internal static void conf_set_error_cb(IntPtr conf, ErrorCallback error_cb)
198 | => _conf_set_error_cb(conf, error_cb);
199 |
200 | private static Action<IntPtr, LogCallback> _conf_set_log_cb;
201 | internal static void conf_set_log_cb(IntPtr conf, LogCallback log_cb)
202 | => _conf_set_log_cb(conf, log_cb);
203 |
204 | private static Action<IntPtr, StatsCallback> _conf_set_stats_cb;
205 | internal static void conf_set_stats_cb(IntPtr conf, StatsCallback stats_cb)
206 | => _conf_set_stats_cb(conf, stats_cb);
207 |
208 | private static Action<IntPtr, IntPtr> _conf_set_default_topic_conf;
209 | internal static void conf_set_default_topic_conf(IntPtr conf, IntPtr tconf)
210 | => _conf_set_default_topic_conf(conf, tconf);
211 |
212 | private delegate ConfRes ConfGet(IntPtr conf, string name, StringBuilder dest,
213 | ref UIntPtr dest_size);
214 | private static ConfGet _conf_get;
215 | internal static ConfRes conf_get(IntPtr conf, string name,
216 | StringBuilder dest, ref UIntPtr dest_size)
217 | => _conf_get(conf, name, dest, ref dest_size);
218 |
219 | private static ConfGet _topic_conf_get;
220 | internal static ConfRes topic_conf_get(IntPtr conf, string name,
221 | StringBuilder dest, ref UIntPtr dest_size)
222 | => _topic_conf_get(conf, name, dest, ref dest_size);
223 |
224 | private delegate IntPtr ConfDump(IntPtr conf, out UIntPtr cntp);
225 | private static ConfDump _conf_dump;
226 | internal static IntPtr conf_dump(IntPtr conf, out UIntPtr cntp)
227 | => _conf_dump(conf, out cntp);
228 |
229 | private static ConfDump _topic_conf_dump;
230 | internal static IntPtr topic_conf_dump(IntPtr conf, out UIntPtr cntp)
231 | => _topic_conf_dump(conf, out cntp);
232 |
233 | private static Action<IntPtr, UIntPtr> _conf_dump_free;
234 | internal static void conf_dump_free(IntPtr arr, UIntPtr cnt)
235 | => _conf_dump_free(arr, cnt);
236 |
237 | private static Func<SafeTopicConfigHandle> _topic_conf_new;
238 | internal static SafeTopicConfigHandle topic_conf_new() => _topic_conf_new();
239 |
240 | private static Func<IntPtr, IntPtr> _topic_conf_dup;
241 | internal static IntPtr topic_conf_dup(IntPtr conf) => _topic_conf_dup(conf);
242 |
243 | private static Action<IntPtr> _topic_conf_destroy;
244 | internal static void topic_conf_destroy(IntPtr conf) => _topic_conf_destroy(conf);
245 |
246 | private static Func<IntPtr, string, string, StringBuilder, UIntPtr, ConfRes> _topic_conf_set;
247 | internal static ConfRes topic_conf_set(IntPtr conf, string name,
248 | string value, StringBuilder errstr, UIntPtr errstr_size)
249 | => _topic_conf_set(conf, name, value, errstr, errstr_size);
250 |
251 | private static Action<IntPtr, PartitionerCallback> _topic_conf_set_partitioner_cb;
252 | internal static void topic_conf_set_partitioner_cb(
253 | IntPtr topic_conf, PartitionerCallback partitioner_cb)
254 | => _topic_conf_set_partitioner_cb(topic_conf, partitioner_cb);
255 |
256 | private static Func<IntPtr, int, bool> _topic_partition_available;
257 | internal static bool topic_partition_available(IntPtr rkt, int partition)
258 | => _topic_partition_available(rkt, partition);
259 |
260 | private static Func<RdKafkaType, IntPtr, StringBuilder, UIntPtr, SafeKafkaHandle> _new;
261 | internal static SafeKafkaHandle kafka_new(RdKafkaType type, IntPtr conf,
262 | StringBuilder errstr, UIntPtr errstr_size)
263 | => _new(type, conf, errstr, errstr_size);
264 |
265 | private static Action<IntPtr> _destroy;
266 | internal static void destroy(IntPtr rk) => _destroy(rk);
267 |
268 | private static Func<IntPtr, IntPtr> _name;
269 | internal static IntPtr name(IntPtr rk) => _name(rk);
270 |
271 | private static Func<IntPtr, IntPtr> _memberid;
272 | internal static IntPtr memberid(IntPtr rk) => _memberid(rk);
273 |
274 | private static Func<IntPtr, string, IntPtr, SafeTopicHandle> _topic_new;
275 | internal static SafeTopicHandle topic_new(IntPtr rk, string topic, IntPtr conf)
276 | => _topic_new(rk, topic, conf);
277 |
278 | private static Action<IntPtr> _topic_destroy;
279 | internal static void topic_destroy(IntPtr rk) => _topic_destroy(rk);
280 |
281 | private static Func<IntPtr, IntPtr> _topic_name;
282 | internal static IntPtr topic_name(IntPtr rkt) => _topic_name(rkt);
283 |
284 | private static Func<IntPtr, IntPtr, IntPtr> _poll;
285 | internal static IntPtr poll(IntPtr rk, IntPtr timeout_ms) => _poll(rk, timeout_ms);
286 |
287 | private delegate ErrorCode QueryOffsets(IntPtr rk, string topic, int partition,
288 | out long low, out long high, IntPtr timeout_ms);
289 | private static QueryOffsets _query_watermark_offsets;
290 | internal static ErrorCode query_watermark_offsets(IntPtr rk, string topic, int partition,
291 | out long low, out long high, IntPtr timeout_ms)
292 | => _query_watermark_offsets(rk, topic, partition, out low, out high, timeout_ms);
293 |
294 | private delegate ErrorCode GetOffsets(IntPtr rk, string topic, int partition,
295 | out long low, out long high);
296 | private static GetOffsets _get_watermark_offsets;
297 | internal static ErrorCode get_watermark_offsets(IntPtr rk, string topic, int partition,
298 | out long low, out long high)
299 | => _get_watermark_offsets(rk, topic, partition, out low, out high);
300 |
301 | private static Action<IntPtr, IntPtr> _mem_free;
302 | internal static void mem_free(IntPtr rk, IntPtr ptr)
303 | => _mem_free(rk, ptr);
304 |
305 | private static Func<IntPtr, IntPtr, ErrorCode> _subscribe;
306 | internal static ErrorCode subscribe(IntPtr rk, IntPtr topics) => _subscribe(rk, topics);
307 |
308 | private static Func<IntPtr, ErrorCode> _unsubscribe;
309 | internal static ErrorCode unsubscribe(IntPtr rk) => _unsubscribe(rk);
310 |
311 | private delegate ErrorCode Subscription(IntPtr rk, out IntPtr topics);
312 | private static Subscription _subscription;
313 | internal static ErrorCode subscription(IntPtr rk, out IntPtr topics)
314 | => _subscription(rk, out topics);
315 |
316 | private static Func<IntPtr, IntPtr, IntPtr> _consumer_poll;
317 | internal static IntPtr consumer_poll(IntPtr rk, IntPtr timeout_ms)
318 | => _consumer_poll(rk, timeout_ms);
319 |
320 | private static Func<IntPtr, ErrorCode> _consumer_close;
321 | internal static ErrorCode consumer_close(IntPtr rk) => _consumer_close(rk);
322 |
323 | private static Func<IntPtr, IntPtr, ErrorCode> _assign;
324 | internal static ErrorCode assign(IntPtr rk, IntPtr partitions)
325 | => _assign(rk, partitions);
326 |
327 | private delegate ErrorCode Assignment(IntPtr rk, out IntPtr topics);
328 | private static Assignment _assignment;
329 | internal static ErrorCode assignment(IntPtr rk, out IntPtr topics)
330 | => _assignment(rk, out topics);
331 |
332 | private static Func<IntPtr, IntPtr, bool, ErrorCode> _commit;
333 | internal static ErrorCode commit(IntPtr rk, IntPtr offsets, bool async)
334 | => _commit(rk, offsets, async);
335 |
336 | private static Func<IntPtr, IntPtr, IntPtr, ErrorCode> _committed;
337 | internal static ErrorCode committed(IntPtr rk, IntPtr partitions, IntPtr timeout_ms)
338 | => _committed(rk, partitions, timeout_ms);
339 |
340 | private static Func<IntPtr, IntPtr, ErrorCode> _position;
341 | internal static ErrorCode position(IntPtr rk, IntPtr partitions)
342 | => _position(rk, partitions);
343 |
344 | private static Func<IntPtr, int, IntPtr, byte[], UIntPtr, byte[], UIntPtr, IntPtr, IntPtr> _produce;
346 | internal static IntPtr produce(
347 | IntPtr rkt,
348 | int partition,
349 | IntPtr msgflags,
350 | byte[] payload, UIntPtr len,
351 | byte[] key, UIntPtr keylen,
352 | IntPtr msg_opaque)
353 | => _produce(rkt, partition, msgflags, payload, len, key, keylen, msg_opaque);
354 |
355 | private delegate ErrorCode Metadata(IntPtr rk, bool all_topics,
356 | IntPtr only_rkt, out IntPtr metadatap, IntPtr timeout_ms);
357 | private static Metadata _metadata;
358 | internal static ErrorCode metadata(IntPtr rk, bool all_topics,
359 | IntPtr only_rkt, out IntPtr metadatap, IntPtr timeout_ms)
360 | => _metadata(rk, all_topics, only_rkt, out metadatap, timeout_ms);
361 |
362 | private static Action<IntPtr> _metadata_destroy;
363 | internal static void metadata_destroy(IntPtr metadata)
364 | => _metadata_destroy(metadata);
365 |
366 | private delegate ErrorCode ListGroups(IntPtr rk, string group,
367 | out IntPtr grplistp, IntPtr timeout_ms);
368 | private static ListGroups _list_groups;
369 | internal static ErrorCode list_groups(IntPtr rk, string group,
370 | out IntPtr grplistp, IntPtr timeout_ms)
371 | => _list_groups(rk, group, out grplistp, timeout_ms);
372 |
373 | private static Action<IntPtr> _group_list_destroy;
374 | internal static void group_list_destroy(IntPtr grplist)
375 | => _group_list_destroy(grplist);
376 |
377 | private static Func<IntPtr, string, IntPtr> _brokers_add;
378 | internal static IntPtr brokers_add(IntPtr rk, string brokerlist)
379 | => _brokers_add(rk, brokerlist);
380 |
381 | private static Action<IntPtr, IntPtr> _set_log_level;
382 | internal static void set_log_level(IntPtr rk, IntPtr level)
383 | => _set_log_level(rk, level);
384 |
385 | private static Func<IntPtr, IntPtr> _outq_len;
386 | internal static IntPtr outq_len(IntPtr rk) => _outq_len(rk);
387 |
388 | private static Func<IntPtr, IntPtr> _wait_destroyed;
389 | internal static IntPtr wait_destroyed(IntPtr timeout_ms)
390 | => _wait_destroyed(timeout_ms);
391 |
392 | private class NativeMethods
393 | {
394 | private const string DllName = "librdkafka";
395 |
396 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
397 | internal static extern IntPtr rd_kafka_version();
398 |
399 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
400 | internal static extern IntPtr rd_kafka_version_str();
401 |
402 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
403 | internal static extern IntPtr rd_kafka_get_debug_contexts();
404 |
405 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
406 | internal static extern IntPtr rd_kafka_err2str(ErrorCode err);
407 |
408 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
409 | internal static extern ErrorCode rd_kafka_last_error();
410 |
411 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
412 | internal static extern /* rd_kafka_topic_partition_list_t * */ IntPtr
413 | rd_kafka_topic_partition_list_new(IntPtr size);
414 |
415 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
416 | internal static extern void rd_kafka_topic_partition_list_destroy(
417 | /* rd_kafka_topic_partition_list_t * */ IntPtr rkparlist);
418 |
419 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
420 | internal static extern /* rd_kafka_topic_partition_t * */ IntPtr
421 | rd_kafka_topic_partition_list_add(
422 | /* rd_kafka_topic_partition_list_t * */ IntPtr rktparlist,
423 | string topic, int partition);
424 |
425 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
426 | internal static extern void rd_kafka_message_destroy(
427 | /* rd_kafka_message_t * */ IntPtr rkmessage);
428 |
429 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
430 | internal static extern SafeConfigHandle rd_kafka_conf_new();
431 |
432 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
433 | internal static extern void rd_kafka_conf_destroy(IntPtr conf);
434 |
435 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
436 | internal static extern IntPtr rd_kafka_conf_dup(IntPtr conf);
437 |
438 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
439 | internal static extern ConfRes rd_kafka_conf_set(
440 | IntPtr conf,
441 | [MarshalAs(UnmanagedType.LPStr)] string name,
442 | [MarshalAs(UnmanagedType.LPStr)] string value,
443 | StringBuilder errstr,
444 | UIntPtr errstr_size);
445 |
446 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
447 | internal static extern void rd_kafka_conf_set_dr_msg_cb(
448 | IntPtr conf,
449 | DeliveryReportCallback dr_msg_cb);
450 |
451 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
452 | internal static extern void rd_kafka_conf_set_rebalance_cb(
453 | IntPtr conf, RebalanceCallback rebalance_cb);
454 |
455 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
456 | internal static extern void rd_kafka_conf_set_offset_commit_cb(
457 | IntPtr conf, CommitCallback commit_cb);
458 |
459 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
460 | internal static extern void rd_kafka_conf_set_error_cb(
461 | IntPtr conf, ErrorCallback error_cb);
462 |
463 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
464 | internal static extern void rd_kafka_conf_set_log_cb(IntPtr conf, LogCallback log_cb);
465 |
466 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
467 | internal static extern void rd_kafka_conf_set_stats_cb(IntPtr conf, StatsCallback stats_cb);
468 |
469 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
470 | internal static extern void rd_kafka_conf_set_default_topic_conf(
471 | IntPtr conf, IntPtr tconf);
472 |
473 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
474 | internal static extern ConfRes rd_kafka_conf_get(
475 | IntPtr conf,
476 | [MarshalAs(UnmanagedType.LPStr)] string name,
477 | StringBuilder dest, ref UIntPtr dest_size);
478 |
479 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
480 | internal static extern ConfRes rd_kafka_topic_conf_get(
481 | IntPtr conf,
482 | [MarshalAs(UnmanagedType.LPStr)] string name,
483 | StringBuilder dest, ref UIntPtr dest_size);
484 |
485 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
486 | internal static extern /* const char ** */ IntPtr rd_kafka_conf_dump(
487 | IntPtr conf, /* size_t * */ out UIntPtr cntp);
488 |
489 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
490 | internal static extern /* const char ** */ IntPtr rd_kafka_topic_conf_dump(
491 | IntPtr conf, out UIntPtr cntp);
492 |
493 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
494 | internal static extern void rd_kafka_conf_dump_free(/* const char ** */ IntPtr arr, UIntPtr cnt);
495 |
496 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
497 | internal static extern SafeTopicConfigHandle rd_kafka_topic_conf_new();
498 |
499 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
500 | internal static extern /* rd_kafka_topic_conf_t * */ IntPtr rd_kafka_topic_conf_dup(
501 | /* const rd_kafka_topic_conf_t * */ IntPtr conf);
502 |
503 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
504 | internal static extern void rd_kafka_topic_conf_destroy(IntPtr conf);
505 |
506 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
507 | internal static extern ConfRes rd_kafka_topic_conf_set(
508 | IntPtr conf,
509 | [MarshalAs(UnmanagedType.LPStr)] string name,
510 | [MarshalAs(UnmanagedType.LPStr)] string value,
511 | StringBuilder errstr,
512 | UIntPtr errstr_size);
513 |
514 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
515 | internal static extern void rd_kafka_topic_conf_set_partitioner_cb(
516 | IntPtr topic_conf, PartitionerCallback partitioner_cb);
517 |
518 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
519 | internal static extern bool rd_kafka_topic_partition_available(
520 | IntPtr rkt, int partition);
521 |
522 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
523 | internal static extern SafeKafkaHandle rd_kafka_new(
524 | RdKafkaType type, IntPtr conf,
525 | StringBuilder errstr,
526 | UIntPtr errstr_size);
527 |
528 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
529 | internal static extern void rd_kafka_destroy(IntPtr rk);
530 |
531 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
532 | internal static extern /* const char * */ IntPtr rd_kafka_name(IntPtr rk);
533 |
534 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
535 | internal static extern /* char * */ IntPtr rd_kafka_memberid(IntPtr rk);
536 |
537 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
538 | internal static extern SafeTopicHandle rd_kafka_topic_new(
539 | IntPtr rk,
540 | [MarshalAs(UnmanagedType.LPStr)] string topic,
541 | /* rd_kafka_topic_conf_t * */ IntPtr conf);
542 |
543 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
544 | internal static extern void rd_kafka_topic_destroy(IntPtr rk);
545 |
546 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
547 | internal static extern /* const char * */ IntPtr rd_kafka_topic_name(IntPtr rkt);
548 |
549 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
550 | internal static extern IntPtr rd_kafka_poll(IntPtr rk, IntPtr timeout_ms);
551 |
552 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
553 | internal static extern ErrorCode rd_kafka_query_watermark_offsets(IntPtr rk,
554 | [MarshalAs(UnmanagedType.LPStr)] string topic,
555 | int partition, out long low, out long high, IntPtr timeout_ms);
556 |
557 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
558 | internal static extern ErrorCode rd_kafka_get_watermark_offsets(IntPtr rk,
559 | [MarshalAs(UnmanagedType.LPStr)] string topic,
560 | int partition, out long low, out long high);
561 |
562 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
563 | internal static extern void rd_kafka_mem_free(IntPtr rk, IntPtr ptr);
564 |
565 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
566 | internal static extern ErrorCode rd_kafka_subscribe(IntPtr rk,
567 | /* const rd_kafka_topic_partition_list_t * */ IntPtr topics);
568 |
569 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
570 | internal static extern ErrorCode rd_kafka_unsubscribe(IntPtr rk);
571 |
572 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
573 | internal static extern ErrorCode rd_kafka_subscription(IntPtr rk,
574 | /* rd_kafka_topic_partition_list_t ** */ out IntPtr topics);
575 |
576 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
577 | internal static extern /* rd_kafka_message_t * */ IntPtr rd_kafka_consumer_poll(
578 | IntPtr rk, IntPtr timeout_ms);
579 |
580 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
581 | internal static extern ErrorCode rd_kafka_consumer_close(IntPtr rk);
582 |
583 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
584 | internal static extern ErrorCode rd_kafka_assign(IntPtr rk,
585 | /* const rd_kafka_topic_partition_list_t * */ IntPtr partitions);
586 |
587 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
588 | internal static extern ErrorCode rd_kafka_assignment(IntPtr rk,
589 | /* rd_kafka_topic_partition_list_t ** */ out IntPtr topics);
590 |
591 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
592 | internal static extern ErrorCode rd_kafka_commit(
593 | IntPtr rk,
594 | /* const rd_kafka_topic_partition_list_t * */ IntPtr offsets,
595 | bool async);
596 |
597 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
598 | internal static extern ErrorCode rd_kafka_committed(
599 | IntPtr rk, IntPtr partitions, IntPtr timeout_ms);
600 |
601 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
602 | internal static extern ErrorCode rd_kafka_position(
603 | IntPtr rk, IntPtr partitions);
604 |
605 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
606 | internal static extern IntPtr rd_kafka_produce(
607 | IntPtr rkt,
608 | int partition,
609 | IntPtr msgflags,
610 | byte[] payload, UIntPtr len,
611 | byte[] key, UIntPtr keylen,
612 | IntPtr msg_opaque);
613 |
614 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
615 | internal static extern ErrorCode rd_kafka_metadata(
616 | IntPtr rk, bool all_topics,
617 | /* rd_kafka_topic_t * */ IntPtr only_rkt,
618 | /* const struct rd_kafka_metadata ** */ out IntPtr metadatap,
619 | IntPtr timeout_ms);
620 |
621 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
622 | internal static extern void rd_kafka_metadata_destroy(
623 | /* const struct rd_kafka_metadata * */ IntPtr metadata);
624 |
625 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
626 | internal static extern ErrorCode rd_kafka_list_groups(
627 | IntPtr rk, string group, out IntPtr grplistp,
628 | IntPtr timeout_ms);
629 |
630 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
631 | internal static extern void rd_kafka_group_list_destroy(
632 | IntPtr grplist);
633 |
634 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
635 | internal static extern IntPtr rd_kafka_brokers_add(IntPtr rk,
636 | [MarshalAs(UnmanagedType.LPStr)] string brokerlist);
637 |
638 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
639 | internal static extern void rd_kafka_set_log_level(IntPtr rk, IntPtr level);
640 |
641 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
642 | internal static extern IntPtr rd_kafka_outq_len(IntPtr rk);
643 |
644 | [DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
645 | internal static extern IntPtr rd_kafka_wait_destroyed(IntPtr timeout_ms);
646 | }
647 | }
648 | }
649 |
--------------------------------------------------------------------------------
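
The file above binds every librdkafka export to a static delegate field once, in the type initializer, and routes all call sites through the fields rather than the `DllImport` stubs. A reduced sketch of the same pattern, not from the sources; the `mylib`/`my_lib_version` names are hypothetical.

using System;
using System.IO;
using System.Runtime.InteropServices;

internal static class MyLib
{
    // Call sites go through the delegate field, so the binding could be
    // swapped (per platform, or for tests) without touching callers.
    private static Func<IntPtr> _version;
    internal static IntPtr version() => _version();

    static MyLib()
    {
        _version = NativeMethods.my_lib_version;   // bind once at load time

        if ((long) version() < 0x000100ff)         // fail fast, like minVersion above
            throw new FileLoadException("mylib too old");
    }

    private static class NativeMethods
    {
        [DllImport("mylib", CallingConvention = CallingConvention.Cdecl)]
        internal static extern IntPtr my_lib_version();
    }
}
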
/src/RdKafka/Internal/Metadata.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Runtime.InteropServices;
3 |
4 | namespace RdKafka.Internal
5 | {
6 | [StructLayout(LayoutKind.Sequential)]
7 | struct rd_kafka_metadata_broker {
8 | internal int id;
9 | internal string host;
10 | internal int port;
11 | }
12 |
13 | [StructLayout(LayoutKind.Sequential)]
14 | struct rd_kafka_metadata_partition {
15 | internal int id;
16 | internal ErrorCode err;
17 | internal int leader;
18 | internal int replica_cnt;
19 | internal /* int32_t * */ IntPtr replicas;
20 | internal int isr_cnt;
21 | internal /* int32_t * */ IntPtr isrs;
22 | }
23 |
24 | [StructLayout(LayoutKind.Sequential)]
25 | struct rd_kafka_metadata_topic {
26 | internal string topic;
27 | internal int partition_cnt;
28 | internal /* struct rd_kafka_metadata_partition * */ IntPtr partitions;
29 | internal ErrorCode err;
30 | }
31 |
32 | [StructLayout(LayoutKind.Sequential)]
33 | struct rd_kafka_metadata {
34 | internal int broker_cnt;
35 | internal /* struct rd_kafka_metadata_broker * */ IntPtr brokers;
36 | internal int topic_cnt;
37 | internal /* struct rd_kafka_metadata_topic * */ IntPtr topics;
38 | internal int orig_broker_id;
39 | [MarshalAs(UnmanagedType.LPStr)]
40 | internal string orig_broker_name;
41 | };
42 |
43 | [StructLayout(LayoutKind.Sequential)]
44 | struct rd_kafka_group_member_info
45 | {
46 | internal string member_id;
47 | internal string client_id;
48 | internal string client_host;
49 | internal IntPtr member_metadata;
50 | internal IntPtr member_metadata_size;
51 | internal IntPtr member_assignment;
52 | internal IntPtr member_assignment_size;
53 | };
54 |
55 | [StructLayout(LayoutKind.Sequential)]
56 | struct rd_kafka_group_info
57 | {
58 | internal rd_kafka_metadata_broker broker;
59 | internal string group;
60 | internal ErrorCode err;
61 | internal string state;
62 | internal string protocol_type;
63 | internal string protocol;
64 | internal IntPtr members;
65 | internal int member_cnt;
66 | };
67 |
68 | [StructLayout(LayoutKind.Sequential)]
69 | struct rd_kafka_group_list
70 | {
71 | internal IntPtr groups;
72 | internal int group_cnt;
73 | };
74 | }
75 |
--------------------------------------------------------------------------------
/src/RdKafka/Internal/SafeConfigHandle.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Linq;
4 | using System.Runtime.InteropServices;
5 | using System.Text;
6 |
7 | namespace RdKafka.Internal
8 | {
9 | enum ConfRes {
10 | Unknown = -2, /* Unknown configuration name. */
11 | Invalid = -1, /* Invalid configuration value. */
12 | Ok = 0 /* Configuration okay */
13 | }
14 |
15 | class SafeConfigHandle : SafeHandleZeroIsInvalid
16 | {
17 | private SafeConfigHandle()
18 | {
19 | }
20 |
21 | internal static SafeConfigHandle Create()
22 | {
23 | var ch = LibRdKafka.conf_new();
24 | if (ch.IsInvalid)
25 | {
26 | throw new Exception("Failed to create config");
27 | }
28 | return ch;
29 | }
30 |
31 | protected override bool ReleaseHandle()
32 | {
33 | LibRdKafka.conf_destroy(handle);
34 | return true;
35 | }
36 |
37 | internal IntPtr Dup() => LibRdKafka.conf_dup(handle);
38 |
39 | internal Dictionary<string, string> Dump()
40 | {
41 | UIntPtr cntp = (UIntPtr) 0;
42 | IntPtr data = LibRdKafka.conf_dump(handle, out cntp);
43 |
44 | if (data == IntPtr.Zero)
45 | {
46 | throw new Exception("Zero data");
47 | }
48 |
49 | try
50 | {
51 | if (((int) cntp & 1) != 0)
52 | {
53 | // Expect Key -> Value, so even number of strings
54 | throw new Exception("Invalid number of config entries");
55 | }
56 |
57 | var dict = new Dictionary<string, string>();
58 | for (int i = 0; i < (int) cntp / 2; i++)
59 | {
60 | dict.Add(Marshal.PtrToStringAnsi(Marshal.ReadIntPtr(data, 2 * i * Marshal.SizeOf<IntPtr>())),
61 | Marshal.PtrToStringAnsi(Marshal.ReadIntPtr(data, (2 * i + 1) * Marshal.SizeOf<IntPtr>())));
62 | }
63 | // Filter out callback pointers
64 | return dict.Where(kv => !kv.Key.EndsWith("_cb")).ToDictionary(kv => kv.Key, kv => kv.Value);
65 | }
66 | finally
67 | {
68 | LibRdKafka.conf_dump_free(data, cntp);
69 | }
70 | }
71 |
72 | internal void Set(string name, string value)
73 | {
74 | // TODO: Constant instead of 512?
75 | var errorStringBuilder = new StringBuilder(512);
76 | ConfRes res = LibRdKafka.conf_set(handle, name, value,
77 | errorStringBuilder, (UIntPtr) errorStringBuilder.Capacity);
78 | if (res == ConfRes.Ok)
79 | {
80 | return;
81 | }
82 | else if (res == ConfRes.Invalid)
83 | {
84 | throw new ArgumentException(errorStringBuilder.ToString());
85 | }
86 | else if (res == ConfRes.Unknown)
87 | {
88 | throw new InvalidOperationException(errorStringBuilder.ToString());
89 | }
90 | else
91 | {
92 | throw new Exception("Unknown error while setting configuration property");
93 | }
94 | }
95 |
96 | internal string Get(string name)
97 | {
98 | UIntPtr destSize = (UIntPtr) 0;
99 | StringBuilder sb = null;
100 |
101 | ConfRes res = LibRdKafka.conf_get(handle, name, null, ref destSize);
102 | if (res == ConfRes.Ok)
103 | {
104 | sb = new StringBuilder((int) destSize);
105 | res = LibRdKafka.conf_get(handle, name, sb, ref destSize);
106 | }
107 | if (res != ConfRes.Ok)
108 | {
109 | if (res == ConfRes.Unknown)
110 | {
111 | throw new InvalidOperationException($"No such configuration property: {name}");
112 | }
113 | throw new Exception("Unknown error while getting configuration property");
114 | }
115 | return sb?.ToString();
116 | }
117 | }
118 | }
119 |
--------------------------------------------------------------------------------
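
How `Set`/`Get` surface to callers; a sketch, assuming the public `Config` class forwards an indexer to `SafeConfigHandle.Set` and `Get` (as Config.cs does in this repository). An unknown property name raises `InvalidOperationException` and an invalid value raises `ArgumentException`, matching the `ConfRes` branches above.

using System;
using RdKafka;

class ConfigExample
{
    public static void Run()
    {
        var config = new Config();
        config["queue.buffering.max.ms"] = "20";              // SafeConfigHandle.Set
        Console.WriteLine(config["queue.buffering.max.ms"]);  // SafeConfigHandle.Get

        try
        {
            config["no.such.property"] = "1";
        }
        catch (InvalidOperationException e)  // ConfRes.Unknown
        {
            Console.WriteLine("Unknown property: " + e.Message);
        }
        catch (ArgumentException e)          // ConfRes.Invalid
        {
            Console.WriteLine("Invalid value: " + e.Message);
        }
    }
}
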
/src/RdKafka/Internal/SafeHandleZeroIsInvalid.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Runtime.InteropServices;
3 |
4 | namespace RdKafka
5 | {
6 | abstract class SafeHandleZeroIsInvalid : SafeHandle
7 | {
8 | internal SafeHandleZeroIsInvalid() : base(IntPtr.Zero, true) { }
9 |
10 | internal SafeHandleZeroIsInvalid(bool ownsHandle) : base(IntPtr.Zero, ownsHandle) { }
11 |
12 | public override bool IsInvalid => handle == IntPtr.Zero;
13 |
14 | protected override bool ReleaseHandle() => true;
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/src/RdKafka/Internal/SafeKafkaHandle.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Linq;
4 | using System.Text;
5 | using System.Runtime.InteropServices;
6 |
7 | namespace RdKafka.Internal
8 | {
9 | enum RdKafkaType
10 | {
11 | Producer,
12 | Consumer
13 | }
14 |
15 | [StructLayout(LayoutKind.Sequential)]
16 | struct rd_kafka_message
17 | {
18 | internal ErrorCode err; /* Non-zero for error signaling. */
19 | internal /* rd_kafka_topic_t * */ IntPtr rkt; /* Topic */
20 | internal int partition; /* Partition */
21 | internal /* void * */ IntPtr payload; /* err==0: Message payload
22 | * err!=0: Error string */
23 | internal UIntPtr len; /* err==0: Message payload length
24 | * err!=0: Error string length */
25 | internal /* void * */ IntPtr key; /* err==0: Optional message key */
26 | internal UIntPtr key_len; /* err==0: Optional message key length */
27 | internal long offset; /* Consume:
28 | * Message offset (or offset for error
29 | * if err!=0 if applicable).
30 | * dr_msg_cb:
31 | * Message offset assigned by broker.
32 | * If produce.offset.report is set then
33 | * each message will have this field set,
34 | * otherwise only the last message in
35 | * each produced internal batch will
36 | * have this field set, otherwise 0. */
37 | internal /* void * */ IntPtr _private; /* Consume:
38 | * rdkafka private pointer: DO NOT MODIFY
39 | * dr_msg_cb:
40 | * mgs_opaque from produce() call */
41 | }
42 |
43 | [StructLayout(LayoutKind.Sequential)]
44 | internal struct rd_kafka_topic_partition
45 | {
46 | internal string topic;
47 | internal int partition;
48 | internal long offset;
49 | /* void * */ IntPtr metadata;
50 | UIntPtr metadata_size;
51 | /* void * */ IntPtr opaque;
52 | ErrorCode err; /* Error code, depending on use. */
53 | /* void * */ IntPtr _private; /* INTERNAL USE ONLY,
54 | * INITIALIZE TO ZERO, DO NOT TOUCH */
55 | };
56 |
57 | [StructLayout(LayoutKind.Sequential)]
58 | struct rd_kafka_topic_partition_list
59 | {
60 | internal int cnt; /* Current number of elements */
61 | internal int size; /* Allocated size */
62 | internal /* rd_kafka_topic_partition_t * */ IntPtr elems;
63 | };
64 |
65 | internal sealed class SafeKafkaHandle : SafeHandleZeroIsInvalid
66 | {
67 | const int RD_KAFKA_PARTITION_UA = -1;
68 |
69 | private SafeKafkaHandle() {}
70 |
71 | internal static SafeKafkaHandle Create(RdKafkaType type, IntPtr config)
72 | {
73 | var errorStringBuilder = new StringBuilder(512);
74 | var skh = LibRdKafka.kafka_new(type, config, errorStringBuilder,
75 | (UIntPtr) errorStringBuilder.Capacity);
76 | if (skh.IsInvalid)
77 | {
78 | throw new InvalidOperationException(errorStringBuilder.ToString());
79 | }
80 | return skh;
81 | }
82 |
83 | protected override bool ReleaseHandle()
84 | {
85 | LibRdKafka.destroy(handle);
86 | return true;
87 | }
88 |
89 | internal string GetName() => Marshal.PtrToStringAnsi(LibRdKafka.name(handle));
90 |
91 | internal long GetOutQueueLength() => (long)LibRdKafka.outq_len(handle);
92 |
93 | internal long AddBrokers(string brokers) => (long)LibRdKafka.brokers_add(handle, brokers);
94 |
95 | internal long Poll(IntPtr timeoutMs) => (long)LibRdKafka.poll(handle, timeoutMs);
96 |
97 | internal SafeTopicHandle Topic(string topic, IntPtr config)
98 | {
99 | // Increase the refcount to this handle to keep it alive for
100 | // at least as long as the topic handle.
101 | // Will be decremented by the topic handle ReleaseHandle.
102 | bool success = false;
103 | DangerousAddRef(ref success);
104 | if (!success)
105 | {
106 | LibRdKafka.topic_conf_destroy(config);
107 | throw new Exception("Failed to create topic (DangerousAddRef failed)");
108 | }
109 | var topicHandle = LibRdKafka.topic_new(handle, topic, config);
110 | if (topicHandle.IsInvalid)
111 | {
112 | DangerousRelease();
113 | throw RdKafkaException.FromErr(LibRdKafka.last_error(), "Failed to create topic");
114 | }
115 | topicHandle.kafkaHandle = this;
116 | return topicHandle;
117 | }
118 |
119 | private static int[] MarshalCopy(IntPtr source, int length)
120 | {
121 | int[] res = new int[length];
122 | Marshal.Copy(source, res, 0, length);
123 | return res;
124 | }
125 |
126 | /*
127 | * allTopics - if true: request info about all topics in cluster,
128 | * else: only request info about locally known topics.
129 | * onlyTopic - only request info about this topic
130 | * timeout - maximum response time before failing.
131 | */
132 | internal Metadata Metadata(bool allTopics,
133 | SafeTopicHandle onlyTopic,
134 | bool includeInternal,
135 | TimeSpan timeout)
136 | {
137 | if (timeout == default(TimeSpan))
138 | {
139 | timeout = TimeSpan.FromSeconds(10);
140 | }
141 |
142 | IntPtr metaPtr;
143 | ErrorCode err = LibRdKafka.metadata(
144 | handle, allTopics,
145 | onlyTopic?.DangerousGetHandle() ?? IntPtr.Zero,
146 | /* const struct rd_kafka_metadata ** */ out metaPtr,
147 | (IntPtr) timeout.TotalMilliseconds);
148 |
149 | if (err == ErrorCode.NO_ERROR)
150 | {
151 | try {
152 | var meta = (rd_kafka_metadata) Marshal.PtrToStructure<rd_kafka_metadata>(metaPtr);
153 |
154 | var brokers = Enumerable.Range(0, meta.broker_cnt)
155 | .Select(i => Marshal.PtrToStructure<rd_kafka_metadata_broker>(
156 | meta.brokers + i * Marshal.SizeOf<rd_kafka_metadata_broker>()))
157 | .Select(b => new BrokerMetadata() { BrokerId = b.id, Host = b.host, Port = b.port })
158 | .ToList();
159 |
160 | // TODO: filter our topics starting with __, as those are internal. Maybe add a flag to not ignore them.
161 | var topics = Enumerable.Range(0, meta.topic_cnt)
162 | .Select(i => Marshal.PtrToStructure<rd_kafka_metadata_topic>(
163 | meta.topics + i * Marshal.SizeOf<rd_kafka_metadata_topic>()))
164 | .Where(t => includeInternal || !t.topic.StartsWith("__"))
165 | .Select(t => new TopicMetadata()
166 | {
167 | Topic = t.topic,
168 | Error = t.err,
169 | Partitions =
170 | Enumerable.Range(0, t.partition_cnt)
171 | .Select(j => Marshal.PtrToStructure<rd_kafka_metadata_partition>(
172 | t.partitions + j * Marshal.SizeOf<rd_kafka_metadata_partition>()))
173 | .Select(p => new PartitionMetadata()
174 | {
175 | PartitionId = p.id,
176 | Error = p.err,
177 | Leader = p.leader,
178 | Replicas = MarshalCopy(p.replicas, p.replica_cnt),
179 | InSyncReplicas = MarshalCopy(p.isrs, p.isr_cnt)
180 | })
181 | .ToList()
182 | })
183 | .ToList();
184 |
185 | return new Metadata()
186 | {
187 | Brokers = brokers,
188 | Topics = topics,
189 | OriginatingBrokerId = meta.orig_broker_id,
190 | OriginatingBrokerName = meta.orig_broker_name
191 | };
192 | }
193 | finally
194 | {
195 | LibRdKafka.metadata_destroy(metaPtr);
196 | }
197 | }
198 | else
199 | {
200 | throw RdKafkaException.FromErr(err, "Could not retrieve metadata");
201 | }
202 | }
203 |
204 | internal Offsets QueryWatermarkOffsets(string topic, int partition, TimeSpan timeout)
205 | {
206 | long low;
207 | long high;
208 |
209 | ErrorCode err = LibRdKafka.query_watermark_offsets(handle, topic, partition, out low, out high,
210 | timeout == default(TimeSpan) ? new IntPtr(-1) : (IntPtr) timeout.TotalMilliseconds);
211 | if (err != ErrorCode.NO_ERROR)
212 | {
213 | throw RdKafkaException.FromErr(err, "Failed to query watermark offsets");
214 | }
215 |
216 | return new Offsets { Low = low, High = high };
217 | }
218 |
219 | internal Offsets GetWatermarkOffsets(string topic, int partition)
220 | {
221 | long low;
222 | long high;
223 |
224 | ErrorCode err = LibRdKafka.get_watermark_offsets(handle, topic, partition, out low, out high);
225 | if (err != ErrorCode.NO_ERROR)
226 | {
227 | throw RdKafkaException.FromErr(err, "Failed to get watermark offsets");
228 | }
229 |
230 | return new Offsets { Low = low, High = high };
231 | }
232 |
233 | // Consumer API
234 | internal void Subscribe(ICollection<string> topics)
235 | {
236 | IntPtr list = LibRdKafka.topic_partition_list_new((IntPtr) topics.Count);
237 | if (list == IntPtr.Zero)
238 | {
239 | throw new Exception("Failed to create topic partition list");
240 | }
241 | foreach (string topic in topics)
242 | {
243 | LibRdKafka.topic_partition_list_add(list, topic, RD_KAFKA_PARTITION_UA);
244 | }
245 |
246 | ErrorCode err = LibRdKafka.subscribe(handle, list);
247 | LibRdKafka.topic_partition_list_destroy(list);
248 | if (err != ErrorCode.NO_ERROR)
249 | {
250 | throw RdKafkaException.FromErr(err, "Failed to subscribe to topics");
251 | }
252 | }
253 |
254 | internal void Unsubscribe()
255 | {
256 | ErrorCode err = LibRdKafka.unsubscribe(handle);
257 | if (err != ErrorCode.NO_ERROR)
258 | {
259 | throw RdKafkaException.FromErr(err, "Failed to unsubscribe");
260 | }
261 | }
262 |
263 | internal MessageAndError? ConsumerPoll(IntPtr timeoutMs)
264 | {
265 | IntPtr msgPtr = LibRdKafka.consumer_poll(handle, timeoutMs);
266 | if (msgPtr == IntPtr.Zero)
267 | {
268 | return null;
269 | }
270 | var msg = Marshal.PtrToStructure<rd_kafka_message>(msgPtr);
271 | byte[] payload = null;
272 | byte[] key = null;
273 | if (msg.payload != IntPtr.Zero)
274 | {
275 | payload = new byte[(int) msg.len];
276 | Marshal.Copy(msg.payload, payload, 0, (int) msg.len);
277 | }
278 | if (msg.key != IntPtr.Zero)
279 | {
280 | key = new byte[(int) msg.key_len];
281 | Marshal.Copy(msg.key, key, 0, (int) msg.key_len);
282 | }
283 | string topic = null;
284 | if (msg.rkt != IntPtr.Zero)
285 | {
286 | topic = Marshal.PtrToStringAnsi(LibRdKafka.topic_name(msg.rkt));
287 | }
288 | LibRdKafka.message_destroy(msgPtr);
289 |
290 | var message = new Message()
291 | {
292 | Topic = topic,
293 | Partition = msg.partition,
294 | Offset = msg.offset,
295 | Payload = payload,
296 | Key = key
297 | };
298 |
299 | return new MessageAndError()
300 | {
301 | Message = message,
302 | Error = msg.err
303 | };
304 | }
305 |
306 | internal void ConsumerClose()
307 | {
308 | ErrorCode err = LibRdKafka.consumer_close(handle);
309 | if (err != ErrorCode.NO_ERROR)
310 | {
311 | throw RdKafkaException.FromErr(err, "Failed to close consumer");
312 | }
313 | }
314 |
315 | internal List<TopicPartition> GetAssignment()
316 | {
317 | IntPtr listPtr = IntPtr.Zero;
318 | ErrorCode err = LibRdKafka.assignment(handle, out listPtr);
319 | if (err != ErrorCode.NO_ERROR)
320 | {
321 | throw RdKafkaException.FromErr(err, "Failed to get assignment");
322 | }
323 | // TODO: need to free anything here?
324 | return GetTopicPartitionList(listPtr);
325 | }
326 |
327 | internal List<string> GetSubscription()
328 | {
329 | IntPtr listPtr = IntPtr.Zero;
330 | ErrorCode err = LibRdKafka.subscription(handle, out listPtr);
331 | if (err != ErrorCode.NO_ERROR)
332 | {
333 | throw RdKafkaException.FromErr(err, "Failed to get subscription");
334 | }
335 | // TODO: need to free anything here?
336 | return GetTopicList(listPtr);
337 | }
338 |
339 | internal void Assign(ICollection<TopicPartitionOffset> partitions)
340 | {
341 | IntPtr list = IntPtr.Zero;
342 | if (partitions != null)
343 | {
344 | list = LibRdKafka.topic_partition_list_new((IntPtr) partitions.Count);
345 | if (list == IntPtr.Zero)
346 | {
347 | throw new Exception("Failed to create topic partition list");
348 | }
349 | foreach (var partition in partitions)
350 | {
351 | IntPtr ptr = LibRdKafka.topic_partition_list_add(list, partition.Topic, partition.Partition);
352 | Marshal.WriteInt64(ptr,
353 | (int) Marshal.OffsetOf<rd_kafka_topic_partition>("offset"),
354 | partition.Offset);
355 | }
356 | }
357 |
358 | ErrorCode err = LibRdKafka.assign(handle, list);
359 | if (list != IntPtr.Zero)
360 | {
361 | LibRdKafka.topic_partition_list_destroy(list);
362 | }
363 | if (err != ErrorCode.NO_ERROR)
364 | {
365 | throw RdKafkaException.FromErr(err, "Failed to assign partitions");
366 | }
367 | }
368 |
369 | internal void Commit()
370 | {
371 | ErrorCode err = LibRdKafka.commit(handle, IntPtr.Zero, false);
372 | if (err != ErrorCode.NO_ERROR)
373 | {
374 | throw RdKafkaException.FromErr(err, "Failed to commit offsets");
375 | }
376 | }
377 |
378 | internal void Commit(ICollection<TopicPartitionOffset> offsets)
379 | {
380 | IntPtr list = LibRdKafka.topic_partition_list_new((IntPtr) offsets.Count);
381 | if (list == IntPtr.Zero)
382 | {
383 | throw new Exception("Failed to create offset commit list");
384 | }
385 | foreach (var offset in offsets)
386 | {
387 | IntPtr ptr = LibRdKafka.topic_partition_list_add(list, offset.Topic, offset.Partition);
388 | Marshal.WriteInt64(ptr,
389 | (int) Marshal.OffsetOf<rd_kafka_topic_partition>("offset"),
390 | offset.Offset);
391 | }
392 | ErrorCode err = LibRdKafka.commit(handle, list, false);
393 | LibRdKafka.topic_partition_list_destroy(list);
394 | if (err != ErrorCode.NO_ERROR)
395 | {
396 | throw RdKafkaException.FromErr(err, "Failed to commit offsets");
397 | }
398 | }
399 |
400 | internal List<TopicPartitionOffset> Committed(ICollection<TopicPartition> partitions, IntPtr timeout_ms)
401 | {
402 | IntPtr list = LibRdKafka.topic_partition_list_new((IntPtr) partitions.Count);
403 | if (list == IntPtr.Zero)
404 | {
405 | throw new Exception("Failed to create committed partition list");
406 | }
407 | foreach (var partition in partitions)
408 | {
409 | LibRdKafka.topic_partition_list_add(list, partition.Topic, partition.Partition);
410 | }
411 | ErrorCode err = LibRdKafka.committed(handle, list, timeout_ms);
412 | var result = GetTopicPartitionOffsetList(list);
413 | LibRdKafka.topic_partition_list_destroy(list);
414 | if (err != ErrorCode.NO_ERROR)
415 | {
416 | throw RdKafkaException.FromErr(err, "Failed to fetch committed offsets");
417 | }
418 | return result;
419 | }
420 |
421 | internal List<TopicPartitionOffset> Position(ICollection<TopicPartition> partitions)
422 | {
423 | IntPtr list = LibRdKafka.topic_partition_list_new((IntPtr) partitions.Count);
424 | if (list == IntPtr.Zero)
425 | {
426 | throw new Exception("Failed to create position list");
427 | }
428 | foreach (var partition in partitions)
429 | {
430 | LibRdKafka.topic_partition_list_add(list, partition.Topic, partition.Partition);
431 | }
432 | ErrorCode err = LibRdKafka.position(handle, list);
433 | var result = GetTopicPartitionOffsetList(list);
434 | LibRdKafka.topic_partition_list_destroy(list);
435 | if (err != ErrorCode.NO_ERROR)
436 | {
437 | throw RdKafkaException.FromErr(err, "Failed to fetch position");
438 | }
439 | return result;
440 | }
441 |
442 | internal string MemberId()
443 | {
444 | IntPtr strPtr = LibRdKafka.memberid(handle);
445 | if (strPtr == IntPtr.Zero)
446 | {
447 | return null;
448 | }
449 |
450 | string memberId = Marshal.PtrToStringAnsi(strPtr);
451 | LibRdKafka.mem_free(handle, strPtr);
452 | return memberId;
453 | }
454 |
455 | internal void SetLogLevel(int level)
456 | {
457 | LibRdKafka.set_log_level(handle, (IntPtr) level);
458 | }
459 |
460 | internal static List<string> GetTopicList(IntPtr listPtr)
461 | {
462 | if (listPtr == IntPtr.Zero)
463 | {
464 | return new List<string>();
465 | }
466 |
467 | var list = Marshal.PtrToStructure<rd_kafka_topic_partition_list>(listPtr);
468 | return Enumerable.Range(0, list.cnt)
469 | .Select(i => Marshal.PtrToStructure<rd_kafka_topic_partition>(
470 | list.elems + i * Marshal.SizeOf<rd_kafka_topic_partition>()))
471 | .Select(ktp => ktp.topic)
472 | .ToList();
473 | }
474 |
475 | internal static List<TopicPartition> GetTopicPartitionList(IntPtr listPtr)
476 | {
477 | if (listPtr == IntPtr.Zero)
478 | {
479 | return new List<TopicPartition>();
480 | }
481 |
482 | var list = Marshal.PtrToStructure<rd_kafka_topic_partition_list>(listPtr);
483 | return Enumerable.Range(0, list.cnt)
484 | .Select(i => Marshal.PtrToStructure<rd_kafka_topic_partition>(
485 | list.elems + i * Marshal.SizeOf<rd_kafka_topic_partition>()))
486 | .Select(ktp => new TopicPartition()
487 | {
488 | Topic = ktp.topic,
489 | Partition = ktp.partition,
490 | })
491 | .ToList();
492 | }
493 |
494 | internal static List<TopicPartitionOffset> GetTopicPartitionOffsetList(IntPtr listPtr)
495 | {
496 | if (listPtr == IntPtr.Zero)
497 | {
498 | return new List<TopicPartitionOffset>();
499 | }
500 |
501 | var list = Marshal.PtrToStructure<rd_kafka_topic_partition_list>(listPtr);
502 | return Enumerable.Range(0, list.cnt)
503 | .Select(i => Marshal.PtrToStructure<rd_kafka_topic_partition>(
504 | list.elems + i * Marshal.SizeOf<rd_kafka_topic_partition>()))
505 | .Select(ktp => new TopicPartitionOffset()
506 | {
507 | Topic = ktp.topic,
508 | Partition = ktp.partition,
509 | Offset = ktp.offset
510 | })
511 | .ToList();
512 | }
513 |
514 | static byte[] CopyBytes(IntPtr ptr, IntPtr len)
515 | {
516 | byte[] data = null;
517 | if (ptr != IntPtr.Zero)
518 | {
519 | data = new byte[(int) len];
520 | Marshal.Copy(ptr, data, 0, (int) len);
521 | }
522 | return data;
523 | }
524 |
525 | internal List<GroupInfo> ListGroups(string group, IntPtr timeoutMs)
526 | {
527 | IntPtr grplistPtr;
528 | ErrorCode err = LibRdKafka.list_groups(handle, group, out grplistPtr, timeoutMs);
529 | if (err == ErrorCode.NO_ERROR)
530 | {
531 | var list = Marshal.PtrToStructure<rd_kafka_group_list>(grplistPtr);
532 | var groups = Enumerable.Range(0, list.group_cnt)
533 | .Select(i => Marshal.PtrToStructure<rd_kafka_group_info>(
534 | list.groups + i * Marshal.SizeOf<rd_kafka_group_info>()))
535 | .Select(gi => new GroupInfo()
536 | {
537 | Broker = new BrokerMetadata()
538 | {
539 | BrokerId = gi.broker.id,
540 | Host = gi.broker.host,
541 | Port = gi.broker.port
542 | },
543 | Group = gi.group,
544 | Error = gi.err,
545 | State = gi.state,
546 | ProtocolType = gi.protocol_type,
547 | Protocol = gi.protocol,
548 | Members = Enumerable.Range(0, gi.member_cnt)
549 | .Select(j => Marshal.PtrToStructure<rd_kafka_group_member_info>(
550 | gi.members + j * Marshal.SizeOf<rd_kafka_group_member_info>()))
551 | .Select(mi => new GroupMemberInfo()
552 | {
553 | MemberId = mi.member_id,
554 | ClientId = mi.client_id,
555 | ClientHost = mi.client_host,
556 | MemberMetadata = CopyBytes(mi.member_metadata,
557 | mi.member_metadata_size),
558 | MemberAssignment = CopyBytes(mi.member_assignment,
559 | mi.member_assignment_size)
560 | })
561 | .ToList()
562 | })
563 | .ToList();
564 | LibRdKafka.group_list_destroy(grplistPtr);
565 | return groups;
566 | }
567 | else
568 | {
569 | throw RdKafkaException.FromErr(err, "Failed to fetch group list");
570 | }
571 | }
572 | }
573 | }
574 |
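
Taken together, the consumer methods above follow one pattern: build a temporary rd_kafka_topic_partition_list, hand it to librdkafka, destroy the list, then check the returned error code. A minimal sketch of driving them through the public EventConsumer wrapper elsewhere in this repo; the broker address and topic name are placeholders, and Stop() returning a Task is assumed from the wrapper's async shutdown:

using System;
using System.Collections.Generic;
using RdKafka;

class ConsumeSketch
{
    public static void Main(string[] args)
    {
        var config = new Config() { GroupId = "example-group" };
        using (var consumer = new EventConsumer(config, "127.0.0.1:9092"))  // placeholder broker
        {
            // Each event corresponds to one MessageAndError surfaced by ConsumerPoll above
            consumer.OnMessage += (obj, msg) =>
                Console.WriteLine($"{msg.Topic} [{msg.Partition}] @{msg.Offset}");
            consumer.Subscribe(new List<string> { "testtopic" });  // forwards to Subscribe above
            consumer.Start();
            Console.ReadLine();
            consumer.Stop().Wait();  // assumed async shutdown; ends in ConsumerClose above
        }
    }
}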
--------------------------------------------------------------------------------
/src/RdKafka/Internal/SafeTopicConfigHandle.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Linq;
4 | using System.Runtime.InteropServices;
5 | using System.Text;
6 |
7 | namespace RdKafka.Internal
8 | {
9 | internal sealed class SafeTopicConfigHandle : SafeHandleZeroIsInvalid
10 | {
11 | private SafeTopicConfigHandle()
12 | {
13 | }
14 |
15 | internal static SafeTopicConfigHandle Create()
16 | {
17 | var ch = LibRdKafka.topic_conf_new();
18 | if (ch.IsInvalid)
19 | {
20 | throw new Exception("Failed to create TopicConfig");
21 | }
22 | return ch;
23 | }
24 |
25 | protected override bool ReleaseHandle()
26 | {
27 | LibRdKafka.topic_conf_destroy(handle);
28 | return true;
29 | }
30 |
31 | internal IntPtr Dup() => LibRdKafka.topic_conf_dup(handle);
32 |
33 | // TODO: deduplicate, merge with other one
34 | internal Dictionary<string, string> Dump()
35 | {
36 | UIntPtr cntp = (UIntPtr) 0;
37 | IntPtr data = LibRdKafka.topic_conf_dump(handle, out cntp);
38 |
39 | if (data == IntPtr.Zero)
40 | {
41 | throw new Exception("Zero data");
42 | }
43 |
44 | try
45 | {
46 | if (((int) cntp & 1) != 0)
47 | {
48 | // Expect Key -> Value, so even number of strings
49 | throw new Exception("Invalid number of config entries");
50 | }
51 |
52 | var dict = new Dictionary<string, string>();
53 | for (int i = 0; i < (int) cntp / 2; i++)
54 | {
55 | dict.Add(Marshal.PtrToStringAnsi(Marshal.ReadIntPtr(data, 2 * i * Marshal.SizeOf<IntPtr>())),
56 | Marshal.PtrToStringAnsi(Marshal.ReadIntPtr(data, (2 * i + 1) * Marshal.SizeOf<IntPtr>())));
57 | }
58 | // Filter out callback pointers
59 | return dict.Where(kv => !kv.Key.EndsWith("_cb")).ToDictionary(kv => kv.Key, kv => kv.Value);
60 | }
61 | finally
62 | {
63 | LibRdKafka.conf_dump_free(data, cntp);
64 | }
65 | }
66 |
67 | internal void Set(string name, string value)
68 | {
69 | // TODO: Constant instead of 512?
70 | var errorStringBuilder = new StringBuilder(512);
71 | ConfRes res = LibRdKafka.topic_conf_set(handle, name, value,
72 | errorStringBuilder, (UIntPtr) errorStringBuilder.Capacity);
73 | if (res == ConfRes.Ok)
74 | {
75 | return;
76 | }
77 | else if (res == ConfRes.Invalid)
78 | {
79 | throw new InvalidOperationException(errorStringBuilder.ToString());
80 | }
81 | else if (res == ConfRes.Unknown)
82 | {
83 | throw new InvalidOperationException(errorStringBuilder.ToString());
84 | }
85 | else
86 | {
87 | throw new Exception("Unknown error while setting configuration property");
88 | }
89 | }
90 |
91 | internal string Get(string name)
92 | {
93 | UIntPtr destSize = (UIntPtr) 0;
94 | StringBuilder sb = null;
95 |
96 | ConfRes res = LibRdKafka.topic_conf_get(handle, name, null, ref destSize);
97 | if (res == ConfRes.Ok)
98 | {
99 | sb = new StringBuilder((int) destSize);
100 | res = LibRdKafka.topic_conf_get(handle, name, sb, ref destSize);
101 | }
102 | if (res != ConfRes.Ok)
103 | {
104 | if (res == ConfRes.Unknown)
105 | {
106 | throw new InvalidOperationException($"No such configuration property: {name}");
107 | }
108 | throw new Exception("Unknown error while getting configuration property");
109 | }
110 | return sb?.ToString();
111 | }
112 | }
113 | }
114 |
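
topic_conf_dump returns one flat array of C-string pointers laid out as key, value, key, value, ..., which is why Dump() above insists on an even count and reads entries at indices 2*i and 2*i+1. A small sketch of the same dump seen through the public TopicConfig wrapper defined later in this source tree; the property and value are just examples:

using System;
using RdKafka;

class DumpSketch
{
    public static void Main(string[] args)
    {
        var topicConfig = new TopicConfig();
        topicConfig["request.required.acks"] = "-1";   // example property/value
        // Dump() walks the flattened key/value pointer array shown above
        foreach (var kv in topicConfig.Dump())
            Console.WriteLine($"{kv.Key} = {kv.Value}");
    }
}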
--------------------------------------------------------------------------------
/src/RdKafka/Internal/SafeTopicHandle.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Runtime.InteropServices;
3 |
4 | namespace RdKafka.Internal
5 | {
6 | enum MsgFlags
7 | {
8 | MSG_F_FREE = 1,
9 | MSG_F_COPY = 2,
10 | MSG_F_BLOCK = 4
11 | }
12 |
13 | internal sealed class SafeTopicHandle : SafeHandleZeroIsInvalid
14 | {
15 | const int RD_KAFKA_PARTITION_UA = -1;
16 |
17 | internal SafeKafkaHandle kafkaHandle;
18 |
19 | private SafeTopicHandle() { }
20 |
21 | protected override bool ReleaseHandle()
22 | {
23 | LibRdKafka.topic_destroy(handle);
24 | // See SafeKafkaHandle.Topic
25 | kafkaHandle.DangerousRelease();
26 | return true;
27 | }
28 |
29 | internal string GetName() => Marshal.PtrToStringAnsi(LibRdKafka.topic_name(handle));
30 |
31 | internal long Produce(byte[] payload, int payloadCount, byte[] key, int keyCount, int partition, IntPtr opaque, bool blockIfQueueFull)
32 | => (long) LibRdKafka.produce(
33 | handle,
34 | partition,
35 | (IntPtr) (MsgFlags.MSG_F_COPY | (blockIfQueueFull ? MsgFlags.MSG_F_BLOCK : 0)),
36 | payload, (UIntPtr) payloadCount,
37 | key, (UIntPtr) keyCount,
38 | opaque);
39 |
40 | internal bool PartitionAvailable(int partition) => LibRdKafka.topic_partition_available(handle, partition);
41 | }
42 | }
43 |
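
MsgFlags mirrors librdkafka's RD_KAFKA_MSG_F_* bit flags. Produce above always passes MSG_F_COPY, so librdkafka copies the managed buffers before the call returns and the GC may move them safely afterwards; MSG_F_BLOCK is OR'd in only when blockIfQueueFull is set. A conceptual fragment of the flag arithmetic (MsgFlags is internal, so this only compiles inside the RdKafka.Internal namespace):

// MSG_F_COPY = 2 and MSG_F_BLOCK = 4, so the combined value is 6
var flags = MsgFlags.MSG_F_COPY | MsgFlags.MSG_F_BLOCK;
Console.WriteLine((int) flags);   // prints 6, the value handed to rd_kafka_produce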
--------------------------------------------------------------------------------
/src/RdKafka/Library.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Runtime.InteropServices;
3 | using RdKafka.Internal;
4 |
5 | namespace RdKafka
6 | {
7 | /// <summary>
8 | /// Miscellaneous APIs for the RdKafka library itself.
9 | /// </summary>
10 | public static class Library
11 | {
12 | /// <summary>
13 | /// Returns the librdkafka version as integer.
14 | ///
15 | /// Interpreted as hex MM.mm.rr.xx:
16 | /// - MM = Major
17 | /// - mm = minor
18 | /// - rr = revision
19 | /// - xx = pre-release id (0xff is the final release)
20 | ///
21 | /// E.g.: 0x000901ff = 0.9.1
22 | /// </summary>
23 | public static int Version => (int) LibRdKafka.version();
24 |
25 | /// <summary>
26 | /// The librdkafka version as string.
27 | /// </summary>
28 | public static string VersionString =>
29 | Marshal.PtrToStringAnsi(LibRdKafka.version_str());
30 |
31 | /// <summary>
32 | /// List of the supported debug contexts.
33 | /// </summary>
34 | public static string[] DebugContexts =>
35 | Marshal.PtrToStringAnsi(LibRdKafka.get_debug_contexts()).Split(',');
36 |
37 | public static void SetLogLevel(int logLevel)
38 | {
39 | LibRdKafka.set_log_level(IntPtr.Zero, (IntPtr) logLevel);
40 | }
41 |
42 | /// <summary>
43 | /// Wait for all rdkafka objects to be destroyed.
44 | ///
45 | /// Returns if all kafka objects are now destroyed,
46 | /// or throws TimeoutException if the timeout was reached.
47 | ///
48 | /// Since RdKafka handle deletion is an async operation the
49 | /// WaitDestroyed() function can be used for applications where
50 | /// a clean shutdown is required.
51 | /// </summary>
52 | /// <exception cref="TimeoutException">Timeout was reached before all objects were destroyed.</exception>
53 | public static void WaitDestroyed(TimeSpan timeout)
54 | {
55 | if ((long) LibRdKafka.wait_destroyed((IntPtr) timeout.TotalMilliseconds) != 0)
56 | {
57 | throw new TimeoutException();
58 | }
59 | }
60 | }
61 | }
62 |
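
The hex layout documented on Version unpacks with simple shifts. A small sketch (the class and variable names are made up for illustration):

using System;
using RdKafka;

class VersionSketch
{
    public static void Main(string[] args)
    {
        int v = Library.Version;               // e.g. 0x000901ff
        int major      = (v >> 24) & 0xff;     // MM -> 0
        int minor      = (v >> 16) & 0xff;     // mm -> 9
        int revision   = (v >> 8)  & 0xff;     // rr -> 1
        int prerelease = v         & 0xff;     // xx -> 0xff means final release
        Console.WriteLine($"{major}.{minor}.{revision} (pre-release id 0x{prerelease:x2})");
        Console.WriteLine(Library.VersionString);  // same information as a string
    }
}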
--------------------------------------------------------------------------------
/src/RdKafka/Message.cs:
--------------------------------------------------------------------------------
1 | namespace RdKafka
2 | {
3 | public struct Message
4 | {
5 | public string Topic { get; set; }
6 | public int Partition { get; set; }
7 | public long Offset { get; set; }
8 | public byte[] Payload { get; set; }
9 | public byte[] Key { get; set; }
10 |
11 | public TopicPartitionOffset TopicPartitionOffset =>
12 | new TopicPartitionOffset()
13 | {
14 | Topic = Topic,
15 | Partition = Partition,
16 | Offset = Offset
17 | };
18 | }
19 |
20 | public struct MessageAndError
21 | {
22 | public Message Message;
23 | public ErrorCode Error;
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/src/RdKafka/Metadata.cs:
--------------------------------------------------------------------------------
1 | using System.Collections.Generic;
2 |
3 | namespace RdKafka
4 | {
5 | public struct Metadata
6 | {
7 | public List<BrokerMetadata> Brokers { get; set; }
8 | public List<TopicMetadata> Topics { get; set; }
9 | public int OriginatingBrokerId { get; set; }
10 | public string OriginatingBrokerName { get; set; }
11 | }
12 |
13 | public struct BrokerMetadata
14 | {
15 | public int BrokerId { get; set; }
16 | public string Host { get; set; }
17 | public int Port { get; set; }
18 | }
19 |
20 | public struct PartitionMetadata
21 | {
22 | public int PartitionId { get; set; }
23 | public int Leader { get; set; }
24 | public int[] Replicas { get; set; }
25 | public int[] InSyncReplicas { get; set; }
26 | public ErrorCode Error { get; set; }
27 | }
28 |
29 | public struct TopicMetadata
30 | {
31 | public string Topic { get; set; }
32 | public List<PartitionMetadata> Partitions { get; set; }
33 | public ErrorCode Error { get; set; }
34 | }
35 |
36 | public struct TopicPartition
37 | {
38 | public TopicPartition(string topic, int partition)
39 | {
40 | Topic = topic;
41 | Partition = partition;
42 | }
43 |
44 | public string Topic { get; set; }
45 | public int Partition { get; set; }
46 |
47 | public override string ToString() => Topic + " " + Partition;
48 | }
49 |
50 | public struct TopicPartitionOffset
51 | {
52 | public TopicPartitionOffset(string topic, int partition, long offset)
53 | {
54 | Topic = topic;
55 | Partition = partition;
56 | Offset = offset;
57 | }
58 |
59 | public string Topic { get; set; }
60 | public int Partition { get; set; }
61 | public long Offset { get; set; }
62 |
63 | public override string ToString() => Topic + " " + Partition + " " + Offset;
64 | }
65 |
66 | public struct Offsets
67 | {
68 | public long Low { get; set; }
69 | public long High { get; set; }
70 | }
71 |
72 | public struct GroupInfo
73 | {
74 | public BrokerMetadata Broker { get; set; } /**< Originating broker info */
75 | public string Group { get; set; } /**< Group name */
76 | public ErrorCode Error { get; set; } /**< Broker-originated error */
77 | public string State { get; set; } /**< Group state */
78 | public string ProtocolType { get; set; } /**< Group protocol type */
79 | public string Protocol { get; set; } /**< Group protocol */
80 | public List<GroupMemberInfo> Members { get; set; } /**< Group members */
81 | }
82 |
83 | public struct GroupMemberInfo {
84 | public string MemberId { get; set; } /**< Member id (generated by broker) */
85 | public string ClientId { get; set; } /**< Client's \p client.id */
86 | public string ClientHost { get; set; } /**< Client's hostname */
87 | public byte[] MemberMetadata { get; set; } /**< Member metadata (binary),
88 | * format depends on \p protocol_type. */
89 | public byte[] MemberAssignment { get; set; } /**< Member assignment (binary),
90 | * format depends on \p protocol_type. */
91 | }
92 | }
93 |
--------------------------------------------------------------------------------
/src/RdKafka/Offset.cs:
--------------------------------------------------------------------------------
1 |
2 | namespace RdKafka
3 | {
4 | public static class Offset
5 | {
6 | /// <summary>
7 | /// Start consuming from beginning of kafka partition queue: oldest msg
8 | /// </summary>
9 | public const long Beginning = -2;
10 |
11 | /// <summary>
12 | /// Start consuming from end of kafka partition queue: next msg
13 | /// </summary>
14 | public const long End = -1;
15 |
16 | /// <summary>
17 | /// Start consuming from offset retrieved from offset store
18 | /// </summary>
19 | public const long Stored = -1000;
20 |
21 | /// <summary>
22 | /// Invalid offset
23 | /// </summary>
24 | public const long Invalid = -1001;
25 | }
26 | }
27 |
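
These sentinel values travel in TopicPartitionOffset.Offset when assigning partitions by hand. A minimal sketch, assuming the public Consumer.Assign wrapper (in Consumer.cs, outside this excerpt) accepts a List<TopicPartitionOffset> and forwards to the internal Assign shown earlier; broker and topic are placeholders:

using System.Collections.Generic;
using RdKafka;

class AssignSketch
{
    public static void Main(string[] args)
    {
        var consumer = new Consumer(new Config() { GroupId = "example-group" }, "127.0.0.1:9092");
        // Re-read partition 0 of "testtopic" from the oldest retained message
        consumer.Assign(new List<TopicPartitionOffset>
        {
            new TopicPartitionOffset("testtopic", 0, Offset.Beginning)
        });
    }
}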
--------------------------------------------------------------------------------
/src/RdKafka/Producer.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Runtime.InteropServices;
3 | using RdKafka.Internal;
4 | using System.Collections.Concurrent;
5 |
6 | namespace RdKafka
7 | {
8 | /// <summary>
9 | /// High-level, asynchronous message producer.
10 | /// </summary>
11 | public class Producer : Handle
12 | {
13 | BlockingCollection<LibRdKafka.PartitionerCallback> topicPartitioners
14 | = new BlockingCollection<LibRdKafka.PartitionerCallback>();
15 |
16 | public Producer(string brokerList) : this(null, brokerList) {}
17 |
18 | public Producer(Config config, string brokerList = null)
19 | {
20 | config = config ?? new Config();
21 |
22 | IntPtr cfgPtr = config.handle.Dup();
23 | LibRdKafka.conf_set_dr_msg_cb(cfgPtr, DeliveryReportDelegate);
24 | Init(RdKafkaType.Producer, cfgPtr, config.Logger);
25 |
26 | if (brokerList != null)
27 | {
28 | handle.AddBrokers(brokerList);
29 | }
30 | }
31 |
32 | public Topic Topic(string topic, TopicConfig config = null)
33 | {
34 | LibRdKafka.PartitionerCallback partitionerDelegate;
35 | var kafkaTopic = new Topic(handle, this, topic, config, out partitionerDelegate);
36 | if (config?.CustomPartitioner != null)
37 | {
38 | // kafkaTopic may be collected by the GC before partitionerDelegate has been called
39 | // for all produced messages, so we need to keep a reference to the delegate.
40 | // We can't make it static in Topic, as the partitioner differs per topic.
41 | // We could keep delegates in a static collection in Topic, but holding them in an
42 | // instance collection lets us free them when the producer is closed
43 | // (as closing waits for all messages to be produced).
44 |
45 | // This is not very efficient for applications that create a lot of topics;
46 | // we should find a way to clear entries from the list
47 | // once no queued messages reference the topic anymore.
48 | topicPartitioners.Add(partitionerDelegate);
49 | }
50 | return kafkaTopic;
51 | }
52 |
53 | // Explicitly keep reference to delegate so it stays alive
54 | private static readonly LibRdKafka.DeliveryReportCallback DeliveryReportDelegate = DeliveryReportCallback;
55 |
56 | private static void DeliveryReportCallback(IntPtr rk, ref rd_kafka_message rkmessage, IntPtr opaque)
57 | {
58 | // msg_opaque was set by Topic.Produce
59 | var gch = GCHandle.FromIntPtr(rkmessage._private);
60 | var deliveryHandler = (IDeliveryHandler) gch.Target;
61 | gch.Free();
62 |
63 | if (rkmessage.err != 0)
64 | {
65 | deliveryHandler.SetException(
66 | RdKafkaException.FromErr(
67 | rkmessage.err,
68 | "Failed to produce message"));
69 | return;
70 | }
71 |
72 | deliveryHandler.SetResult(new DeliveryReport {
73 | Offset = rkmessage.offset,
74 | Partition = rkmessage.partition
75 | });
76 | }
77 | }
78 | }
79 |
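
A minimal produce round trip through the public API; the broker address and topic name are placeholders. The returned Task completes when DeliveryReportCallback above receives the report for this message:

using System;
using System.Text;
using RdKafka;

class ProduceSketch
{
    public static void Main(string[] args)
    {
        using (Producer producer = new Producer("127.0.0.1:9092"))   // placeholder broker
        using (Topic topic = producer.Topic("testtopic"))
        {
            byte[] data = Encoding.UTF8.GetBytes("Hello RdKafka");
            DeliveryReport report = topic.Produce(data).Result;
            Console.WriteLine($"Produced to partition {report.Partition} at offset {report.Offset}");
        }
    }
}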
--------------------------------------------------------------------------------
/src/RdKafka/RdKafka.xproj:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <Project ToolsVersion="14.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
3 |   <PropertyGroup>
4 |     <VisualStudioVersion Condition="'$(VisualStudioVersion)' == ''">14.0</VisualStudioVersion>
5 |     <VSToolsPath Condition="'$(VSToolsPath)' == ''">$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)</VSToolsPath>
6 |   </PropertyGroup>
7 |   <Import Project="$(VSToolsPath)\DotNet\Microsoft.DotNet.Props" Condition="'$(VSToolsPath)' != ''" />
8 |   <PropertyGroup Label="Globals">
9 |     <ProjectGuid>b2ddb635-4423-45d7-b3dc-f701e6010868</ProjectGuid>
10 |     <RootNamespace>RdKafka</RootNamespace>
11 |     <BaseIntermediateOutputPath>.\obj</BaseIntermediateOutputPath>
12 |     <OutputPath>.\bin\</OutputPath>
13 |   </PropertyGroup>
14 |   <PropertyGroup>
15 |     <SchemaVersion>2.0</SchemaVersion>
16 |   </PropertyGroup>
17 |   <Import Project="$(VSToolsPath)\DotNet\Microsoft.DotNet.targets" Condition="'$(VSToolsPath)' != ''" />
18 | </Project>
--------------------------------------------------------------------------------
/src/RdKafka/RdKafkaException.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Runtime.InteropServices;
3 | using RdKafka.Internal;
4 |
5 | namespace RdKafka
6 | {
7 | public class RdKafkaException : Exception
8 | {
9 | public RdKafkaException(string message, ErrorCode errorCode)
10 | : base(message)
11 | {
12 | ErrorCode = errorCode;
13 | }
14 |
15 | internal static string ErrorToString(ErrorCode errorCode) => Marshal.PtrToStringAnsi(LibRdKafka.err2str(errorCode));
16 |
17 | internal static RdKafkaException FromErr(ErrorCode err, string message)
18 | {
19 | var errorMessage = $"Error {err} - {ErrorToString(err)}";
20 | if (message == null)
21 | {
22 | return new RdKafkaException(errorMessage, err);
23 | }
24 | else
25 | {
26 | return new RdKafkaException($"{message} ({errorMessage})", err);
27 | }
28 | }
29 |
30 | public ErrorCode ErrorCode { get; }
31 | }
32 | }
33 |
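
FromErr folds both the numeric code and librdkafka's err2str description into the exception message, so catch sites can log the message as-is or branch on ErrorCode. A small sketch around a produce call (producer/topic setup as in the Producer example above):

using System;
using System.Threading.Tasks;
using RdKafka;

class ErrorSketch
{
    public static async Task TryProduce(Topic topic, byte[] payload)
    {
        try
        {
            // await unwraps the RdKafkaException set by the delivery report callback
            await topic.Produce(payload);
        }
        catch (RdKafkaException e)
        {
            // e.Message already reads "... (Error {code} - {description})"
            Console.WriteLine($"Produce failed: {e.Message} (numeric code {(int) e.ErrorCode})");
        }
    }
}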
--------------------------------------------------------------------------------
/src/RdKafka/Topic.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Runtime.InteropServices;
3 | using System.Threading.Tasks;
4 | using RdKafka.Internal;
5 | using System.Collections.Concurrent;
6 |
7 | namespace RdKafka
8 | {
9 | public struct DeliveryReport
10 | {
11 | public int Partition;
12 | public long Offset;
13 | }
14 |
15 | /// <summary>
16 | /// Handle to a topic obtained from <see cref="Producer.Topic" />.
17 | /// </summary>
18 | public class Topic : IDisposable
19 | {
20 | private sealed class TaskDeliveryHandler : TaskCompletionSource<DeliveryReport>, IDeliveryHandler
21 | {
22 | }
23 |
24 | const int RD_KAFKA_PARTITION_UA = -1;
25 |
26 | internal readonly SafeTopicHandle handle;
27 | readonly Producer producer;
28 |
29 |
30 | internal Topic(SafeKafkaHandle kafkaHandle, Producer producer, string topic, TopicConfig config,
31 | out LibRdKafka.PartitionerCallback partitionerDelegate)
32 | {
33 | // partitionerDelegate is an out parameter because its reference must be kept alive outside of Topic:
34 | // it may be called after the topic has been GC'd, and it differs between topics,
35 | // so we can't simply make it static here.
36 | this.producer = producer;
37 |
38 | config = config ?? new TopicConfig();
39 | config["produce.offset.report"] = "true";
40 | IntPtr configPtr = config.handle.Dup();
41 |
42 | if (config.CustomPartitioner != null)
43 | {
44 | partitionerDelegate = (IntPtr rkt, IntPtr keydata, UIntPtr keylen, int partition_cnt,
45 | IntPtr rkt_opaque, IntPtr msg_opaque) =>
46 | {
47 | byte[] key = null;
48 | if (keydata != IntPtr.Zero)
49 | {
50 | key = new byte[(int) keylen];
51 | Marshal.Copy(keydata, key, 0, (int) keylen);
52 | }
53 | return config.CustomPartitioner(this, key, partition_cnt);
54 | };
55 | LibRdKafka.topic_conf_set_partitioner_cb(configPtr, partitionerDelegate);
56 | }
57 | else
58 | {
59 | partitionerDelegate = null;
60 | }
61 |
62 | handle = kafkaHandle.Topic(topic, configPtr);
63 | }
64 |
65 | public void Dispose()
66 | {
67 | handle.Dispose();
68 | }
69 |
70 | public string Name => handle.GetName();
71 |
72 | public Task<DeliveryReport> Produce(byte[] payload, byte[] key = null, Int32 partition = RD_KAFKA_PARTITION_UA, bool blockIfQueueFull = true)
73 | {
74 | return Produce(payload, payload?.Length ?? 0, key, key?.Length ?? 0, partition, blockIfQueueFull);
75 | }
76 |
77 | public Task<DeliveryReport> Produce(byte[] payload, int payloadCount, byte[] key = null, int keyCount = 0, Int32 partition = RD_KAFKA_PARTITION_UA, bool blockIfQueueFull = true)
78 | {
79 | // Passes the TaskCompletionSource to the delivery report callback
80 | // via the msg_opaque pointer
81 | var deliveryCompletionSource = new TaskDeliveryHandler();
82 | Produce(payload, payloadCount, key, keyCount, partition, deliveryCompletionSource, blockIfQueueFull);
83 | return deliveryCompletionSource.Task;
84 | }
85 |
86 | /// <summary>
87 | /// Produces a keyed message to a partition of the current Topic and notifies the caller of progress via a callback interface.
88 | /// </summary>
89 | /// <param name="payload">Payload to send to Kafka. Can be null.</param>
90 | /// <param name="deliveryHandler">IDeliveryHandler implementation used to notify the caller when the given produce request completes or an error occurs.</param>
91 | /// <param name="key">(Optional) The key associated with <paramref name="payload" /> (or null if no key is specified).</param>
92 | /// <param name="partition">(Optional) The topic partition to which <paramref name="payload" /> will be sent (or -1 if no partition is specified).</param>
93 | /// <exception cref="ArgumentNullException">Thrown if <paramref name="deliveryHandler" /> is null.</exception>
94 | /// <remarks>Methods of <paramref name="deliveryHandler" /> will be executed in an RdKafka-internal thread and will block other operations - consider this when implementing IDeliveryHandler.
95 | /// Use this overload for high-performance use cases as it does not use TPL and reduces the number of allocations.</remarks>
96 | public void Produce(byte[] payload, IDeliveryHandler deliveryHandler, byte[] key = null, Int32 partition = RD_KAFKA_PARTITION_UA, bool blockIfQueueFull = true)
97 | {
98 | Produce(payload, payload?.Length ?? 0, deliveryHandler, key, key?.Length ?? 0, partition, blockIfQueueFull);
99 | }
100 |
101 | /// <summary>
102 | /// Produces a keyed message to a partition of the current Topic and notifies the caller of progress via a callback interface.
103 | /// </summary>
104 | /// <param name="payload">Payload to send to Kafka. Can be null.</param>
105 | /// <param name="payloadCount">Number of bytes to use from payload buffer</param>
106 | /// <param name="deliveryHandler">IDeliveryHandler implementation used to notify the caller when the given produce request completes or an error occurs.</param>
107 | /// <param name="key">(Optional) The key associated with <paramref name="payload" /> (or null if no key is specified).</param>
108 | /// <param name="keyCount">Number of bytes to use from key buffer</param>
109 | /// <param name="partition">(Optional) The topic partition to which <paramref name="payload" /> will be sent (or -1 if no partition is specified).</param>
110 | /// <exception cref="ArgumentNullException">Thrown if <paramref name="deliveryHandler" /> is null.</exception>
111 | /// <remarks>Methods of <paramref name="deliveryHandler" /> will be executed in an RdKafka-internal thread and will block other operations - consider this when implementing IDeliveryHandler.
112 | /// Use this overload for high-performance use cases as it does not use TPL and reduces the number of allocations.</remarks>
113 | public void Produce(byte[] payload, int payloadCount, IDeliveryHandler deliveryHandler, byte[] key = null, int keyCount = 0, Int32 partition = RD_KAFKA_PARTITION_UA, bool blockIfQueueFull = true)
114 | {
115 | if (deliveryHandler == null)
116 | throw new ArgumentNullException(nameof(deliveryHandler));
117 | Produce(payload, payloadCount, key, keyCount, partition, deliveryHandler, blockIfQueueFull);
118 | }
119 |
120 |
121 | private void Produce(byte[] payload, int payloadCount, byte[] key, int keyCount, Int32 partition, object deliveryHandler, bool blockIfQueueFull)
122 | {
123 | var gch = GCHandle.Alloc(deliveryHandler);
124 | var ptr = GCHandle.ToIntPtr(gch);
125 |
126 | if (handle.Produce(payload, payloadCount, key, keyCount, partition, ptr, blockIfQueueFull) != 0)
127 | {
128 | var err = LibRdKafka.last_error();
129 | gch.Free();
130 | throw RdKafkaException.FromErr(err, "Could not produce message");
131 | }
132 | }
133 |
134 | /// <summary>
135 | /// Check if partition is available (has a leader broker).
136 | ///
137 | /// Return true if the partition is available, else false.
138 | ///
139 | /// This function must only be called from inside a partitioner function.
140 | /// </summary>
141 | public bool PartitionAvailable(int partition) => handle.PartitionAvailable(partition);
142 | }
143 | }
144 |
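
For the non-TPL overloads the caller supplies its own IDeliveryHandler. Judging from the empty TaskDeliveryHandler above, the interface consists of SetResult(DeliveryReport) and SetException(Exception); a minimal counting implementation under that assumption:

using System;
using System.Threading;
using RdKafka;

// Counts outcomes instead of allocating a TaskCompletionSource per message.
class CountingDeliveryHandler : IDeliveryHandler
{
    public long Delivered;
    public long Failed;

    // Both methods run on an RdKafka-internal thread (see remarks above): keep them short.
    public void SetResult(DeliveryReport deliveryReport)
        => Interlocked.Increment(ref Delivered);

    public void SetException(Exception exception)
        => Interlocked.Increment(ref Failed);
}

One shared instance can then be passed to topic.Produce(payload, handler) for every message in a batch.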
--------------------------------------------------------------------------------
/src/RdKafka/TopicConfig.cs:
--------------------------------------------------------------------------------
1 | using System.Collections.Generic;
2 | using RdKafka.Internal;
3 |
4 | namespace RdKafka
5 | {
6 | /// <summary>
7 | /// Topic-specific configuration.
8 | /// </summary>
9 | public class TopicConfig
10 | {
11 | internal readonly SafeTopicConfigHandle handle;
12 |
13 | public TopicConfig()
14 | {
15 | handle = SafeTopicConfigHandle.Create();
16 | }
17 |
18 | /// <summary>
19 | /// Dump all configuration names and values into a dictionary.
20 | /// </summary>
21 | public Dictionary Dump() => handle.Dump();
22 |
23 | /// <summary>
24 | /// Get or set a configuration value directly.
25 | ///
26 | /// See CONFIGURATION.md for the full list of supported properties.
27 | /// </summary>
28 | /// <param name="name">The configuration property name.</param>
29 | /// <value>The configuration property value.</value>
30 | /// <exception cref="InvalidOperationException"><paramref name="value" /> is invalid.</exception>
31 | /// <exception cref="InvalidOperationException">Configuration property <paramref name="name" /> does not exist.</exception>
32 | public string this[string name]
33 | {
34 | set
35 | {
36 | handle.Set(name, value);
37 | }
38 | get
39 | {
40 | return handle.Get(name);
41 | }
42 | }
43 |
44 | /// <summary>
45 | /// The partitioner may be called in any thread at any time,
46 | /// it may be called multiple times for the same message/key.
47 | ///
48 | /// Partitioner function constraints:
49 | /// - MUST NOT call any RdKafka methods except for
50 | /// Topic.PartitionAvailable
51 | /// - MUST NOT block or execute for prolonged periods of time.
52 | /// - MUST return a value between 0 and partition_cnt-1, or the
53 | /// special RD_KAFKA_PARTITION_UA
54 | /// value if partitioning could not be performed.
55 | /// </summary>
56 | public delegate int Partitioner(Topic topic, byte[] key, int partitionCount);
57 |
58 | /// <summary>
59 | /// Sets a custom <see cref="Partitioner" />
60 | /// delegate to control assignment of messages to partitions.
61 | ///
62 | /// See Topic.Produce for details.
63 | /// </summary>
64 | public Partitioner CustomPartitioner { get; set; }
65 | }
66 | }
67 |
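
A sketch of wiring up CustomPartitioner that stays within the constraints listed above: cheap, no RdKafka calls other than Topic.PartitionAvailable, and -1 (RD_KAFKA_PARTITION_UA) when no decision can be made. The key-hash is a toy example:

using RdKafka;

class PartitionerSketch
{
    public static TopicConfig Build()
    {
        var config = new TopicConfig();
        config.CustomPartitioner = (topic, key, partitionCount) =>
        {
            if (key == null || key.Length == 0)
                return 0;                                  // no key: pin to partition 0
            int p = key[0] % partitionCount;               // toy hash of the first key byte
            return topic.PartitionAvailable(p) ? p : -1;   // -1 = RD_KAFKA_PARTITION_UA
        };
        return config;
    }
}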
--------------------------------------------------------------------------------
/src/RdKafka/project.json:
--------------------------------------------------------------------------------
1 | {
2 | "version": "0.9.2-*",
3 |
4 | "packOptions": {
5 | "description": "C# Apache Kafka client",
6 | "authors": ["Andreas Heider"],
7 | "tags": ["kafka", "rdkafka"]
8 | },
9 |
10 | "dependencies": {
11 | "RdKafka.Internal.librdkafka": "0.9.2-ci-28"
12 | },
13 |
14 | "frameworks": {
15 | "net451": { },
16 | "netstandard1.3": {
17 | "dependencies": {
18 | "System.Collections.Concurrent": "4.3.0",
19 | "System.Console": "4.3.0",
20 | "System.Linq": "4.3.0",
21 | "System.Runtime.InteropServices": "4.3.0"
22 | }
23 | }
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/test/RdKafka.Tests/ConfigTests.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using Xunit;
4 | using RdKafka;
5 |
6 | namespace RdKafka.Tests
7 | {
8 | public class ConfigTests
9 | {
10 | [Fact]
11 | public void SetAndGetParameterWorks()
12 | {
13 | var config = new Config();
14 | config["client.id"] = "test";
15 | Assert.Equal(config["client.id"], "test");
16 | }
17 |
18 | [Fact]
19 | public void SettingUnknownParameterThrows()
20 | {
21 | var config = new Config();
22 | Assert.Throws<InvalidOperationException>(() => config["unknown"] = "something");
23 | }
24 |
25 | [Fact]
26 | public void SettingParameterToInvalidValueThrows()
27 | {
28 | var config = new Config();
29 | Assert.Throws<InvalidOperationException>(() => config["session.timeout.ms"] = "string");
30 | }
31 |
32 | [Fact]
33 | public void GettingUnknownParameterThrows()
34 | {
35 | var config = new Config();
36 | Assert.Throws<InvalidOperationException>(() => config["unknown"]);
37 | }
38 |
39 | [Fact]
40 | public void DumpedConfigLooksReasonable()
41 | {
42 | var config = new Config();
43 | config["client.id"] = "test";
44 | Dictionary<string, string> dump = config.Dump();
45 | Assert.Equal(dump["client.id"], "test");
46 | }
47 | }
48 | }
49 |
--------------------------------------------------------------------------------
/test/RdKafka.Tests/RdKafka.Tests.xproj:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <Project ToolsVersion="14.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
3 |   <PropertyGroup>
4 |     <VisualStudioVersion Condition="'$(VisualStudioVersion)' == ''">14.0</VisualStudioVersion>
5 |     <VSToolsPath Condition="'$(VSToolsPath)' == ''">$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)</VSToolsPath>
6 |   </PropertyGroup>
7 |   <Import Project="$(VSToolsPath)\DotNet\Microsoft.DotNet.Props" Condition="'$(VSToolsPath)' != ''" />
8 |   <PropertyGroup Label="Globals">
9 |     <ProjectGuid>33151be2-c10b-41bc-8c5e-e55211a1722d</ProjectGuid>
10 |     <RootNamespace>RdKafka.Tests</RootNamespace>
11 |     <BaseIntermediateOutputPath>..\..\artifacts\obj\$(MSBuildProjectName)</BaseIntermediateOutputPath>
12 |     <OutputPath>.\bin\</OutputPath>
13 |   </PropertyGroup>
14 |   <PropertyGroup>
15 |     <SchemaVersion>2.0</SchemaVersion>
16 |   </PropertyGroup>
17 |   <ItemGroup>
18 |     <Service Include="{82a7f48d-3b50-4b1e-b82e-3ada8210c358}" />
19 |   </ItemGroup>
20 |   <Import Project="$(VSToolsPath)\DotNet\Microsoft.DotNet.targets" Condition="'$(VSToolsPath)' != ''" />
21 | </Project>
--------------------------------------------------------------------------------
/test/RdKafka.Tests/project.json:
--------------------------------------------------------------------------------
1 | {
2 | "dependencies": {
3 | "RdKafka": {
4 | "target": "project"
5 | }
6 | },
7 |
8 | "frameworks" : {
9 | "netcoreapp1.0": {
10 | "dependencies": {
11 | "Microsoft.NETCore.App": {
12 | "type": "platform",
13 | "version": "1.0.0"
14 | },
15 | "xunit": "2.1.0",
16 | "dotnet-test-xunit": "1.0.0-rc2-build10025"
17 | },
18 | "imports": [
19 | "dnxcore50",
20 | "portable-net45+win8"
21 | ]
22 | }
23 | },
24 | "testRunner": "xunit"
25 | }
26 |
27 |
--------------------------------------------------------------------------------