├── .gitattributes
├── .gitignore
├── LICENSE
├── README.md
└── src
    ├── .nuget
    │   ├── NuGet.Config
    │   ├── NuGet.exe
    │   └── NuGet.targets
    ├── KafkaNetClient.sln
    ├── KafkaNetClient
    │   ├── BrokerRouter.cs
    │   ├── Common
    │   │   ├── AsyncCollection.cs
    │   │   ├── AsyncLock.cs
    │   │   ├── AsyncManualResetEvent.cs
    │   │   ├── BigEndianBinaryReader.cs
    │   │   ├── BigEndianBinaryWriter.cs
    │   │   ├── ConcurrentCircularBuffer.cs
    │   │   ├── Crc32Provider.cs
    │   │   ├── Extensions.cs
    │   │   ├── KafkaMessagePacker.cs
    │   │   ├── ScheduledTimer.cs
    │   │   └── TaskExtensions.cs
    │   ├── Consumer.cs
    │   ├── Default
    │   │   ├── DefaultKafkaConnectionFactory.cs
    │   │   ├── DefaultPartitionSelector.cs
    │   │   └── DefaultTraceLog.cs
    │   ├── Interfaces
    │   │   ├── IBrokerRouter.cs
    │   │   ├── IKafkaConnection.cs
    │   │   ├── IKafkaConnectionFactory.cs
    │   │   ├── IKafkaLog.cs
    │   │   ├── IKafkaRequest.cs
    │   │   ├── IKafkaTcpSocket.cs
    │   │   ├── IManualConsumer.cs
    │   │   ├── IMetadataQueries.cs
    │   │   └── IPartitionSelector.cs
    │   ├── KafkaConnection.cs
    │   ├── KafkaMetadataProvider.cs
    │   ├── KafkaNetClient.csproj
    │   ├── KafkaNetClient.nuspec
    │   ├── KafkaTcpSocket.cs
    │   ├── ManualConsumer.cs
    │   ├── MetadataQueries.cs
    │   ├── Model
    │   │   ├── BrokerRoute.cs
    │   │   ├── ConsumerOptions.cs
    │   │   ├── KafkaEndpoint.cs
    │   │   ├── KafkaOptions.cs
    │   │   └── StatisticsTrackerOptions.cs
    │   ├── Producer.cs
    │   ├── Properties
    │   │   └── AssemblyInfo.cs
    │   ├── Protocol
    │   │   ├── BaseRequest.cs
    │   │   ├── Broker.cs
    │   │   ├── ConsumerMetadataRequest.cs
    │   │   ├── FetchRequest.cs
    │   │   ├── IBaseResponse.cs
    │   │   ├── Message.cs
    │   │   ├── MetadataRequest.cs
    │   │   ├── OffsetCommitRequest.cs
    │   │   ├── OffsetFetchRequest.cs
    │   │   ├── OffsetRequest.cs
    │   │   ├── ProduceRequest.cs
    │   │   ├── Protocol.cs
    │   │   └── Topic.cs
    │   ├── ProtocolGateway.cs
    │   └── Statistics
    │       └── StatisticsTracker.cs
    ├── StatisticsTestLoader
    │   ├── Configuration.cs
    │   ├── ConsoleLogger.cs
    │   ├── DestinationKafka.cs
    │   ├── IRecordSource.cs
    │   ├── KafkaRecord.cs
    │   ├── Program.cs
    │   ├── Properties
    │   │   └── AssemblyInfo.cs
    │   ├── SourcePropertyChanges.cs
    │   ├── StatisticsTestLoader.csproj
    │   └── packages.config
    ├── TestHarness
    │   ├── App.config
    │   ├── Program.cs
    │   ├── Properties
    │   │   └── AssemblyInfo.cs
    │   ├── TestHarness.csproj
    │   └── packages.config
    └── kafka-tests
        ├── App.config
        ├── Fakes
        │   ├── BrokerRouterProxy.cs
        │   ├── FakeBrokerRouter.cs
        │   ├── FakeKafkaConnection.cs
        │   └── FakeTcpServer.cs
        ├── Helpers
        │   ├── IntegrationConfig.cs
        │   ├── MessageHelper.cs
        │   └── TaskTest.cs
        ├── Integration
        │   ├── GzipProducerConsumerTests.cs
        │   ├── KafkaConnectionIntegrationTests.cs
        │   ├── KafkaMetadataProviderUnitTests.cs
        │   ├── ManualConsumerTests.cs
        │   ├── ManualTesting.cs
        │   ├── OffsetManagementTests.cs
        │   ├── ProducerConsumerIntegrationTests.cs
        │   ├── ProducerIntegrationTests.cs
        │   └── ProtocolGatewayTest.cs
        ├── Properties
        │   └── AssemblyInfo.cs
        ├── RequestFactory.cs
        ├── Unit
        │   ├── AsyncCollectionTests.cs
        │   ├── AsyncLockTests.cs
        │   ├── BigEndianBinaryReaderTests.cs
        │   ├── BigEndianBinaryWriterTests.cs
        │   ├── BinaryFormatterSerializationTests.cs
        │   ├── BrokerRouterTests.cs
        │   ├── CircularBufferTests.cs
        │   ├── ConsumerTests.cs
        │   ├── DefaultPartitionSelectorTests.cs
        │   ├── FakeTcpServerTests.cs
        │   ├── KafkaConnectionTests.cs
        │   ├── KafkaEndpointTests.cs
        │   ├── KafkaMetadataProviderTests.cs
        │   ├── KafkaTcpSocketTests.cs
        │   ├── MetadataQueriesTests.cs
        │   ├── ProducerTests.cs
        │   ├── ProtocolBaseRequestTests.cs
        │   ├── ProtocolGatewayTest.cs
        │   ├── ProtocolMessageTests.cs
        │   ├── ProtocolTests.cs
        │   └── ScheduleTimerTests.cs
        ├── kafka-tests.csproj
        └── packages.config
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 |
4 | # Custom for Visual Studio
5 | *.cs diff=csharp
6 | *.sln merge=union
7 | *.csproj merge=union
8 | *.vbproj merge=union
9 | *.fsproj merge=union
10 | *.dbproj merge=union
11 |
12 | # Standard to msysgit
13 | *.doc diff=astextplain
14 | *.DOC diff=astextplain
15 | *.docx diff=astextplain
16 | *.DOCX diff=astextplain
17 | *.dot diff=astextplain
18 | *.DOT diff=astextplain
19 | *.pdf diff=astextplain
20 | *.PDF diff=astextplain
21 | *.rtf diff=astextplain
22 | *.RTF diff=astextplain
23 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | #################
2 | ## Eclipse
3 | #################
4 |
5 | *.pydevproject
6 | .project
7 | .metadata
8 | bin/
9 | tmp/
10 | *.tmp
11 | *.bak
12 | *.swp
13 | *~.nib
14 | local.properties
15 | .classpath
16 | .settings/
17 | .loadpath
18 |
19 | # External tool builders
20 | .externalToolBuilders/
21 |
22 | # Locally stored "Eclipse launch configurations"
23 | *.launch
24 |
25 | # CDT-specific
26 | .cproject
27 |
28 | # PDT-specific
29 | .buildpath
30 |
31 | # FAKE
32 | .build
33 | .tools
34 | .deploy
35 |
36 |
37 | #################
38 | ## Visual Studio
39 | #################
40 |
41 | ## Ignore Visual Studio temporary files, build results, and
42 | ## files generated by popular Visual Studio add-ons.
43 |
44 | # User-specific files
45 | *.suo
46 | *.user
47 | *.sln.docstates
48 | *.sln.ide/
49 |
50 | # Build results
51 |
52 | [Dd]ebug/
53 | [Rr]elease/
54 | x64/
55 | build/
56 | [Bb]in/
57 | [Oo]bj/
58 |
59 | # MSTest test Results
60 | [Tt]est[Rr]esult*/
61 | [Bb]uild[Ll]og.*
62 |
63 | *_i.c
64 | *_p.c
65 | *.ilk
66 | *.meta
67 | *.obj
68 | *.pch
69 | *.pdb
70 | *.pgc
71 | *.pgd
72 | *.rsp
73 | *.sbr
74 | *.tlb
75 | *.tli
76 | *.tlh
77 | *.tmp
78 | *.tmp_proj
79 | *.log
80 | *.vspscc
81 | *.vssscc
82 | .builds
83 | *.pidb
84 | *.log
85 | *.scc
86 |
87 | # Visual C++ cache files
88 | ipch/
89 | *.aps
90 | *.ncb
91 | *.opensdf
92 | *.sdf
93 | *.cachefile
94 |
95 | # Visual Studio profiler
96 | *.psess
97 | *.vsp
98 | *.vspx
99 |
100 | # Guidance Automation Toolkit
101 | *.gpState
102 |
103 | # ReSharper is a .NET coding add-in
104 | _ReSharper*/
105 | *.[Rr]e[Ss]harper
106 |
107 | # TeamCity is a build add-in
108 | _TeamCity*
109 |
110 | # DotCover is a Code Coverage Tool
111 | *.dotCover
112 |
113 | # NCrunch
114 | *.ncrunch*
115 | .*crunch*.local.xml
116 |
117 | # Installshield output folder
118 | [Ee]xpress/
119 |
120 | # DocProject is a documentation generator add-in
121 | DocProject/buildhelp/
122 | DocProject/Help/*.HxT
123 | DocProject/Help/*.HxC
124 | DocProject/Help/*.hhc
125 | DocProject/Help/*.hhk
126 | DocProject/Help/*.hhp
127 | DocProject/Help/Html2
128 | DocProject/Help/html
129 |
130 | # Click-Once directory
131 | publish/
132 |
133 | # Publish Web Output
134 | *.Publish.xml
135 | *.pubxml
136 |
137 | # NuGet Packages Directory
138 | ## TODO: If you have NuGet Package Restore enabled, uncomment the next line
139 | packages/
140 |
141 | # Windows Azure Build Output
142 | csx
143 | *.build.csdef
144 |
145 | # Windows Store app package directory
146 | AppPackages/
147 |
148 | # Others
149 | sql/
150 | *.Cache
151 | ClientBin/
152 | [Ss]tyle[Cc]op.*
153 | ~$*
154 | *~
155 | *.dbmdl
156 | *.[Pp]ublish.xml
157 | *.pfx
158 | *.publishsettings
159 |
160 | # RIA/Silverlight projects
161 | Generated_Code/
162 |
163 | # Backup & report files from converting an old project file to a newer
164 | # Visual Studio version. Backup files are not needed, because we have git ;-)
165 | _UpgradeReport_Files/
166 | Backup*/
167 | UpgradeLog*.XML
168 | UpgradeLog*.htm
169 |
170 | # SQL Server files
171 | App_Data/*.mdf
172 | App_Data/*.ldf
173 |
174 | #############
175 | ## Windows detritus
176 | #############
177 |
178 | # Windows image file caches
179 | Thumbs.db
180 | ehthumbs.db
181 |
182 | # Folder config file
183 | Desktop.ini
184 |
185 | # Recycle Bin used on file shares
186 | $RECYCLE.BIN/
187 |
188 | # Mac crap
189 | .DS_Store
190 |
191 |
192 | #############
193 | ## Python
194 | #############
195 |
196 | *.py[co]
197 |
198 | # Packages
199 | *.egg
200 | *.egg-info
201 | dist/
202 | build/
203 | eggs/
204 | parts/
205 | var/
206 | sdist/
207 | develop-eggs/
208 | .installed.cfg
209 |
210 | # Installer logs
211 | pip-log.txt
212 |
213 | # Unit test / coverage reports
214 | .coverage
215 | .tox
216 |
217 | #Translations
218 | *.mo
219 |
220 | #Mr Developer
221 | .mr.developer.cfg
222 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | KafkaNetClient
2 | =========
3 |
4 | Native C# client for Apache Kafka.
5 |
6 | License
7 | -----------
8 | Copyright 2015, Gigya Inc, under the Apache License, V2.0. See the LICENSE file.
9 |
10 | Summary
11 | -----------
12 |
13 | This library is a fork of Jroland's [kafka-net](https://github.com/Jroland/kafka-net) library, with adjustments and improvements (not interchangeable with kafka-net, as there are breaking changes).
14 |
15 | The original project is a .NET implementation of the [Apache Kafka] protocol. The wire protocol portion is based on the [kafka-python] library written by [David Arthur], and the general class layout attempts to follow a similar pattern to his project. To that end, this project builds up from the low-level KafkaConnection object, which handles async requests to/from the kafka server, all the way up to the higher-level Producer and Consumer classes.
16 |
17 | ##### Improvements and Changes:
18 |
19 | - All the code is now async all the way, and all blocking operations have been removed (**except for the high-level `Consumer` class**).
20 | - `ProtocolGateway`:
21 | * New class that allows simple handling of Kafka protocol messages with error recovery and metadata refreshes.
22 | - `BrokerRouter`:
23 | * Breaking changes were made in order to make it async all-the-way.
24 | * Interface was changed to allow greater control working with broker metadata.
25 | - `Producer`:
26 | * Was refactored to use `ProtocolGateway` when sending messages to Kafka (for better error recovery).
27 | - `ManualConsumer`:
28 | * New class (uses `ProtocolGateway`) that allows simple fetches from Kafka brokers (in contrast to the high-level `Consumer` class that includes internal caching and other optimizations).
29 | - Bug fixes
30 | * When sending messages to the same partition with the same Ack level, order is guaranteed.
31 |
32 |
33 |
34 |
35 |
36 | Examples
37 | -----------
38 | ##### Producer
39 | ```csharp
40 | var options = new KafkaOptions(new Uri("http://SERVER1:9092"), new Uri("http://SERVER2:9092"));
41 | var router = new BrokerRouter(options);
42 | using(var client = new Producer(router))
43 | {
44 | await client.SendMessageAsync("TestTopic", new Message("hello world"));
45 | }
46 |
47 |
48 | ```
49 | ##### Consumer
50 | ```csharp
51 | var options = new KafkaOptions(new Uri("http://SERVER1:9092"), new Uri("http://SERVER2:9092"));
52 | var router = new BrokerRouter(options);
53 | var consumer = new Consumer(new ConsumerOptions("TestHarness", router));
54 |
55 | //Consume returns a blocking IEnumerable (i.e. a never-ending stream)
56 | foreach (var message in consumer.Consume())
57 | {
58 | Console.WriteLine("Response: P{0},O{1} : {2}",
59 | message.Meta.PartitionId, message.Meta.Offset, message.Value);
60 | }
61 | ```
62 |
63 | ##### TestHarness
64 | The TestHarness project is a simple example console application that reads messages from a kafka server and writes them to the screen. It also takes anything typed in the console and sends it as a message to the kafka servers.
65 |
66 | Simply modify the kafka server Uri in the code to point to a functioning test server.
67 |
68 |
69 | Pieces of the Puzzle
70 | -----------
71 | ##### Protocol
72 | The protocol has been divided up into concrete classes for each request/response pair. Each class knows how to encode and decode itself into/from its appropriate Kafka protocol byte array. One benefit of this is that it allows for a nice generic send method on the KafkaConnection.
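   |
   | For illustration, a minimal sketch of that generic send path (it assumes an `IKafkaConnection` created through `DefaultKafkaConnectionFactory`, and the `Topics` property name follows the upstream kafka-net request classes):
   | ```csharp
   | // Each request type implements IKafkaRequest<T>, so a single generic
   | // SendAsync<T> on the connection covers every request/response pair.
   | var request = new MetadataRequest { Topics = new List<string> { "TestTopic" } };
   | List<MetadataResponse> responses = await connection.SendAsync(request);
   | ```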
73 |
74 | ##### KafkaConnection
75 | Provides async methods on a persistent connection to a kafka broker (server). The send method uses TcpClient's async send function, and the read stream has a dedicated thread which uses the correlation Id to match responses to the correct request.
76 |
77 | ##### BrokerRouter
78 | Provides metadata based routing of messages to the correct Kafka partition. This class also manages the multiple KafkaConnections for each Kafka server returned by the broker section in the metadata response. Routing logic is provided by the IPartitionSelector.
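   |
   | As a sketch, using only members declared on `IBrokerRouter` in this repository:
   | ```csharp
   | var router = new BrokerRouter(new KafkaOptions(new Uri("http://SERVER1:9092")));
   |
   | // ensure metadata for the topic is cached, then resolve a route for a keyless message
   | await router.RefreshMissingTopicMetadata("TestTopic");
   | BrokerRoute route = router.SelectBrokerRouteFromLocalCache("TestTopic");
   | ```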
79 |
80 | ##### ProtocolGateway
81 | A convenience class that allows sending Kafka protocol messages easily, including error handling and metadata refresh on failure.
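   |
   | A hypothetical usage sketch (the exact signature lives in `ProtocolGateway.cs`; the constructor argument and the `SendProtocolRequest` name are assumptions here):
   | ```csharp
   | var gateway = new ProtocolGateway(new KafkaOptions(new Uri("http://SERVER1:9092")));
   | var fetch = new FetchRequest { /* topic/partition/offset details elided */ };
   |
   | // retries and metadata refreshes on recoverable errors happen inside the gateway
   | var response = await gateway.SendProtocolRequest(fetch, "TestTopic", 0);
   | ```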
82 |
83 | ##### IPartitionSelector
84 | Provides the logic for routing which partition the BrokerRouter should choose. The default selector is the DefaultPartitionSelector which will use round robin partition selection if the key property on the message is null and a mod/hash of the key value if present.
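   |
   | A custom selector is just an implementation of the interface in `Interfaces/IPartitionSelector.cs`; for example, a toy selector that ignores the key and always picks the first partition:
   | ```csharp
   | public class FirstPartitionSelector : IPartitionSelector
   | {
   |     public Partition Select(Topic topic, byte[] key)
   |     {
   |         // ignore the key entirely; a real selector should spread load across partitions
   |         return topic.Partitions[0];
   |     }
   | }
   | ```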
85 |
86 | ##### Producer
87 | Provides a higher level class which uses the combination of the BrokerRouter and KafkaConnection to send batches of messages to a Kafka broker.
88 |
89 | ##### Consumer
90 | Provides a higher level class which will consume messages from a whitelist of partitions from a single topic. The consumption mechanism is a blocking IEnumerable of messages. If no whitelist is provided, all partitions will be consumed, creating one KafkaConnection for each partition leader.
91 |
92 | ##### ManualConsumer
93 | A class which enables simple manual consumption of messages, encapsulating the Kafka protocol details of message fetching, offset fetching, and offset updating. All of these operations are on demand.
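   |
   | A minimal sketch against the `IManualConsumer` interface included in this repository (construction of the concrete `ManualConsumer` is elided; see `ManualConsumer.cs`):
   | ```csharp
   | long committed = await consumer.FetchOffset("MyConsumerGroup");   // last committed offset
   | List<Message> messages = await consumer.FetchMessages(100, committed);
   | await consumer.UpdateOrCreateOffset("MyConsumerGroup", committed + messages.Count);
   | ```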
94 |
95 | Status
96 | -----------
97 | Tested with Kafka 0.8.2.
98 |
99 | This library is still a work in progress and has not yet been deployed to production. We will update this section when it has.
100 |
101 |
102 | ##### The major items that need work:
103 | * Better handling of options to customize the internal behaviour of the base API (right now the classes pass around option parameters).
104 | * General structure of the classes is not finalized and breaking changes will occur.
105 | * Only gzip compression is implemented; Snappy is on the to-do list.
106 | * Currently only works with .NET Framework 4.5, as it uses async/await.
107 | * Test coverage.
108 | * Documentation.
109 |
110 |
111 |
112 |
113 |
114 | [kafka-python]:https://github.com/mumrah/kafka-python
115 | [Apache Kafka]:http://kafka.apache.org
116 | [David Arthur]:https://github.com/mumrah
--------------------------------------------------------------------------------
/src/.nuget/NuGet.Config:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <configuration>
3 |   <solution>
4 |     <add key="disableSourceControlIntegration" value="true" />
5 |   </solution>
6 | </configuration>
--------------------------------------------------------------------------------
/src/.nuget/NuGet.exe:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gigya/KafkaNetClient/56814644451c238624165c76bf8e7159578d1e14/src/.nuget/NuGet.exe
--------------------------------------------------------------------------------
/src/KafkaNetClient.sln:
--------------------------------------------------------------------------------
1 |
2 | Microsoft Visual Studio Solution File, Format Version 12.00
3 | # Visual Studio 2013
4 | VisualStudioVersion = 12.0.40629.0
5 | MinimumVisualStudioVersion = 10.0.40219.1
6 | Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "KafkaNetClient", "KafkaNetClient\KafkaNetClient.csproj", "{1343EB68-55CB-4452-8386-24A9989DE1C0}"
7 | EndProject
8 | Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "kafka-tests", "kafka-tests\kafka-tests.csproj", "{D80AE407-BB81-4C11-BFDC-5DD463F8B1BF}"
9 | EndProject
10 | Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TestHarness", "TestHarness\TestHarness.csproj", "{53E0B3CE-6C41-4C8A-8B66-9BD03667B1E0}"
11 | EndProject
12 | Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = ".nuget", ".nuget", "{106F20D4-E22F-4C73-9D48-7F38E2A77163}"
13 | ProjectSection(SolutionItems) = preProject
14 | .nuget\NuGet.Config = .nuget\NuGet.Config
15 | .nuget\NuGet.exe = .nuget\NuGet.exe
16 | .nuget\NuGet.targets = .nuget\NuGet.targets
17 | EndProjectSection
18 | EndProject
19 | Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{F4C67622-F19D-4D3D-9C60-C2A9E717A197}"
20 | ProjectSection(SolutionItems) = preProject
21 | ..\.gitignore = ..\.gitignore
22 | ..\LICENSE = ..\LICENSE
23 | ..\README.md = ..\README.md
24 | EndProjectSection
25 | EndProject
26 | Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Testing", "Testing", "{204BDC88-0B3E-4E9D-9140-CF0DDF184FE8}"
27 | EndProject
28 | Global
29 | GlobalSection(SolutionConfigurationPlatforms) = preSolution
30 | Debug|Any CPU = Debug|Any CPU
31 | Release|Any CPU = Release|Any CPU
32 | EndGlobalSection
33 | GlobalSection(ProjectConfigurationPlatforms) = postSolution
34 | {1343EB68-55CB-4452-8386-24A9989DE1C0}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
35 | {1343EB68-55CB-4452-8386-24A9989DE1C0}.Debug|Any CPU.Build.0 = Debug|Any CPU
36 | {1343EB68-55CB-4452-8386-24A9989DE1C0}.Release|Any CPU.ActiveCfg = Release|Any CPU
37 | {1343EB68-55CB-4452-8386-24A9989DE1C0}.Release|Any CPU.Build.0 = Release|Any CPU
38 | {D80AE407-BB81-4C11-BFDC-5DD463F8B1BF}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
39 | {D80AE407-BB81-4C11-BFDC-5DD463F8B1BF}.Debug|Any CPU.Build.0 = Debug|Any CPU
40 | {D80AE407-BB81-4C11-BFDC-5DD463F8B1BF}.Release|Any CPU.ActiveCfg = Release|Any CPU
41 | {D80AE407-BB81-4C11-BFDC-5DD463F8B1BF}.Release|Any CPU.Build.0 = Release|Any CPU
42 | {53E0B3CE-6C41-4C8A-8B66-9BD03667B1E0}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
43 | {53E0B3CE-6C41-4C8A-8B66-9BD03667B1E0}.Debug|Any CPU.Build.0 = Debug|Any CPU
44 | {53E0B3CE-6C41-4C8A-8B66-9BD03667B1E0}.Release|Any CPU.ActiveCfg = Release|Any CPU
45 | {53E0B3CE-6C41-4C8A-8B66-9BD03667B1E0}.Release|Any CPU.Build.0 = Release|Any CPU
46 | EndGlobalSection
47 | GlobalSection(SolutionProperties) = preSolution
48 | HideSolutionNode = FALSE
49 | EndGlobalSection
50 | GlobalSection(NestedProjects) = preSolution
51 | {53E0B3CE-6C41-4C8A-8B66-9BD03667B1E0} = {204BDC88-0B3E-4E9D-9140-CF0DDF184FE8}
52 | EndGlobalSection
53 | EndGlobal
54 |
--------------------------------------------------------------------------------
/src/KafkaNetClient/Common/AsyncCollection.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Concurrent;
3 | using System.Collections.Generic;
4 | using System.Threading;
5 | using System.Threading.Tasks;
6 |
7 | namespace KafkaNet.Common
8 | {
9 | public class AsyncCollection<T>
10 | {
11 | private readonly object _lock = new object();
12 | private readonly AsyncManualResetEvent _dataAvailableEvent = new AsyncManualResetEvent();
13 | private readonly ConcurrentQueue<T> _queue = new ConcurrentQueue<T>();
14 | private long _dataInBufferCount = 0;
15 |
16 | public int Count
17 | {
18 | get { return _queue.Count + (int)Interlocked.Read(ref _dataInBufferCount); }
19 | }
20 |
21 | public bool IsCompleted { get; private set; }
22 |
23 | public void CompleteAdding()
24 | {
25 | IsCompleted = true;
26 | }
27 |
28 | public Task OnHasDataAvailable(CancellationToken token)
29 | {
30 | return _dataAvailableEvent.WaitAsync().WithCancellation(token);
31 | }
32 |
33 | public Task<bool> OnHasDataAvailablebool(CancellationToken token)
34 | {
35 | return _dataAvailableEvent.WaitAsync().WithCancellationBool(token);
36 | }
37 |
38 | public void Add(T data)
39 | {
40 | if (IsCompleted)
41 | {
42 | throw new ObjectDisposedException("AsyncCollection has been marked as complete. No new documents can be added.");
43 | }
44 |
45 | _queue.Enqueue(data);
46 |
47 | TriggerDataAvailability();
48 | }
49 |
50 | public void AddRange(IEnumerable<T> data)
51 | {
52 | if (IsCompleted)
53 | {
54 | throw new ObjectDisposedException("AsyncCollection has been marked as complete. No new documents can be added.");
55 | }
56 |
57 | foreach (var item in data)
58 | {
59 | _queue.Enqueue(item);
60 | }
61 |
62 | TriggerDataAvailability();
63 | }
64 |
65 | public T Pop()
66 | {
67 | T data;
68 | return TryTake(out data) ? data : default(T);
69 | }
70 |
71 | public async Task<List<T>> TakeAsync(int count, TimeSpan timeout, CancellationToken token)
72 | {
73 | var batch = new List<T>(count);
74 | var timeoutTask = Task.Delay(timeout, token);
75 |
76 | try
77 | {
78 | do
79 | {
80 | T data;
81 | while (TryTake(out data))
82 | {
83 | batch.Add(data);
84 | Interlocked.Increment(ref _dataInBufferCount);
85 | if (--count <= 0 || timeoutTask.IsCompleted) return batch;
86 | }
87 | } while (await Task.WhenAny(_dataAvailableEvent.WaitAsync(), timeoutTask).ConfigureAwait(false) != timeoutTask);
88 |
89 | return batch;
90 | }
91 | catch
92 | {
93 | return batch;
94 | }
95 | finally
96 | {
97 | Interlocked.Add(ref _dataInBufferCount, -1 * batch.Count);
98 | }
99 | }
100 |
101 | public void DrainAndApply(Action<T> appliedFunc)
102 | {
103 | var nb = _queue.Count;
104 | for (int i = 0; i < nb; i++)
105 | {
106 | T data;
107 | if (!_queue.TryDequeue(out data))
108 | break;
109 | appliedFunc(data);
110 | }
111 |
112 | TriggerDataAvailability();
113 | }
114 |
115 | public IEnumerable<T> Drain()
116 | {
117 | T data;
118 | while (_queue.TryDequeue(out data))
119 | {
120 | yield return data;
121 | }
122 |
123 | TriggerDataAvailability();
124 | }
125 |
126 | public bool TryTake(out T data)
127 | {
128 | try
129 | {
130 | return _queue.TryDequeue(out data);
131 | }
132 | finally
133 | {
134 | if (_queue.IsEmpty) TriggerDataAvailability();
135 | }
136 | }
137 |
138 | private void TriggerDataAvailability()
139 | {
140 | if (_queue.IsEmpty && _dataAvailableEvent.IsOpen)
141 | {
142 | lock (_lock)
143 | {
144 | if (_queue.IsEmpty && _dataAvailableEvent.IsOpen)
145 | {
146 | _dataAvailableEvent.Close();
147 | }
148 | }
149 | }
150 |
151 | if (_queue.IsEmpty == false && _dataAvailableEvent.IsOpen == false)
152 | {
153 | lock (_lock)
154 | {
155 | if (_queue.IsEmpty == false && _dataAvailableEvent.IsOpen == false)
156 | {
157 | _dataAvailableEvent.Open();
158 | }
159 | }
160 | }
161 | }
162 | }
163 | }
--------------------------------------------------------------------------------
/src/KafkaNetClient/Common/AsyncLock.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Threading;
3 | using System.Threading.Tasks;
4 |
5 | namespace KafkaNet.Common
6 | {
7 | ///
8 | /// An asynchronous locking construct.
9 | ///
10 | ///
11 | /// This is based on Stephen Toub's implementation here: http://blogs.msdn.com/b/pfxteam/archive/2012/02/12/10266988.aspx
12 | /// However, we're using SemaphoreSlim as the basis rather than AsyncSemaphore, since in .NET 4.5 SemaphoreSlim implements the WaitAsync() method.
13 | ///
14 | public class AsyncLock : IDisposable
15 | {
16 | private readonly SemaphoreSlim _semaphore;
17 | private readonly Task<Releaser> _releaser;
18 |
19 | public AsyncLock()
20 | {
21 | _semaphore = new SemaphoreSlim(1, 1);
22 | _releaser = Task.FromResult(new Releaser(this));
23 | }
24 |
25 | public bool IsLocked
26 | {
27 | get { return _semaphore.CurrentCount == 0; }
28 | }
29 |
30 | public Task<Releaser> LockAsync(CancellationToken canceller)
31 | {
32 | var wait = _semaphore.WaitAsync(canceller);
33 |
34 | if (wait.IsCanceled) throw new OperationCanceledException("Unable to acquire lock within the allotted timeout.");
35 |
36 | return wait.IsCompleted ?
37 | _releaser :
38 | wait.ContinueWith((t, state) =>
39 | {
40 | if (t.IsCanceled) throw new OperationCanceledException("Unable to acquire lock within the allotted timeout.");
41 | return new Releaser((AsyncLock)state);
42 | }, this, canceller, TaskContinuationOptions.ExecuteSynchronously, TaskScheduler.Default);
43 | }
44 |
45 | public Task<Releaser> LockAsync()
46 | {
47 | var wait = _semaphore.WaitAsync();
48 | return wait.IsCompleted ?
49 | _releaser :
50 | wait.ContinueWith((_, state) => new Releaser((AsyncLock)state),
51 | this, CancellationToken.None,
52 | TaskContinuationOptions.ExecuteSynchronously, TaskScheduler.Default);
53 | }
54 |
55 | public void Dispose()
56 | {
57 | Dispose(true);
58 | }
59 |
60 | protected void Dispose(bool disposing)
61 | {
62 | if (disposing)
63 | {
64 | using (_semaphore) { }
65 | using (_releaser) { }
66 | }
67 | }
68 |
69 | public struct Releaser : IDisposable
70 | {
71 | private readonly AsyncLock _toRelease;
72 |
73 | internal Releaser(AsyncLock toRelease)
74 | {
75 | _toRelease = toRelease;
76 | }
77 |
78 | public void Dispose()
79 | {
80 | if (_toRelease != null)
81 | {
82 | _toRelease._semaphore.Release();
83 | }
84 | }
85 | }
86 | }
87 | }
--------------------------------------------------------------------------------
/src/KafkaNetClient/Common/AsyncManualResetEvent.cs:
--------------------------------------------------------------------------------
1 | using System.Threading;
2 | using System.Threading.Tasks;
3 |
4 | // original idea by Stephen Toub: http://blogs.msdn.com/b/pfxteam/archive/2012/02/11/10266920.aspx
5 |
6 | namespace KafkaNet.Common
7 | {
8 | ///
9 | /// Async version of a manual reset event.
10 | ///
11 | public sealed class AsyncManualResetEvent
12 | {
13 | private TaskCompletionSource<bool> _tcs;
14 |
15 | public bool IsOpen
16 | {
17 | get { return _tcs.Task.IsCompleted; }
18 | }
19 |
20 | ///
21 | /// Async version of a manual reset event.
22 | ///
23 | /// Sets whether the initial state of the event is true=open or false=blocking.
24 | public AsyncManualResetEvent(bool set = false)
25 | {
26 | _tcs = new TaskCompletionSource<bool>();
27 | if (set)
28 | {
29 | _tcs.SetResult(true);
30 | }
31 | }
32 |
33 | ///
34 | /// Async wait for the manual reset event to be triggered.
35 | ///
36 | ///
37 | public Task<bool> WaitAsync()
38 | {
39 | return _tcs.Task;
40 | }
41 |
42 | ///
43 | /// Set the event and complete, releasing all WaitAsync requests.
44 | ///
45 | public void Open()
46 | {
47 | _tcs.TrySetResult(true);
48 | }
49 |
50 | ///
51 | /// Reset the event making all WaitAsync requests block, does nothing if already reset.
52 | ///
53 | public void Close()
54 | {
55 | while (true)
56 | {
57 | var tcs = _tcs;
58 | if (!tcs.Task.IsCompleted || Interlocked.CompareExchange(ref _tcs, new TaskCompletionSource<bool>(), tcs) == tcs)
59 | return;
60 | }
61 | }
62 | }
63 | }
--------------------------------------------------------------------------------
/src/KafkaNetClient/Common/BigEndianBinaryWriter.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Diagnostics.Contracts;
3 | using System.IO;
4 | using System.Text;
5 |
6 | namespace KafkaNet.Common
7 | {
8 | ///
9 | /// A BinaryWriter that stores values in BigEndian format.
10 | ///
11 | ///
12 | /// Booleans, bytes and byte arrays will be written directly.
13 | /// All other values will be converted to a byte array in BigEndian byte order and written.
14 | /// Characters and Strings will all be encoded in UTF-8 (which is byte order independent).
15 | ///
16 | ///
17 | /// BigEndianBinaryWriter code provided by Zoltu
18 | /// https://github.com/Zoltu/Zoltu.EndianAwareBinaryReaderWriter
19 | /// The code was modified to implement Kafka specific byte handling.
20 | ///
21 | public class BigEndianBinaryWriter : BinaryWriter
22 | {
23 | public BigEndianBinaryWriter(Stream stream)
24 | : base(stream, Encoding.UTF8)
25 | {
26 | Contract.Requires(stream != null);
27 | }
28 |
29 | public BigEndianBinaryWriter(Stream stream, Boolean leaveOpen)
30 | : base(stream, Encoding.UTF8, leaveOpen)
31 | {
32 | Contract.Requires(stream != null);
33 | }
34 |
35 | public override void Write(Decimal value)
36 | {
37 | var ints = Decimal.GetBits(value);
38 | Contract.Assume(ints != null);
39 | Contract.Assume(ints.Length == 4);
40 |
41 | if (BitConverter.IsLittleEndian)
42 | Array.Reverse(ints);
43 |
44 | for (var i = 0; i < 4; ++i)
45 | {
46 | var bytes = BitConverter.GetBytes(ints[i]);
47 | if (BitConverter.IsLittleEndian)
48 | Array.Reverse(bytes);
49 |
50 | Write(bytes);
51 | }
52 | }
53 |
54 | public override void Write(Single value)
55 | {
56 | var bytes = BitConverter.GetBytes(value);
57 | WriteBigEndian(bytes);
58 | }
59 |
60 | public override void Write(Double value)
61 | {
62 | var bytes = BitConverter.GetBytes(value);
63 | WriteBigEndian(bytes);
64 | }
65 |
66 | public override void Write(Int16 value)
67 | {
68 | var bytes = BitConverter.GetBytes(value);
69 | WriteBigEndian(bytes);
70 | }
71 |
72 | public override void Write(Int32 value)
73 | {
74 | var bytes = BitConverter.GetBytes(value);
75 | WriteBigEndian(bytes);
76 | }
77 |
78 | public override void Write(Int64 value)
79 | {
80 | var bytes = BitConverter.GetBytes(value);
81 | WriteBigEndian(bytes);
82 | }
83 |
84 | public override void Write(UInt16 value)
85 | {
86 | var bytes = BitConverter.GetBytes(value);
87 | WriteBigEndian(bytes);
88 | }
89 |
90 | public override void Write(UInt32 value)
91 | {
92 | var bytes = BitConverter.GetBytes(value);
93 | WriteBigEndian(bytes);
94 | }
95 |
96 | public override void Write(UInt64 value)
97 | {
98 | var bytes = BitConverter.GetBytes(value);
99 | WriteBigEndian(bytes);
100 | }
101 |
102 | public override void Write(string value)
103 | {
104 | throw new NotSupportedException("Kafka requires specific string length prefix encoding.");
105 | }
106 |
107 | public void Write(byte[] value, StringPrefixEncoding encoding)
108 | {
109 | if (value == null)
110 | {
111 | Write(-1);
112 | return;
113 | }
114 |
115 | switch (encoding)
116 | {
117 | case StringPrefixEncoding.Int16:
118 | Write((Int16)value.Length);
119 | break;
120 |
121 | case StringPrefixEncoding.Int32:
122 | Write(value.Length);
123 | break;
124 | }
125 |
126 | Write(value);
127 | }
128 |
129 | public void Write(string value, StringPrefixEncoding encoding)
130 | {
131 | if (value == null)
132 | {
133 | switch (encoding)
134 | {
135 | case StringPrefixEncoding.Int16:
136 | Write((Int16)(-1));
137 | return;
138 |
139 | default:
140 | Write(-1);
141 | return;
142 | }
143 | }
144 |
145 | switch (encoding)
146 | {
147 | case StringPrefixEncoding.Int16:
148 | Write((Int16)Encoding.UTF8.GetByteCount(value)); //length prefix is the UTF-8 byte count; value.Length under-counts multi-byte characters
149 | break;
150 |
151 | case StringPrefixEncoding.Int32:
152 | Write(Encoding.UTF8.GetByteCount(value)); //length prefix is the UTF-8 byte count
153 | break;
154 | }
155 |
156 | Write(Encoding.UTF8.GetBytes(value));
157 | }
158 |
159 | private void WriteBigEndian(Byte[] bytes)
160 | {
161 | Contract.Requires(bytes != null);
162 |
163 | if (BitConverter.IsLittleEndian)
164 | Array.Reverse(bytes);
165 |
166 | Write(bytes);
167 | }
168 | }
169 |
170 | public enum StringPrefixEncoding
171 | {
172 | Int16,
173 | Int32,
174 | None
175 | };
176 | }
--------------------------------------------------------------------------------
/src/KafkaNetClient/Common/ConcurrentCircularBuffer.cs:
--------------------------------------------------------------------------------
1 | using System.Collections;
2 | using System.Collections.Generic;
3 | using System.Threading;
4 |
5 | namespace KafkaNet.Common
6 | {
7 | ///
8 | /// Don't use this class outside of the StatisticsTracker; it has unexpected behavior in other scenarios.
9 | ///
10 | public class ConcurrentCircularBuffer<T> : IEnumerable<T>
11 | {
12 | private readonly int _maxSize;
13 |
14 | private long _head = -1;
15 | private readonly T[] _values;
16 |
17 | public ConcurrentCircularBuffer(int max)
18 | {
19 | _maxSize = max;
20 | _values = new T[_maxSize];
21 | }
22 |
23 | public int MaxSize { get { return _maxSize; } }
24 |
25 | public long Count
26 | {
27 | get
28 | {
29 | long head = Interlocked.Read(ref _head);
30 | if (head == -1) return 0;
31 | if (head >= MaxSize) return MaxSize;
32 | return head + 1;
33 | }
34 | }
35 |
36 | public ConcurrentCircularBuffer<T> Enqueue(T obj)
37 | {
38 | //if more than MaxSize threads Enqueue concurrently, ordering is not guaranteed and items may overwrite each other
39 | var currentHead = Interlocked.Increment(ref _head);
40 | long index = currentHead % MaxSize;
41 | _values[index] = obj;
42 | return this;
43 | }
44 |
45 | public IEnumerator<T> GetEnumerator()
46 | {
47 | long head = Interlocked.Read(ref _head);
48 | long count = head == -1 ? 0 : (head >= MaxSize ? MaxSize : head + 1);
49 | //start from the oldest surviving item; the previous (head % MaxSize) + i indexing could read past the end of the array
50 | for (long i = 0; i < count; i++)
51 | yield return _values[(head + 1 - count + i) % MaxSize];
52 | }
53 |
54 | IEnumerator IEnumerable.GetEnumerator()
55 | {
56 | return GetEnumerator();
57 | }
58 | }
59 | }
--------------------------------------------------------------------------------
/src/KafkaNetClient/Common/Crc32Provider.cs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Damien Guard. All rights reserved.
2 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
3 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
4 | // Originally published at http://damieng.com/blog/2006/08/08/calculating_crc32_in_c_and_net
5 |
6 | using System;
7 |
8 | namespace KafkaNet.Common
9 | {
10 | ///
11 | /// This code was originally from the copyrighted code listed above but was modified significantly
12 | /// as the original code was not thread safe and did not match what was required of this driver. This
13 | /// class now provides a static library which will do the simple CRC calculation required by Kafka servers.
14 | ///
15 | public static class Crc32Provider
16 | {
17 | public const UInt32 DefaultPolynomial = 0xedb88320u;
18 | public const UInt32 DefaultSeed = 0xffffffffu;
19 | private static readonly UInt32[] PolynomialTable;
20 |
21 | static Crc32Provider()
22 | {
23 | PolynomialTable = InitializeTable(DefaultPolynomial);
24 | }
25 |
26 | public static UInt32 Compute(byte[] buffer)
27 | {
28 | return ~CalculateHash(buffer, 0, buffer.Length);
29 | }
30 |
31 | public static UInt32 Compute(byte[] buffer, int offset, int length)
32 | {
33 | return ~CalculateHash(buffer, offset, length);
34 | }
35 |
36 | public static byte[] ComputeHash(byte[] buffer)
37 | {
38 | return UInt32ToBigEndianBytes(Compute(buffer));
39 | }
40 |
41 | public static byte[] ComputeHash(byte[] buffer, int offset, int length)
42 | {
43 | return UInt32ToBigEndianBytes(Compute(buffer, offset, length));
44 | }
45 |
46 | private static UInt32[] InitializeTable(UInt32 polynomial)
47 | {
48 | var createTable = new UInt32[256];
49 | for (var i = 0; i < 256; i++)
50 | {
51 | var entry = (UInt32)i;
52 | for (var j = 0; j < 8; j++)
53 | if ((entry & 1) == 1)
54 | entry = (entry >> 1) ^ polynomial;
55 | else
56 | entry = entry >> 1;
57 | createTable[i] = entry;
58 | }
59 |
60 | return createTable;
61 | }
62 |
63 | private static UInt32 CalculateHash(byte[] buffer, int offset, int length)
64 | {
65 | var crc = DefaultSeed;
66 | for (var i = offset; i < length; i++) //'length' is an exclusive end index, not a count; callers pass the total buffer length
67 | {
68 | crc = (crc >> 8) ^ PolynomialTable[buffer[i] ^ crc & 0xff];
69 | }
70 | return crc;
71 | }
72 |
73 | private static byte[] UInt32ToBigEndianBytes(UInt32 uint32)
74 | {
75 | var result = BitConverter.GetBytes(uint32);
76 |
77 | if (BitConverter.IsLittleEndian)
78 | Array.Reverse(result);
79 |
80 | return result;
81 | }
82 | }
83 | }
--------------------------------------------------------------------------------
/src/KafkaNetClient/Common/KafkaMessagePacker.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.IO;
4 |
5 | namespace KafkaNet.Common
6 | {
7 | public class KafkaMessagePacker : IDisposable
8 | {
9 | private const int IntegerByteSize = 4;
10 | private readonly BigEndianBinaryWriter _stream;
11 |
12 | public KafkaMessagePacker()
13 | {
14 | _stream = new BigEndianBinaryWriter(new MemoryStream());
15 | Pack(IntegerByteSize); //pre-allocate space for buffer length
16 | }
17 |
18 | public KafkaMessagePacker Pack(byte value)
19 | {
20 | _stream.Write(value);
21 | return this;
22 | }
23 |
24 | public KafkaMessagePacker Pack(Int32 ints)
25 | {
26 | _stream.Write(ints);
27 | return this;
28 | }
29 |
30 | public KafkaMessagePacker Pack(Int16 ints)
31 | {
32 | _stream.Write(ints);
33 | return this;
34 | }
35 |
36 | public KafkaMessagePacker Pack(Int64 ints)
37 | {
38 | _stream.Write(ints);
39 | return this;
40 | }
41 |
42 | public KafkaMessagePacker Pack(byte[] buffer, StringPrefixEncoding encoding = StringPrefixEncoding.Int32)
43 | {
44 | _stream.Write(buffer, encoding);
45 | return this;
46 | }
47 |
48 | public KafkaMessagePacker Pack(string data, StringPrefixEncoding encoding = StringPrefixEncoding.Int32)
49 | {
50 | _stream.Write(data, encoding);
51 | return this;
52 | }
53 |
54 | public KafkaMessagePacker Pack(IEnumerable<string> data, StringPrefixEncoding encoding = StringPrefixEncoding.Int32)
55 | {
56 | foreach (var item in data)
57 | {
58 | _stream.Write(item, encoding);
59 | }
60 |
61 | return this;
62 | }
63 |
64 | public byte[] Payload()
65 | {
66 | var buffer = new byte[_stream.BaseStream.Length];
67 | _stream.BaseStream.Position = 0;
68 | Pack((Int32)(_stream.BaseStream.Length - IntegerByteSize));
69 | _stream.BaseStream.Position = 0;
70 | _stream.BaseStream.Read(buffer, 0, (int)_stream.BaseStream.Length);
71 | return buffer;
72 | }
73 |
74 | public byte[] PayloadNoLength()
75 | {
76 | var payloadLength = _stream.BaseStream.Length - IntegerByteSize;
77 | var buffer = new byte[payloadLength];
78 | _stream.BaseStream.Position = IntegerByteSize;
79 | _stream.BaseStream.Read(buffer, 0, (int)payloadLength);
80 | return buffer;
81 | }
82 |
83 | public byte[] CrcPayload()
84 | {
85 | var buffer = new byte[_stream.BaseStream.Length];
86 |
87 | //copy the payload over
88 | _stream.BaseStream.Position = 0;
89 | _stream.BaseStream.Read(buffer, 0, (int)_stream.BaseStream.Length);
90 |
91 | //calculate the crc
92 | var crc = Crc32Provider.ComputeHash(buffer, IntegerByteSize, buffer.Length);
93 | buffer[0] = crc[0];
94 | buffer[1] = crc[1];
95 | buffer[2] = crc[2];
96 | buffer[3] = crc[3];
97 |
98 | return buffer;
99 | }
100 |
101 | public void Dispose()
102 | {
103 | using (_stream) { }
104 | }
105 | }
106 | }
--------------------------------------------------------------------------------
/src/KafkaNetClient/Common/TaskExtensions.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Diagnostics.CodeAnalysis;
3 | using System.Threading;
4 | using System.Threading.Tasks;
5 |
6 | namespace KafkaNet.Common
7 | {
8 | ///
9 | /// Utility functions for dealing with Task's.
10 | ///
11 | ///
12 | /// This is a copy of the Orleans TaskExtensions, from: https://github.com/dotnet/orleans/blob/master/src/Orleans/Async/TaskExtensions.cs#L218
13 | ///
14 |
15 | public static class TaskExtensions
16 | {
17 |
18 | ///
19 | /// Observes and ignores a potential exception on a given Task.
20 | /// If a Task fails and throws an exception which is never observed, it will be caught by the .NET finalizer thread.
21 | /// This function awaits the given task and if the exception is thrown, it observes this exception and simply ignores it.
22 | /// This will prevent the escalation of this exception to the .NET finalizer thread.
23 | ///
24 | /// The task to be ignored.
25 | [SuppressMessage("Microsoft.Performance", "CA1804:RemoveUnusedLocals", MessageId = "ignored")]
26 | public static void Ignore(this Task task)
27 | {
28 | if (task.IsCompleted)
29 | {
30 | var ignored = task.Exception;
31 | }
32 | else
33 | {
34 | task.ContinueWith(
35 | t => { var ignored = t.Exception; },
36 | CancellationToken.None,
37 | TaskContinuationOptions.OnlyOnFaulted | TaskContinuationOptions.ExecuteSynchronously,
38 | TaskScheduler.Default);
39 | }
40 | }
41 |
42 | ///
43 | /// This will apply a timeout delay to the task, allowing us to exit early
44 | ///
45 | /// The task we will timeout after timeSpan
46 | /// Amount of time to wait before timing out
47 | /// If we time out we will get this exception
48 | /// The value of the completed task
49 |
50 | public static async Task<T> WithTimeout<T>(this Task<T> taskToComplete, TimeSpan timeSpan)
51 | {
52 | if (taskToComplete.IsCompleted)
53 | {
54 | return await taskToComplete;
55 | }
56 |
57 | var timeoutCancellationTokenSource = new CancellationTokenSource();
58 | var completedTask = await Task.WhenAny(taskToComplete, Task.Delay(timeSpan, timeoutCancellationTokenSource.Token));
59 |
60 | // We got done before the timeout, or were able to complete before this code ran, return the result
61 | if (taskToComplete == completedTask)
62 | {
63 | timeoutCancellationTokenSource.Cancel();
64 | // Await this so as to propagate the exception correctly
65 | return await taskToComplete;
66 | }
67 |
68 | // We did not complete before the timeout, we fire and forget to ensure we observe any exceptions that may occur
69 | taskToComplete.Ignore();
70 | throw new TimeoutException(String.Format("WithTimeout has timed out after {0}.", timeSpan));
71 | }
72 |
73 | }
74 | }
75 |
--------------------------------------------------------------------------------
/src/KafkaNetClient/Default/DefaultKafkaConnectionFactory.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet.Model;
2 | using KafkaNet.Protocol;
3 | using System;
4 | using System.Linq;
5 | using System.Net;
6 | using System.Net.Sockets;
7 |
8 | namespace KafkaNet
9 | {
10 | public class DefaultKafkaConnectionFactory : IKafkaConnectionFactory
11 | {
12 | public IKafkaConnection Create(KafkaEndpoint endpoint, TimeSpan responseTimeoutMs, IKafkaLog log, int maxRetry, TimeSpan? maximumReconnectionTimeout = null, StatisticsTrackerOptions statisticsTrackerOptions = null)
13 | {
14 | KafkaTcpSocket socket = new KafkaTcpSocket(log, endpoint, maxRetry, maximumReconnectionTimeout, statisticsTrackerOptions);
15 | return new KafkaConnection(socket, responseTimeoutMs, log);
16 | }
17 |
18 | public KafkaEndpoint Resolve(Uri kafkaAddress, IKafkaLog log)
19 | {
20 | var ipAddress = GetFirstAddress(kafkaAddress.Host, log);
21 | var ipEndpoint = new IPEndPoint(ipAddress, kafkaAddress.Port);
22 |
23 | var kafkaEndpoint = new KafkaEndpoint()
24 | {
25 | ServeUri = kafkaAddress,
26 | Endpoint = ipEndpoint
27 | };
28 |
29 | return kafkaEndpoint;
30 | }
31 |
32 | private static IPAddress GetFirstAddress(string hostname, IKafkaLog log)
33 | {
34 | try
35 | {
36 | //lookup the IP address from the provided host name
37 | var addresses = Dns.GetHostAddresses(hostname);
38 |
39 | if (addresses.Length > 0)
40 | {
41 | Array.ForEach(addresses, address => log.DebugFormat("Found address {0} for {1}", address, hostname));
42 |
43 | var selectedAddress = addresses.FirstOrDefault(item => item.AddressFamily == AddressFamily.InterNetwork) ?? addresses.First();
44 |
45 | log.DebugFormat("Using address {0} for {1}", selectedAddress, hostname);
46 |
47 | return selectedAddress;
48 | }
49 | }
50 | catch
51 | {
52 | throw new UnresolvedHostnameException("Could not resolve the following hostname: {0}", hostname);
53 | }
54 |
55 | throw new UnresolvedHostnameException("Could not resolve the following hostname: {0}", hostname);
56 | }
57 | }
58 | }
--------------------------------------------------------------------------------
/src/KafkaNetClient/Default/DefaultPartitionSelector.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet.Common;
2 | using KafkaNet.Protocol;
3 | using System;
4 | using System.Collections.Concurrent;
5 | using System.Linq;
6 |
7 | namespace KafkaNet
8 | {
9 | public class DefaultPartitionSelector : IPartitionSelector
10 | {
11 | private readonly ConcurrentDictionary<string, int> _roundRobinTracker = new ConcurrentDictionary<string, int>();
12 |
13 | public Partition Select(Topic topic, byte[] key)
14 | {
15 | if (topic == null) throw new ArgumentNullException("topic");
16 | if (topic.Partitions.Count <= 0) throw new ApplicationException(string.Format("Topic ({0}) has no partitions.", topic.Name));
17 |
18 | var partitions = topic.Partitions;
19 |
20 | if (key == null)
21 | {
22 | //no key provided: use round robin
23 | var partitionIndex = _roundRobinTracker.AddOrUpdate(topic.Name, p => 0, (s, i) =>
24 | {
25 | return ((i + 1) % partitions.Count);
26 | });
27 |
28 | return partitions[partitionIndex];
29 | }
30 |
31 | //use key hash
32 | var partitionId = Crc32Provider.Compute(key) % partitions.Count;
33 | var partition = partitions.FirstOrDefault(x => x.PartitionId == partitionId);
34 |
35 | if (partition == null)
36 | throw new InvalidPartitionException(string.Format("Hash function return partition id: {0}, but the available partitions are:{1}",
37 | partitionId, string.Join(",", partitions.Select(x => x.PartitionId))));
38 |
39 | return partition;
40 | }
41 | }
42 | }
--------------------------------------------------------------------------------
/src/KafkaNetClient/Default/DefaultTraceLog.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Diagnostics;
3 |
4 | namespace KafkaNet
5 | {
6 | ///
7 | /// This class simply logs all information out to the Trace log provided by Windows.
8 | /// The reason Trace is used as the default is to remove external references from
9 | /// the base kafka-net package. A proper logging framework like log4net is recommended.
10 | ///
11 | public class DefaultTraceLog : IKafkaLog
12 | {
13 | private readonly LogLevel _minLevel;
14 |
15 | public DefaultTraceLog(LogLevel minLevel)
16 | {
17 | _minLevel = minLevel;
18 | }
19 |
20 | public DefaultTraceLog()
21 | {
22 | _minLevel = LogLevel.Debug;
23 | }
24 |
25 | private void Log(string message, LogLevel level)
26 | {
27 | //%timestamp [%thread] %level %message
28 | //TODO: static log to each add class!!
29 | if (level >= _minLevel)
30 | {
31 | string logMessage = string.Format("{0} thread:[{1}] level:[{2}] Message:{3}", DateTime.Now.ToString("hh:mm:ss-ffffff"),
32 | System.Threading.Thread.CurrentThread.ManagedThreadId, level, message);
33 | Trace.WriteLine(logMessage);
34 | }
35 | }
36 |
37 | public void DebugFormat(string format, params object[] args)
38 | {
39 | Log(string.Format(format, args), LogLevel.Debug);
40 | }
41 |
42 | public void InfoFormat(string format, params object[] args)
43 | {
44 | Log(string.Format(format, args), LogLevel.Info);
45 | }
46 |
47 | public void WarnFormat(string format, params object[] args)
48 | {
49 | Log(string.Format(format, args), LogLevel.Warn);
50 | }
51 |
52 | public void ErrorFormat(string format, params object[] args)
53 | {
54 | Log(string.Format(format, args), LogLevel.Error);
55 | }
56 |
57 | public void FatalFormat(string format, params object[] args)
58 | {
59 | Log(string.Format(format, args), LogLevel.Fata);
60 | }
61 | }
62 |
63 | public enum LogLevel
64 | {
65 | Debug = 0, Info = 1, Warn = 2, Error = 3, Fata = 4
66 | }
67 | }
--------------------------------------------------------------------------------
/src/KafkaNetClient/Interfaces/IBrokerRouter.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet.Protocol;
2 | using System;
3 | using System.Collections.Generic;
4 | using System.Threading.Tasks;
5 |
6 | namespace KafkaNet
7 | {
8 | public interface IBrokerRouter : IDisposable
9 | {
10 | ///
11 | /// Select a broker for a specific topic and partitionId.
12 | ///
13 | /// The topic name to select a broker for.
14 | /// The exact partition to select a broker for.
15 | /// A broker route for the given partition of the given topic.
16 | ///
17 | /// This function does not use any selector criteria. If the given partitionId does not exist an exception will be thrown.
18 | ///
19 | /// Thrown if the returned metadata for the given topic is invalid or missing.
20 | /// Thrown if the given partitionId does not exist for the given topic.
21 | /// Thrown if none of the Default Brokers can be contacted.
22 | BrokerRoute SelectBrokerRouteFromLocalCache(string topic, int partitionId);
23 |
24 | ///
25 | /// Select a broker for a given topic using the IPartitionSelector function.
26 | ///
27 | /// The topic to retrieve a broker route for.
28 | /// The key used by the IPartitionSelector to collate to a consistent partition. Null value means key will be ignored in selection process.
29 | /// A broker route for the given topic.
30 | /// Thrown if the returned metadata for the given topic is invalid or missing.
31 | /// Thrown if none of the Default Brokers can be contacted.
32 | BrokerRoute SelectBrokerRouteFromLocalCache(string topic, byte[] key = null);
33 |
34 | ///
35 | /// Returns Topic metadata for each topic requested.
36 | ///
37 | /// Collection of topics to request metadata for.
38 | /// List of Topics as provided by Kafka.
39 | /// The topic metadata will by default check the cache first and then request metadata from the server if it does not exist in cache.
40 | List<Topic> GetTopicMetadataFromLocalCache(params string[] topics);
41 |
42 | ///
43 | /// Force a call to the kafka servers to refresh metadata for the given topics.
44 | ///
45 | /// List of topics to update metadata for.
46 | ///
47 | /// This method will initiate a call to the kafka servers and retrieve metadata for all given topics, updating the broker cache in the process.
48 | ///
49 | Task RefreshTopicMetadata(params string[] topics);
50 |
51 | ///
52 | /// Returns Topic metadata for each topic.
53 | ///
54 | /// List of topics as provided by Kafka.
55 | ///
56 | /// The topic metadata will by default check the cache.
57 | ///
58 | List<Topic> GetAllTopicMetadataFromLocalCache();
59 |
60 | ///
61 | /// Force a call to the kafka servers to refresh metadata for all topics.
62 | ///
63 | ///
64 | /// This method will ignore the cache and initiate a call to the kafka servers for all topics, updating the cache with the resulting metadata.
65 | ///
66 | Task RefreshAllTopicMetadata();
67 |
68 | Task RefreshMissingTopicMetadata(params string[] topics);
69 |
70 | DateTime GetTopicMetadataRefreshTime(string topic);
71 |
72 | IKafkaLog Log { get; }
73 | }
74 | }
--------------------------------------------------------------------------------
/src/KafkaNetClient/Interfaces/IKafkaConnection.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet.Model;
2 | using System;
3 | using System.Collections.Generic;
4 | using System.Threading.Tasks;
5 |
6 | namespace KafkaNet
7 | {
8 | public interface IKafkaConnection : IDisposable
9 | {
10 | ///
11 | /// The unique endpoint location of this connection.
12 | ///
13 | KafkaEndpoint Endpoint { get; }
14 |
15 | ///
16 | /// Value indicating the read polling thread is still active.
17 | ///
18 | bool ReadPolling { get; }
19 |
20 | ///
21 | /// Send raw payload data up to the connected endpoint.
22 | ///
23 | /// The raw data to send to the connected endpoint.
24 | /// Task representing the future success or failure of query.
25 | Task SendAsync(KafkaDataPayload payload);
26 |
27 | ///
28 | /// Send a specific IKafkaRequest to the connected endpoint.
29 | ///
30 | /// The type of the KafkaResponse expected from the request being sent.
31 | /// The KafkaRequest to send to the connected endpoint.
32 | /// Task representing the future responses from the sent request.
33 | Task<List<T>> SendAsync<T>(IKafkaRequest<T> request);
34 | }
35 | }
--------------------------------------------------------------------------------
/src/KafkaNetClient/Interfaces/IKafkaConnectionFactory.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet.Model;
2 | using System;
3 |
4 | namespace KafkaNet
5 | {
6 | public interface IKafkaConnectionFactory
7 | {
8 | ///
9 | /// Create a new KafkaConnection.
10 | ///
11 | /// The specific KafkaEndpoint of the server to connect to.
12 | /// The amount of time to wait for a message response to be received after sending a message to Kafka
13 | /// Logging interface used to record any log messages created by the connection.
14 | /// The maximum retry attempt before throwing socket exception.
15 | /// The maximum time to wait when backing off on reconnection attempts.
16 | /// IKafkaConnection initialized to connect to the given endpoint.
17 | IKafkaConnection Create(KafkaEndpoint endpoint, TimeSpan responseTimeoutMs, IKafkaLog log, int maxRetry, TimeSpan? maximumReconnectionTimeout = null, StatisticsTrackerOptions statisticsTrackerOptions = null);
18 |
19 | ///
20 | /// Resolves a generic Uri into a uniquely identifiable KafkaEndpoint.
21 | ///
22 | /// The address to the kafka server to resolve.
23 | /// Logging interface used to record any log messages created by the Resolving process.
24 | /// KafkaEndpoint with resolved IP and Address.
25 | KafkaEndpoint Resolve(Uri kafkaAddress, IKafkaLog log);
26 | }
27 | }
--------------------------------------------------------------------------------
/src/KafkaNetClient/Interfaces/IKafkaLog.cs:
--------------------------------------------------------------------------------
1 | namespace KafkaNet
2 | {
3 | //TODO add [CallerMemberName], [CallerFilePath], [CallerLineNumber] .
4 | //TODO add log Exception.
5 | public interface IKafkaLog
6 | {
7 | ///
8 | /// Record debug information using the String.Format syntax.
9 | ///
10 | /// Format string template. e.g. "Exception = {0}"
11 | /// Arguments which will fill the template string in order of appearance.
12 | void DebugFormat(string format, params object[] args);
13 |
14 | ///
15 | /// Record info information using the String.Format syntax.
16 | ///
17 | /// Format string template. e.g. "Exception = {0}"
18 | /// Arguments which will fill the template string in order of appearance.
19 | void InfoFormat(string format, params object[] args);
20 |
21 | ///
22 | /// Record warning information using the String.Format syntax.
23 | ///
24 | /// Format string template. e.g. "Exception = {0}"
25 | /// Arguments which will fill the template string in order of appearance.
26 | void WarnFormat(string format, params object[] args);
27 |
28 | ///
29 | /// Record error information using the String.Format syntax.
30 | ///
31 | /// Format string template. e.g. "Exception = {0}"
32 | /// Arguments which will fill the template string in order of appearance.
33 | void ErrorFormat(string format, params object[] args);
34 |
35 | ///
36 | /// Record fatal information using the String.Format syntax.
37 | ///
38 | /// Format string template. e.g. "Exception = {0}"
39 | /// Arguments which will fill the template string in order of appearance.
40 | void FatalFormat(string format, params object[] args);
41 | }
42 | }
--------------------------------------------------------------------------------
/src/KafkaNetClient/Interfaces/IKafkaRequest.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet.Protocol;
2 | using System.Collections.Generic;
3 |
4 | namespace KafkaNet
5 | {
6 | ///
7 | /// KafkaRequest represents a Kafka request messages as an object which can Encode itself into the appropriate
8 | /// binary request and Decode any responses to that request.
9 | ///
10 | /// The type of the KafkaResponse expected back from the request.
11 | public interface IKafkaRequest<T>
12 | {
13 | ///
14 | /// Indicates this request should wait for a response from the broker
15 | ///
16 | bool ExpectResponse { get; }
17 |
18 | ///
19 | /// Descriptive name used to identify the source of this request.
20 | ///
21 | string ClientId { get; set; }
22 |
23 | ///
24 | /// Id which will be echoed back by Kafka to correlate responses to this request. Usually automatically assigned by driver.
25 | ///
26 | int CorrelationId { get; set; }
27 |
28 | ///
29 | /// Enum identifying the specific type of request message being represented.
30 | ///
31 | ApiKeyRequestType ApiKey { get; }
32 |
33 | ///
34 | /// Encode this request into the Kafka wire protocol.
35 | ///
36 | /// Byte[] representing the binary wire protocol of this request.
37 | KafkaDataPayload Encode();
38 |
39 | ///
40 | /// Decode a response payload from Kafka into an enumerable of T responses.
41 | ///
42 | /// Buffer data returned by Kafka servers.
43 | ///
44 | IEnumerable<T> Decode(byte[] payload);
45 | }
46 | }
--------------------------------------------------------------------------------
/src/KafkaNetClient/Interfaces/IKafkaTcpSocket.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet.Model;
2 | using System;
3 | using System.Threading;
4 | using System.Threading.Tasks;
5 |
6 | namespace KafkaNet
7 | {
8 | public interface IKafkaTcpSocket : IDisposable
9 | {
10 | ///
11 | /// The IP endpoint to the server.
12 | ///
13 | KafkaEndpoint Endpoint { get; }
14 |
15 | ///
16 | /// Read a fixed number of bytes from the server, returning only when all bytes have been received.
17 | ///
18 | /// The size in bytes to receive from server.
19 | /// Returns a byte[] array with the size of readSize.
20 | Task<byte[]> ReadAsync(int readSize);
21 |
22 | ///
23 | /// Read a fixed number of bytes from the server, returning only when all bytes have been received.
24 | ///
25 | /// The size in bytes to receive from server.
26 | /// A cancellation token which will cancel the request.
27 | /// Returns a byte[] array with the size of readSize.
28 | Task<byte[]> ReadAsync(int readSize, CancellationToken cancellationToken);
29 |
30 | ///
31 | /// Convenience function to write full buffer data to the server.
32 | ///
33 | /// The buffer data to send.
34 | /// Returns a Task handle to the write operation with the size of the written bytes.
35 | Task<KafkaDataPayload> WriteAsync(KafkaDataPayload payload);
36 |
37 | ///
38 | /// Write the buffer data to the server.
39 | ///
40 | /// The buffer data to send.
41 | /// A cancellation token which will cancel the request.
42 | /// Returns a Task handle to the write operation with the size of the written bytes.
43 | Task<KafkaDataPayload> WriteAsync(KafkaDataPayload payload, CancellationToken cancellationToken);
44 | }
45 | }
--------------------------------------------------------------------------------
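A sketch of the read pattern this interface is built for, assuming an already-connected implementation such as KafkaTcpSocket. Kafka frames every response with a 4-byte big-endian length prefix, so a complete message is read in two steps; the big-endian decode is done inline here rather than assuming a helper.

    using System.Threading.Tasks;
    using KafkaNet;

    internal static class FramedReadSketch
    {
        internal static async Task<byte[]> ReadMessageAsync(IKafkaTcpSocket socket)
        {
            // First read the 4-byte size prefix...
            byte[] sizeBytes = await socket.ReadAsync(4).ConfigureAwait(false);
            int messageSize = (sizeBytes[0] << 24) | (sizeBytes[1] << 16) | (sizeBytes[2] << 8) | sizeBytes[3];

            // ...then read exactly that many body bytes.
            return await socket.ReadAsync(messageSize).ConfigureAwait(false);
        }
    }
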
/src/KafkaNetClient/Interfaces/IManualConsumer.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet.Protocol;
2 | using System.Collections.Generic;
3 | using System.Threading.Tasks;
4 |
5 | namespace KafkaNet.Interfaces
6 | {
7 | public interface IManualConsumer
8 | {
9 | Task UpdateOrCreateOffset(string consumerGroup, long offset);
10 |
11 | Task<long> FetchLastOffset();
12 |
13 | Task<long> FetchOffset(string consumerGroup);
14 |
15 | Task<IEnumerable<Message>> FetchMessages(int maxCount, long offset);
16 | }
17 | }
--------------------------------------------------------------------------------
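A sketch of the intended call sequence, assuming an IManualConsumer implementation such as the ManualConsumer class in this repository; the consumer group name and batch size are hypothetical. FetchOffset reads the group's last committed position, and UpdateOrCreateOffset commits the new one after processing.

    using System.Collections.Generic;
    using System.Linq;
    using System.Threading.Tasks;
    using KafkaNet.Interfaces;
    using KafkaNet.Protocol;

    internal static class ManualConsumeSketch
    {
        internal static async Task ConsumeOnceAsync(IManualConsumer consumer)
        {
            long offset = await consumer.FetchOffset("my-group");
            IEnumerable<Message> batch = await consumer.FetchMessages(100, offset);

            var messages = batch.ToList();
            // ...process the messages, then commit the next position to read from
            // (simplified: assumes a contiguous batch starting at `offset`)...
            if (messages.Count > 0)
                await consumer.UpdateOrCreateOffset("my-group", offset + messages.Count);
        }
    }
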
/src/KafkaNetClient/Interfaces/IMetadataQueries.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet.Protocol;
2 | using System;
3 | using System.Collections.Generic;
4 | using System.Threading.Tasks;
5 |
6 | namespace KafkaNet
7 | {
8 | ///
9 | /// Contains common metadata query commands that are used by both a consumer and producer.
10 | ///
11 | internal interface IMetadataQueries : IDisposable
12 | {
13 | ///
14 | /// Get metadata on the given topic.
15 | ///
16 | /// The metadata on the requested topic.
17 | /// Topic object containing the metadata on the requested topic.
18 | Topic GetTopicFromCache(string topic);
19 |
20 | ///
21 | /// Get offsets for each partition from a given topic.
22 | ///
23 | /// Name of the topic to get offset information from.
24 | ///
25 | ///
26 | ///
27 | Task<List<OffsetResponse>> GetTopicOffsetAsync(string topic, int maxOffsets = 2, int time = -1);
28 | }
29 | }
--------------------------------------------------------------------------------
/src/KafkaNetClient/Interfaces/IPartitionSelector.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet.Protocol;
2 |
3 | namespace KafkaNet
4 | {
5 | public interface IPartitionSelector
6 | {
7 | ///
8 | /// Select the appropriate partition to post a message to, based on topic and key data.
9 | ///
10 | /// The topic to which the message will be sent.
11 | /// The data used to consistently route a message to a particular partition. Value can be null.
12 | /// The partition to send the message to.
13 | Partition Select(Topic topic, byte[] key);
14 | }
15 | }
--------------------------------------------------------------------------------
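For illustration, a custom selector that hashes the key and falls back to the first partition for null keys. This is a simplified alternative to DefaultPartitionSelector (which combines key hashing with round-robin), not the library's implementation.

    using System;
    using KafkaNet;
    using KafkaNet.Protocol;

    public class KeyHashPartitionSelector : IPartitionSelector
    {
        public Partition Select(Topic topic, byte[] key)
        {
            if (topic.Partitions.Count == 0)
                throw new InvalidOperationException("No partitions available for topic: " + topic.Name);

            // A null key carries no routing information; pick the first partition.
            if (key == null) return topic.Partitions[0];

            // Derive a stable, non-negative index from the key bytes.
            int hash = 0;
            foreach (byte b in key) hash = unchecked(hash * 31 + b);

            return topic.Partitions[Math.Abs(hash % topic.Partitions.Count)];
        }
    }
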
/src/KafkaNetClient/KafkaNetClient.csproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Debug
6 | AnyCPU
7 | {1343EB68-55CB-4452-8386-24A9989DE1C0}
8 | Library
9 | Properties
10 | KafkaNet
11 | KafkaNetClient
12 | v4.5
13 | 512
14 |
15 |
16 |
17 | true
18 | full
19 | false
20 | bin\Debug\
21 | DEBUG;TRACE
22 | prompt
23 | 4
24 | false
25 |
26 |
27 | pdbonly
28 | true
29 | bin\Release\
30 | TRACE
31 | prompt
32 | 4
33 | false
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 |
96 |
97 |
98 |
99 |
100 |
101 |
102 |
103 |
104 |
105 |
112 |
--------------------------------------------------------------------------------
/src/KafkaNetClient/KafkaNetClient.nuspec:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | KafkaNetClient
5 | $version$
6 | Gigya Inc
7 | Gigya Inc
8 | https://github.com/gigya/KafkaNetClient/blob/master/LICENSE
9 | https://github.com/gigya/KafkaNetClient
10 | http://kafka.apache.org/images/kafka_logo.png
11 | KafkaNetClient
12 | false
13 | Provides high and low level classes for communicating with an Apache Kafka cluster. A fork of kafka-net by Jroland, with improvements and adjustments (see project site).
14 | Native C# client for Apache Kafka.
15 | Copyright Gigya 2015
16 | C# Apache Kafka
17 | https://github.com/gigya/KafkaNetClient/releases
18 |
19 |
20 |
21 |
22 |
--------------------------------------------------------------------------------
/src/KafkaNetClient/MetadataQueries.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet.Protocol;
2 | using System.Collections.Generic;
3 | using System.Linq;
4 | using System.Threading.Tasks;
5 |
6 | namespace KafkaNet
7 | {
8 | ///
9 | /// This class provides a set of common queries that are useful for both the Consumer and Producer classes.
10 | ///
11 | public class MetadataQueries : IMetadataQueries
12 | {
13 | private readonly IBrokerRouter _brokerRouter;
14 |
15 | public MetadataQueries(IBrokerRouter brokerRouter)
16 | {
17 | _brokerRouter = brokerRouter;
18 | }
19 |
20 | ///
21 | /// Get offsets for each partition from a given topic.
22 | ///
23 | /// Name of the topic to get offset information from.
24 | ///
25 | ///
26 | ///
27 | public async Task<List<OffsetResponse>> GetTopicOffsetAsync(string topic, int maxOffsets = 2, int time = -1)
28 | {
29 | await _brokerRouter.RefreshMissingTopicMetadata(topic).ConfigureAwait(false);
30 | var topicMetadata = GetTopicFromCache(topic);
31 |
32 | //send the offset request to each partition leader
33 | var sendRequests = topicMetadata.Partitions
34 | .GroupBy(x => x.PartitionId)
35 | .Select(p =>
36 | {
37 | var route = _brokerRouter.SelectBrokerRouteFromLocalCache(topic, p.Key);
38 | var request = new OffsetRequest
39 | {
40 | Offsets = new List<Offset>
41 | {
42 | new Offset
43 | {
44 | Topic = topic,
45 | PartitionId = p.Key,
46 | MaxOffsets = maxOffsets,
47 | Time = time
48 | }
49 | }
50 | };
51 |
52 | return route.Connection.SendAsync(request);
53 | }).ToArray();
54 |
55 | await Task.WhenAll(sendRequests).ConfigureAwait(false);
56 | return sendRequests.SelectMany(x => x.Result).ToList();
57 | }
58 |
59 | ///
60 | /// Get metadata on the given topic.
61 | ///
62 | /// The metadata on the requested topic.
63 | /// Topic object containing the metadata on the requested topic.
64 | public Topic GetTopicFromCache(string topic)
65 | {
66 | var response = _brokerRouter.GetTopicMetadataFromLocalCache(topic);
67 |
68 | if (response.Count <= 0) throw new InvalidTopicMetadataException(ErrorResponseCode.NoError, "No metadata could be found for topic: {0}", topic);
69 |
70 | return response.First();
71 | }
72 |
73 | public void Dispose()
74 | {
75 | using (_brokerRouter) { }
76 | }
77 | }
78 | }
--------------------------------------------------------------------------------
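A usage sketch for GetTopicOffsetAsync above; the broker address and topic name are hypothetical placeholders. One OffsetResponse comes back per partition queried.

    using System;
    using System.Collections.Generic;
    using System.Threading.Tasks;
    using KafkaNet;
    using KafkaNet.Model;
    using KafkaNet.Protocol;

    internal static class OffsetQuerySketch
    {
        internal static async Task PrintOffsetsAsync()
        {
            var options = new KafkaOptions(new Uri("http://broker:9092"));
            using (var queries = new MetadataQueries(new BrokerRouter(options)))
            {
                List<OffsetResponse> offsets = await queries.GetTopicOffsetAsync("my-topic");
                foreach (var o in offsets)
                    Console.WriteLine("Partition {0}: {1}", o.PartitionId, string.Join(",", o.Offsets));
            }
        }
    }
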
/src/KafkaNetClient/Model/BrokerRoute.cs:
--------------------------------------------------------------------------------
1 | namespace KafkaNet
2 | {
3 | public class BrokerRoute
4 | {
5 | public string Topic { get; set; }
6 | public int PartitionId { get; set; }
7 | public IKafkaConnection Connection { get; set; }
8 |
9 | public override string ToString()
10 | {
11 | return string.Format("{0} Topic:{1} PartitionId:{2}", Connection.Endpoint.ServeUri, Topic, PartitionId);
12 | }
13 |
14 | #region Equals Override...
15 |
16 | protected bool Equals(BrokerRoute other)
17 | {
18 | return string.Equals(Topic, other.Topic) && PartitionId == other.PartitionId;
19 | }
20 |
21 | public override int GetHashCode()
22 | {
23 | unchecked
24 | {
25 | return ((Topic != null ? Topic.GetHashCode() : 0) * 397) ^ PartitionId;
26 | }
27 | }
28 |
29 | public override bool Equals(object obj)
30 | {
31 | if (ReferenceEquals(null, obj)) return false;
32 | if (ReferenceEquals(this, obj)) return true;
33 | if (obj.GetType() != this.GetType()) return false;
34 | return Equals((BrokerRoute)obj);
35 | }
36 |
37 | #endregion Equals Override...
38 | }
39 | }
--------------------------------------------------------------------------------
/src/KafkaNetClient/Model/ConsumerOptions.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet.Protocol;
2 | using System;
3 | using System.Collections.Generic;
4 |
5 | namespace KafkaNet.Model
6 | {
7 | public class ConsumerOptions
8 | {
9 | private const int DefaultMaxConsumerBufferSize = 50;
10 | private const int DefaultBackoffIntervalMS = 1000;
11 | private const double DefaultFetchBufferMultiplier = 1.5;
12 |
13 | ///
14 | /// The topic to consume messages from.
15 | ///
16 | public string Topic { get; set; }
17 |
18 | ///
19 | /// Whitelist of partitions to consume from. Empty list indicates all partitions.
20 | ///
21 | public List<int> PartitionWhitelist { get; set; }
22 |
23 | ///
24 | /// Log object to record operational messages.
25 | ///
26 | public IKafkaLog Log { get; set; }
27 |
28 | ///
29 | /// The broker router used to provide connection to each partition server.
30 | ///
31 | public IBrokerRouter Router { get; set; }
32 |
33 | ///
34 | /// The time in milliseconds between queries to look for any new partitions being created.
35 | ///
36 | public int TopicPartitionQueryTimeMs { get; set; }
37 |
38 | ///
39 | /// The size of the internal buffer queue which stores messages from Kafka.
40 | ///
41 | public int ConsumerBufferSize { get; set; }
42 |
43 | ///
44 | /// The interval for the consumer to sleep before trying to fetch the next message, if the previous fetch received no messages.
45 | ///
46 | public TimeSpan BackoffInterval { get; set; }
47 |
48 | ///
49 | /// The max wait time is the maximum amount of time in milliseconds to block waiting if insufficient data is available at the time the request is issued.
50 | ///
51 | public TimeSpan MaxWaitTimeForMinimumBytes { get; set; }
52 |
53 | ///
54 | /// This is the minimum number of bytes of messages that must be available to give a response. If the client sets this to 0 the server will always respond immediately,
55 | /// however if there is no new data since their last request they will just get back empty message sets. If this is set to 1, the server will respond as soon as at least
56 | /// one partition has at least 1 byte of data or the specified timeout occurs. By setting higher values in combination with the timeout the consumer can tune for throughput
57 | /// and trade a little additional latency for reading only large chunks of data (e.g. setting MaxWaitTime to 100 ms and setting MinBytes to 64k would allow the server to wait
58 | /// up to 100ms to try to accumulate 64k of data before responding).
59 | ///
60 | /// This enables long polling: it tells Kafka to keep the connection open for up to MaxWaitTimeForMinimumBytes while data accumulates.
61 | /// Watch out: if you are fetching from (or sending to) different partitions on the same leader, the requests will block each other,
62 | /// because operations on a connection are performed serially.
63 | ///
64 | public int MinimumBytes { get; set; }
65 |
66 | ///
67 | /// In the event of a buffer under run, this multiplier will allow padding the new buffer size.
68 | ///
69 | public double FetchBufferMultiplier { get; set; }
70 |
71 | public ConsumerOptions(string topic, IBrokerRouter router)
72 | {
73 | Topic = topic;
74 | Router = router;
75 | PartitionWhitelist = new List<int>();
76 | Log = router.Log;
77 | TopicPartitionQueryTimeMs = (int)TimeSpan.FromMinutes(15).TotalMilliseconds;
78 | ConsumerBufferSize = DefaultMaxConsumerBufferSize;
79 | BackoffInterval = TimeSpan.FromMilliseconds(DefaultBackoffIntervalMS);
80 | FetchBufferMultiplier = DefaultFetchBufferMultiplier;
81 | MaxWaitTimeForMinimumBytes = TimeSpan.FromMilliseconds(FetchRequest.DefaultMaxBlockingWaitTime);
82 | MinimumBytes = FetchRequest.DefaultMinBlockingByteBufferSize;
83 | }
84 | }
85 | }
--------------------------------------------------------------------------------
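To make the MinimumBytes remarks above concrete, a sketch that tunes a consumer for larger fetches at slightly higher latency; the broker address and topic name are hypothetical.

    using System;
    using KafkaNet;
    using KafkaNet.Model;

    internal static class ConsumerTuningSketch
    {
        internal static ConsumerOptions BuildThroughputOptions()
        {
            var router = new BrokerRouter(new KafkaOptions(new Uri("http://broker:9092")));
            return new ConsumerOptions("my-topic", router)
            {
                MaxWaitTimeForMinimumBytes = TimeSpan.FromMilliseconds(100), // let the broker wait up to 100 ms...
                MinimumBytes = 64 * 1024                                     // ...for 64 KB of data to accumulate
            };
        }
    }
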
/src/KafkaNetClient/Model/KafkaEndpoint.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Net;
3 |
4 | namespace KafkaNet.Model
5 | {
6 | public class KafkaEndpoint
7 | {
8 | public Uri ServeUri { get; set; }
9 | public IPEndPoint Endpoint { get; set; }
10 |
11 | protected bool Equals(KafkaEndpoint other)
12 | {
13 | return Equals(Endpoint, other.Endpoint);
14 | }
15 |
16 | public override int GetHashCode()
17 | {
18 | //calculated like this to ensure that ports on the same address sort in descending order
19 | return (Endpoint != null ? Endpoint.Address.GetHashCode() + Endpoint.Port : 0);
20 | }
21 |
22 | public override bool Equals(object obj)
23 | {
24 | if (ReferenceEquals(null, obj)) return false;
25 | if (ReferenceEquals(this, obj)) return true;
26 | if (obj.GetType() != this.GetType()) return false;
27 | return Equals((KafkaEndpoint)obj);
28 | }
29 |
30 | public override string ToString()
31 | {
32 | return ServeUri.ToString();
33 | }
34 | }
35 | }
--------------------------------------------------------------------------------
/src/KafkaNetClient/Model/KafkaOptions.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet.Protocol;
2 | using System;
3 | using System.Collections.Generic;
4 | using System.Linq;
5 |
6 | namespace KafkaNet.Model
7 | {
8 | public class KafkaOptions
9 | {
10 | private const int DefaultResponseTimeout = 60000;
11 | private const int DefaultCacheExpirationTimeoutMS = 10;
12 | private const int DefaultRefreshMetadataTimeout = 200000;
13 | private const int DefaultMaxRetry = 5;
14 | ///
15 | /// A refresh-metadata request will try to refresh only the topics whose cache entries have expired.
16 | ///
17 |
18 | public StatisticsTrackerOptions StatisticsTrackerOptions { get; set; }
19 | public TimeSpan CacheExpiration { get; set; }
20 | public TimeSpan RefreshMetadataTimeout { get; set; }
21 | public int MaxRetry { get; set; }
22 |
23 | ///
24 | /// List of Uri connections to kafka servers. They are used to query for metadata from Kafka. More than one is recommended.
25 | ///
26 | public List<Uri> KafkaServerUri { get; set; }
27 |
28 | ///
29 | /// Safely attempts to resolve endpoints from the KafkaServerUri, ignoring all unresolvable ones.
30 | ///
31 | public IEnumerable<KafkaEndpoint> KafkaServerEndpoints
32 | {
33 | get
34 | {
35 | foreach (var uri in KafkaServerUri)
36 | {
37 | KafkaEndpoint endpoint = null;
38 | try
39 | {
40 | endpoint = KafkaConnectionFactory.Resolve(uri, Log);
41 | }
42 | catch (UnresolvedHostnameException ex)
43 | {
44 | Log.WarnFormat("Ignoring the following uri as it could not be resolved. Uri:{0} Exception:{1}", uri, ex);
45 | }
46 |
47 | if (endpoint != null) yield return endpoint;
48 | }
49 | }
50 | }
51 |
52 | ///
53 | /// Provides a factory for creating new kafka connections.
54 | ///
55 | public IKafkaConnectionFactory KafkaConnectionFactory { get; set; }
56 |
57 | ///
58 | /// Selector function for routing messages to partitions. Default is key/hash and round robin.
59 | ///
60 | public IPartitionSelector PartitionSelector { get; set; }
61 |
62 | ///
63 | /// Timeout length in milliseconds waiting for a response from kafka.
64 | ///
65 | public TimeSpan ResponseTimeoutMs { get; set; }
66 |
67 | ///
68 | /// Log object to record operational messages.
69 | ///
70 | public IKafkaLog Log { get; set; }
71 |
72 | ///
73 | /// The maximum time to wait when backing off on reconnection attempts.
74 | ///
75 | public TimeSpan? MaximumReconnectionTimeout { get; set; }
76 |
77 | public KafkaOptions(params Uri[] kafkaServerUri)
78 | {
79 | KafkaServerUri = kafkaServerUri.ToList();
80 | PartitionSelector = new DefaultPartitionSelector();
81 | Log = new DefaultTraceLog();
82 | KafkaConnectionFactory = new DefaultKafkaConnectionFactory();
83 | ResponseTimeoutMs = TimeSpan.FromMilliseconds(DefaultResponseTimeout);
84 | CacheExpiration = TimeSpan.FromMilliseconds(DefaultCacheExpirationTimeoutMS);
85 | RefreshMetadataTimeout = TimeSpan.FromMilliseconds(DefaultRefreshMetadataTimeout);
86 | MaxRetry = DefaultMaxRetry;
87 | StatisticsTrackerOptions = new StatisticsTrackerOptions();
88 | }
89 | }
90 | }
--------------------------------------------------------------------------------
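A typical construction of the options above with two seed brokers (hypothetical hosts); since KafkaServerEndpoints skips unresolvable URIs, supplying several seeds keeps metadata queries possible when one host is down.

    using System;
    using KafkaNet.Model;

    internal static class OptionsSketch
    {
        internal static KafkaOptions Build()
        {
            return new KafkaOptions(new Uri("http://broker1:9092"), new Uri("http://broker2:9092"))
            {
                ResponseTimeoutMs = TimeSpan.FromSeconds(30),
                MaxRetry = 3
            };
        }
    }
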
/src/KafkaNetClient/Model/StatisticsTrackerOptions.cs:
--------------------------------------------------------------------------------
1 | namespace KafkaNet.Model
2 | {
3 | public class StatisticsTrackerOptions
4 | {
5 | public bool Enable { get; set; }
6 | }
7 | }
--------------------------------------------------------------------------------
/src/KafkaNetClient/Properties/AssemblyInfo.cs:
--------------------------------------------------------------------------------
1 | using System.Reflection;
2 | using System.Runtime.InteropServices;
3 |
4 | [assembly: AssemblyTitle("KafkaNetClient")]
5 | [assembly: AssemblyDescription("Native C# client for Apache Kafka.")]
6 | [assembly: AssemblyConfiguration("")]
7 | [assembly: AssemblyCompany("Gigya Inc")]
8 | [assembly: AssemblyProduct("KafkaNetClient")]
9 | [assembly: AssemblyCopyright("Copyright ©Gigya Inc 2015")]
10 | [assembly: AssemblyTrademark("")]
11 | [assembly: AssemblyCulture("")]
12 | [assembly: ComVisible(false)]
13 | [assembly: GuidAttribute("eb234ec0-d838-4abd-9224-479ca06f969d")]
14 | [assembly: AssemblyVersion("1.0.2.0")]
15 | [assembly: AssemblyFileVersion("1.0.2.0")]
16 |
--------------------------------------------------------------------------------
/src/KafkaNetClient/Protocol/BaseRequest.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet.Common;
2 | using System;
3 |
4 | namespace KafkaNet.Protocol
5 | {
6 | public abstract class BaseRequest
7 | {
8 | ///
9 | /// From Documentation:
10 | /// The replica id indicates the node id of the replica initiating this request. Normal client consumers should always specify this as -1 as they have no node id.
11 | /// Other brokers set this to be their own node id. The value -2 is accepted to allow a non-broker to issue fetch requests as if it were a replica broker for debugging purposes.
12 | ///
13 | /// Kafka Protocol implementation:
14 | /// https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
15 | ///
16 | protected const int ReplicaId = -1;
17 |
18 | protected const Int16 ApiVersion = 0;
19 | private string _clientId = "Kafka-Net";
20 | private int _correlationId = 1;
21 |
22 | ///
23 | /// Descriptive name of the source of the messages sent to kafka
24 | ///
25 | public string ClientId { get { return _clientId; } set { _clientId = value; } }
26 |
27 | ///
28 | /// Value supplied will be passed back in the response by the server unmodified.
29 | /// It is useful for matching request and response between the client and server.
30 | ///
31 | public int CorrelationId { get { return _correlationId; } set { _correlationId = value; } }
32 |
33 | ///
34 | /// Flag which tells the broker call to expect a response for this request.
35 | ///
36 | public virtual bool ExpectResponse { get { return true; } }
37 |
38 | ///
39 | /// Encode the common header for a kafka request.
40 | ///
41 | /// KafkaMessagePacker with header populated
42 | /// Format: (hhihs)
43 | public static KafkaMessagePacker EncodeHeader<T>(IKafkaRequest<T> request)
44 | {
45 | return new KafkaMessagePacker()
46 | .Pack(((Int16)request.ApiKey))
47 | .Pack(ApiVersion)
48 | .Pack(request.CorrelationId)
49 | .Pack(request.ClientId, StringPrefixEncoding.Int16);
50 | }
51 | }
52 | }
--------------------------------------------------------------------------------
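The "(hhihs)" note above abbreviates the shared header layout: int16 ApiKey, int16 ApiVersion, int32 CorrelationId, and an int16-length-prefixed ClientId string. A sketch of obtaining the packed header bytes for any request, assuming KafkaMessagePacker.Payload() returns the packed buffer as it is used throughout this repository:

    using KafkaNet.Common;
    using KafkaNet.Protocol;

    internal static class HeaderSketch
    {
        internal static byte[] HeaderBytes(MetadataRequest request)
        {
            // EncodeHeader packs ApiKey, ApiVersion, CorrelationId and ClientId.
            using (KafkaMessagePacker packer = BaseRequest.EncodeHeader(request))
            {
                return packer.Payload();
            }
        }
    }
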
/src/KafkaNetClient/Protocol/Broker.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet.Common;
2 | using System;
3 |
4 | namespace KafkaNet.Protocol
5 | {
6 | public class Broker
7 | {
8 | public int BrokerId { get; set; }
9 | public string Host { get; set; }
10 | public int Port { get; set; }
11 | public Uri Address { get { return new Uri(string.Format("http://{0}:{1}", Host, Port)); } }
12 |
13 | public static Broker FromStream(BigEndianBinaryReader stream)
14 | {
15 | return new Broker
16 | {
17 | BrokerId = stream.ReadInt32(),
18 | Host = stream.ReadInt16String(),
19 | Port = stream.ReadInt32()
20 | };
21 | }
22 | }
23 | }
--------------------------------------------------------------------------------
/src/KafkaNetClient/Protocol/ConsumerMetadataRequest.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet.Common;
2 | using System;
3 | using System.Collections.Generic;
4 |
5 | namespace KafkaNet.Protocol
6 | {
7 | ///
8 | /// https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetFetchRequest
9 | /// The offsets for a given consumer group are maintained by a specific broker called the offset coordinator. i.e., a consumer needs
10 | /// to issue its offset commit and fetch requests to this specific broker. It can discover the current offset coordinator by issuing a consumer metadata request.
11 | ///
12 | public class ConsumerMetadataRequest : BaseRequest, IKafkaRequest<ConsumerMetadataResponse>
13 | {
14 | public ApiKeyRequestType ApiKey { get { return ApiKeyRequestType.ConsumerMetadataRequest; } }
15 | public string ConsumerGroup { get; set; }
16 |
17 | public KafkaDataPayload Encode()
18 | {
19 | return EncodeConsumerMetadataRequest(this);
20 | }
21 |
22 | public IEnumerable<ConsumerMetadataResponse> Decode(byte[] payload)
23 | {
24 | return DecodeConsumerMetadataResponse(payload);
25 | }
26 |
27 | private KafkaDataPayload EncodeConsumerMetadataRequest(ConsumerMetadataRequest request)
28 | {
29 | using (var message = EncodeHeader(request).Pack(request.ConsumerGroup, StringPrefixEncoding.Int16))
30 | {
31 | return new KafkaDataPayload
32 | {
33 | Buffer = message.Payload(),
34 | CorrelationId = request.CorrelationId,
35 | ApiKey = ApiKey
36 | };
37 | }
38 | }
39 |
40 | private IEnumerable<ConsumerMetadataResponse> DecodeConsumerMetadataResponse(byte[] data)
41 | {
42 | using (var stream = new BigEndianBinaryReader(data))
43 | {
44 | var correlationId = stream.ReadInt32();
45 |
46 | var response = new ConsumerMetadataResponse
47 | {
48 | Error = stream.ReadInt16(),
49 | CoordinatorId = stream.ReadInt32(),
50 | CoordinatorHost = stream.ReadInt16String(),
51 | CoordinatorPort = stream.ReadInt32()
52 | };
53 |
54 | yield return response;
55 | }
56 | }
57 | }
58 |
59 | public class ConsumerMetadataResponse
60 | {
61 | ///
62 | /// Error code of exception that occurred during the request. Zero if no error.
63 | ///
64 | public Int16 Error;
65 |
66 | public int CoordinatorId;
67 | public string CoordinatorHost;
68 | public int CoordinatorPort;
69 | }
70 | }
--------------------------------------------------------------------------------
/src/KafkaNetClient/Protocol/IBaseResponse.cs:
--------------------------------------------------------------------------------
1 | using System;
2 |
3 | namespace KafkaNet.Protocol
4 | {
5 | public interface IBaseResponse
6 | {
7 | Int16 Error { get; set; }
8 | string Topic { get; set; }
9 | int PartitionId { get; set; }
10 | }
11 | }
--------------------------------------------------------------------------------
/src/KafkaNetClient/Protocol/MetadataRequest.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet.Common;
2 | using System.Collections.Generic;
3 |
4 | namespace KafkaNet.Protocol
5 | {
6 | public class MetadataRequest : BaseRequest, IKafkaRequest<MetadataResponse>
7 | {
8 | ///
9 | /// Indicates the type of kafka encoding this request is
10 | ///
11 | public ApiKeyRequestType ApiKey { get { return ApiKeyRequestType.MetaData; } }
12 |
13 | ///
14 | /// The list of topics to get metadata for.
15 | ///
16 | public List<string> Topics { get; set; }
17 |
18 | public KafkaDataPayload Encode()
19 | {
20 | return EncodeMetadataRequest(this);
21 | }
22 |
23 | public IEnumerable<MetadataResponse> Decode(byte[] payload)
24 | {
25 | return new[] { DecodeMetadataResponse(payload) };
26 | }
27 |
28 | ///
29 | /// Encode a request for metadata about topic and broker information.
30 | ///
31 | /// The MetaDataRequest to encode.
32 | /// Encoded byte[] representing the request.
33 | /// Format: (PayloadSize), Header, ix(hs)
34 | private KafkaDataPayload EncodeMetadataRequest(MetadataRequest request)
35 | {
36 | if (request.Topics == null) request.Topics = new List<string>();
37 |
38 | using (var message = EncodeHeader(request)
39 | .Pack(request.Topics.Count)
40 | .Pack(request.Topics, StringPrefixEncoding.Int16))
41 | {
42 | return new KafkaDataPayload
43 | {
44 | Buffer = message.Payload(),
45 | CorrelationId = request.CorrelationId,
46 | ApiKey = ApiKey
47 | };
48 | }
49 | }
50 |
51 | ///
52 | /// Decode the metadata response from kafka server.
53 | ///
54 | ///
55 | ///
56 | private MetadataResponse DecodeMetadataResponse(byte[] data)
57 | {
58 | using (var stream = new BigEndianBinaryReader(data))
59 | {
60 | var response = new MetadataResponse();
61 | response.CorrelationId = stream.ReadInt32();
62 |
63 | var brokerCount = stream.ReadInt32();
64 | for (var i = 0; i < brokerCount; i++)
65 | {
66 | response.Brokers.Add(Broker.FromStream(stream));
67 | }
68 |
69 | var topicCount = stream.ReadInt32();
70 | for (var i = 0; i < topicCount; i++)
71 | {
72 | response.Topics.Add(Topic.FromStream(stream));
73 | }
74 |
75 | return response;
76 | }
77 | }
78 | }
79 |
80 | public class MetadataResponse
81 | {
82 | public int CorrelationId { get; set; }
83 |
84 | public MetadataResponse()
85 | {
86 | Brokers = new List<Broker>();
87 | Topics = new List<Topic>();
88 | }
89 |
90 | public List<Broker> Brokers { get; set; }
91 | public List<Topic> Topics { get; set; }
92 | }
93 | }
--------------------------------------------------------------------------------
/src/KafkaNetClient/Protocol/OffsetCommitRequest.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet.Common;
2 | using System;
3 | using System.Collections.Generic;
4 | using System.Linq;
5 |
6 | namespace KafkaNet.Protocol
7 | {
8 | ///
9 | /// Class that represents the api call to commit a specific set of offsets for a given topic. The offset is saved under the
10 | /// arbitrary ConsumerGroup name provided by the call.
11 | ///
12 | public class OffsetCommitRequest : BaseRequest, IKafkaRequest<OffsetCommitResponse>
13 | {
14 | public ApiKeyRequestType ApiKey { get { return ApiKeyRequestType.OffsetCommit; } }
15 | public string ConsumerGroup { get; set; }
16 | public List<OffsetCommit> OffsetCommits { get; set; }
17 |
18 | public KafkaDataPayload Encode()
19 | {
20 | return EncodeOffsetCommitRequest(this);
21 | }
22 |
23 | public IEnumerable<OffsetCommitResponse> Decode(byte[] payload)
24 | {
25 | return DecodeOffsetCommitResponse(payload);
26 | }
27 |
28 | private KafkaDataPayload EncodeOffsetCommitRequest(OffsetCommitRequest request)
29 | {
30 | if (request.OffsetCommits == null) request.OffsetCommits = new List<OffsetCommit>();
31 |
32 | using (var message = EncodeHeader(request).Pack(request.ConsumerGroup, StringPrefixEncoding.Int16))
33 | {
34 | var topicGroups = request.OffsetCommits.GroupBy(x => x.Topic).ToList();
35 | message.Pack(topicGroups.Count);
36 |
37 | foreach (var topicGroup in topicGroups)
38 | {
39 | var partitions = topicGroup.GroupBy(x => x.PartitionId).ToList();
40 | message.Pack(topicGroup.Key, StringPrefixEncoding.Int16)
41 | .Pack(partitions.Count);
42 |
43 | foreach (var partition in partitions)
44 | {
45 | foreach (var commit in partition)
46 | {
47 | message.Pack(partition.Key)
48 | .Pack(commit.Offset)
49 | .Pack(commit.TimeStamp)
50 | .Pack(commit.Metadata, StringPrefixEncoding.Int16);
51 | }
52 | }
53 | }
54 |
55 | return new KafkaDataPayload
56 | {
57 | Buffer = message.Payload(),
58 | CorrelationId = request.CorrelationId,
59 | ApiKey = ApiKey
60 | };
61 | }
62 | }
63 |
64 | private IEnumerable<OffsetCommitResponse> DecodeOffsetCommitResponse(byte[] data)
65 | {
66 | using (var stream = new BigEndianBinaryReader(data))
67 | {
68 | var correlationId = stream.ReadInt32();
69 |
70 | var topicCount = stream.ReadInt32();
71 | for (int i = 0; i < topicCount; i++)
72 | {
73 | var topic = stream.ReadInt16String();
74 |
75 | var partitionCount = stream.ReadInt32();
76 | for (int j = 0; j < partitionCount; j++)
77 | {
78 | var response = new OffsetCommitResponse()
79 | {
80 | Topic = topic,
81 | PartitionId = stream.ReadInt32(),
82 | Error = stream.ReadInt16()
83 | };
84 |
85 | yield return response;
86 | }
87 | }
88 | }
89 | }
90 | }
91 |
92 | public class OffsetCommit
93 | {
94 | ///
95 | /// The topic the offset came from.
96 | ///
97 | public string Topic { get; set; }
98 |
99 | ///
100 | /// The partition the offset came from.
101 | ///
102 | public int PartitionId { get; set; }
103 |
104 | ///
105 | /// The offset number to commit as completed.
106 | ///
107 | public long Offset { get; set; }
108 |
109 | ///
110 | /// If the time stamp field is set to -1, then the broker sets the time stamp to the receive time before committing the offset.
111 | ///
112 | public long TimeStamp { get; set; }
113 |
114 | ///
115 | /// Descriptive metadata about this commit.
116 | ///
117 | public string Metadata { get; set; }
118 |
119 | public OffsetCommit()
120 | {
121 | TimeStamp = -1;
122 | }
123 | }
124 |
125 | public class OffsetCommitResponse : IBaseResponse
126 | {
127 | ///
128 | /// The name of the topic this response entry is for.
129 | ///
130 | public string Topic { get; set; }
131 |
132 | ///
133 | /// The id of the partition this response is for.
134 | ///
135 | public Int32 PartitionId { get; set; }
136 |
137 | ///
138 | /// Error code of exception that occurred during the request. Zero if no error.
139 | ///
140 | public Int16 Error { get; set; }
141 | }
142 | }
--------------------------------------------------------------------------------
/src/KafkaNetClient/Protocol/OffsetFetchRequest.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet.Common;
2 | using System;
3 | using System.Collections.Generic;
4 | using System.Linq;
5 |
6 | namespace KafkaNet.Protocol
7 | {
8 | ///
9 | /// Class that represents both the request and the response from a kafka server of requesting a stored offset value
10 | /// for a given consumer group. Essentially this part of the api allows a user to save/load a given offset position
11 | /// under any arbitrary name.
12 | ///
13 | public class OffsetFetchRequest : BaseRequest, IKafkaRequest<OffsetFetchResponse>
14 | {
15 | public ApiKeyRequestType ApiKey { get { return ApiKeyRequestType.OffsetFetch; } }
16 | public string ConsumerGroup { get; set; }
17 | public List<OffsetFetch> Topics { get; set; }
18 |
19 | public KafkaDataPayload Encode()
20 | {
21 | return EncodeOffsetFetchRequest(this);
22 | }
23 |
24 | protected KafkaDataPayload EncodeOffsetFetchRequest(OffsetFetchRequest request)
25 | {
26 | if (request.Topics == null) request.Topics = new List<OffsetFetch>();
27 |
28 | using (var message = EncodeHeader(request))
29 | {
30 | var topicGroups = request.Topics.GroupBy(x => x.Topic).ToList();
31 |
32 | message.Pack(ConsumerGroup, StringPrefixEncoding.Int16)
33 | .Pack(topicGroups.Count);
34 |
35 | foreach (var topicGroup in topicGroups)
36 | {
37 | var partitions = topicGroup.GroupBy(x => x.PartitionId).ToList();
38 | message.Pack(topicGroup.Key, StringPrefixEncoding.Int16)
39 | .Pack(partitions.Count);
40 |
41 | foreach (var partition in partitions)
42 | {
43 | foreach (var offset in partition)
44 | {
45 | message.Pack(offset.PartitionId);
46 | }
47 | }
48 | }
49 |
50 | return new KafkaDataPayload
51 | {
52 | Buffer = message.Payload(),
53 | CorrelationId = request.CorrelationId,
54 | ApiKey = ApiKey
55 | };
56 | }
57 | }
58 |
59 | public IEnumerable<OffsetFetchResponse> Decode(byte[] payload)
60 | {
61 | return DecodeOffsetFetchResponse(payload);
62 | }
63 |
64 | protected IEnumerable<OffsetFetchResponse> DecodeOffsetFetchResponse(byte[] data)
65 | {
66 | using (var stream = new BigEndianBinaryReader(data))
67 | {
68 | var correlationId = stream.ReadInt32();
69 |
70 | var topicCount = stream.ReadInt32();
71 | for (int i = 0; i < topicCount; i++)
72 | {
73 | var topic = stream.ReadInt16String();
74 |
75 | var partitionCount = stream.ReadInt32();
76 | for (int j = 0; j < partitionCount; j++)
77 | {
78 | var response = new OffsetFetchResponse()
79 | {
80 | Topic = topic,
81 | PartitionId = stream.ReadInt32(),
82 | Offset = stream.ReadInt64(),
83 | MetaData = stream.ReadInt16String(),
84 | Error = stream.ReadInt16()
85 | };
86 | yield return response;
87 | }
88 | }
89 | }
90 | }
91 | }
92 |
93 | public class OffsetFetch
94 | {
95 | ///
96 | /// The topic the offset came from.
97 | ///
98 | public string Topic { get; set; }
99 |
100 | ///
101 | /// The partition the offset came from.
102 | ///
103 | public int PartitionId { get; set; }
104 | }
105 |
106 | public class OffsetFetchResponse : IBaseResponse
107 | {
108 | ///
109 | /// The name of the topic this response entry is for.
110 | ///
111 | public string Topic { get; set; }
112 |
113 | ///
114 | /// The id of the partition this response is for.
115 | ///
116 | public Int32 PartitionId { get; set; }
117 |
118 | ///
119 | /// The offset position saved to the server.
120 | ///
121 | public Int64 Offset { get; set; }
122 |
123 | ///
124 | /// Any arbitrary metadata stored during a CommitRequest.
125 | ///
126 | public string MetaData { get; set; }
127 |
128 | ///
129 | /// Error code of exception that occurred during the request. Zero if no error.
130 | ///
131 | public Int16 Error { get; set; }
132 |
133 | public override string ToString()
134 | {
135 | return string.Format("[OffsetFetchResponse TopicName={0}, PartitionID={1}, Offset={2}, MetaData={3}, ErrorCode={4}]", Topic, PartitionId, Offset, MetaData, Error);
136 | }
137 | }
138 | }
--------------------------------------------------------------------------------
/src/KafkaNetClient/Protocol/OffsetRequest.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet.Common;
2 | using System;
3 | using System.Collections.Generic;
4 | using System.Linq;
5 |
6 | namespace KafkaNet.Protocol
7 | {
8 | ///
9 | /// A funky Protocol for requesting the starting offset of each segment for the requested partition
10 | ///
11 | public class OffsetRequest : BaseRequest, IKafkaRequest<OffsetResponse>
12 | {
13 | public ApiKeyRequestType ApiKey { get { return ApiKeyRequestType.Offset; } }
14 | public List<Offset> Offsets { get; set; }
15 |
16 | public KafkaDataPayload Encode()
17 | {
18 | return EncodeOffsetRequest(this);
19 | }
20 |
21 | public IEnumerable<OffsetResponse> Decode(byte[] payload)
22 | {
23 | return DecodeOffsetResponse(payload);
24 | }
25 |
26 | private KafkaDataPayload EncodeOffsetRequest(OffsetRequest request)
27 | {
28 | if (request.Offsets == null) request.Offsets = new List<Offset>();
29 | using (var message = EncodeHeader(request))
30 | {
31 | var topicGroups = request.Offsets.GroupBy(x => x.Topic).ToList();
32 | message.Pack(ReplicaId)
33 | .Pack(topicGroups.Count);
34 |
35 | foreach (var topicGroup in topicGroups)
36 | {
37 | var partitions = topicGroup.GroupBy(x => x.PartitionId).ToList();
38 | message.Pack(topicGroup.Key, StringPrefixEncoding.Int16)
39 | .Pack(partitions.Count);
40 |
41 | foreach (var partition in partitions)
42 | {
43 | foreach (var offset in partition)
44 | {
45 | message.Pack(partition.Key)
46 | .Pack(offset.Time)
47 | .Pack(offset.MaxOffsets);
48 | }
49 | }
50 | }
51 |
52 | return new KafkaDataPayload
53 | {
54 | Buffer = message.Payload(),
55 | CorrelationId = request.CorrelationId,
56 | ApiKey = ApiKey
57 | };
58 | }
59 | }
60 |
61 | private IEnumerable<OffsetResponse> DecodeOffsetResponse(byte[] data)
62 | {
63 | using (var stream = new BigEndianBinaryReader(data))
64 | {
65 | var correlationId = stream.ReadInt32();
66 |
67 | var topicCount = stream.ReadInt32();
68 | for (int i = 0; i < topicCount; i++)
69 | {
70 | var topic = stream.ReadInt16String();
71 |
72 | var partitionCount = stream.ReadInt32();
73 | for (int j = 0; j < partitionCount; j++)
74 | {
75 | var response = new OffsetResponse()
76 | {
77 | Topic = topic,
78 | PartitionId = stream.ReadInt32(),
79 | Error = stream.ReadInt16(),
80 | Offsets = new List<long>()
81 | };
82 | var offsetCount = stream.ReadInt32();
83 | for (int k = 0; k < offsetCount; k++)
84 | {
85 | response.Offsets.Add(stream.ReadInt64());
86 | }
87 |
88 | yield return response;
89 | }
90 | }
91 | }
92 | }
93 | }
94 |
95 | public class Offset
96 | {
97 | public Offset()
98 | {
99 | Time = -1;
100 | MaxOffsets = 1;
101 | }
102 |
103 | public string Topic { get; set; }
104 | public int PartitionId { get; set; }
105 |
106 | ///
107 | /// Used to ask for all messages before a certain time (ms). There are two special values.
108 | /// Specify -1 to receive the latest offsets and -2 to receive the earliest available offset.
109 | /// Note that because offsets are pulled in descending order, asking for the earliest offset will always return you a single element.
110 | ///
111 | public long Time { get; set; }
112 |
113 | public int MaxOffsets { get; set; }
114 | }
115 |
116 | public class OffsetResponse : IBaseResponse
117 | {
118 | public string Topic { get; set; }
119 | public int PartitionId { get; set; }
120 | public Int16 Error { get; set; }
121 | public List<long> Offsets { get; set; }
122 | }
123 |
124 | public class OffsetPosition
125 | {
126 | public OffsetPosition()
127 | {
128 | }
129 |
130 | public OffsetPosition(int partitionId, long offset)
131 | {
132 | PartitionId = partitionId;
133 | Offset = offset;
134 | }
135 |
136 | public int PartitionId { get; set; }
137 | public long Offset { get; set; }
138 |
139 | public override string ToString()
140 | {
141 | return string.Format("PartitionId:{0}, Offset:{1}", PartitionId, Offset);
142 | }
143 |
144 | public override bool Equals(object obj)
145 | {
146 | if (ReferenceEquals(null, obj)) return false;
147 | if (ReferenceEquals(this, obj)) return true;
148 | if (obj.GetType() != this.GetType()) return false;
149 | return Equals((OffsetPosition)obj);
150 | }
151 |
152 | protected bool Equals(OffsetPosition other)
153 | {
154 | return PartitionId == other.PartitionId && Offset == other.Offset;
155 | }
156 |
157 | public override int GetHashCode()
158 | {
159 | unchecked
160 | {
161 | return (PartitionId * 397) ^ Offset.GetHashCode();
162 | }
163 | }
164 | }
165 | }
--------------------------------------------------------------------------------
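To illustrate the special Time values documented above, a sketch requesting both the latest (-1) and earliest (-2) offsets of one hypothetical topic partition:

    using System.Collections.Generic;
    using KafkaNet.Protocol;

    internal static class OffsetRequestSketch
    {
        internal static OffsetRequest Build()
        {
            return new OffsetRequest
            {
                Offsets = new List<Offset>
                {
                    new Offset { Topic = "my-topic", PartitionId = 0, Time = -1, MaxOffsets = 1 }, // latest
                    new Offset { Topic = "my-topic", PartitionId = 0, Time = -2, MaxOffsets = 1 }  // earliest
                }
            };
        }
    }
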
/src/KafkaNetClient/Protocol/Topic.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet.Common;
2 | using System;
3 | using System.Collections.Generic;
4 |
5 | namespace KafkaNet.Protocol
6 | {
7 | public class Topic
8 | {
9 | public Int16 ErrorCode { get; set; }
10 | public string Name { get; set; }
11 | public List<Partition> Partitions { get; set; }
12 |
13 | public static Topic FromStream(BigEndianBinaryReader stream)
14 | {
15 | var topic = new Topic
16 | {
17 | ErrorCode = stream.ReadInt16(),
18 | Name = stream.ReadInt16String(),
19 | Partitions = new List<Partition>()
20 | };
21 |
22 | var numPartitions = stream.ReadInt32();
23 | for (int i = 0; i < numPartitions; i++)
24 | {
25 | topic.Partitions.Add(Partition.FromStream(stream));
26 | }
27 |
28 | return topic;
29 | }
30 | }
31 |
32 | public class Partition
33 | {
34 | ///
35 | /// Error code. 0 indicates no error occurred.
36 | ///
37 | public Int16 ErrorCode { get; set; }
38 |
39 | ///
40 | /// The Id of the partition that this metadata describes.
41 | ///
42 | public int PartitionId { get; set; }
43 |
44 | ///
45 | /// The node id for the kafka broker currently acting as leader for this partition. If no leader exists because we are in the middle of a leader election this id will be -1.
46 | ///
47 | public int LeaderId { get; set; }
48 |
49 | ///
50 | /// The set of alive nodes that currently act as slaves for the leader of this partition.
51 | ///
52 | public List<int> Replicas { get; set; }
53 |
54 | ///
55 | /// The subset of the replicas that are "caught up" to the leader.
56 | ///
57 | public List<int> Isrs { get; set; }
58 |
59 | public static Partition FromStream(BigEndianBinaryReader stream)
60 | {
61 | var partition = new Partition
62 | {
63 | ErrorCode = stream.ReadInt16(),
64 | PartitionId = stream.ReadInt32(),
65 | LeaderId = stream.ReadInt32(),
66 | Replicas = new List<int>(),
67 | Isrs = new List<int>()
68 | };
69 |
70 | var numReplicas = stream.ReadInt32();
71 | for (int i = 0; i < numReplicas; i++)
72 | {
73 | partition.Replicas.Add(stream.ReadInt32());
74 | }
75 |
76 | var numIsr = stream.ReadInt32();
77 | for (int i = 0; i < numIsr; i++)
78 | {
79 | partition.Isrs.Add(stream.ReadInt32());
80 | }
81 |
82 | return partition;
83 | }
84 |
85 | protected bool Equals(Partition other)
86 | {
87 | return PartitionId == other.PartitionId;
88 | }
89 |
90 | public override int GetHashCode()
91 | {
92 | return PartitionId;
93 | }
94 |
95 | public override bool Equals(object obj)
96 | {
97 | if (ReferenceEquals(null, obj)) return false;
98 | if (ReferenceEquals(this, obj)) return true;
99 | if (obj.GetType() != this.GetType()) return false;
100 | return Equals((Partition)obj);
101 | }
102 | }
103 | }
--------------------------------------------------------------------------------
/src/KafkaNetClient/ProtocolGateway.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet.Model;
2 | using KafkaNet.Protocol;
3 | using System;
4 | using System.Linq;
5 | using System.Runtime.ExceptionServices;
6 | using System.Threading.Tasks;
7 |
8 | namespace KafkaNet
9 | {
10 | public class ProtocolGateway : IDisposable
11 | {
12 | private readonly IBrokerRouter _brokerRouter;
13 |
14 | //TODO: add a logger
15 | public ProtocolGateway(params Uri[] brokerUrl)
16 | {
17 | var kafkaOptions = new KafkaOptions(brokerUrl) { MaximumReconnectionTimeout = TimeSpan.FromSeconds(60), ResponseTimeoutMs = TimeSpan.FromSeconds(60) };
18 | _brokerRouter = new BrokerRouter(kafkaOptions);
19 | }
20 |
21 | public ProtocolGateway(IBrokerRouter brokerRouter)
22 | {
23 | _brokerRouter = brokerRouter;
24 | }
25 |
26 | public ProtocolGateway(KafkaOptions kafkaOptions)
27 | {
28 | _brokerRouter = new BrokerRouter(kafkaOptions);
29 | }
30 |
31 | private readonly int _maxRetry = 3;
32 |
33 | /// Thrown if the returned metadata for the given topic is invalid or missing
34 | /// Thrown if the given partitionId does not exist for the given topic.
35 | /// Thrown if none of the default brokers can be contacted
36 | /// Thrown if the request times out
37 | /// Thrown in case of network error contacting broker (after retries)
38 | /// Thrown in case of an unexpected error in the request
39 | /// Thrown in case the topic name is invalid
40 | public async Task<T> SendProtocolRequest<T>(IKafkaRequest<T> request, string topic, int partition) where T : class, IBaseResponse
41 | {
42 | ValidateTopic(topic);
43 | T response = null;
44 | int retryTime = 0;
45 | while (retryTime < _maxRetry)
46 | {
47 | bool needToRefreshTopicMetadata = false;
48 | ExceptionDispatchInfo exceptionInfo = null;
49 | string errorDetails = "";
50 |
51 | try
52 | {
53 | await _brokerRouter.RefreshMissingTopicMetadata(topic);
54 |
55 | //find the route; it can change after a metadata refresh
56 | var route = _brokerRouter.SelectBrokerRouteFromLocalCache(topic, partition);
57 | var responses = await route.Connection.SendAsync(request).ConfigureAwait(false);
58 | response = responses.FirstOrDefault();
59 |
60 | //this can happen if you send a ProduceRequest with ack level = 0
61 | if (response == null)
62 | {
63 | return null;
64 | }
65 |
66 | var error = (ErrorResponseCode) response.Error;
67 | if (error == ErrorResponseCode.NoError)
68 | {
69 | return response;
70 | }
71 |
72 | //It means we had an error
73 | errorDetails = error.ToString();
74 | needToRefreshTopicMetadata = CanRecoverByRefreshMetadata(error);
75 | }
76 | catch (ResponseTimeoutException ex)
77 | {
78 | exceptionInfo = ExceptionDispatchInfo.Capture(ex);
79 | }
80 | catch (BrokerConnectionException ex)
81 | {
82 | exceptionInfo = ExceptionDispatchInfo.Capture(ex);
83 | }
84 | catch (NoLeaderElectedForPartition ex)
85 | {
86 | exceptionInfo = ExceptionDispatchInfo.Capture(ex);
87 | }
88 | catch (LeaderNotFoundException ex) //the number of partitions can change
89 | {
90 | exceptionInfo = ExceptionDispatchInfo.Capture(ex);
91 | }
92 |
93 | if (exceptionInfo != null)
94 | {
95 | needToRefreshTopicMetadata = true;
96 | errorDetails = exceptionInfo.SourceException.GetType().Name;
97 | }
98 |
99 | retryTime++;
100 | bool hasMoreRetry = retryTime < _maxRetry;
101 |
102 | _brokerRouter.Log.WarnFormat("ProtocolGateway error sending request, retrying (attempt number {0}): {1}", retryTime, errorDetails);
103 | if (needToRefreshTopicMetadata && hasMoreRetry)
104 | {
105 | await _brokerRouter.RefreshTopicMetadata(topic).ConfigureAwait(false);
106 | }
107 | else
108 | {
109 | _brokerRouter.Log.ErrorFormat("ProtocolGateway sending request failed");
110 |
111 | // If an exception was thrown, we want to propagate it
112 | if (exceptionInfo != null)
113 | {
114 | exceptionInfo.Throw();
115 | }
116 |
117 | // Otherwise, the error was from Kafka, throwing application exception
118 | throw new KafkaApplicationException("FetchResponse received an error from Kafka: {0}", errorDetails) { ErrorCode = response.Error };
119 | }
120 | }
121 |
122 | return response;
123 | }
124 |
125 | private static bool CanRecoverByRefreshMetadata(ErrorResponseCode error)
126 | {
127 | return error == ErrorResponseCode.BrokerNotAvailable ||
128 | error == ErrorResponseCode.ConsumerCoordinatorNotAvailableCode ||
129 | error == ErrorResponseCode.LeaderNotAvailable ||
130 | error == ErrorResponseCode.NotLeaderForPartition;
131 | }
132 |
133 | public void Dispose()
134 | {
135 | _brokerRouter.Dispose();
136 | }
137 |
138 | private void ValidateTopic(string topic)
139 | {
140 | if (topic.Contains(" "))
141 | {
142 | throw new FormatException("topic name is invalid");
143 | }
144 | }
145 | }
146 | }
--------------------------------------------------------------------------------
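A usage sketch for SendProtocolRequest above, fetching a committed offset through the gateway's retry-and-refresh logic; the broker address, consumer group and topic are hypothetical.

    using System;
    using System.Collections.Generic;
    using System.Threading.Tasks;
    using KafkaNet;
    using KafkaNet.Protocol;

    internal static class GatewaySketch
    {
        internal static async Task<long> FetchCommittedOffsetAsync()
        {
            using (var gateway = new ProtocolGateway(new Uri("http://broker:9092")))
            {
                var request = new OffsetFetchRequest
                {
                    ConsumerGroup = "my-group",
                    Topics = new List<OffsetFetch> { new OffsetFetch { Topic = "my-topic", PartitionId = 0 } }
                };

                // T is inferred as OffsetFetchResponse from the request type.
                OffsetFetchResponse response = await gateway.SendProtocolRequest(request, "my-topic", 0);
                return response.Offset;
            }
        }
    }
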
/src/StatisticsTestLoader/Configuration.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Configuration;
3 | using System.Linq;
4 |
5 | namespace StatisticsTestLoader
6 | {
7 | public class Configuration
8 | {
9 | public Uri[] KafkaUrl
10 | {
11 | get
12 | {
13 | return ConfigurationManager.AppSettings["KafkaUrl"].Split(new[] { ',' }, StringSplitOptions.RemoveEmptyEntries)
14 | .Select(x => new Uri(x)).ToArray();
15 | }
16 | }
17 | public Uri[] PropertyCacheUrl
18 | {
19 | get
20 | {
21 | return ConfigurationManager.AppSettings["PropertyCacheUrl"].Split(new[] { ',' }, StringSplitOptions.RemoveEmptyEntries)
22 | .Select(x => new Uri(x))
23 | .ToArray();
24 | }
25 | }
26 | public string CacheUsername {
27 | get { return ConfigurationManager.AppSettings["CacheUsername"]; }
28 | }
29 |
30 | public string CachePassword {
31 | get { return ConfigurationManager.AppSettings["CachePassword"]; }
32 | }
33 | }
34 | }
--------------------------------------------------------------------------------
/src/StatisticsTestLoader/ConsoleLogger.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using KafkaNet;
3 |
4 | namespace StatisticsTestLoader
5 | {
6 | public class ConsoleLogger : IKafkaLog
7 | {
8 | public void DebugFormat(string format, params object[] args)
9 | {
10 | //Console.WriteLine(format, args);
11 | }
12 |
13 | public void InfoFormat(string format, params object[] args)
14 | {
15 | //Console.WriteLine(format, args);
16 | }
17 |
18 | public void WarnFormat(string format, params object[] args)
19 | {
20 | Console.WriteLine(format, args);
21 | }
22 |
23 | public void ErrorFormat(string format, params object[] args)
24 | {
25 | Console.WriteLine(format, args);
26 | }
27 |
28 | public void FatalFormat(string format, params object[] args)
29 | {
30 | Console.WriteLine(format, args);
31 | }
32 | }
33 | }
--------------------------------------------------------------------------------
/src/StatisticsTestLoader/IRecordSource.cs:
--------------------------------------------------------------------------------
1 | using System.Collections.Generic;
2 |
3 | namespace StatisticsTestLoader
4 | {
5 | public interface IRecordSource
6 | {
7 | string Topic { get; }
8 | IEnumerable<KafkaRecord> Poll(long index);
9 | int QueueCount { get; }
10 | }
11 | }
--------------------------------------------------------------------------------
/src/StatisticsTestLoader/KafkaRecord.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet.Protocol;
2 |
3 | namespace StatisticsTestLoader
4 | {
5 | public class KafkaRecord
6 | {
7 | public string Key { get; set; }
8 | public string Topic { get; set; }
9 | public long Offset { get; set; }
10 | public Message Message { get; private set; }
11 |
12 | public void AddDocument(string document)
13 | {
14 | Message = new Message(Key, document);
15 | }
16 | }
17 | }
--------------------------------------------------------------------------------
/src/StatisticsTestLoader/Program.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Linq;
3 | using System.Threading;
4 | using System.Threading.Tasks;
5 | using KafkaNet.Common;
6 |
7 | namespace StatisticsTestLoader
8 | {
9 | ///
10 | /// Note: This project is for testing large documents being pushed and compressed to a kafka server.
11 | /// This is not currently a generic loader program and requires a certain infrastructure to run. It
12 | /// was created as a quick prototype to test out statistic tracking of long running, large data flow.
13 | ///
14 | class Program
15 | {
16 |
17 | private static DestinationKafka _kafka;
18 | private static SourcePropertyChanges _propertySource;
19 | private static bool _interrupted;
20 |
21 | static void Main(string[] args)
22 | {
23 | var configuration = new Configuration();
24 |
25 | _kafka = new DestinationKafka(configuration.KafkaUrl);
26 |
27 | _propertySource = new SourcePropertyChanges(configuration.CacheUsername, configuration.CachePassword,
28 | configuration.PropertyCacheUrl);
29 |
30 | Task.Run(() => StartPolling(_propertySource, _kafka));
31 |
32 | Console.WriteLine("Running...");
33 | Console.ReadLine();
34 | _interrupted = true;
35 | Console.WriteLine("Quitting...");
36 | }
37 |
38 | private static void StartPolling(IRecordSource source, DestinationKafka kafka)
39 | {
40 | while (_interrupted == false)
41 | {
42 | try
43 | {
44 | var index = kafka.GetStoredOffset(source.Topic);
45 | foreach (var batchEnumerable in source.Poll(index).Batch(500))
46 | {
47 | var batch = batchEnumerable.ToList();
48 |
49 | kafka.PostBatchAsync(batch);
50 | }
51 |
52 | Thread.Sleep(TimeSpan.FromMinutes(1));
53 | }
54 | catch (Exception ex)
55 | {
56 | Console.WriteLine("Exception occured trying to post to topic:{0}. Expcetion:{1}", source.Topic, ex);
57 | }
58 | }
59 | }
60 | }
61 | }
62 |
--------------------------------------------------------------------------------
/src/StatisticsTestLoader/Properties/AssemblyInfo.cs:
--------------------------------------------------------------------------------
1 | using System.Reflection;
2 | using System.Runtime.CompilerServices;
3 | using System.Runtime.InteropServices;
4 |
5 | // General Information about an assembly is controlled through the following
6 | // set of attributes. Change these attribute values to modify the information
7 | // associated with an assembly.
8 | [assembly: AssemblyTitle("StatisticsTestLoader")]
9 | [assembly: AssemblyDescription("")]
10 | [assembly: AssemblyConfiguration("")]
11 | [assembly: AssemblyCompany("")]
12 | [assembly: AssemblyProduct("StatisticsTestLoader")]
13 | [assembly: AssemblyCopyright("Copyright © 2015")]
14 | [assembly: AssemblyTrademark("")]
15 | [assembly: AssemblyCulture("")]
16 |
17 | // Setting ComVisible to false makes the types in this assembly not visible
18 | // to COM components. If you need to access a type in this assembly from
19 | // COM, set the ComVisible attribute to true on that type.
20 | [assembly: ComVisible(false)]
21 |
22 | // The following GUID is for the ID of the typelib if this project is exposed to COM
23 | [assembly: Guid("8dd28590-dd14-465d-a029-d6a7223ce45e")]
24 |
25 | // Version information for an assembly consists of the following four values:
26 | //
27 | // Major Version
28 | // Minor Version
29 | // Build Number
30 | // Revision
31 | //
32 | // You can specify all the values or you can default the Build and Revision Numbers
33 | // by using the '*' as shown below:
34 | // [assembly: AssemblyVersion("1.0.*")]
35 | [assembly: AssemblyVersion("1.0.0.0")]
36 | [assembly: AssemblyFileVersion("1.0.0.0")]
37 |
--------------------------------------------------------------------------------
/src/StatisticsTestLoader/SourcePropertyChanges.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Concurrent;
3 | using System.Collections.Generic;
4 | using System.Linq;
5 | using System.Net.Http;
6 | using System.Threading;
7 | using System.Threading.Tasks;
8 | using Couchbase;
9 | using Couchbase.Configuration;
10 | using KafkaNet.Common;
11 |
12 | namespace StatisticsTestLoader
13 | {
14 | public class SourcePropertyChanges : IRecordSource
15 | {
16 | private const string ApiTopic = "pcs_encoding_updates";
17 |
18 | private readonly CouchbaseClient _couch;
19 | private BlockingCollection<KafkaRecord> _dataQueue;
20 |
21 | public SourcePropertyChanges(string username, string password, params Uri[] couchbaseServers)
22 | {
23 | var config = new CouchbaseClientConfiguration
24 | {
25 | Bucket = username,
26 | BucketPassword = password,
27 | };
28 |
29 | Array.ForEach(couchbaseServers, uri => config.Urls.Add(uri));
30 | _couch = new CouchbaseClient(config);
31 | _couch.NodeFailed += node => Console.WriteLine(node.ToString());
32 | }
33 |
34 | public string Topic { get { return ApiTopic; } }
35 | public IEnumerable<KafkaRecord> Poll(long index)
36 | {
37 | return PollForChanges(index);
38 | }
39 |
40 | public int QueueCount { get { return _dataQueue.Count; } }
41 |
42 | private IEnumerable<KafkaRecord> PollForChanges(long index)
43 | {
44 | if (index <= 0) index = DateTime.UtcNow.AddYears(-1).Ticks;
45 |
46 | _dataQueue = new BlockingCollection<KafkaRecord>(100000);
47 |
48 | Task.Factory.StartNew(() => PopulateData(index, _dataQueue), CancellationToken.None,
49 | TaskCreationOptions.LongRunning, TaskScheduler.Default);
50 |
51 | return _dataQueue.GetConsumingEnumerable();
52 | }
53 |
54 | private void PopulateData(long index, BlockingCollection<KafkaRecord> data)
55 | {
56 | try
57 | {
58 | //load the large document set from couchbase
59 | Console.WriteLine("Polling for couchbase changes...");
60 | var changes = _couch.GetView("Kafka", "by_versiontick", false)
61 | .StartKey(index)
62 | .Select(x => new KafkaRecord
63 | {
64 | Key = x.ItemId,
65 | Offset = (long)x.ViewKey[0],
66 | Topic = ApiTopic,
67 | });
68 |
69 | //as fast as we can, pull the documents from CB and push to our output collection
70 | Parallel.ForEach(changes.Batch(100), new ParallelOptions { MaxDegreeOfParallelism = 20 },
71 | (batch) =>
72 | {
73 | var temp = batch.ToList();
74 | var records = _couch.Get(temp.Select(x => x.Key));
75 |
76 | foreach (var change in temp)
77 | {
78 | if (records.ContainsKey(change.Key))
79 | {
80 | change.AddDocument(records[change.Key].ToString());
81 | data.Add(change);
82 | }
83 | }
84 | });
85 |
86 | }
87 | catch (Exception ex)
88 | {
89 | Console.WriteLine("Failed to populate _dataQueue: {0}", ex);
90 | }
91 | finally
92 | {
93 | data.CompleteAdding();
94 | }
95 | }
96 | }
97 | }
98 |
--------------------------------------------------------------------------------
/src/StatisticsTestLoader/StatisticsTestLoader.csproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Debug
6 | AnyCPU
7 | {02E1B2E3-E1B2-4E68-9BD9-95F266302C75}
8 | Exe
9 | Properties
10 | StatisticsTestLoader
11 | StatisticsTestLoader
12 | v4.5
13 | 512
14 | ..\
15 | true
16 |
17 |
18 | AnyCPU
19 | true
20 | full
21 | false
22 | bin\Debug\
23 | DEBUG;TRACE
24 | prompt
25 | 4
26 |
27 |
28 | AnyCPU
29 | pdbonly
30 | true
31 | bin\Release\
32 | TRACE
33 | prompt
34 | 4
35 |
36 |
37 |
38 | ..\packages\CouchbaseNetClient.1.3.9\lib\net40\Couchbase.dll
39 |
40 |
41 | False
42 | ..\packages\CouchbaseNetClient.1.3.9\lib\net40\Enyim.Caching.dll
43 |
44 |
45 | ..\packages\Newtonsoft.Json.6.0.8\lib\net45\Newtonsoft.Json.dll
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 | {1343eb68-55cb-4452-8386-24a9989de1c0}
74 | kafka-net
75 |
76 |
77 |
78 |
79 |
80 |
81 | This project references NuGet package(s) that are missing on this computer. Enable NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.
82 |
83 |
84 |
85 |
92 |
--------------------------------------------------------------------------------
/src/StatisticsTestLoader/packages.config:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
--------------------------------------------------------------------------------
/src/TestHarness/App.config:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/src/TestHarness/Program.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Linq;
3 | using System.Threading.Tasks;
4 | using KafkaNet;
5 | using KafkaNet.Common;
6 | using KafkaNet.Model;
7 | using KafkaNet.Protocol;
8 |
9 | namespace TestHarness
10 | {
11 | class Program
12 | {
13 | static void Main(string[] args)
14 | {
15 | const string topicName = "TestHarness";
16 |
17 | //create an options object that sets up driver preferences
18 | var options = new KafkaOptions(new Uri("http://CSDKAFKA01:9092"), new Uri("http://CSDKAFKA02:9092"))
19 | {
20 | Log = new DefaultTraceLog()
21 | };
22 |
23 | //start a background task that runs a consumer and writes all received messages to the console
24 | Task.Run(() =>
25 | {
26 | var consumer = new Consumer(new ConsumerOptions(topicName, new BrokerRouter(options)) { Log = new DefaultTraceLog() });
27 | foreach (var data in consumer.Consume())
28 | {
29 | Console.WriteLine("Response: P{0},O{1} : {2}", data.Meta.PartitionId, data.Meta.Offset, data.Value.ToUtf8String());
30 | }
31 | });
32 |
33 | //create a producer to send messages with
34 | var producer = new Producer(new BrokerRouter(options))
35 | {
36 | BatchSize = 100,
37 | BatchDelayTime = TimeSpan.FromMilliseconds(2000)
38 | };
39 |
40 |
41 | //take in console read messages
42 | Console.WriteLine("Type a message and press enter...");
43 | while (true)
44 | {
45 | var message = Console.ReadLine();
46 | if (message == "quit") break;
47 |
48 | if (string.IsNullOrEmpty(message))
49 | {
50 | //send a random batch of messages
51 | SendRandomBatch(producer, topicName, 200);
52 | }
53 | else
54 | {
55 | producer.SendMessageAsync(topicName, new[] { new Message(message) });
56 | }
57 | }
58 |
59 | using (producer)
60 | {
61 |
62 | }
63 | }
64 |
65 | private static async void SendRandomBatch(Producer producer, string topicName, int count)
66 | {
67 | //send multiple messages
68 | var sendTask = producer.SendMessageAsync(topicName, Enumerable.Range(0, count).Select(x => new Message(x.ToString())));
69 |
70 | Console.WriteLine("Posted #{0} messages. Buffered:{1} AsyncCount:{2}", count, producer.BufferCount, producer.AsyncCount);
71 |
72 | var response = await sendTask;
73 |
74 | Console.WriteLine("Completed send of batch: {0}. Buffered:{1} AsyncCount:{2}", count, producer.BufferCount, producer.AsyncCount);
75 | foreach (var result in response.OrderBy(x => x.PartitionId))
76 | {
77 | Console.WriteLine("Topic:{0} PartitionId:{1} Offset:{2}", result.Topic, result.PartitionId, result.Offset);
78 | }
79 |
80 | }
81 | }
82 | }
83 |
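
Program.cs leans on the producer's internal batching: SendMessageAsync calls made within BatchDelayTime, or before BatchSize messages accumulate, are coalesced into one produce request per partition leader instead of being sent one at a time. A hedged sketch of driving that batching explicitly and awaiting all of the results (the topic name and delay are illustrative, and the usings from Program.cs above are assumed):

    private static async Task SendBatchedAsync(KafkaOptions options)
    {
        using (var producer = new Producer(new BrokerRouter(options))
        {
            BatchSize = 100,
            BatchDelayTime = TimeSpan.FromMilliseconds(50)
        })
        {
            //all three calls fall inside one batch window, so the driver should
            //combine them into a single wire request rather than three round trips
            var sends = new[]
            {
                producer.SendMessageAsync("demo-topic", new[] { new Message("a") }),
                producer.SendMessageAsync("demo-topic", new[] { new Message("b") }),
                producer.SendMessageAsync("demo-topic", new[] { new Message("c") })
            };
            var results = await Task.WhenAll(sends);
            Console.WriteLine("Completed {0} batched sends.", results.Length);
        }
    }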
--------------------------------------------------------------------------------
/src/TestHarness/Properties/AssemblyInfo.cs:
--------------------------------------------------------------------------------
1 | using System.Reflection;
2 | using System.Runtime.CompilerServices;
3 | using System.Runtime.InteropServices;
4 |
5 | // General Information about an assembly is controlled through the following
6 | // set of attributes. Change these attribute values to modify the information
7 | // associated with an assembly.
8 | [assembly: AssemblyTitle("TestHarness")]
9 | [assembly: AssemblyDescription("")]
10 | [assembly: AssemblyConfiguration("")]
11 | [assembly: AssemblyCompany("")]
12 | [assembly: AssemblyProduct("TestHarness")]
13 | [assembly: AssemblyCopyright("Copyright © James Roland 2014")]
14 | [assembly: AssemblyTrademark("")]
15 | [assembly: AssemblyCulture("")]
16 |
17 | // Setting ComVisible to false makes the types in this assembly not visible
18 | // to COM components. If you need to access a type in this assembly from
19 | // COM, set the ComVisible attribute to true on that type.
20 | [assembly: ComVisible(false)]
21 |
22 | // The following GUID is for the ID of the typelib if this project is exposed to COM
23 | [assembly: Guid("2f0193f8-32ab-438c-bc96-2e430a23c2c7")]
24 |
25 | // Version information for an assembly consists of the following four values:
26 | //
27 | // Major Version
28 | // Minor Version
29 | // Build Number
30 | // Revision
31 | //
32 | // You can specify all the values or you can default the Build and Revision Numbers
33 | // by using the '*' as shown below:
34 | // [assembly: AssemblyVersion("1.0.*")]
35 | [assembly: AssemblyVersion("1.0.0.0")]
36 | [assembly: AssemblyFileVersion("1.0.0.0")]
37 |
--------------------------------------------------------------------------------
/src/TestHarness/TestHarness.csproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Debug
6 | AnyCPU
7 | {53E0B3CE-6C41-4C8A-8B66-9BD03667B1E0}
8 | Exe
9 | Properties
10 | TestHarness
11 | TestHarness
12 | v4.5
13 | 512
14 | ..\
15 | true
16 |
17 |
18 | AnyCPU
19 | true
20 | full
21 | false
22 | bin\Debug\
23 | DEBUG;TRACE
24 | prompt
25 | 4
26 |
27 |
28 | AnyCPU
29 | pdbonly
30 | true
31 | bin\Release\
32 | TRACE
33 | prompt
34 | 4
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 | {1343eb68-55cb-4452-8386-24a9989de1c0}
55 | KafkaNetClient
56 |
57 |
58 |
59 |
60 |
61 |
62 | This project references NuGet package(s) that are missing on this computer. Enable NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.
63 |
64 |
65 |
66 |
73 |
--------------------------------------------------------------------------------
/src/TestHarness/packages.config:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/src/kafka-tests/App.config:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/src/kafka-tests/Fakes/FakeBrokerRouter.cs:
--------------------------------------------------------------------------------
1 | using kafka_tests.Fakes;
2 | using KafkaNet;
3 | using KafkaNet.Model;
4 | using KafkaNet.Protocol;
5 | using NSubstitute;
6 | using System;
7 | using System.Collections.Generic;
8 | using System.Net;
9 | using System.Threading;
10 |
11 | namespace kafka_tests
12 | {
13 | public class FakeBrokerRouter
14 | {
15 | public const string TestTopic = "UnitTest";
16 |
17 | private int _offset0;
18 | private int _offset1;
19 | private readonly FakeKafkaConnection _fakeConn0;
20 | private readonly FakeKafkaConnection _fakeConn1;
21 | private readonly IKafkaConnectionFactory _mockKafkaConnectionFactory;
22 | public readonly TimeSpan _cacheExpiration = TimeSpan.FromMilliseconds(1);
23 | public FakeKafkaConnection BrokerConn0 { get { return _fakeConn0; } }
24 | public FakeKafkaConnection BrokerConn1 { get { return _fakeConn1; } }
25 | public IKafkaConnectionFactory KafkaConnectionMockKafkaConnectionFactory { get { return _mockKafkaConnectionFactory; } }
26 |
27 | public Func<MetadataResponse> MetadataResponse = () => DefaultMetadataResponse();
28 |
29 | public IPartitionSelector PartitionSelector = new DefaultPartitionSelector();
30 |
31 | public FakeBrokerRouter()
32 | {
33 | //setup mock IKafkaConnection
34 |
35 | _fakeConn0 = new FakeKafkaConnection(new Uri("http://localhost:1"));
36 | _fakeConn0.ProduceResponseFunction = async () => new ProduceResponse { Offset = _offset0++, PartitionId = 0, Topic = TestTopic };
37 | _fakeConn0.MetadataResponseFunction = async () => MetadataResponse();
38 | _fakeConn0.OffsetResponseFunction = async () => new OffsetResponse { Offsets = new List<long> { 0, 99 }, PartitionId = 0, Topic = TestTopic };
39 | _fakeConn0.FetchResponseFunction = async () => { Thread.Sleep(500); return null; };
40 |
41 | _fakeConn1 = new FakeKafkaConnection(new Uri("http://localhost:2"));
42 | _fakeConn1.ProduceResponseFunction = async () => new ProduceResponse { Offset = _offset1++, PartitionId = 1, Topic = TestTopic };
43 | _fakeConn1.MetadataResponseFunction = async () => MetadataResponse();
44 | _fakeConn1.OffsetResponseFunction = async () => new OffsetResponse { Offsets = new List<long> { 0, 100 }, PartitionId = 1, Topic = TestTopic };
45 | _fakeConn1.FetchResponseFunction = async () => { Thread.Sleep(500); return null; };
46 |
47 | _mockKafkaConnectionFactory = Substitute.For<IKafkaConnectionFactory>();
48 | _mockKafkaConnectionFactory.Create(Arg.Is<KafkaEndpoint>(e => e.Endpoint.Port == 1), Arg.Any<TimeSpan>(), Arg.Any<IKafkaLog>(), Arg.Any<int>(), Arg.Any<TimeSpan?>(), Arg.Any<StatisticsTrackerOptions>()).Returns(_fakeConn0);
49 | _mockKafkaConnectionFactory.Create(Arg.Is<KafkaEndpoint>(e => e.Endpoint.Port == 2), Arg.Any<TimeSpan>(), Arg.Any<IKafkaLog>(), Arg.Any<int>(), Arg.Any<TimeSpan?>(), Arg.Any<StatisticsTrackerOptions>()).Returns(_fakeConn1);
50 | _mockKafkaConnectionFactory.Resolve(Arg.Any<Uri>(), Arg.Any<IKafkaLog>())
51 | .Returns(info => new KafkaEndpoint
52 | {
53 | Endpoint = new IPEndPoint(IPAddress.Parse("127.0.0.1"), ((Uri)info[0]).Port),
54 | ServeUri = ((Uri)info[0])
55 | });
56 | }
57 |
58 | public IBrokerRouter Create()
59 | {
60 | return new BrokerRouter(new KafkaNet.Model.KafkaOptions
61 | {
62 | KafkaServerUri = new List<Uri> { new Uri("http://localhost:1"), new Uri("http://localhost:2") },
63 | KafkaConnectionFactory = _mockKafkaConnectionFactory,
64 | PartitionSelector = PartitionSelector,
65 | CacheExpiration = _cacheExpiration
66 |
67 | });
68 | }
69 |
70 | public static MetadataResponse DefaultMetadataResponse()
71 | {
72 | return new MetadataResponse
73 | {
74 | CorrelationId = 1,
75 | Brokers = new List<Broker>
76 | {
77 | new Broker
78 | {
79 | Host = "localhost",
80 | Port = 1,
81 | BrokerId = 0
82 | },
83 | new Broker
84 | {
85 | Host = "localhost",
86 | Port = 2,
87 | BrokerId = 1
88 | },
89 | },
90 | Topics = new List<Topic>
91 | {
92 | new Topic
93 | {
94 | ErrorCode = 0,
95 | Name = TestTopic,
96 | Partitions = new List<Partition>
97 | {
98 | new Partition
99 | {
100 | ErrorCode = 0,
101 | Isrs = new List<int> {1},
102 | PartitionId = 0,
103 | LeaderId = 0,
104 | Replicas = new List<int> {1},
105 | },
106 | new Partition
107 | {
108 | ErrorCode = 0,
109 | Isrs = new List<int> {1},
110 | PartitionId = 1,
111 | LeaderId = 1,
112 | Replicas = new List<int> {1},
113 | }
114 | }
115 | }
116 | }
117 | };
118 | }
119 | }
120 | }
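
FakeBrokerRouter is the seam the unit tests use to exercise the real router and producer code without a live broker: shape a response delegate on one of the fake connections, then call Create(). A small sketch of that flow (the error code value is illustrative):

    var fakeRouter = new FakeBrokerRouter();

    //make partition 0 answer every produce request with a canned failure
    fakeRouter.BrokerConn0.ProduceResponseFunction = async () =>
        new ProduceResponse { Error = 1, PartitionId = 0, Topic = FakeBrokerRouter.TestTopic };

    var router = fakeRouter.Create();
    //...drive a Producer against router and assert on the surfaced error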
--------------------------------------------------------------------------------
/src/kafka-tests/Fakes/FakeKafkaConnection.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet;
2 | using KafkaNet.Model;
3 | using KafkaNet.Protocol;
4 | using System;
5 | using System.Collections.Generic;
6 | using System.Threading;
7 | using System.Threading.Tasks;
8 |
9 | namespace kafka_tests.Fakes
10 | {
11 | public class FakeKafkaConnection : IKafkaConnection
12 | {
13 | public Func<Task<ProduceResponse>> ProduceResponseFunction;
14 | public Func<Task<MetadataResponse>> MetadataResponseFunction;
15 | public Func<Task<OffsetResponse>> OffsetResponseFunction;
16 | public Func<Task<FetchResponse>> FetchResponseFunction;
17 |
18 | public FakeKafkaConnection(Uri address)
19 | {
20 | Endpoint = new DefaultKafkaConnectionFactory().Resolve(address, new DefaultTraceLog());
21 | }
22 |
23 | public long MetadataRequestCallCount; // { get; set; }
24 | public long ProduceRequestCallCount; //{ get; set; }
25 | public long OffsetRequestCallCount; //{ get; set; }
26 | public long FetchRequestCallCount; // { get; set; }
27 |
28 | public KafkaEndpoint Endpoint { get; private set; }
29 |
30 | public bool ReadPolling
31 | {
32 | get { return true; }
33 | }
34 |
35 | public Task SendAsync(KafkaDataPayload payload)
36 | {
37 | throw new NotImplementedException();
38 | }
39 |
40 | /// <exception cref="Exception">A delegate callback throws an exception.</exception>
41 | /// <exception cref="NullReferenceException">The response function for the requested type is null.</exception>
42 | public async Task<List<T>> SendAsync<T>(IKafkaRequest<T> request)
43 | {
44 | T result;
45 |
46 | if (typeof(T) == typeof(ProduceResponse))
47 | {
48 | Interlocked.Increment(ref ProduceRequestCallCount);
49 | result = (T)((object)await ProduceResponseFunction());
50 | }
51 | else if (typeof(T) == typeof(MetadataResponse))
52 | {
53 | Interlocked.Increment(ref MetadataRequestCallCount);
54 | result = (T)(object)await MetadataResponseFunction();
55 | }
56 | else if (typeof(T) == typeof(OffsetResponse))
57 | {
58 | Interlocked.Increment(ref OffsetRequestCallCount);
59 | result = (T)(object)await OffsetResponseFunction();
60 | }
61 | else if (typeof(T) == typeof(FetchResponse))
62 | {
63 | Interlocked.Increment(ref FetchRequestCallCount);
64 | result = (T)(object)await FetchResponseFunction();
65 | }
66 | else
67 | {
68 | throw new Exception("No response implementation found for type " + typeof(T).Name);
69 | }
70 | var resultlist = new List<T>();
71 | resultlist.Add(result);
72 | return resultlist;
73 | }
74 |
75 | public void Dispose()
76 | {
77 | }
78 | }
79 | }
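
Because SendAsync<T> dispatches on typeof(T), a test can shape each response kind independently, for example answering metadata immediately while making fetches slow. A sketch (the delay is illustrative; returning null for the fetch mirrors what FakeBrokerRouter does):

    var conn = new FakeKafkaConnection(new Uri("http://localhost:1"));
    conn.MetadataResponseFunction = async () => FakeBrokerRouter.DefaultMetadataResponse();
    conn.FetchResponseFunction = async () =>
    {
        await Task.Delay(500); //simulate a broker that is slow to serve fetches
        return null;
    };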
--------------------------------------------------------------------------------
/src/kafka-tests/Fakes/FakeTcpServer.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet;
2 | using System;
3 | using System.Linq;
4 | using System.Net;
5 | using System.Net.Sockets;
6 | using System.Text;
7 | using System.Threading;
8 | using System.Threading.Tasks;
9 |
10 | namespace kafka_tests.Fakes
11 | {
12 | public class FakeTcpServer : IDisposable
13 | {
14 | public event Action<byte[]> OnBytesReceived;
15 |
16 | public event Action OnClientConnected;
17 |
18 | public event Action OnClientDisconnected;
19 |
20 | private IKafkaLog _log;
21 |
22 | private TcpClient _client;
23 | private readonly SemaphoreSlim _semaphoreSlim = new SemaphoreSlim(0);
24 | private readonly TcpListener _listener;
25 | private readonly CancellationTokenSource _disposeToken = new CancellationTokenSource();
26 | private TaskCompletionSource<bool> _clientConnectedTrigger = new TaskCompletionSource<bool>();
27 |
28 | private readonly Task _clientConnectionHandlerTask = null;
29 |
30 | public int ConnectionEventcount = 0;
31 | public int DisconnectionEventCount = 0;
32 | public Task HasClientConnected { get { return _clientConnectedTrigger.Task; } }
33 |
34 | public FakeTcpServer(IKafkaLog log, int port)
35 | {
36 | _log = log;
37 | _listener = new TcpListener(IPAddress.Any, port);
38 | _listener.Start();
39 |
40 | OnClientConnected += () =>
41 | {
42 | Interlocked.Increment(ref ConnectionEventcount);
43 | _clientConnectedTrigger.TrySetResult(true);
44 | };
45 |
46 | OnClientDisconnected += () =>
47 | {
48 | Interlocked.Increment(ref DisconnectionEventCount);
49 | _clientConnectedTrigger = new TaskCompletionSource<bool>();
50 | };
51 |
52 | _clientConnectionHandlerTask = StartHandlingClientRequestAsync();
53 | }
54 |
55 | public async Task SendDataAsync(byte[] data)
56 | {
57 | try
58 | {
59 | await _semaphoreSlim.WaitAsync();
60 | _log.DebugFormat("FakeTcpServer: writing {0} bytes.", data.Length);
61 | await _client.GetStream().WriteAsync(data, 0, data.Length).ConfigureAwait(false);
62 | }
63 | catch (Exception ex)
64 | {
65 | _log.ErrorFormat("error:{0} stack{1}", ex.Message, ex.StackTrace);
66 | }
67 | finally
68 | {
69 | _semaphoreSlim.Release();
70 | }
71 | }
72 |
73 | public Task SendDataAsync(string data)
74 | {
75 | var msg = Encoding.ASCII.GetBytes(data);
76 | return SendDataAsync(msg);
77 | }
78 |
79 | public void DropConnection()
80 | {
81 | if (_client != null)
82 | {
83 | using (_client)
84 | {
85 | _client.Close();
86 | }
87 |
88 | _client = null;
89 | }
90 | }
91 |
92 | private async Task StartHandlingClientRequestAsync()
93 | {
94 | while (_disposeToken.IsCancellationRequested == false)
95 | {
96 | _log.InfoFormat("FakeTcpServer: Accepting clients.");
97 | _client = await _listener.AcceptTcpClientAsync();
98 |
99 | _log.InfoFormat("FakeTcpServer: Connected client");
100 | if (OnClientConnected != null) OnClientConnected();
101 | _semaphoreSlim.Release();
102 |
103 | try
104 | {
105 | using (_client)
106 | {
107 | var buffer = new byte[4096];
108 | var stream = _client.GetStream();
109 |
110 | while (!_disposeToken.IsCancellationRequested)
111 | {
112 | //read from the connected client
113 | var readTask = stream.ReadAsync(buffer, 0, buffer.Length, _disposeToken.Token);
114 |
115 | var bytesReceived = await readTask;
116 |
117 | if (bytesReceived > 0)
118 | {
119 | if (OnBytesReceived != null) OnBytesReceived(buffer.Take(bytesReceived).ToArray());
120 | }
121 | }
122 | }
123 | }
124 | catch (Exception ex)
125 | {
126 | _log.ErrorFormat("FakeTcpServer: Client exception... Exception:{0}", ex.Message);
127 | }
128 |
129 | _log.ErrorFormat("FakeTcpServer: Client Disconnected.");
130 | await _semaphoreSlim.WaitAsync(); //remove the one client
131 | if (OnClientDisconnected != null) OnClientDisconnected();
132 | }
133 | }
134 |
135 | public void Dispose()
136 | {
137 | if (_disposeToken != null) _disposeToken.Cancel();
138 |
139 | using (_disposeToken)
140 | {
141 | if (_clientConnectionHandlerTask != null)
142 | {
143 | _clientConnectionHandlerTask.Wait(TimeSpan.FromSeconds(5));
144 | }
145 |
146 | _listener.Stop();
147 | }
148 | }
149 | }
150 | }
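
FakeTcpServer gives the socket tests a loopback peer: SendDataAsync waits on the internal semaphore until a client has actually connected, and OnBytesReceived surfaces whatever the client writes. A typical exchange inside an async test (the port is illustrative; TaskTest.WaitFor is the polling helper defined below):

    using (var server = new FakeTcpServer(new DefaultTraceLog(), 8999))
    using (var client = new TcpClient())
    {
        byte[] received = null;
        server.OnBytesReceived += data => received = data;

        client.Connect("localhost", 8999);
        await server.HasClientConnected;    //resolves once the listener accepts
        await server.SendDataAsync("ping"); //safe: waits for the connection first

        client.GetStream().Write(new byte[] { 1, 2, 3 }, 0, 3);
        await TaskTest.WaitFor(() => received != null);
    }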
--------------------------------------------------------------------------------
/src/kafka-tests/Helpers/IntegrationConfig.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet;
2 | using System;
3 | using System.Configuration;
4 |
5 | namespace kafka_tests.Helpers
6 | {
7 | public static class IntegrationConfig
8 | {
9 | public static string IntegrationCompressionTopic = Environment.MachineName + "IntegrationCompressionTopic1";
10 | public static string IntegrationTopic = Environment.MachineName + "IntegrationTopic1";
11 | public static string IntegrationConsumer = Environment.MachineName + "IntegrationConsumer1";
12 | public const int NumberOfRepeat = 1;
13 |
14 | // Some of these tests measure performance. The full debug log is too slow, so the log
15 | // level is raised to emit only the important messages.
16 | public static IKafkaLog NoDebugLog = new DefaultTraceLog(LogLevel.Info);
17 |
18 | public static IKafkaLog AllLog = new DefaultTraceLog();
19 |
20 | public static string Highlight(string message)
21 | {
22 | return String.Format("**************************{0}**************************", message);
23 | }
24 |
25 | public static string Highlight(string message, params object[] args)
26 | {
27 | return String.Format("**************************{0}**************************", string.Format(message, args));
28 | }
29 |
30 | public static Uri IntegrationUri
31 | {
32 | get
33 | {
34 | var url = ConfigurationManager.AppSettings["IntegrationKafkaServerUrl"];
35 | if (url == null) throw new ConfigurationErrorsException("IntegrationKafkaServerUrl must be specified in the app.config file.");
36 | return new Uri(url);
37 | }
38 | }
39 | }
40 | }
--------------------------------------------------------------------------------
/src/kafka-tests/Helpers/TaskTest.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Diagnostics;
3 | using System.Threading.Tasks;
4 |
5 | namespace kafka_tests.Helpers
6 | {
7 | public static class TaskTest
8 | {
9 | /// <exception cref="Exception">A delegate callback throws an exception.</exception>
10 | public async static Task<bool> WaitFor(Func<bool> predicate, int milliSeconds = 3000)
11 | {
12 | var sw = Stopwatch.StartNew();
13 | while (predicate() == false)
14 | {
15 | if (sw.ElapsedMilliseconds > milliSeconds)
16 | return false;
17 | await Task.Delay(50).ConfigureAwait(false);
18 | }
19 | return true;
20 | }
21 | }
22 | }
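
WaitFor is the polling primitive the asynchronous tests below lean on: it re-checks the predicate every 50 ms and resolves to true as soon as it passes, or to false once the timeout elapses. Typical use inside an async test (the counter and timeout are illustrative):

    var count = 0;
    //...start work on another thread that increments count...
    var sawIncrement = await TaskTest.WaitFor(() => count > 0, milliSeconds: 1000);
    Assert.That(sawIncrement, Is.True, "Worker never incremented the counter.");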
--------------------------------------------------------------------------------
/src/kafka-tests/Integration/GzipProducerConsumerTests.cs:
--------------------------------------------------------------------------------
1 | using kafka_tests.Helpers;
2 | using KafkaNet;
3 | using KafkaNet.Common;
4 | using KafkaNet.Model;
5 | using KafkaNet.Protocol;
6 | using NUnit.Framework;
7 | using System;
8 | using System.Collections.Generic;
9 | using System.Linq;
10 | using System.Threading;
11 | using System.Threading.Tasks;
12 |
13 | namespace kafka_tests.Integration
14 | {
15 | [TestFixture]
16 | [Category("Integration")]
17 | public class GzipProducerConsumerTests
18 | {
19 | private readonly KafkaOptions _options = new KafkaOptions(IntegrationConfig.IntegrationUri) { Log = IntegrationConfig.NoDebugLog };
20 |
21 | private KafkaConnection GetKafkaConnection()
22 | {
23 | var endpoint = new DefaultKafkaConnectionFactory().Resolve(_options.KafkaServerUri.First(), _options.Log);
24 | return new KafkaConnection(new KafkaTcpSocket(new DefaultTraceLog(), endpoint, 5), _options.ResponseTimeoutMs, _options.Log);
25 | }
26 |
27 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
28 | [Ignore]
29 | public async Task EnsureGzipCompressedMessageCanSend()
30 | {
31 | IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start EnsureGzipCompressedMessageCanSend"));
32 | using (var conn = GetKafkaConnection())
33 | {
34 | conn.SendAsync(new MetadataRequest
35 | {
36 | Topics = new List<string>(new[] { IntegrationConfig.IntegrationCompressionTopic })
37 | })
38 | .Wait(TimeSpan.FromSeconds(10));
39 | }
40 |
41 | using (var router = new BrokerRouter(_options))
42 | {
43 | IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start RefreshMissingTopicMetadata"));
44 | await router.RefreshMissingTopicMetadata(IntegrationConfig.IntegrationCompressionTopic);
45 | IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("end RefreshMissingTopicMetadata"));
46 | var conn = router.SelectBrokerRouteFromLocalCache(IntegrationConfig.IntegrationCompressionTopic, 0);
47 |
48 | var request = new ProduceRequest
49 | {
50 | Acks = 1,
51 | TimeoutMS = 1000,
52 | Payload = new List<Payload>
53 | {
54 | new Payload
55 | {
56 | Codec = MessageCodec.CodecGzip,
57 | Topic = IntegrationConfig.IntegrationCompressionTopic,
58 | Partition = 0,
59 | Messages = new List<Message>
60 | {
61 | new Message("0", "1"),
62 | new Message("1", "1"),
63 | new Message("2", "1")
64 | }
65 | }
66 | }
67 | };
68 | IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start SendAsync"));
69 | var response = conn.Connection.SendAsync(request).Result;
70 | IntegrationConfig.NoDebugLog.InfoFormat("end SendAsync");
71 | Assert.That(response.First().Error, Is.EqualTo(0));
72 | IntegrationConfig.NoDebugLog.InfoFormat("start dispose");
73 | }
74 | IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("end EnsureGzipCompressedMessageCanSend"));
75 | }
76 |
77 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
78 | public void EnsureGzipCanDecompressMessageFromKafka()
79 | {
80 | var router = new BrokerRouter(_options);
81 | var producer = new Producer(router);
82 |
83 | var offsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationCompressionTopic).Result;
84 |
85 | var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationCompressionTopic, router) { PartitionWhitelist = new List<int>() { 0 } },
86 | offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray());
87 | int numberOfMessages = 3;
88 | for (int i = 0; i < numberOfMessages; i++)
89 | {
90 | producer.SendMessageAsync(IntegrationConfig.IntegrationCompressionTopic, new[] { new Message(i.ToString()) }, codec: MessageCodec.CodecGzip,
91 | partition: 0);
92 | }
93 |
94 | var results = consumer.Consume(new CancellationTokenSource(TimeSpan.FromMinutes(1)).Token).Take(numberOfMessages).ToList();
95 |
96 | for (int i = 0; i < numberOfMessages; i++)
97 | {
98 | Assert.That(results[i].Value.ToUtf8String(), Is.EqualTo(i.ToString()));
99 | }
100 |
101 | using (producer)
102 | using (consumer) { }
103 | }
104 | }
105 | }
--------------------------------------------------------------------------------
/src/kafka-tests/Integration/KafkaConnectionIntegrationTests.cs:
--------------------------------------------------------------------------------
1 | using kafka_tests.Helpers;
2 | using KafkaNet;
3 | using KafkaNet.Model;
4 | using KafkaNet.Protocol;
5 | using NUnit.Framework;
6 | using System.Collections.Generic;
7 | using System.Linq;
8 | using System.Threading.Tasks;
9 |
10 | namespace kafka_tests.Integration
11 | {
12 | /// <summary>
13 | /// Note these integration tests require an actively running kafka server defined in the app.config file.
14 | /// </summary>
15 | [TestFixture]
16 | [Category("Integration")]
17 | public class KafkaConnectionIntegrationTests
18 | {
19 | private KafkaConnection _conn;
20 |
21 | [SetUp]
22 | public void Setup()
23 | {
24 | var options = new KafkaOptions(IntegrationConfig.IntegrationUri);
25 | var endpoint = new DefaultKafkaConnectionFactory().Resolve(options.KafkaServerUri.First(), options.Log);
26 |
27 | _conn = new KafkaConnection(new KafkaTcpSocket(new DefaultTraceLog(), endpoint, 5), options.ResponseTimeoutMs, options.Log);
28 | }
29 |
30 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
31 | public void EnsureTwoRequestsCanCallOneAfterAnother()
32 | {
33 | var result1 = _conn.SendAsync(new MetadataRequest()).Result;
34 | var result2 = _conn.SendAsync(new MetadataRequest()).Result;
35 | Assert.That(result1.Count, Is.EqualTo(1));
36 | Assert.That(result2.Count, Is.EqualTo(1));
37 | }
38 |
39 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
40 | public void EnsureAsyncRequestResponsesCorrelate()
41 | {
42 | var result1 = _conn.SendAsync(new MetadataRequest());
43 | var result2 = _conn.SendAsync(new MetadataRequest());
44 | var result3 = _conn.SendAsync(new MetadataRequest());
45 |
46 | Assert.That(result1.Result.Count, Is.EqualTo(1));
47 | Assert.That(result2.Result.Count, Is.EqualTo(1));
48 | Assert.That(result3.Result.Count, Is.EqualTo(1));
49 | }
50 |
51 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
52 | public void EnsureMultipleAsyncRequestsCanReadResponses()
53 | {
54 | var requests = new List<Task<List<MetadataResponse>>>();
55 | var singleResult = _conn.SendAsync(new MetadataRequest { Topics = new List<string>(new[] { IntegrationConfig.IntegrationTopic }) }).Result;
56 | Assert.That(singleResult.Count, Is.GreaterThan(0));
57 | Assert.That(singleResult.First().Topics.Count, Is.GreaterThan(0));
58 |
59 | for (int i = 0; i < 20; i++)
60 | {
61 | requests.Add(_conn.SendAsync(new MetadataRequest()));
62 | }
63 |
64 | var results = requests.Select(x => x.Result).ToList();
65 | Assert.That(results.Count, Is.EqualTo(20));
66 | }
67 |
68 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
69 | public void EnsureDifferentTypesOfResponsesCanBeReadAsync()
70 | {
71 | //just ensure the topic exists for this test
72 | var ensureTopic = _conn.SendAsync(new MetadataRequest { Topics = new List<string>(new[] { IntegrationConfig.IntegrationTopic }) }).Result;
73 |
74 | Assert.That(ensureTopic.Count, Is.GreaterThan(0));
75 | Assert.That(ensureTopic.First().Topics.Count, Is.EqualTo(1));
76 | Assert.That(ensureTopic.First().Topics.First().Name == IntegrationConfig.IntegrationTopic, Is.True, "ProduceRequest did not return expected topic.");
77 |
78 | var result1 = _conn.SendAsync(RequestFactory.CreateProduceRequest(IntegrationConfig.IntegrationTopic, "test"));
79 | var result2 = _conn.SendAsync(new MetadataRequest());
80 | var result3 = _conn.SendAsync(RequestFactory.CreateOffsetRequest(IntegrationConfig.IntegrationTopic));
81 | var result4 = _conn.SendAsync(RequestFactory.CreateFetchRequest(IntegrationConfig.IntegrationTopic, 0));
82 |
83 | Assert.That(result1.Result.Count, Is.EqualTo(1));
84 | Assert.That(result1.Result.First().Topic == IntegrationConfig.IntegrationTopic, Is.True, "ProduceRequest did not return expected topic.");
85 |
86 | Assert.That(result2.Result.Count, Is.GreaterThan(0));
87 | Assert.That(result2.Result.First().Topics.Any(x => x.Name == IntegrationConfig.IntegrationTopic), Is.True, "MetadataRequest did not return expected topic.");
88 |
89 | Assert.That(result3.Result.Count, Is.EqualTo(1));
90 | Assert.That(result3.Result.First().Topic == IntegrationConfig.IntegrationTopic, Is.True, "OffsetRequest did not return expected topic.");
91 |
92 | Assert.That(result4.Result.Count, Is.EqualTo(1));
93 | Assert.That(result4.Result.First().Topic == IntegrationConfig.IntegrationTopic, Is.True, "FetchRequest did not return expected topic.");
94 | }
95 | }
96 | }
--------------------------------------------------------------------------------
/src/kafka-tests/Integration/KafkaMetadataProviderUnitTests.cs:
--------------------------------------------------------------------------------
1 | using kafka_tests.Helpers;
2 | using KafkaNet;
3 | using KafkaNet.Model;
4 | using KafkaNet.Protocol;
5 | using NUnit.Framework;
6 | using System;
7 | using System.Linq;
8 | using System.Threading.Tasks;
9 |
10 | namespace kafka_tests.Integration
11 | {
12 | [TestFixture]
13 | [Category("Integration")]
14 | public class KafkaMetadataProviderUnitTests
15 | {
16 | private readonly KafkaOptions _options = new KafkaOptions(IntegrationConfig.IntegrationUri);
17 |
18 | private KafkaConnection GetKafkaConnection()
19 | {
20 | var endpoint = new DefaultKafkaConnectionFactory().Resolve(_options.KafkaServerUri.First(), _options.Log);
21 | return new KafkaConnection(new KafkaTcpSocket(new DefaultTraceLog(), endpoint, 5), _options.ResponseTimeoutMs, _options.Log);
22 | }
23 |
24 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
25 | [Ignore("Auto topic creation is disabled on our server")]
26 |
27 | public async Task NewlyCreatedTopicShouldRetryUntilBrokerIsAssigned()
28 | {
29 | var expectedTopic = Guid.NewGuid().ToString();
30 | var repo = new KafkaMetadataProvider(_options.Log);
31 | var response = repo.Get(new[] { GetKafkaConnection() }, new[] { expectedTopic });
32 | var topic = (await response).Topics.FirstOrDefault();
33 |
34 | Assert.That(topic, Is.Not.Null);
35 | Assert.That(topic.Name, Is.EqualTo(expectedTopic));
36 | Assert.That(topic.ErrorCode, Is.EqualTo((int)ErrorResponseCode.NoError));
37 | }
38 | }
39 | }
--------------------------------------------------------------------------------
/src/kafka-tests/Integration/ManualTesting.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet;
2 | using KafkaNet.Model;
3 | using KafkaNet.Protocol;
4 | using NUnit.Framework;
5 | using System;
6 | using System.Collections.Generic;
7 | using System.Linq;
8 | using System.Threading;
9 | using System.Threading.Tasks;
10 |
11 | namespace kafka_tests.Integration
12 | {
13 | [TestFixture]
14 | internal class ManualTesting
15 | {
16 | private readonly KafkaOptions _options = new KafkaOptions(new Uri("http://S1.com:9092"), new Uri("http://S2.com:9092"), new Uri("http://S3.com:9092")) { Log = new DefaultTraceLog(LogLevel.Warn) };
17 | public readonly DefaultTraceLog _log = new DefaultTraceLog(LogLevel.Debug);
18 |
19 | /// <summary>
20 | /// These tests are for manual runs. Stop the partition leader, then start it again and let it become the leader once more.
21 | /// </summary>
22 |
23 | [Test]
24 | [Ignore("manual test")]
25 | public void ConsumerFailure()
26 | {
27 | string topic = "TestTopicIssue13-2-3R-1P";
28 | using (var router = new BrokerRouter(_options))
29 | {
30 | var producer = new Producer(router);
31 | var offsets = producer.GetTopicOffsetAsync(topic).Result;
32 | var maxOffsets = offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray();
33 | var consumerOptions = new ConsumerOptions(topic, router) { PartitionWhitelist = new List<int>() { 0 }, MaxWaitTimeForMinimumBytes = TimeSpan.Zero };
34 |
35 | SendMessageForever(producer, topic);
36 | ReadMessageForever(consumerOptions, maxOffsets);
37 | }
38 | }
39 |
40 | [Test]
41 | [Ignore("manual test")]
42 | public async Task ManualConsumerFailure()
43 | {
44 | string topic = "TestTopicIssue13-3R-1P";
45 | var manualConsumer = new ManualConsumer(0, topic, new ProtocolGateway(_options), "test client", 10000);
46 | long offset = await manualConsumer.FetchLastOffset();
47 |
48 | var router = new BrokerRouter(_options);
49 | var producer = new Producer(router);
50 | SendMessageForever(producer, topic);
51 | await ReadMessageForever(manualConsumer, offset);
52 | }
53 |
54 |
55 |
56 | private void ReadMessageForever(ConsumerOptions consumerOptions, OffsetPosition[] maxOffsets)
57 | {
58 | using (var consumer = new Consumer(consumerOptions, maxOffsets))
59 | {
60 | var blockingEnumerableOfMessage = consumer.Consume();
61 | foreach (var message in blockingEnumerableOfMessage)
62 | {
63 | _log.InfoFormat("Offset{0}", message.Meta.Offset);
64 | }
65 | }
66 | }
67 |
68 | private void SendMessageForever(Producer producer, string topic)
69 | {
70 | Task.Run(() =>
71 | {
72 | int id = 0;
73 | while (true)
74 | {
75 | try
76 | {
77 | producer.SendMessageAsync(topic, new[] { new Message((++id).ToString()) }, partition: 0).Wait();
78 | Thread.Sleep(1000);
79 | }
80 | catch (Exception ex)
81 | {
82 | _log.InfoFormat("can't send:\n{0}", ex);
83 | }
84 | }
85 | });
86 | }
87 |
88 | private async Task ReadMessageForever(ManualConsumer manualConsumer, long offset)
89 | {
90 | while (true)
91 | {
92 | try
93 | {
94 | var messages = await manualConsumer.FetchMessages(1000, offset);
95 |
96 | if (messages.Any())
97 | {
98 | foreach (var message in messages)
99 | {
100 | _log.InfoFormat("Offset{0} ", message.Meta.Offset);
101 | }
102 | offset = messages.Max(x => x.Meta.Offset) + 1;
103 | }
104 | else
105 | {
106 | await Task.Delay(100);
107 | }
108 | }
109 | catch (Exception ex)
110 | {
111 | _log.InfoFormat("can't read:\n{0}", ex);
112 | }
113 | }
114 | }
115 | }
116 | }
--------------------------------------------------------------------------------
/src/kafka-tests/Integration/ProducerIntegrationTests.cs:
--------------------------------------------------------------------------------
1 | using kafka_tests.Helpers;
2 | using KafkaNet;
3 | using KafkaNet.Model;
4 | using KafkaNet.Protocol;
5 | using NUnit.Framework;
6 | using System;
7 | using System.Collections.Generic;
8 | using System.Linq;
9 | using System.Threading.Tasks;
10 |
11 | namespace kafka_tests.Integration
12 | {
13 | [TestFixture]
14 | [Category("Integration")]
15 | public class ProducerIntegrationTests
16 | {
17 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
18 | public void ProducerShouldNotExpectResponseWhenAckIsZero()
19 | {
20 | using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
21 | using (var producer = new Producer(router))
22 | {
23 | var sendTask = producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(Guid.NewGuid().ToString()) }, acks: 0);
24 |
25 | sendTask.Wait(TimeSpan.FromMinutes(2));
26 |
27 | Assert.That(sendTask.Status, Is.EqualTo(TaskStatus.RanToCompletion));
28 | }
29 | }
30 |
31 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
32 | public async Task SendAsyncShouldGetOneResultForMessage()
33 | {
34 | using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
35 | using (var producer = new Producer(router))
36 | {
37 | var result = await producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(Guid.NewGuid().ToString()) });
38 |
39 | Assert.That(result.Length, Is.EqualTo(1));
40 | }
41 | }
42 |
43 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
44 | public async Task SendAsyncShouldGetAResultForEachPartitionSentTo()
45 | {
46 | using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
47 | using (var producer = new Producer(router))
48 | {
49 | var messages = new[] { new Message("1"), new Message("2"), new Message("3") };
50 | var result = await producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, messages);
51 |
52 | Assert.That(result.Length, Is.EqualTo(messages.Distinct().Count()));
53 |
54 | Assert.That(result.Length, Is.EqualTo(messages.Count()));
55 | }
56 | }
57 |
58 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
59 | public async Task SendAsyncShouldGetOneResultForEachPartitionThroughBatching()
60 | {
61 | using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
62 | using (var producer = new Producer(router))
63 | {
64 | var tasks = new[]
65 | {
66 | producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] {new Message("1")}),
67 | producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] {new Message("1")}),
68 | producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] {new Message("1")}),
69 | };
70 |
71 | await Task.WhenAll(tasks);
72 |
73 | var result = tasks.SelectMany(x => x.Result).Distinct().ToList();
74 | Assert.That(result.Count, Is.EqualTo(tasks.Count()));
75 | }
76 | }
77 | }
78 | }
--------------------------------------------------------------------------------
/src/kafka-tests/Integration/ProtocolGatewayTest.cs:
--------------------------------------------------------------------------------
1 | using kafka_tests.Helpers;
2 | using KafkaNet;
3 | using KafkaNet.Common;
4 | using KafkaNet.Model;
5 | using KafkaNet.Protocol;
6 | using NUnit.Framework;
7 | using System;
8 | using System.Collections.Generic;
9 | using System.Linq;
10 | using System.Threading.Tasks;
11 |
12 | namespace kafka_tests.Integration
13 | {
14 | [TestFixture]
15 | [Category("Integration")]
16 | public class ProtocolGatewayTest
17 | {
18 | private readonly KafkaOptions Options = new KafkaOptions(IntegrationConfig.IntegrationUri);
19 |
20 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
21 | public async Task ProtocolGateway()
22 | {
23 | int partitionId = 0;
24 | var router = new BrokerRouter(Options);
25 |
26 | var producer = new Producer(router);
27 | string message1 = Guid.NewGuid().ToString();
28 | var response = await producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(message1) }, 1, null, MessageCodec.CodecNone, partitionId);
29 | var offset = response.FirstOrDefault().Offset;
30 |
31 | ProtocolGateway protocolGateway = new ProtocolGateway(IntegrationConfig.IntegrationUri);
32 | var fetch = new Fetch
33 | {
34 | Topic = IntegrationConfig.IntegrationTopic,
35 | PartitionId = partitionId,
36 | Offset = offset,
37 | MaxBytes = 32000,
38 | };
39 |
40 | var fetches = new List<Fetch> { fetch };
41 |
42 | var fetchRequest = new FetchRequest
43 | {
44 | MaxWaitTime = 1000,
45 | MinBytes = 10,
46 | Fetches = fetches
47 | };
48 |
49 | var r = await protocolGateway.SendProtocolRequest(fetchRequest, IntegrationConfig.IntegrationTopic, partitionId);
50 | // var r1 = await protocolGateway.SendProtocolRequest(fetchRequest, IntegrationConfig.IntegrationTopic, partitionId);
51 | Assert.IsTrue(r.Messages.FirstOrDefault().Value.ToUtf8String() == message1);
52 | }
53 | }
54 | }
--------------------------------------------------------------------------------
/src/kafka-tests/Properties/AssemblyInfo.cs:
--------------------------------------------------------------------------------
1 | using System.Reflection;
2 | using System.Runtime.InteropServices;
3 |
4 | // General Information about an assembly is controlled through the following
5 | // set of attributes. Change these attribute values to modify the information
6 | // associated with an assembly.
7 | [assembly: AssemblyTitle("kafka-tests")]
8 | [assembly: AssemblyDescription("")]
9 | [assembly: AssemblyConfiguration("")]
10 | [assembly: AssemblyCompany("TP")]
11 | [assembly: AssemblyProduct("kafka-tests")]
12 | [assembly: AssemblyCopyright("Copyright © James Roland 2014")]
13 | [assembly: AssemblyTrademark("")]
14 | [assembly: AssemblyCulture("")]
15 |
16 | // Setting ComVisible to false makes the types in this assembly not visible
17 | // to COM components. If you need to access a type in this assembly from
18 | // COM, set the ComVisible attribute to true on that type.
19 | [assembly: ComVisible(false)]
20 |
21 | // The following GUID is for the ID of the typelib if this project is exposed to COM
22 | [assembly: Guid("8a305115-586a-43d0-944b-9c7230ab6796")]
23 |
24 | // Version information for an assembly consists of the following four values:
25 | //
26 | // Major Version
27 | // Minor Version
28 | // Build Number
29 | // Revision
30 | //
31 | // You can specify all the values or you can default the Build and Revision Numbers
32 | // by using the '*' as shown below:
33 | // [assembly: AssemblyVersion("1.0.*")]
34 | [assembly: AssemblyVersion("1.0.0.0")]
35 | [assembly: AssemblyFileVersion("1.0.0.0")]
--------------------------------------------------------------------------------
/src/kafka-tests/RequestFactory.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet.Protocol;
2 | using System.Collections.Generic;
3 |
4 | namespace kafka_tests
5 | {
6 | public static class RequestFactory
7 | {
8 | public static ProduceRequest CreateProduceRequest(string topic, string message, string key = null)
9 | {
10 | return new ProduceRequest
11 | {
12 | Payload = new List<Payload>(new[]
13 | {
14 | new Payload
15 | {
16 | Topic = topic,
17 | Messages = new List<Message>(new[] {new Message(message, key)})
18 | }
19 | })
20 | };
21 | }
22 |
23 | public static FetchRequest CreateFetchRequest(string topic, int offset, int partitionId = 0)
24 | {
25 | return new FetchRequest
26 | {
27 | CorrelationId = 1,
28 | Fetches = new List<Fetch>(new[]
29 | {
30 | new Fetch
31 | {
32 | Topic = topic,
33 | PartitionId = partitionId,
34 | Offset = offset
35 | }
36 | })
37 | };
38 | }
39 |
40 | public static OffsetRequest CreateOffsetRequest(string topic, int partitionId = 0, int maxOffsets = 1, int time = -1)
41 | {
42 | return new OffsetRequest
43 | {
44 | CorrelationId = 1,
45 | Offsets = new List<Offset>(new[]
46 | {
47 | new Offset
48 | {
49 | Topic = topic,
50 | PartitionId = partitionId,
51 | MaxOffsets = maxOffsets,
52 | Time = time
53 | }
54 | })
55 | };
56 | }
57 |
58 | public static OffsetFetchRequest CreateOffsetFetchRequest(string topic, int partitionId = 0)
59 | {
60 | return new OffsetFetchRequest
61 | {
62 | ConsumerGroup = "DefaultGroup",
63 | Topics = new List<OffsetFetch>(new[]
64 | {
65 | new OffsetFetch
66 | {
67 | Topic = topic,
68 | PartitionId = partitionId
69 | }
70 | })
71 | };
72 | }
73 | }
74 | }
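
These factory methods pair with KafkaConnection.SendAsync in the integration tests above; a typical call, assuming a connected KafkaConnection named _conn as in those fixtures (the topic name is illustrative):

    var produce = await _conn.SendAsync(RequestFactory.CreateProduceRequest("demo-topic", "test message"));
    Assert.That(produce.First().Topic, Is.EqualTo("demo-topic"));

    var fetch = await _conn.SendAsync(RequestFactory.CreateFetchRequest("demo-topic", offset: 0));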
--------------------------------------------------------------------------------
/src/kafka-tests/Unit/AsyncLockTests.cs:
--------------------------------------------------------------------------------
1 | using kafka_tests.Helpers;
2 | using KafkaNet.Common;
3 | using NUnit.Framework;
4 | using System;
5 | using System.Collections.Generic;
6 | using System.Net.Http;
7 | using System.Threading;
8 | using System.Threading.Tasks;
9 |
10 | namespace kafka_tests.Unit
11 | {
12 | [TestFixture]
13 | [Category("Unit")]
14 | public class AsyncLockTests
15 | {
16 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
17 | [ExpectedException(typeof(OperationCanceledException))]
18 | public async Task AsyncLockCancelShouldThrowOperationCanceledException()
19 | {
20 | var count = 0;
21 | var token = new CancellationTokenSource(TimeSpan.FromMilliseconds(10));
22 | var alock = new AsyncLock();
23 |
24 | for (int i = 0; i < 2; i++)
25 | {
26 | //the second call will timeout
27 | using (await alock.LockAsync(token.Token))
28 | {
29 | Interlocked.Increment(ref count);
30 | Thread.Sleep(100);
31 | }
32 | }
33 | Assert.That(count, Is.EqualTo(1), "Only the first call should succeed. The second should timeout.");
34 | }
35 |
36 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
37 | public async Task AsyncLockCancelShouldNotAllowInsideLock()
38 | {
39 | var count = 0;
40 | var token = new CancellationTokenSource(TimeSpan.FromMilliseconds(10));
41 | var alock = new AsyncLock();
42 |
43 | try
44 | {
45 | for (int i = 0; i < 2; i++)
46 | {
47 | //the second call will timeout
48 | using (await alock.LockAsync(token.Token))
49 | {
50 | Interlocked.Increment(ref count);
51 | Thread.Sleep(100);
52 | }
53 | }
54 | }
55 | catch
56 | {
57 | }
58 |
59 | Assert.That(count, Is.EqualTo(1));
60 | }
61 |
62 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
63 | public void AsyncLockShouldAllowMultipleStackedWaits()
64 | {
65 | var count = 0;
66 | var alock = new AsyncLock();
67 | var locks = new List<Task<IDisposable>>();
68 | for (int i = 0; i < 1000; i++)
69 | {
70 | var task = alock.LockAsync();
71 | task.ContinueWith(t => Interlocked.Increment(ref count));
72 | locks.Add(task);
73 | }
74 |
75 | for (int i = 0; i < 100; i++)
76 | {
77 | using (locks[i].Result)
78 | {
79 | Thread.Sleep(10);
80 | Assert.That(count, Is.EqualTo(i + 1));
81 | }
82 | }
83 | }
84 |
85 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
86 | public async Task AsyncLockShouldAllowOnlyOneThread()
87 | {
88 | var block = new SemaphoreSlim(0, 2);
89 | var count = 0;
90 | var alock = new AsyncLock();
91 |
92 | var firstCall = Task.Run(async () =>
93 | {
94 | using (await alock.LockAsync())
95 | {
96 | Interlocked.Increment(ref count);
97 | block.Wait();
98 | }
99 | block.Wait();//keep this thread busy
100 | });
101 |
102 | await TaskTest.WaitFor(() => count > 0);
103 |
104 | alock.LockAsync().ContinueWith(t => Interlocked.Increment(ref count));
105 |
106 | Assert.That(count, Is.EqualTo(1), "Only one task should have gotten past lock.");
107 | Assert.That(firstCall.IsCompleted, Is.False, "Task should still be running.");
108 |
109 | block.Release();
110 | await TaskTest.WaitFor(() => count > 1);
111 | Assert.That(count, Is.EqualTo(2), "Second call should get past lock.");
112 | Assert.That(firstCall.IsCompleted, Is.False, "First call should still be busy.");
113 | block.Release();
114 | }
115 |
116 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
117 | public async Task AsyncLockShouldUnlockEvenFromDifferentThreads()
118 | {
119 | var block = new SemaphoreSlim(0, 2);
120 | var count = 0;
121 | var alock = new AsyncLock();
122 |
123 | Task.Factory.StartNew(async () =>
124 | {
125 | using (await alock.LockAsync().ConfigureAwait(false))
126 | {
127 | Console.WriteLine("Enter lock id: {0}", Thread.CurrentThread.ManagedThreadId);
128 | Interlocked.Increment(ref count);
129 | await ExternalThread();
130 | await block.WaitAsync();
131 | Console.WriteLine("Exit lock id: {0}", Thread.CurrentThread.ManagedThreadId);
132 | }
133 | });
134 |
135 | await TaskTest.WaitFor(() => count > 0);
136 |
137 | Task.Factory.StartNew(async () =>
138 | {
139 | Console.WriteLine("Second call waiting Id:{0}", Thread.CurrentThread.ManagedThreadId);
140 | using (await alock.LockAsync().ConfigureAwait(false))
141 | {
142 | Console.WriteLine("Past lock Id:{0}", Thread.CurrentThread.ManagedThreadId);
143 | Interlocked.Increment(ref count);
144 | }
145 | });
146 |
147 | Assert.That(count, Is.EqualTo(1), "Only one task should have gotten past lock.");
148 |
149 | block.Release();
150 | await TaskTest.WaitFor(() => count > 1);
151 | Assert.That(count, Is.EqualTo(2), "Second call should get past lock.");
152 | }
153 |
154 | private async Task ExternalThread()
155 | {
156 | var client = new HttpClient();
157 | await client.GetAsync("http://www.google.com");
158 | Thread.Sleep(1000);
159 | }
160 | }
161 | }
--------------------------------------------------------------------------------
/src/kafka-tests/Unit/BinaryFormatterSerializationTests.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet.Model;
2 | using KafkaNet.Protocol;
3 | using NUnit.Framework;
4 | using System;
5 | using System.IO;
6 | using System.Net;
7 | using System.Runtime.Serialization.Formatters.Binary;
8 |
9 | namespace kafka_tests.Unit
10 | {
11 | [TestFixture]
12 | public class BinaryFormatterSerializationTests
13 | {
14 | [Test]
15 | public void ShouldSerializeInvalidTopicMetadataException()
16 | {
17 | var expected = new InvalidTopicMetadataException(ErrorResponseCode.RequestTimedOut, "blblb");
18 | var actual = SerializeDeserialize(expected);
19 |
20 | Assert.AreEqual(expected.ErrorResponseCode, actual.ErrorResponseCode);
21 | }
22 |
23 | [Test]
24 | public void ShouldSerializeBufferUnderRunException()
25 | {
26 | var expected = new BufferUnderRunException(44,44);
27 | var actual = SerializeDeserialize(expected);
28 |
29 | Assert.AreEqual(expected.MessageHeaderSize, actual.MessageHeaderSize);
30 | Assert.AreEqual(expected.RequiredBufferSize, actual.RequiredBufferSize);
31 | }
32 |
33 | [Test]
34 | public void ShouldSerializeOffsetOutOfRangeException()
35 | {
36 | var expected = new OffsetOutOfRangeException("a"){FetchRequest = new Fetch(){MaxBytes = 1,Topic = "aa",Offset = 2,PartitionId = 3}};
37 | var actual = SerializeDeserialize(expected);
38 |
39 | Assert.AreEqual(expected.FetchRequest.MaxBytes, actual.FetchRequest.MaxBytes);
40 | Assert.AreEqual(expected.FetchRequest.Offset, actual.FetchRequest.Offset);
41 | Assert.AreEqual(expected.FetchRequest.PartitionId, actual.FetchRequest.PartitionId);
42 | Assert.AreEqual(expected.FetchRequest.Topic, actual.FetchRequest.Topic);
43 | }
44 |
45 | [Test]
46 | public void ShouldSerializeOffsetOutOfRangeExceptionNull()
47 | {
48 | var expected = new OffsetOutOfRangeException("a") {FetchRequest = null};
49 | var actual = SerializeDeserialize(expected);
50 |
51 | Assert.AreEqual(expected.FetchRequest, actual.FetchRequest);
52 | }
53 |
54 | [Test]
55 | public void ShouldSerializeOffsetKafkaEndpointInnerObjectAreNull()
56 | {
57 | var expected = new BrokerException("a",new KafkaEndpoint());
58 | var actual = SerializeDeserialize(expected);
59 |
60 | Assert.AreEqual(expected.BrokerEndPoint.ServeUri, actual.BrokerEndPoint.ServeUri);
61 | Assert.AreEqual(expected.BrokerEndPoint.Endpoint, actual.BrokerEndPoint.Endpoint);
62 | }
63 |
64 | [Test]
65 | public void ShouldSerializeOffsetKafkaEndpoint()
66 | {
67 | var expected = new BrokerException("a", new KafkaEndpoint() {Endpoint = new IPEndPoint(IPAddress.Parse("127.0.0.1"), 8888),ServeUri = new Uri("http://S1.com")});
68 | var actual = SerializeDeserialize(expected);
69 |
70 | Assert.AreEqual(expected.BrokerEndPoint.ServeUri, actual.BrokerEndPoint.ServeUri);
71 | Assert.AreEqual(expected.BrokerEndPoint.Endpoint, actual.BrokerEndPoint.Endpoint);
72 | }
73 |
74 | [Test]
75 | public void ShouldSerializeOffsetKafkaEndpointNull()
76 | {
77 | var expected = new BrokerException("a", null);
78 | var actual = SerializeDeserialize(expected);
79 |
80 | Assert.AreEqual(expected.BrokerEndPoint, actual.BrokerEndPoint);
81 | }
82 |
83 | [Test]
84 | public void ShouldSerializeKafkaApplicationException()
85 | {
86 | var expected = new KafkaApplicationException("3"){ErrorCode = 1};
87 | var actual = SerializeDeserialize(expected);
88 |
89 | Assert.AreEqual(expected.ErrorCode, actual.ErrorCode);
90 | }
91 |
92 | private static T SerializeDeserialize<T>(T expected)
93 | {
94 | var formatter = new BinaryFormatter();
95 | MemoryStream memoryStream = new MemoryStream();
96 | formatter.Serialize(memoryStream, expected);
97 | memoryStream.Seek(0, SeekOrigin.Begin);
98 |
99 | var actual = (T)formatter.Deserialize(memoryStream);
100 | return actual;
101 | }
102 | }
103 | }
--------------------------------------------------------------------------------
/src/kafka-tests/Unit/CircularBufferTests.cs:
--------------------------------------------------------------------------------
1 | using kafka_tests.Helpers;
2 | using KafkaNet.Common;
3 | using NUnit.Framework;
4 | using System.Linq;
5 | using System.Threading.Tasks;
6 |
7 | namespace kafka_tests.Unit
8 | {
9 | [TestFixture]
10 | [Category("Unit")]
11 | public class CircularBufferTests
12 | {
13 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
14 | public void BufferShouldOnlyStoreMaxAmount()
15 | {
16 | var buffer = new ConcurrentCircularBuffer<int>(2);
17 |
18 | for (int i = 0; i < 10; i++)
19 | {
20 | buffer.Enqueue(i);
21 | }
22 |
23 | Assert.That(buffer.Count, Is.EqualTo(2));
24 | }
25 |
26 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
27 | public void BufferShouldCountUntilMaxHitThenAlwaysShowMax()
28 | {
29 | var buffer = new ConcurrentCircularBuffer<int>(2);
30 |
31 | Assert.That(buffer.Count, Is.EqualTo(0));
32 | buffer.Enqueue(1);
33 | Assert.That(buffer.Count, Is.EqualTo(1));
34 | buffer.Enqueue(1);
35 | Assert.That(buffer.Count, Is.EqualTo(2));
36 | buffer.Enqueue(1);
37 | Assert.That(buffer.Count, Is.EqualTo(2));
38 | }
39 |
40 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
41 | public void BufferMaxSizeShouldReportMax()
42 | {
43 | var buffer = new ConcurrentCircularBuffer<int>(2);
44 |
45 | Assert.That(buffer.MaxSize, Is.EqualTo(2));
46 | buffer.Enqueue(1);
47 | Assert.That(buffer.MaxSize, Is.EqualTo(2));
48 | }
49 |
50 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
51 | public void EnumerationShouldReturnOnlyRecordsWithData()
52 | {
53 | var buffer = new ConcurrentCircularBuffer<int>(2);
54 | Assert.That(buffer.ToList().Count, Is.EqualTo(0));
55 |
56 | buffer.Enqueue(1);
57 | Assert.That(buffer.ToList().Count, Is.EqualTo(1));
58 |
59 | buffer.Enqueue(1);
60 | buffer.Enqueue(1);
61 | Assert.That(buffer.ToList().Count, Is.EqualTo(2));
62 | }
63 |
64 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
65 | public void EnqueueShouldAddToFirstSlot()
66 | {
67 | var buffer = new ConcurrentCircularBuffer<int>(2);
68 | buffer.Enqueue(1);
69 | Assert.That(buffer.First(), Is.EqualTo(1));
70 | }
71 |
72 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
73 | public void EnqueueCanBeUsedFromDifferentThreads()
74 | {
75 | var buffer = new ConcurrentCircularBuffer<int>(2);
76 |
77 | Parallel.For(0, 1000, (i) =>
78 | {
79 | buffer.Enqueue(i);
80 | Assert.That(buffer.Count, Is.LessThanOrEqualTo(2));
81 | });
82 | Assert.That(buffer.Count, Is.EqualTo(2));
83 | }
84 | }
85 | }
--------------------------------------------------------------------------------
/src/kafka-tests/Unit/FakeTcpServerTests.cs:
--------------------------------------------------------------------------------
1 | using kafka_tests.Fakes;
2 | using kafka_tests.Helpers;
3 | using KafkaNet;
4 | using KafkaNet.Common;
5 | using NUnit.Framework;
6 | using System;
7 | using System.Net.Sockets;
8 | using System.Threading;
9 | using System.Threading.Tasks;
10 |
11 | namespace kafka_tests.Unit
12 | {
13 | [TestFixture]
14 | [Category("unit")]
15 | public class FakeTcpServerTests
16 | {
17 | private readonly Uri _fakeServerUrl;
18 | private IKafkaLog Ilog = new DefaultTraceLog(LogLevel.Warn);
19 |
20 | public FakeTcpServerTests()
21 | {
22 | _fakeServerUrl = new Uri("http://localhost:8999");
23 | }
24 |
25 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
26 | public async Task FakeShouldBeAbleToReconnect()
27 | {
28 | using (var server = new FakeTcpServer(Ilog, 8999))
29 | {
30 | byte[] received = null;
31 | server.OnBytesReceived += data => received = data;
32 |
33 | var t1 = new TcpClient();
34 | t1.Connect(_fakeServerUrl.Host, _fakeServerUrl.Port);
35 | await TaskTest.WaitFor(() => server.ConnectionEventcount == 1);
36 |
37 | server.DropConnection();
38 | await TaskTest.WaitFor(() => server.DisconnectionEventCount == 1);
39 |
40 | var t2 = new TcpClient();
41 | t2.Connect(_fakeServerUrl.Host, _fakeServerUrl.Port);
42 | await TaskTest.WaitFor(() => server.ConnectionEventcount == 2);
43 |
44 | t2.GetStream().Write(99.ToBytes(), 0, 4);
45 | await TaskTest.WaitFor(() => received != null);
46 |
47 | Assert.That(received.ToInt32(), Is.EqualTo(99));
48 | }
49 | }
50 |
51 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
52 | public void ShouldDisposeEvenWhenTryingToSendWithoutExceptionThrown()
53 | {
54 | using (var server = new FakeTcpServer(Ilog, 8999))
55 | {
56 | server.SendDataAsync("test");
57 | Thread.Sleep(500);
58 | }
59 | }
60 |
61 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
62 | public void ShouldDisposeWithoutExceptionThrown()
63 | {
64 | using (var server = new FakeTcpServer(Ilog, 8999))
65 | {
66 | Thread.Sleep(500);
67 | }
68 | }
69 |
70 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
71 | public void SendAsyncShouldWaitUntilClientIsConnected()
72 | {
73 | const int testData = 99;
74 | using (var server = new FakeTcpServer(Ilog, 8999))
75 | using (var client = new TcpClient())
76 | {
77 | server.SendDataAsync(testData.ToBytes());
78 | Thread.Sleep(1000);
79 | client.Connect(_fakeServerUrl.Host, _fakeServerUrl.Port);
80 |
81 | var buffer = new byte[4];
82 | client.GetStream().ReadAsync(buffer, 0, 4).Wait(TimeSpan.FromSeconds(5));
83 |
84 | Assert.That(buffer.ToInt32(), Is.EqualTo(testData));
85 | }
86 | }
87 | }
88 | }
--------------------------------------------------------------------------------
/src/kafka-tests/Unit/KafkaEndpointTests.cs:
--------------------------------------------------------------------------------
1 | using kafka_tests.Helpers;
2 | using KafkaNet;
3 | using NUnit.Framework;
4 | using System;
5 | using System.Net;
6 |
7 | namespace kafka_tests.Unit
8 | {
9 | [TestFixture]
10 | public class KafkaEndpointTests
11 | {
12 | private IKafkaLog _log = new DefaultTraceLog(LogLevel.Warn);
13 |
14 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
15 | public void EnsureEndpointCanBeResolved()
16 | {
17 | var expected = IPAddress.Parse("127.0.0.1");
18 | var endpoint = new DefaultKafkaConnectionFactory().Resolve(new Uri("http://localhost:8888"), _log);
19 | Assert.That(endpoint.Endpoint.Address, Is.EqualTo(expected));
20 | Assert.That(endpoint.Endpoint.Port, Is.EqualTo(8888));
21 | }
22 |
23 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
24 | public void EnsureTwoEndpointsNotOfTheSameReferenceButSameIPAreEqual()
25 | {
26 | var endpoint1 = new DefaultKafkaConnectionFactory().Resolve(new Uri("http://localhost:8888"), _log);
27 | var endpoint2 = new DefaultKafkaConnectionFactory().Resolve(new Uri("http://localhost:8888"), _log);
28 |
29 | Assert.That(ReferenceEquals(endpoint1, endpoint2), Is.False, "Should not be the same reference.");
30 | Assert.That(endpoint1, Is.EqualTo(endpoint2));
31 | }
32 |
33 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
34 | public void EnsureTwoEndpointsWithSameIPButDifferentPortsAreNotEqual()
35 | {
36 | var endpoint1 = new DefaultKafkaConnectionFactory().Resolve(new Uri("http://localhost:8888"), _log);
37 | var endpoint2 = new DefaultKafkaConnectionFactory().Resolve(new Uri("http://localhost:1"), _log);
38 |
39 | Assert.That(endpoint1, Is.Not.EqualTo(endpoint2));
40 | }
41 | }
42 | }
--------------------------------------------------------------------------------
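Both equality tests above only pass if KafkaEndpoint implements value equality over the resolved address and port rather than reference equality. A hedged sketch of that pattern; the property name mirrors the tests, but the real KafkaEndpoint may differ in detail:

    using System.Net;

    internal class EndpointEqualitySketch
    {
        public IPEndPoint Endpoint { get; set; }

        // Two instances are equal when their resolved IP endpoints are equal.
        public override bool Equals(object obj)
        {
            var other = obj as EndpointEqualitySketch;
            return other != null && Equals(Endpoint, other.Endpoint);
        }

        public override int GetHashCode()
        {
            return Endpoint != null ? Endpoint.GetHashCode() : 0;
        }
    }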
/src/kafka-tests/Unit/MetadataQueriesTests.cs:
--------------------------------------------------------------------------------
1 | using kafka_tests.Helpers;
2 | using KafkaNet;
3 | using KafkaNet.Protocol;
4 | using Moq;
5 | using Ninject.MockingKernel.Moq;
6 | using NUnit.Framework;
7 | using System;
8 | using System.Threading.Tasks;
9 |
10 | namespace kafka_tests.Unit
11 | {
12 | [TestFixture]
13 | [Category("Unit")]
14 | public class MetadataQueriesTests
15 | {
16 | private MoqMockingKernel _kernel;
17 |
18 | [SetUp]
19 | public void Setup()
20 | {
21 | _kernel = new MoqMockingKernel();
22 | }
23 |
24 | #region GetTopicOffset Tests...
25 |
26 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
27 | public void GetTopicOffsetShouldQueryEachBroker()
28 | {
29 | var routerProxy = new BrokerRouterProxy(_kernel);
30 | var router = routerProxy.Create();
31 | var common = new MetadataQueries(router);
32 |
33 | var result = common.GetTopicOffsetAsync(BrokerRouterProxy.TestTopic).Result;
34 | Assert.That(routerProxy.BrokerConn0.OffsetRequestCallCount, Is.EqualTo(1));
35 | Assert.That(routerProxy.BrokerConn1.OffsetRequestCallCount, Is.EqualTo(1));
36 | }
37 |
38 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
39 | public void GetTopicOffsetShouldPropagateAnyException()
40 | {
41 | var routerProxy = new BrokerRouterProxy(_kernel);
42 | routerProxy.BrokerConn0.OffsetResponseFunction = () => { throw new ApplicationException("test 99"); };
43 | var router = routerProxy.Create();
44 | var common = new MetadataQueries(router);
45 |
46 | common.GetTopicOffsetAsync(BrokerRouterProxy.TestTopic).ContinueWith(t =>
47 | {
48 | Assert.That(t.IsFaulted, Is.True);
49 | Assert.That(t.Exception.Flatten().ToString(), Is.StringContaining("test 99"));
50 | }).Wait();
51 | }
52 |
53 | #endregion GetTopicOffset Tests...
54 |
55 | #region GetTopic Tests...
56 |
57 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
58 | public async Task GetTopicShouldReturnTopic()
59 | {
60 | var routerProxy = new BrokerRouterProxy(_kernel);
61 | var router = routerProxy.Create();
62 | await router.RefreshMissingTopicMetadata(BrokerRouterProxy.TestTopic);
63 | var common = new MetadataQueries(router);
64 |
65 | var result = common.GetTopicFromCache(BrokerRouterProxy.TestTopic);
66 | Assert.That(result.Name, Is.EqualTo(BrokerRouterProxy.TestTopic));
67 | }
68 |
69 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
70 | [ExpectedException(typeof(InvalidTopicNotExistsInCache))]
71 | public void EmptyTopicMetadataShouldThrowException()
72 | {
73 | var routerProxy = new BrokerRouterProxy(_kernel);
74 | var router = routerProxy.Create();
75 | var common = new MetadataQueries(router);
76 |
77 | common.GetTopicFromCache("MissingTopic");
78 | }
79 |
80 | #endregion GetTopic Tests...
81 |
82 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
83 | public void EnsureCommonQueriesDisposesRouter()
84 | {
85 | var router = _kernel.GetMock<IBrokerRouter>();
86 | var common = new MetadataQueries(router.Object);
87 | using (common) { }
88 | router.Verify(x => x.Dispose(), Times.Once());
89 | }
90 | }
91 | }
--------------------------------------------------------------------------------
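For orientation, a minimal usage sketch of the MetadataQueries calls exercised above, run against a real router instead of the mocked proxy. The broker URI and topic name are placeholders, and the loop assumes OffsetResponse exposes PartitionId and Offsets as in the protocol model:

    using System;
    using KafkaNet;
    using KafkaNet.Model;

    internal static class MetadataQueriesSketch
    {
        private static void Main()
        {
            var options = new KafkaOptions(new Uri("http://localhost:9092"));
            var router = new BrokerRouter(options);

            // Disposing the queries wrapper also disposes the router,
            // as EnsureCommonQueriesDisposesRouter verifies above.
            using (var queries = new MetadataQueries(router))
            {
                var offsets = queries.GetTopicOffsetAsync("TestTopic").Result;
                foreach (var offset in offsets)
                    Console.WriteLine("partition {0}: {1}", offset.PartitionId, string.Join(",", offset.Offsets));
            }
        }
    }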
/src/kafka-tests/Unit/ProtocolBaseRequestTests.cs:
--------------------------------------------------------------------------------
1 | using kafka_tests.Helpers;
2 | using KafkaNet.Protocol;
3 | using NUnit.Framework;
4 |
5 | namespace kafka_tests.Unit
6 | {
7 | [TestFixture]
8 | [Category("Unit")]
9 | public class ProtocolBaseRequestTests
10 | {
11 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
12 | public void EnsureHeaderShouldPackCorrectByteLengths()
13 | {
14 | var result = BaseRequest.EncodeHeader(new FetchRequest { ClientId = "test", CorrelationId = 123456789 }).PayloadNoLength();
15 |
16 | Assert.That(result.Length, Is.EqualTo(14));
17 | Assert.That(result, Is.EqualTo(new byte[] { 0, 1, 0, 0, 7, 91, 205, 21, 0, 4, 116, 101, 115, 116 }));
18 | }
19 | }
20 | }
--------------------------------------------------------------------------------
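For reference, the 14 asserted bytes decode as the standard Kafka request header; this breakdown is derived directly from the values in the assertion above:

    // 0, 1                -> ApiKey (Int16)          = 1 (Fetch)
    // 0, 0                -> ApiVersion (Int16)      = 0
    // 7, 91, 205, 21      -> CorrelationId (Int32)   = 123456789 (0x075BCD15)
    // 0, 4                -> ClientId length (Int16) = 4
    // 116, 101, 115, 116  -> ClientId bytes          = "test"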
/src/kafka-tests/Unit/ProtocolMessageTests.cs:
--------------------------------------------------------------------------------
1 | using kafka_tests.Helpers;
2 | using KafkaNet.Common;
3 | using KafkaNet.Protocol;
4 | using NUnit.Framework;
5 | using System;
6 | using System.IO;
7 | using System.Linq;
8 | using System.Text;
9 |
10 | namespace kafka_tests.Unit
11 | {
12 | [TestFixture]
13 | [Category("Unit")]
14 | public class ProtocolMessageTests
15 | {
16 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
17 | [ExpectedException(typeof(FailCrcCheckException))]
18 | public void DecodeMessageShouldThrowWhenCrcFails()
19 | {
20 | var testMessage = new Message(value: "kafka test message.", key: "test");
21 |
22 | var encoded = Message.EncodeMessage(testMessage);
23 | encoded[0] += 1; // corrupt the first CRC byte so the checksum check fails
24 | var result = Message.DecodeMessage(0, encoded).First();
25 | }
26 |
27 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
28 | [TestCase("test key", "test message")]
29 | [TestCase(null, "test message")]
30 | [TestCase("test key", null)]
31 | [TestCase(null, null)]
32 | public void EnsureMessageEncodeAndDecodeAreCompatible(string key, string value)
33 | {
34 | var testMessage = new Message(key: key, value: value);
35 |
36 | var encoded = Message.EncodeMessage(testMessage);
37 | var result = Message.DecodeMessage(0, encoded).First();
38 |
39 | Assert.That(testMessage.Key, Is.EqualTo(result.Key));
40 | Assert.That(testMessage.Value, Is.EqualTo(result.Value));
41 | }
42 |
43 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
44 | public void EncodeMessageSetEncodesMultipleMessages()
45 | {
46 | // Expected bytes generated from the python client library
47 | var expected = new byte[]
48 | {
49 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 45, 70, 24, 62, 0, 0, 0, 0, 0, 1, 49, 0, 0, 0, 1, 48, 0, 0, 0,
50 | 0, 0, 0, 0, 0, 0, 0, 0, 16, 90, 65, 40, 168, 0, 0, 0, 0, 0, 1, 49, 0, 0, 0, 1, 49, 0, 0, 0, 0, 0, 0,
51 | 0, 0, 0, 0, 0, 16, 195, 72, 121, 18, 0, 0, 0, 0, 0, 1, 49, 0, 0, 0, 1, 50
52 | };
53 |
54 | var messages = new[]
55 | {
56 | new Message("0", "1"),
57 | new Message("1", "1"),
58 | new Message("2", "1")
59 | };
60 |
61 | var result = Message.EncodeMessageSet(messages);
62 |
63 | Assert.That(result, Is.EqualTo(expected));
64 | }
65 |
66 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
67 | public void DecodeMessageSetShouldHandleResponseWithMaxBufferSizeHit()
68 | {
69 | // This message set has a truncated message at the end of it
70 | var result = Message.DecodeMessageSet(MessageHelper.FetchResponseMaxBytesOverflow).ToList();
71 |
72 | var message = Encoding.UTF8.GetString(result.First().Value);
73 |
74 | Assert.That(message, Is.EqualTo("test"));
75 | Assert.That(result.Count, Is.EqualTo(529));
76 | }
77 |
78 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
79 | public void WhenMessageIsTruncatedThenBufferUnderRunExceptionIsThrown()
80 | {
81 | // arrange
82 | var offset = (Int64)0;
83 | var message = new Byte[] { };
84 | var messageSize = message.Length + 1; // declare one byte more than the payload contains
85 | var memoryStream = new MemoryStream();
86 | var binaryWriter = new BigEndianBinaryWriter(memoryStream);
87 | binaryWriter.Write(offset);
88 | binaryWriter.Write(messageSize);
89 | binaryWriter.Write(message);
90 | var payloadBytes = memoryStream.ToArray();
91 |
92 | // act/assert
93 | Assert.Throws<BufferUnderRunException>(() => Message.DecodeMessageSet(payloadBytes).ToList());
94 | }
95 |
96 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
97 | public void WhenMessageIsExactlyTheSizeOfBufferThenMessageIsDecoded()
98 | {
99 | // arrange
100 | var expectedPayloadBytes = new Byte[] { 1, 2, 3, 4 };
101 | var payload = MessageHelper.CreateMessage(0, new Byte[] { 0 }, expectedPayloadBytes);
102 |
103 | // act
104 | var messages = Message.DecodeMessageSet(payload).ToList();
105 | var actualPayload = messages.First().Value;
106 |
107 | // assert
108 | var expectedPayload = new Byte[] { 1, 2, 3, 4 };
109 | CollectionAssert.AreEqual(expectedPayload, actualPayload);
110 | }
111 | }
112 | }
--------------------------------------------------------------------------------
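The truncation test above builds its payload by hand, and the framing it writes matches the Kafka message-set entry layout, which is why claiming one extra byte must force an underrun:

    // Kafka message-set entry framing (big-endian), as written by the test:
    //   Offset      : Int64 (8 bytes)
    //   MessageSize : Int32 (4 bytes)
    //   Message     : MessageSize bytes (CRC, magic byte, attributes, key, value)
    // Writing MessageSize = message.Length + 1 promises one more byte than the
    // stream contains, so decoding fails with BufferUnderRunException instead
    // of silently returning a partial message.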
/src/kafka-tests/Unit/ProtocolTests.cs:
--------------------------------------------------------------------------------
1 | using kafka_tests.Helpers;
2 | using KafkaNet.Protocol;
3 | using NUnit.Framework;
4 | using System.Linq;
5 |
6 | namespace kafka_tests.Unit
7 | {
8 | [TestFixture]
9 | [Category("Unit")]
10 | public class ProtocolTests
11 | {
12 | [Test, Repeat(IntegrationConfig.NumberOfRepeat)]
13 | public void MetadataResponseShouldDecode()
14 | {
15 | var request = new MetadataRequest();
16 | var response = request.Decode(MessageHelper.CreateMetadataResponse(1, "Test").Skip(4).ToArray()).First();
17 |
18 | Assert.That(response.CorrelationId, Is.EqualTo(1));
19 | Assert.That(response.Topics[0].Name, Is.EqualTo("Test"));
20 | }
21 | }
22 | }
--------------------------------------------------------------------------------
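One detail worth noting in the test above: Kafka responses are length-prefixed on the wire, which is what the Skip(4) accounts for:

    // CreateMetadataResponse builds a wire-format buffer whose first four bytes
    // are the Int32 length prefix; Skip(4) strips that prefix because
    // MetadataRequest.Decode consumes only the payload that follows it.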
/src/kafka-tests/packages.config:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
--------------------------------------------------------------------------------