├── .gitignore
├── CODE_OF_CONDUCT.md
├── LICENSE
├── README.md
├── kafka-net-core.sln
└── kafka-net-core
    ├── BrokerRouter.cs
    ├── Common
    │   ├── AsyncCollection.cs
    │   ├── AsyncLock.cs
    │   ├── AsyncManualResetEvent.cs
    │   ├── BigEndianBinaryReader.cs
    │   ├── BigEndianBinaryWriter.cs
    │   ├── ConcurrentCircularBuffer.cs
    │   ├── Crc32Provider.cs
    │   ├── Extensions.cs
    │   ├── KafkaMessagePacker.cs
    │   └── ScheduledTimer.cs
    ├── Consumer.cs
    ├── Default
    │   ├── ConsoleLog.cs
    │   ├── DefaultKafkaConnectionFactory.cs
    │   ├── DefaultPartitionSelector.cs
    │   └── DefaultTraceLog.cs
    ├── Interfaces
    │   ├── IBrokerRouter.cs
    │   ├── IKafkaConnection.cs
    │   ├── IKafkaConnectionFactory.cs
    │   ├── IKafkaLog.cs
    │   ├── IKafkaRequest.cs
    │   ├── IKafkaTcpSocket.cs
    │   ├── IMetadataQueries.cs
    │   └── IPartitionSelector.cs
    ├── KafkaConnection.cs
    ├── KafkaMetadataProvider.cs
    ├── KafkaTcpSocket.cs
    ├── MetadataQueries.cs
    ├── Model
    │   ├── BrokerRoute.cs
    │   ├── ConsumerOptions.cs
    │   ├── KafkaEndpoint.cs
    │   └── KafkaOptions.cs
    ├── Producer.cs
    ├── Protocol
    │   ├── BaseRequest.cs
    │   ├── Broker.cs
    │   ├── ConsumerMetadataRequest.cs
    │   ├── FetchRequest.cs
    │   ├── Message.cs
    │   ├── MetadataRequest.cs
    │   ├── OffsetCommitRequest.cs
    │   ├── OffsetFetchRequest.cs
    │   ├── OffsetRequest.cs
    │   ├── ProduceRequest.cs
    │   ├── Protocol.cs
    │   └── Topic.cs
    ├── Statistics
    │   └── StatisticsTracker.cs
    └── kafka-net-core.csproj
/.gitignore:
--------------------------------------------------------------------------------
1 | .vs/
2 | kafka-net-core/bin/
3 | kafka-net-core/obj/
4 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Covenant Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
6 |
7 | ## Our Standards
8 |
9 | Examples of behavior that contributes to creating a positive environment include:
10 |
11 | * Using welcoming and inclusive language
12 | * Being respectful of differing viewpoints and experiences
13 | * Gracefully accepting constructive criticism
14 | * Focusing on what is best for the community
15 | * Showing empathy towards other community members
16 |
17 | Examples of unacceptable behavior by participants include:
18 |
19 | * The use of sexualized language or imagery and unwelcome sexual attention or advances
20 | * Trolling, insulting/derogatory comments, and personal or political attacks
21 | * Public or private harassment
22 | * Publishing others' private information, such as a physical or electronic address, without explicit permission
23 | * Other conduct which could reasonably be considered inappropriate in a professional setting
24 |
25 | ## Our Responsibilities
26 |
27 | Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
28 |
29 | Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
30 |
31 | ## Scope
32 |
33 | This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
34 |
35 | ## Enforcement
36 |
37 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at avsenev@hotmail.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
38 |
39 | Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
40 |
41 | ## Attribution
42 |
43 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
44 |
45 | [homepage]: http://contributor-covenant.org
46 | [version]: http://contributor-covenant.org/version/1/4/
47 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2018 Viacheslav Avsenev
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [![NuGet version](https://badge.fury.io/nu/kafka-net-core.svg)](https://badge.fury.io/nu/kafka-net-core)
2 |
3 | # kafka-net-core
4 |
5 | This is the .NET Core version of the [kafka-net](https://github.com/Jroland/kafka-net) library.
6 |
7 | # Examples
8 |
9 | Install the package with the .NET CLI:
10 | ```sh
11 | dotnet add package kafka-net-core --version 1.0.2
12 | ```
13 | ##### Producer
14 | ```csharp
15 | var options = new KafkaOptions(new Uri("http://localhost:9092"));
16 | var router = new BrokerRouter(options);
17 |
18 | using (Producer client = new Producer(router))
19 | {
20 | client.SendMessageAsync("test_topic", new[] { new Message("hello world") }).Wait();
21 | }
22 | ```
23 | ##### Consumer
24 | ```csharp
25 | var options = new KafkaOptions(new Uri("http://localhost:9092"));
26 | var router = new BrokerRouter(options);
27 | using (var consumer = new Consumer(new ConsumerOptions("test_topic", router)))
28 | {
29 | // Consume returns a blocking IEnumerable (ie: never ending stream)
30 | foreach (var message in consumer.Consume())
31 | {
32 | Console.WriteLine("Response: P{0},O{1} : {2}",
33 | message.Meta.PartitionId, message.Meta.Offset, message.Value);
34 | }
35 | }
36 | ```
37 |
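38 | ##### Multiple brokers
39 | `KafkaOptions` can take more than one Uri; `BrokerRouter` queries each endpoint in turn when refreshing metadata, so the client keeps working if a single broker goes down. A minimal sketch (the broker addresses are placeholders):
40 | ```csharp
41 | var options = new KafkaOptions(
42 |     new Uri("http://broker1:9092"),
43 |     new Uri("http://broker2:9092"));
44 | var router = new BrokerRouter(options);
45 | ```
46 |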
--------------------------------------------------------------------------------
/kafka-net-core.sln:
--------------------------------------------------------------------------------
1 |
2 | Microsoft Visual Studio Solution File, Format Version 12.00
3 | # Visual Studio 15
4 | VisualStudioVersion = 15.0.28010.0
5 | MinimumVisualStudioVersion = 10.0.40219.1
6 | Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "kafka-net-core", "kafka-net-core\kafka-net-core.csproj", "{90891EE4-5D57-4EB8-B1DD-E7F35C339C0F}"
7 | EndProject
8 | Global
9 | GlobalSection(SolutionConfigurationPlatforms) = preSolution
10 | Debug|Any CPU = Debug|Any CPU
11 | Release|Any CPU = Release|Any CPU
12 | EndGlobalSection
13 | GlobalSection(ProjectConfigurationPlatforms) = postSolution
14 | {90891EE4-5D57-4EB8-B1DD-E7F35C339C0F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
15 | {90891EE4-5D57-4EB8-B1DD-E7F35C339C0F}.Debug|Any CPU.Build.0 = Debug|Any CPU
16 | {90891EE4-5D57-4EB8-B1DD-E7F35C339C0F}.Release|Any CPU.ActiveCfg = Release|Any CPU
17 | {90891EE4-5D57-4EB8-B1DD-E7F35C339C0F}.Release|Any CPU.Build.0 = Release|Any CPU
18 | EndGlobalSection
19 | GlobalSection(SolutionProperties) = preSolution
20 | HideSolutionNode = FALSE
21 | EndGlobalSection
22 | GlobalSection(ExtensibilityGlobals) = postSolution
23 | SolutionGuid = {A449276C-38E7-49BB-9DEE-A777F2BF16E5}
24 | EndGlobalSection
25 | EndGlobal
26 |
--------------------------------------------------------------------------------
/kafka-net-core/BrokerRouter.cs:
--------------------------------------------------------------------------------
1 | using System.Collections.Concurrent;
2 | using System.Collections.Generic;
3 | using System.Linq;
4 | using KafkaNet.Model;
5 | using KafkaNet.Protocol;
6 |
7 | namespace KafkaNet
8 | {
9 | /// <summary>
10 | /// This class provides an abstraction from querying multiple Kafka servers for Metadata details and caching this data.
11 | ///
12 | /// All metadata queries are cached lazily. If metadata for a topic does not exist in the cache it will be queried using
13 | /// the default brokers provided in the constructor. Each Uri will be queried in turn until a
14 | /// response is received. It is therefore recommended to provide more than one Kafka Uri, as this API will then be able to get
15 | /// metadata even if one of the Kafka servers goes down.
16 | /// </summary>
17 | /// <remarks>
18 | /// The metadata will stay in the cache until an error condition is received indicating the metadata is out of date. This error
19 | /// can be in the form of a socket disconnect or an error code from a response indicating a broker no longer hosts a partition.
20 | /// </remarks>
21 |
22 | public class BrokerRouter : IBrokerRouter
23 | {
24 | private readonly object _threadLock = new object();
25 | private readonly KafkaOptions _kafkaOptions;
26 | private readonly KafkaMetadataProvider _kafkaMetadataProvider;
27 | private readonly ConcurrentDictionary<KafkaEndpoint, IKafkaConnection> _defaultConnectionIndex = new ConcurrentDictionary<KafkaEndpoint, IKafkaConnection>();
28 | private readonly ConcurrentDictionary<int, IKafkaConnection> _brokerConnectionIndex = new ConcurrentDictionary<int, IKafkaConnection>();
29 | private readonly ConcurrentDictionary<string, Topic> _topicIndex = new ConcurrentDictionary<string, Topic>();
30 |
31 | public BrokerRouter(KafkaOptions kafkaOptions)
32 | {
33 | _kafkaOptions = kafkaOptions;
34 | _kafkaMetadataProvider = new KafkaMetadataProvider(_kafkaOptions.Log);
35 |
36 | foreach (var endpoint in _kafkaOptions.KafkaServerEndpoints)
37 | {
38 | var conn = _kafkaOptions.KafkaConnectionFactory.Create(endpoint, _kafkaOptions.ResponseTimeoutMs, _kafkaOptions.Log, _kafkaOptions.MaximumReconnectionTimeout);
39 | _defaultConnectionIndex.AddOrUpdate(endpoint, _ => conn, (_, __) => conn);
40 | }
41 |
42 | if (_defaultConnectionIndex.Count <= 0)
43 | throw new ServerUnreachableException("None of the provided Kafka servers are resolvable.");
44 | }
45 |
46 | /// <summary>
47 | /// Select a broker for a specific topic and partitionId.
48 | /// </summary>
49 | /// <param name="topic">The topic name to select a broker for.</param>
50 | /// <param name="partitionId">The exact partition to select a broker for.</param>
51 | /// <returns>A broker route for the given partition of the given topic.</returns>
52 | /// <remarks>
53 | /// This function does not use any selector criteria. If the given partitionId does not exist an exception will be thrown.
54 | /// </remarks>
55 | /// <exception cref="InvalidTopicMetadataException">Thrown if the returned metadata for the given topic is invalid or missing.</exception>
56 | /// <exception cref="InvalidPartitionException">Thrown if the given partitionId does not exist for the given topic.</exception>
57 | /// <exception cref="ServerUnreachableException">Thrown if none of the default brokers can be contacted.</exception>
58 | public BrokerRoute SelectBrokerRoute(string topic, int partitionId)
59 | {
60 | var cachedTopic = GetTopicMetadata(topic);
61 |
62 | if (cachedTopic.Count <= 0)
63 | throw new InvalidTopicMetadataException(ErrorResponseCode.NoError, "The Metadata is invalid as it returned no data for the given topic:{0}", topic);
64 |
65 | var topicMetadata = cachedTopic[0];
66 |
67 | var partition = topicMetadata.Partitions.Find(x => x.PartitionId == partitionId);
68 | if (partition == null) throw new InvalidPartitionException(string.Format("The topic:{0} does not have a partitionId:{1} defined.", topic, partitionId));
69 |
70 | return GetCachedRoute(topicMetadata.Name, partition);
71 | }
72 |
73 | /// <summary>
74 | /// Select a broker for a given topic using the IPartitionSelector function.
75 | /// </summary>
76 | /// <param name="topic">The topic to retrieve a broker route for.</param>
77 | /// <param name="key">The key used by the IPartitionSelector to collate to a consistent partition. A null value means the key will be ignored in selection.</param>
78 | /// <returns>A broker route for the given topic.</returns>
79 | /// <exception cref="InvalidTopicMetadataException">Thrown if the returned metadata for the given topic is invalid or missing.</exception>
80 | /// <exception cref="ServerUnreachableException">Thrown if none of the default brokers can be contacted.</exception>
81 | public BrokerRoute SelectBrokerRoute(string topic, byte[] key = null)
82 | {
83 | //get topic either from cache or server.
84 | var cachedTopic = GetTopicMetadata(topic).FirstOrDefault();
85 |
86 | if (cachedTopic == null)
87 | throw new InvalidTopicMetadataException(ErrorResponseCode.NoError, "The Metadata is invalid as it returned no data for the given topic:{0}", topic);
88 |
89 | var partition = _kafkaOptions.PartitionSelector.Select(cachedTopic, key);
90 |
91 | return GetCachedRoute(cachedTopic.Name, partition);
92 | }
93 |
94 | /// <summary>
95 | /// Returns Topic metadata for each topic requested.
96 | /// </summary>
97 | /// <param name="topics">Collection of topics to request metadata for.</param>
98 | /// <returns>List of Topics as provided by Kafka.</returns>
99 | /// <remarks>
100 | /// The topic metadata is by default served from the cache; only topics missing from the cache will have their
101 | /// metadata requested from the server. To force querying the metadata from the server use <see cref="RefreshTopicMetadata"/>.
102 | /// </remarks>
103 | public List<Topic> GetTopicMetadata(params string[] topics)
104 | {
105 | var topicSearchResult = SearchCacheForTopics(topics);
106 |
107 | //update metadata for all missing topics
108 | if (topicSearchResult.Missing.Count > 0)
109 | {
110 | //double check for missing topics and query
111 | RefreshTopicMetadata(topicSearchResult.Missing.Where(x => !_topicIndex.ContainsKey(x)).ToArray());
112 |
113 | var refreshedTopics = topicSearchResult.Missing.Select(GetCachedTopic).Where(x => x != null);
114 | topicSearchResult.Topics.AddRange(refreshedTopics);
115 | }
116 |
117 | return topicSearchResult.Topics;
118 | }
119 |
120 | /// <summary>
121 | /// Force a call to the kafka servers to refresh metadata for the given topics.
122 | /// </summary>
123 | /// <param name="topics">List of topics to update metadata for.</param>
124 | /// <remarks>
125 | /// This method will ignore the cache and initiate a call to the kafka servers for all given topics, updating the cache with the resulting metadata.
126 | /// Only call this method to force a metadata update. For all other queries use <see cref="GetTopicMetadata"/>, which uses cached values.
127 | /// </remarks>
128 | public void RefreshTopicMetadata(params string[] topics)
129 | {
130 | //TODO need to remove lock here, try and move to lock free design
131 | lock (_threadLock)
132 | {
133 | _kafkaOptions.Log.DebugFormat("BrokerRouter: Refreshing metadata for topics: {0}", string.Join(",", topics));
134 |
135 | //get the connections to query against and get metadata
136 | var connections = _defaultConnectionIndex.Values.Union(_brokerConnectionIndex.Values).ToArray();
137 | var metadataResponse = _kafkaMetadataProvider.Get(connections, topics);
138 |
139 | UpdateInternalMetadataCache(metadataResponse);
140 | }
141 | }
142 |
143 | private TopicSearchResult SearchCacheForTopics(IEnumerable<string> topics)
144 | {
145 | var result = new TopicSearchResult();
146 |
147 | foreach (var topic in topics)
148 | {
149 | var cachedTopic = GetCachedTopic(topic);
150 |
151 | if (cachedTopic == null)
152 | result.Missing.Add(topic);
153 | else
154 | result.Topics.Add(cachedTopic);
155 | }
156 |
157 | return result;
158 | }
159 |
160 | private Topic GetCachedTopic(string topic)
161 | {
162 | return _topicIndex.TryGetValue(topic, out Topic cachedTopic) ? cachedTopic : null;
163 | }
164 |
165 | private BrokerRoute GetCachedRoute(string topic, Partition partition)
166 | {
167 | var route = TryGetRouteFromCache(topic, partition);
168 |
169 | //leader could not be found, refresh the broker information and try one more time
170 | if (route == null)
171 | {
172 | RefreshTopicMetadata(topic);
173 | route = TryGetRouteFromCache(topic, partition);
174 | }
175 |
176 | if (route != null) return route;
177 |
178 | throw new LeaderNotFoundException(string.Format("Lead broker cannot be found for partition: {0}, leader: {1}", partition.PartitionId, partition.LeaderId));
179 | }
180 |
181 | private BrokerRoute TryGetRouteFromCache(string topic, Partition partition)
182 | {
183 | if (_brokerConnectionIndex.TryGetValue(partition.LeaderId, out IKafkaConnection conn))
184 | {
185 | return new BrokerRoute
186 | {
187 | Topic = topic,
188 | PartitionId = partition.PartitionId,
189 | Connection = conn
190 | };
191 | }
192 |
193 | return null;
194 | }
195 |
196 | private void UpdateInternalMetadataCache(MetadataResponse metadata)
197 | {
198 | //resolve each broker
199 | var brokerEndpoints = metadata.Brokers.Select(broker => new
200 | {
201 | Broker = broker,
202 | Endpoint = _kafkaOptions.KafkaConnectionFactory.Resolve(broker.Address, _kafkaOptions.Log)
203 | });
204 |
205 | foreach (var broker in brokerEndpoints)
206 | {
207 | //if the connection is in our default connection index already, remove it and assign it to the broker index.
208 | IKafkaConnection connection;
209 | if (_defaultConnectionIndex.TryRemove(broker.Endpoint, out connection))
210 | {
211 | UpsertConnectionToBrokerConnectionIndex(broker.Broker.BrokerId, connection);
212 | }
213 | else
214 | {
215 | connection = _kafkaOptions.KafkaConnectionFactory.Create(broker.Endpoint, _kafkaOptions.ResponseTimeoutMs, _kafkaOptions.Log);
216 | UpsertConnectionToBrokerConnectionIndex(broker.Broker.BrokerId, connection);
217 | }
218 | }
219 |
220 | foreach (var topic in metadata.Topics)
221 | {
222 | var localTopic = topic;
223 | _topicIndex.AddOrUpdate(topic.Name, _ => localTopic, (_, __) => localTopic);
224 | }
225 | }
226 |
227 | private void UpsertConnectionToBrokerConnectionIndex(int brokerId, IKafkaConnection newConnection)
228 | {
229 | //associate the connection with the broker id, and add or update the reference
230 | _brokerConnectionIndex.AddOrUpdate(brokerId,
231 | _ => newConnection,
232 | (_, existingConnection) =>
233 | {
234 | //if a connection changes for a broker close old connection and create a new one
235 | if (existingConnection.Endpoint.Equals(newConnection.Endpoint)) return existingConnection;
236 | _kafkaOptions.Log.WarnFormat("Broker:{0} Uri changed from:{1} to {2}", brokerId, existingConnection.Endpoint, newConnection.Endpoint);
237 | using (existingConnection)
238 | {
239 | return newConnection;
240 | }
241 | });
242 | }
243 |
244 | public void Dispose()
245 | {
246 | _defaultConnectionIndex.Values.ToList().ForEach(conn => { using (conn) { } });
247 | _brokerConnectionIndex.Values.ToList().ForEach(conn => { using (conn) { } });
248 | }
249 | }
250 |
251 | #region BrokerCache Class...
252 | public class TopicSearchResult
253 | {
254 | public List<Topic> Topics { get; set; }
255 | public List<string> Missing { get; set; }
256 |
257 | public TopicSearchResult()
258 | {
259 | Topics = new List<Topic>();
260 | Missing = new List<string>();
261 | }
262 | }
263 | #endregion
264 | }
265 |
--------------------------------------------------------------------------------
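The router above is the entry point for metadata lookups. A minimal usage sketch (illustrative only, not part of the repository; the class name and endpoint are placeholders):

```csharp
using System;
using System.Collections.Generic;
using KafkaNet;
using KafkaNet.Model;
using KafkaNet.Protocol;

class MetadataExample
{
    static void Main()
    {
        var options = new KafkaOptions(new Uri("http://localhost:9092"));
        using (var router = new BrokerRouter(options))
        {
            // First call queries the brokers; later calls are served from the cache.
            List<Topic> topics = router.GetTopicMetadata("test_topic");

            // Bypass the cache when fresh metadata is required.
            router.RefreshTopicMetadata("test_topic");

            // Resolve a route to the broker currently leading partition 0.
            BrokerRoute route = router.SelectBrokerRoute("test_topic", 0);
        }
    }
}
```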
/kafka-net-core/Common/AsyncCollection.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Concurrent;
3 | using System.Collections.Generic;
4 | using System.Threading;
5 | using System.Threading.Tasks;
6 |
7 | namespace KafkaNet.Common
8 | {
9 | public class AsyncCollection<T>
10 | {
11 | private readonly object _lock = new object();
12 | private readonly AsyncManualResetEvent _dataAvailableEvent = new AsyncManualResetEvent();
13 | private readonly ConcurrentQueue<T> _queue = new ConcurrentQueue<T>();
14 | private long _dataInBufferCount = 0;
15 |
16 | public int Count
17 | {
18 | get { return _queue.Count + (int)Interlocked.Read(ref _dataInBufferCount); }
19 | }
20 |
21 | public bool IsCompleted { get; private set; }
22 |
23 | public void CompleteAdding()
24 | {
25 | IsCompleted = true;
26 | }
27 |
28 | public Task OnHasDataAvailable(CancellationToken token)
29 | {
30 | return _dataAvailableEvent.WaitAsync().WithCancellation(token);
31 | }
32 |
33 | public void Add(T data)
34 | {
35 | if (IsCompleted)
36 | {
37 | throw new ObjectDisposedException("AsyncCollection has been marked as complete. No new documents can be added.");
38 | }
39 |
40 | _queue.Enqueue(data);
41 |
42 | TriggerDataAvailability();
43 | }
44 |
45 | public void AddRange(IEnumerable data)
46 | {
47 | if (IsCompleted)
48 | {
49 | throw new ObjectDisposedException("AsyncCollection has been marked as complete. No new documents can be added.");
50 | }
51 |
52 | foreach (var item in data)
53 | {
54 | _queue.Enqueue(item);
55 | }
56 |
57 | TriggerDataAvailability();
58 | }
59 |
60 | public T Pop()
61 | {
62 | return TryTake(out T data) ? data : default(T);
63 | }
64 |
65 | public async Task<List<T>> TakeAsync(int count, TimeSpan timeout, CancellationToken token)
66 | {
67 | var batch = new List<T>(count);
68 | var timeoutTask = Task.Delay(timeout, token);
69 |
70 | try
71 | {
72 | do
73 | {
74 | while (TryTake(out T data))
75 | {
76 | batch.Add(data);
77 | Interlocked.Increment(ref _dataInBufferCount);
78 | if (--count <= 0 || timeoutTask.IsCompleted) return batch;
79 | }
80 | } while (await Task.WhenAny(_dataAvailableEvent.WaitAsync(), timeoutTask) != timeoutTask);
81 |
82 | return batch;
83 | }
84 | catch
85 | {
86 | return batch;
87 | }
88 | finally
89 | {
90 | Interlocked.Add(ref _dataInBufferCount, -1 * batch.Count);
91 | }
92 | }
93 |
94 | public void DrainAndApply(Action<T> appliedFunc)
95 | {
96 | while (_queue.TryDequeue(out T data))
97 | {
98 | appliedFunc(data);
99 | }
100 |
101 | TriggerDataAvailability();
102 | }
103 |
104 | public IEnumerable<T> Drain()
105 | {
106 | while (_queue.TryDequeue(out T data))
107 | {
108 | yield return data;
109 | }
110 |
111 | TriggerDataAvailability();
112 | }
113 |
114 | public bool TryTake(out T data)
115 | {
116 | try
117 | {
118 | return _queue.TryDequeue(out data);
119 | }
120 | finally
121 | {
122 | if (_queue.IsEmpty) TriggerDataAvailability();
123 | }
124 | }
125 |
126 | private void TriggerDataAvailability()
127 | {
128 | if (_queue.IsEmpty && _dataAvailableEvent.IsOpen)
129 | {
130 | lock (_lock)
131 | {
132 | if (_queue.IsEmpty && _dataAvailableEvent.IsOpen)
133 | {
134 | _dataAvailableEvent.Close();
135 | }
136 | }
137 | }
138 |
139 | if (!_queue.IsEmpty && !_dataAvailableEvent.IsOpen)
140 | {
141 | lock (_lock)
142 | {
143 | if (!_queue.IsEmpty && !_dataAvailableEvent.IsOpen)
144 | {
145 | _dataAvailableEvent.Open();
146 | }
147 | }
148 | }
149 | }
150 | }
151 | }
152 |
--------------------------------------------------------------------------------
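A minimal sketch of how the collection above is typically drained in batches (illustrative only, not part of the repository; `async Task Main` assumes C# 7.1 or later):

```csharp
using System;
using System.Threading;
using System.Threading.Tasks;
using KafkaNet.Common;

class AsyncCollectionExample
{
    static async Task Main()
    {
        var queue = new AsyncCollection<int>();
        queue.AddRange(new[] { 1, 2, 3 });

        // Take up to 100 items, waiting at most one second for the batch to fill.
        var batch = await queue.TakeAsync(100, TimeSpan.FromSeconds(1), CancellationToken.None);
        Console.WriteLine("Got {0} items", batch.Count);
    }
}
```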
/kafka-net-core/Common/AsyncLock.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Threading;
3 | using System.Threading.Tasks;
4 |
5 | namespace KafkaNet.Common
6 | {
7 | /// <summary>
8 | /// An asynchronous locking construct.
9 | /// </summary>
10 | /// <remarks>
11 | /// This is based on Stephen Toub's implementation here: http://blogs.msdn.com/b/pfxteam/archive/2012/02/12/10266988.aspx
12 | /// However, we're using SemaphoreSlim as the basis rather than AsyncSemaphore, since in .NET 4.5 SemaphoreSlim implements the WaitAsync() method.
13 | /// </remarks>
14 | public class AsyncLock : IDisposable
15 | {
16 | private readonly SemaphoreSlim _semaphore;
17 | private readonly Task<Releaser> _releaser;
18 |
19 | public AsyncLock()
20 | {
21 | _semaphore = new SemaphoreSlim(1, 1);
22 | _releaser = Task.FromResult(new Releaser(this));
23 | }
24 |
25 | public bool IsLocked {
26 | get { return _semaphore.CurrentCount == 0; }
27 | }
28 |
29 | public Task<Releaser> LockAsync(CancellationToken canceller)
30 | {
31 | var wait = _semaphore.WaitAsync(canceller);
32 |
33 | if (wait.IsCanceled) throw new OperationCanceledException("Unable to acquire lock within the allotted timeout.");
34 |
35 | return wait.IsCompleted ?
36 | _releaser :
37 | wait.ContinueWith((t, state) =>
38 | {
39 | if (t.IsCanceled) throw new OperationCanceledException("Unable to acquire lock within the allotted timeout.");
40 | return new Releaser((AsyncLock) state);
41 | }, this, canceller, TaskContinuationOptions.ExecuteSynchronously, TaskScheduler.Default);
42 | }
43 |
44 | public Task<Releaser> LockAsync()
45 | {
46 | var wait = _semaphore.WaitAsync();
47 | return wait.IsCompleted ?
48 | _releaser :
49 | wait.ContinueWith((_, state) => new Releaser((AsyncLock)state),
50 | this, CancellationToken.None,
51 | TaskContinuationOptions.ExecuteSynchronously, TaskScheduler.Default);
52 | }
53 |
54 | public void Dispose()
55 | {
56 | Dispose(true);
57 | }
58 |
59 | protected void Dispose(bool disposing)
60 | {
61 | if (disposing)
62 | {
63 | using (_semaphore) { }
64 | using (_releaser) { }
65 | }
66 | }
67 |
68 | public struct Releaser : IDisposable
69 | {
70 | private readonly AsyncLock _toRelease;
71 |
72 | internal Releaser(AsyncLock toRelease) { _toRelease = toRelease; }
73 |
74 | public void Dispose()
75 | {
76 | if (_toRelease != null)
77 | {
78 | _toRelease._semaphore.Release();
79 | }
80 | }
81 | }
82 | }
83 | }
84 |
--------------------------------------------------------------------------------
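The Releaser struct makes the lock composable with `using`, which is how this pattern is normally consumed. A minimal sketch (illustrative only, not part of the repository; the class and member names are placeholders):

```csharp
using System.Threading.Tasks;
using KafkaNet.Common;

class AsyncLockExample
{
    private static readonly AsyncLock Lock = new AsyncLock();
    private static int _counter;

    static async Task IncrementAsync()
    {
        // LockAsync returns a Releaser that releases the semaphore on Dispose,
        // so the critical section is bounded by the using block.
        using (await Lock.LockAsync())
        {
            _counter++;
        }
    }
}
```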
/kafka-net-core/Common/AsyncManualResetEvent.cs:
--------------------------------------------------------------------------------
1 | using System.Threading;
2 | using System.Threading.Tasks;
3 |
4 | // original idea by Stephen Toub: http://blogs.msdn.com/b/pfxteam/archive/2012/02/11/10266920.aspx
5 |
6 | namespace KafkaNet.Common
7 | {
8 |
9 |
10 | /// <summary>
11 | /// Async version of a manual reset event.
12 | /// </summary>
13 | public sealed class AsyncManualResetEvent
14 | {
15 | private TaskCompletionSource<bool> _tcs;
16 |
17 | public bool IsOpen
18 | {
19 | get { return _tcs.Task.IsCompleted; }
20 | }
21 |
22 | /// <summary>
23 | /// Async version of a manual reset event.
24 | /// </summary>
25 | /// <param name="set">Sets whether the initial state of the event is true=open or false=blocking.</param>
26 | public AsyncManualResetEvent(bool set = false)
27 | {
28 | _tcs = new TaskCompletionSource<bool>();
29 | if (set)
30 | {
31 | _tcs.SetResult(true);
32 | }
33 | }
34 |
35 | /// <summary>
36 | /// Async wait for the manual reset event to be triggered.
37 | /// </summary>
38 | /// <returns>The task that completes when the event is opened.</returns>
39 | public Task WaitAsync()
40 | {
41 | return _tcs.Task;
42 | }
43 |
44 | /// <summary>
45 | /// Set the event and complete, releasing all WaitAsync requests.
46 | /// </summary>
47 | public void Open()
48 | {
49 | _tcs.TrySetResult(true);
50 | }
51 |
52 | /// <summary>
53 | /// Reset the event, making all WaitAsync requests block; does nothing if already reset.
54 | /// </summary>
55 | public void Close()
56 | {
57 | while (true)
58 | {
59 | var tcs = _tcs;
60 | if (!tcs.Task.IsCompleted || Interlocked.CompareExchange(ref _tcs, new TaskCompletionSource<bool>(), tcs) == tcs)
61 | return;
62 | }
63 | }
64 | }
65 | }
66 |
--------------------------------------------------------------------------------
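A minimal open/close sketch for the event above (illustrative only, not part of the repository; `async Task Main` assumes C# 7.1 or later):

```csharp
using System;
using System.Threading.Tasks;
using KafkaNet.Common;

class ResetEventExample
{
    static async Task Main()
    {
        var gate = new AsyncManualResetEvent(); // starts closed (blocking)

        var waiter = Task.Run(async () =>
        {
            await gate.WaitAsync(); // parks until Open() is called
            Console.WriteLine("released");
        });

        gate.Open();  // releases every current and future waiter
        await waiter;
        gate.Close(); // subsequent WaitAsync calls block again
    }
}
```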
/kafka-net-core/Common/BigEndianBinaryReader.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Diagnostics.Contracts;
4 | using System.IO;
5 | using System.Linq;
6 | using System.Text;
7 |
8 | namespace KafkaNet.Common
9 | {
10 | /// <summary>
11 | /// A BinaryReader that is BigEndian aware.
12 | /// </summary>
13 | /// <remarks>
14 | /// Booleans, bytes and byte arrays are read directly.
15 | /// All other values are read in BigEndian byte order and converted to native endianness.
16 | /// Characters and Strings are all decoded as UTF-8 (which is byte order independent).
17 | ///
18 | ///
19 | /// BigEndianBinaryWriter code provided by Zoltu
20 | /// https://github.com/Zoltu/Zoltu.EndianAwareBinaryReaderWriter
21 | ///
22 | /// The code was modified to provide Kafka specific logic and helper functions.
23 | /// </remarks>
24 | public class BigEndianBinaryReader : BinaryReader
25 | {
26 | private const int KafkaNullSize = -1;
27 |
28 | public BigEndianBinaryReader(IEnumerable<byte> payload) : base(new MemoryStream(payload.ToArray()), Encoding.UTF8)
29 | {
30 |
31 | }
32 |
33 | public long Length { get { return base.BaseStream.Length; } }
34 | public long Position { get { return base.BaseStream.Position; } set { base.BaseStream.Position = value; } }
35 | public bool HasData { get { return base.BaseStream.Position < base.BaseStream.Length; } }
36 |
37 | public bool Available(int dataSize)
38 | {
39 | return (base.BaseStream.Length - base.BaseStream.Position) >= dataSize;
40 | }
41 |
42 | public override Decimal ReadDecimal()
43 | {
44 | var bytes = GetNextBytesNativeEndian(16);
45 |
46 | var ints = new Int32[4];
47 | ints[0] = (Int32)bytes[0] << 0
48 | | (Int32)bytes[1] << 8
49 | | (Int32)bytes[2] << 16
50 | | (Int32)bytes[3] << 24;
51 | ints[1] = (Int32)bytes[4] << 0
52 | | (Int32)bytes[5] << 8
53 | | (Int32)bytes[6] << 16
54 | | (Int32)bytes[7] << 24;
55 | ints[2] = (Int32)bytes[8] << 0
56 | | (Int32)bytes[9] << 8
57 | | (Int32)bytes[10] << 16
58 | | (Int32)bytes[11] << 24;
59 | ints[3] = (Int32)bytes[12] << 0
60 | | (Int32)bytes[13] << 8
61 | | (Int32)bytes[14] << 16
62 | | (Int32)bytes[15] << 24;
63 |
64 | return new Decimal(ints);
65 | }
66 |
67 | public override Single ReadSingle()
68 | {
69 | return EndianAwareRead(4, BitConverter.ToSingle);
70 | }
71 |
72 | public override Double ReadDouble()
73 | {
74 | return EndianAwareRead(8, BitConverter.ToDouble);
75 | }
76 |
77 | public override Int16 ReadInt16()
78 | {
79 | return EndianAwareRead(2, BitConverter.ToInt16);
80 | }
81 |
82 | public override Int32 ReadInt32()
83 | {
84 | return EndianAwareRead(4, BitConverter.ToInt32);
85 | }
86 |
87 | public override Int64 ReadInt64()
88 | {
89 | return EndianAwareRead(8, BitConverter.ToInt64);
90 | }
91 |
92 | public override UInt16 ReadUInt16()
93 | {
94 | return EndianAwareRead(2, BitConverter.ToUInt16);
95 | }
96 |
97 | public override UInt32 ReadUInt32()
98 | {
99 | return EndianAwareRead(4, BitConverter.ToUInt32);
100 | }
101 |
102 | public override UInt64 ReadUInt64()
103 | {
104 | return EndianAwareRead(8, BitConverter.ToUInt64);
105 | }
106 |
107 | public string ReadInt16String()
108 | {
109 | var size = ReadInt16();
110 | if (size == KafkaNullSize) return null;
111 | return Encoding.UTF8.GetString(RawRead(size));
112 | }
113 |
114 | public string ReadIntString()
115 | {
116 | var size = ReadInt32();
117 | if (size == KafkaNullSize) return null;
118 | return Encoding.UTF8.GetString(RawRead(size));
119 | }
120 |
121 | public byte[] ReadInt16PrefixedBytes()
122 | {
123 | var size = ReadInt16();
124 | if (size == KafkaNullSize) { return null; }
125 | return RawRead(size);
126 | }
127 |
128 | public byte[] ReadIntPrefixedBytes()
129 | {
130 | var size = ReadInt32();
131 | if (size == KafkaNullSize) { return null; }
132 | return RawRead(size);
133 | }
134 |
135 | public byte[] ReadToEnd()
136 | {
137 | var size = (int)(base.BaseStream.Length - base.BaseStream.Position);
138 | var buffer = new byte[size];
139 | base.BaseStream.Read(buffer, 0, size);
140 | return buffer;
141 | }
142 |
143 | public byte[] CrcHash()
144 | {
145 | var currentPosition = base.BaseStream.Position;
146 | try
147 | {
148 | base.BaseStream.Position = 0;
149 | return Crc32Provider.ComputeHash(ReadToEnd());
150 | }
151 | finally
152 | {
153 | base.BaseStream.Position = currentPosition;
154 | }
155 | }
156 |
157 | public uint Crc()
158 | {
159 | var currentPosition = base.BaseStream.Position;
160 | try
161 | {
162 | base.BaseStream.Position = 0;
163 | return Crc32Provider.Compute(ReadToEnd());
164 | }
165 | finally
166 | {
167 | base.BaseStream.Position = currentPosition;
168 | }
169 | }
170 |
171 | public byte[] RawRead(int size)
172 | {
173 | if (size <= 0) { return new byte[0]; }
174 |
175 | var buffer = new byte[size];
176 |
177 | base.Read(buffer, 0, size);
178 |
179 | return buffer;
180 | }
181 |
182 | private T EndianAwareRead<T>(Int32 size, Func<Byte[], Int32, T> converter) where T : struct
183 | {
184 | Contract.Requires(size >= 0);
185 | Contract.Requires(converter != null);
186 |
187 | var bytes = GetNextBytesNativeEndian(size);
188 | return converter(bytes, 0);
189 | }
190 |
191 | private Byte[] GetNextBytesNativeEndian(Int32 count)
192 | {
193 | Contract.Requires(count >= 0);
194 | Contract.Ensures(Contract.Result<Byte[]>() != null);
195 | Contract.Ensures(Contract.Result<Byte[]>().Length == count);
196 |
197 | var bytes = GetNextBytes(count);
198 | if (BitConverter.IsLittleEndian)
199 | Array.Reverse(bytes);
200 | return bytes;
201 | }
202 |
203 | private Byte[] GetNextBytes(Int32 count)
204 | {
205 | Contract.Requires(count >= 0);
206 | Contract.Ensures(Contract.Result() != null);
207 | Contract.Ensures(Contract.Result().Length == count);
208 |
209 | var buffer = new Byte[count];
210 | var bytesRead = BaseStream.Read(buffer, 0, count);
211 |
212 | if (bytesRead != count)
213 | throw new EndOfStreamException();
214 |
215 | return buffer;
216 | }
217 | }
218 | }
219 |
--------------------------------------------------------------------------------
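A minimal decoding sketch for the reader above (illustrative only, not part of the repository; the payload bytes are hand-built for the example):

```csharp
using System;
using KafkaNet.Common;

class ReaderExample
{
    static void Main()
    {
        // A big-endian Int32 (5) followed by a 2-byte length-prefixed "hi".
        var payload = new byte[] { 0, 0, 0, 5, 0, 2, (byte)'h', (byte)'i' };

        using (var reader = new BigEndianBinaryReader(payload))
        {
            int number = reader.ReadInt32();        // 5
            string text = reader.ReadInt16String(); // "hi"
            Console.WriteLine("{0} {1}", number, text);
        }
    }
}
```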
/kafka-net-core/Common/BigEndianBinaryWriter.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Diagnostics.Contracts;
3 | using System.IO;
4 | using System.Text;
5 |
6 | namespace KafkaNet.Common
7 | {
8 | /// <summary>
9 | /// A BinaryWriter that stores values in BigEndian format.
10 | /// </summary>
11 | /// <remarks>
12 | /// Booleans, bytes and byte arrays will be written directly.
13 | /// All other values will be converted to a byte array in BigEndian byte order and written.
14 | /// Characters and Strings will all be encoded in UTF-8 (which is byte order independent).
15 | ///
16 | ///
17 | /// BigEndianBinaryWriter code provided by Zoltu
18 | /// https://github.com/Zoltu/Zoltu.EndianAwareBinaryReaderWriter
19 | /// The code was modified to implement Kafka specific byte handling.
20 | /// </remarks>
21 | public class BigEndianBinaryWriter : BinaryWriter
22 | {
23 | public BigEndianBinaryWriter(Stream stream)
24 | : base(stream, Encoding.UTF8)
25 | {
26 | Contract.Requires(stream != null);
27 | }
28 |
29 | public BigEndianBinaryWriter(Stream stream, Boolean leaveOpen)
30 | : base(stream, Encoding.UTF8, leaveOpen)
31 | {
32 | Contract.Requires(stream != null);
33 | }
34 |
35 | public override void Write(Decimal value)
36 | {
37 | var ints = Decimal.GetBits(value);
38 | Contract.Assume(ints != null);
39 | Contract.Assume(ints.Length == 4);
40 |
41 | if (BitConverter.IsLittleEndian)
42 | Array.Reverse(ints);
43 |
44 | for (var i = 0; i < 4; ++i)
45 | {
46 | var bytes = BitConverter.GetBytes(ints[i]);
47 | if (BitConverter.IsLittleEndian)
48 | Array.Reverse(bytes);
49 |
50 | Write(bytes);
51 | }
52 | }
53 |
54 | public override void Write(Single value)
55 | {
56 | var bytes = BitConverter.GetBytes(value);
57 | WriteBigEndian(bytes);
58 | }
59 |
60 | public override void Write(Double value)
61 | {
62 | var bytes = BitConverter.GetBytes(value);
63 | WriteBigEndian(bytes);
64 | }
65 |
66 | public override void Write(Int16 value)
67 | {
68 | var bytes = BitConverter.GetBytes(value);
69 | WriteBigEndian(bytes);
70 | }
71 |
72 | public override void Write(Int32 value)
73 | {
74 | var bytes = BitConverter.GetBytes(value);
75 | WriteBigEndian(bytes);
76 | }
77 |
78 | public override void Write(Int64 value)
79 | {
80 | var bytes = BitConverter.GetBytes(value);
81 | WriteBigEndian(bytes);
82 | }
83 |
84 | public override void Write(UInt16 value)
85 | {
86 | var bytes = BitConverter.GetBytes(value);
87 | WriteBigEndian(bytes);
88 | }
89 |
90 | public override void Write(UInt32 value)
91 | {
92 | var bytes = BitConverter.GetBytes(value);
93 | WriteBigEndian(bytes);
94 | }
95 |
96 | public override void Write(UInt64 value)
97 | {
98 | var bytes = BitConverter.GetBytes(value);
99 | WriteBigEndian(bytes);
100 | }
101 |
102 | public override void Write(string value)
103 | {
104 | throw new NotSupportedException("Kafka requires specific string length prefix encoding.");
105 | }
106 |
107 | public void Write(byte[] value, StringPrefixEncoding encoding)
108 | {
109 | if (value == null)
110 | {
111 | Write(-1);
112 | return;
113 | }
114 |
115 | switch (encoding)
116 | {
117 | case StringPrefixEncoding.Int16:
118 | Write((Int16)value.Length);
119 | break;
120 | case StringPrefixEncoding.Int32:
121 | Write(value.Length);
122 | break;
123 | }
124 |
125 | Write(value);
126 | }
127 |
128 | public void Write(string value, StringPrefixEncoding encoding)
129 | {
130 | if (value == null)
131 | {
132 | switch (encoding)
133 | {
134 | case StringPrefixEncoding.Int16:
135 | Write((Int16)(-1));
136 | return;
137 | default:
138 | Write(-1);
139 | return;
140 | }
141 | }
142 |
143 | switch (encoding)
144 | {
145 | case StringPrefixEncoding.Int16:
146 | Write((Int16)value.Length);
147 | break;
148 | case StringPrefixEncoding.Int32:
149 | Write(value.Length);
150 | break;
151 | }
152 |
153 | Write(Encoding.UTF8.GetBytes(value));
154 | }
155 |
156 |
157 | private void WriteBigEndian(Byte[] bytes)
158 | {
159 | Contract.Requires(bytes != null);
160 |
161 | if (BitConverter.IsLittleEndian)
162 | Array.Reverse(bytes);
163 |
164 | Write(bytes);
165 | }
166 | }
167 |
168 | public enum StringPrefixEncoding
169 | {
170 | Int16,
171 | Int32,
172 | None
173 | };
174 | }
175 |
--------------------------------------------------------------------------------
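A minimal encoding sketch for the writer above (illustrative only, not part of the repository; the method name is a placeholder):

```csharp
using System.IO;
using KafkaNet.Common;

class WriterExample
{
    static byte[] EncodeTopicName(string topic)
    {
        using (var stream = new MemoryStream())
        using (var writer = new BigEndianBinaryWriter(stream))
        {
            // Kafka strings carry a length prefix; here a 2-byte big-endian Int16.
            writer.Write(topic, StringPrefixEncoding.Int16);
            return stream.ToArray();
        }
    }
}
```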
/kafka-net-core/Common/ConcurrentCircularBuffer.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections;
3 | using System.Collections.Generic;
4 | using System.Threading;
5 |
6 | namespace KafkaNet.Common
7 | {
8 | public class ConcurrentCircularBuffer<T> : IEnumerable<T>
9 | {
10 | private readonly int _maxSize;
11 | private long _count;
12 | private int _head = -1;
13 | readonly T[] _values;
14 |
15 | public ConcurrentCircularBuffer(int max)
16 | {
17 | _maxSize = max;
18 | _values = new T[_maxSize];
19 | }
20 |
21 | public int MaxSize { get { return _maxSize; } }
22 |
23 | public long Count
24 | {
25 | get
26 | {
27 | return Interlocked.Read(ref _count);
28 | }
29 | }
30 |
31 | public ConcurrentCircularBuffer<T> Enqueue(T obj)
32 | {
33 | var head = Interlocked.Increment(ref _head);
34 |
35 | if (head > _maxSize - 1)
36 | {
37 | //this should exchange to correct index even if interlocked called twice from different threads
38 | Interlocked.Exchange(ref _head, head - _maxSize);
39 | head = head - _maxSize;
40 | }
41 |
42 | _values[head] = obj;
43 |
44 | if (_count != _maxSize) //once we hit max size we dont need to track count.
45 | Interlocked.Exchange(ref _count, Math.Min(Interlocked.Increment(ref _count), _maxSize));
46 |
47 | return this;
48 | }
49 |
50 | public IEnumerator<T> GetEnumerator()
51 | {
52 | for (int i = 0; i < Count; i++)
53 | {
54 | yield return _values[i];
55 | }
56 | }
57 |
58 | IEnumerator IEnumerable.GetEnumerator()
59 | {
60 | return GetEnumerator();
61 | }
62 | }
63 | }
64 |
--------------------------------------------------------------------------------
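A minimal sketch of the overwrite behavior above (illustrative only, not part of the repository):

```csharp
using System;
using KafkaNet.Common;

class BufferExample
{
    static void Main()
    {
        // Keeps only the three most recent values; older slots are overwritten.
        var buffer = new ConcurrentCircularBuffer<int>(3);
        for (var i = 1; i <= 5; i++) buffer.Enqueue(i);

        foreach (var value in buffer)
            Console.WriteLine(value); // prints 4, 5, 3 (slot order, not insertion order)
    }
}
```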
/kafka-net-core/Common/Crc32Provider.cs:
--------------------------------------------------------------------------------
1 | // Copyright (c) Damien Guard. All rights reserved.
2 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
3 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
4 | // Originally published at http://damieng.com/blog/2006/08/08/calculating_crc32_in_c_and_net
5 |
6 | using System;
7 |
8 | namespace KafkaNet.Common
9 | {
10 | /// <summary>
11 | /// This code was originally from the copyrighted code listed above but was modified significantly
12 | /// as the original code was not thread safe and did not match what was required of this driver. This
13 | /// class now provides a static lib which will do the simple CRC calculation required by Kafka servers.
14 | /// </summary>
15 | public static class Crc32Provider
16 | {
17 | public const UInt32 DefaultPolynomial = 0xedb88320u;
18 | public const UInt32 DefaultSeed = 0xffffffffu;
19 | private static readonly UInt32[] PolynomialTable;
20 |
21 | static Crc32Provider()
22 | {
23 | PolynomialTable = InitializeTable(DefaultPolynomial);
24 | }
25 |
26 | public static UInt32 Compute(byte[] buffer)
27 | {
28 | return ~CalculateHash(buffer, 0, buffer.Length);
29 | }
30 |
31 | public static UInt32 Compute(byte[] buffer, int offset, int length)
32 | {
33 | return ~CalculateHash(buffer, offset, length);
34 | }
35 |
36 | public static byte[] ComputeHash(byte[] buffer)
37 | {
38 | return UInt32ToBigEndianBytes(Compute(buffer));
39 | }
40 |
41 | public static byte[] ComputeHash(byte[] buffer, int offset, int length)
42 | {
43 | return UInt32ToBigEndianBytes(Compute(buffer, offset, length));
44 | }
45 |
46 | private static UInt32[] InitializeTable(UInt32 polynomial)
47 | {
48 | var createTable = new UInt32[256];
49 | for (var i = 0; i < 256; i++)
50 | {
51 | var entry = (UInt32)i;
52 | for (var j = 0; j < 8; j++)
53 | if ((entry & 1) == 1)
54 | entry = (entry >> 1) ^ polynomial;
55 | else
56 | entry = entry >> 1;
57 | createTable[i] = entry;
58 | }
59 |
60 | return createTable;
61 | }
62 |
63 | private static UInt32 CalculateHash(byte[] buffer, int offset, int length)
64 | {
65 | var crc = DefaultSeed;
66 | for (var i = offset; i < length; i++)
67 | {
68 | crc = (crc >> 8) ^ PolynomialTable[buffer[i] ^ crc & 0xff];
69 | }
70 | return crc;
71 | }
72 |
73 | private static byte[] UInt32ToBigEndianBytes(UInt32 uint32)
74 | {
75 | var result = BitConverter.GetBytes(uint32);
76 |
77 | if (BitConverter.IsLittleEndian)
78 | Array.Reverse(result);
79 |
80 | return result;
81 | }
82 | }
83 | }
--------------------------------------------------------------------------------
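A minimal sketch of the two entry points above (illustrative only, not part of the repository):

```csharp
using System;
using KafkaNet.Common;

class CrcExample
{
    static void Main()
    {
        var payload = new byte[] { 1, 2, 3, 4 };

        uint crc = Crc32Provider.Compute(payload);          // CRC-32 as an unsigned int
        byte[] prefix = Crc32Provider.ComputeHash(payload); // same value as big-endian bytes

        Console.WriteLine("{0:x8} -> {1}", crc, BitConverter.ToString(prefix));
    }
}
```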
/kafka-net-core/Common/Extensions.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Diagnostics.Contracts;
4 | using System.Linq;
5 | using System.Text;
6 | using System.Threading.Tasks;
7 | using System.Threading;
8 |
9 | namespace KafkaNet.Common
10 | {
11 | /// <summary>
12 | /// Provides Big Endian conversion extensions to required types for the Kafka protocol.
13 | /// </summary>
14 | public static class Extensions
15 | {
16 | public static byte[] ToIntSizedBytes(this string value)
17 | {
18 | if (string.IsNullOrEmpty(value)) return (-1).ToBytes();
19 |
20 | return value.Length.ToBytes()
21 | .Concat(value.ToBytes())
22 | .ToArray();
23 | }
24 |
25 | public static byte[] ToInt16SizedBytes(this string value)
26 | {
27 | if (string.IsNullOrEmpty(value)) return (-1).ToBytes();
28 |
29 | return ((Int16)value.Length).ToBytes()
30 | .Concat(value.ToBytes())
31 | .ToArray();
32 | }
33 |
34 | public static byte[] ToInt32PrefixedBytes(this byte[] value)
35 | {
36 | if (value == null) return (-1).ToBytes();
37 |
38 | return value.Length.ToBytes()
39 | .Concat(value)
40 | .ToArray();
41 | }
42 |
43 | public static string ToUtf8String(this byte[] value)
44 | {
45 | if (value == null) return string.Empty;
46 |
47 | return Encoding.UTF8.GetString(value);
48 | }
49 |
50 | public static KafkaDataPayload ToPayload(this byte[] data)
51 | {
52 | return new KafkaDataPayload {Buffer = data};
53 | }
54 |
55 | public static byte[] ToBytes(this string value)
56 | {
57 | if (string.IsNullOrEmpty(value)) return (-1).ToBytes();
58 |
59 | //UTF8 is array of bytes, no endianness
60 | return Encoding.UTF8.GetBytes(value);
61 | }
62 |
63 | public static byte[] ToBytes(this Int16 value)
64 | {
65 | return BitConverter.GetBytes(value).Reverse().ToArray();
66 | }
67 |
68 | public static byte[] ToBytes(this Int32 value)
69 | {
70 | return BitConverter.GetBytes(value).Reverse().ToArray();
71 | }
72 |
73 | public static byte[] ToBytes(this Int64 value)
74 | {
75 | return BitConverter.GetBytes(value).Reverse().ToArray();
76 | }
77 |
78 | public static byte[] ToBytes(this float value)
79 | {
80 | return BitConverter.GetBytes(value).Reverse().ToArray();
81 | }
82 |
83 | public static byte[] ToBytes(this double value)
84 | {
85 | return BitConverter.GetBytes(value).Reverse().ToArray();
86 | }
87 |
88 | public static byte[] ToBytes(this char value)
89 | {
90 | return BitConverter.GetBytes(value).Reverse().ToArray();
91 | }
92 |
93 | public static byte[] ToBytes(this bool value)
94 | {
95 | return BitConverter.GetBytes(value).Reverse().ToArray();
96 | }
97 |
98 | public static Int32 ToInt32(this byte[] value)
99 | {
100 | return BitConverter.ToInt32(value.Reverse().ToArray(), 0);
101 | }
102 |
103 | /// <summary>
104 | /// Execute an awaited task while monitoring a given cancellation token. Use with non-cancelable async operations.
105 | /// </summary>
106 | /// <remarks>
107 | /// This extension method will only cancel the await and not the actual IO operation. The status of the IO operation will still
108 | /// need to be considered after the operation is cancelled.
109 | /// See http://blogs.msdn.com/b/pfxteam/archive/2012/10/05/how-do-i-cancel-non-cancelable-async-operations.aspx
110 | /// </remarks>
111 | public static async Task<T> WithCancellation<T>(this Task<T> task, CancellationToken cancellationToken)
112 | {
113 | var tcs = new TaskCompletionSource<bool>();
114 |
115 | var cancelRegistration = cancellationToken.Register(source => ((TaskCompletionSource<bool>)source).TrySetResult(true), tcs);
116 |
117 | using (cancelRegistration)
118 | {
119 | if (task != await Task.WhenAny(task, tcs.Task).ConfigureAwait(false))
120 | {
121 | throw new OperationCanceledException(cancellationToken);
122 | }
123 | }
124 |
125 | return await task.ConfigureAwait(false);
126 | }
127 |
128 | /// <summary>
129 | /// Execute an awaited task while monitoring a given cancellation token. Use with non-cancelable async operations.
130 | /// </summary>
131 | /// <remarks>
132 | /// This extension method will only cancel the await and not the actual IO operation. The status of the IO operation will still
133 | /// need to be considered after the operation is cancelled.
134 | /// See http://blogs.msdn.com/b/pfxteam/archive/2012/10/05/how-do-i-cancel-non-cancelable-async-operations.aspx
135 | /// </remarks>
136 | public static async Task WithCancellation(this Task task, CancellationToken cancellationToken)
137 | {
138 | var tcs = new TaskCompletionSource<bool>();
139 |
140 | var cancelRegistration = cancellationToken.Register(source => ((TaskCompletionSource<bool>)source).TrySetResult(true), tcs);
141 |
142 | using (cancelRegistration)
143 | {
144 | if (task != await Task.WhenAny(task, tcs.Task).ConfigureAwait(false))
145 | {
146 | throw new OperationCanceledException(cancellationToken);
147 | }
148 | }
149 | }
150 |
151 |
152 | /// <summary>
153 | /// Returns true if the handle is signaled before the timeout expires.
154 | /// </summary>
155 | /// <param name="handle">The handle whose signal triggers the task to be completed.</param>
156 | /// <param name="timeout">The timespan to wait before returning false.</param>
157 | /// <returns>The task returns true if the handle is signaled before the timeout has expired.</returns>
158 | /// <remarks>
159 | /// Original code from: http://blog.nerdbank.net/2011/07/c-await-for-waithandle.html
160 | /// There is a (brief) time delay between when the handle is signaled and when the task is marked as completed.
161 | /// </remarks>
162 | public static Task<bool> WaitAsync(this WaitHandle handle, TimeSpan timeout)
163 | {
164 | Contract.Requires(handle != null);
165 | Contract.Ensures(Contract.Result<Task<bool>>() != null);
166 |
167 | var tcs = new TaskCompletionSource<bool>();
168 | var localVariableInitLock = new object();
169 | lock (localVariableInitLock)
170 | {
171 | RegisteredWaitHandle callbackHandle = null;
172 | callbackHandle = ThreadPool.RegisterWaitForSingleObject(
173 | handle,
174 | (state, timedOut) =>
175 | {
176 | tcs.TrySetResult(!timedOut);
177 |
178 | // We take a lock here to make sure the outer method has completed setting the local variable callbackHandle.
179 | lock (localVariableInitLock)
180 | {
181 | if (callbackHandle!= null) callbackHandle.Unregister(null);
182 | }
183 | },
184 | state: null,
185 | millisecondsTimeOutInterval: (long)timeout.TotalMilliseconds,
186 | executeOnlyOnce: true);
187 | }
188 |
189 | return tcs.Task;
190 | }
191 |
192 | /// <summary>
193 | /// Mainly used for testing, allows waiting on a single task without throwing exceptions.
194 | /// </summary>
195 | public static void SafeWait(this Task source, TimeSpan timeout)
196 | {
197 | try
198 | {
199 | source.Wait(timeout);
200 | }
201 | catch
202 | {
203 | //ignore an exception that happens in this source
204 | }
205 | }
206 |
207 | /// <summary>
208 | /// Splits a collection into given batch sizes and returns as an enumerable of batches.
209 | /// </summary>
210 | public static IEnumerable<List<T>> Batch<T>(this IEnumerable<T> collection, int batchSize)
211 | {
212 | var nextbatch = new List<T>(batchSize);
213 | foreach (T item in collection)
214 | {
215 | nextbatch.Add(item);
216 | if (nextbatch.Count == batchSize)
217 | {
218 | yield return nextbatch;
219 | nextbatch = new List<T>(batchSize);
220 | }
221 | }
222 | if (nextbatch.Count > 0)
223 | yield return nextbatch;
224 | }
225 |
226 | /// <summary>
227 | /// Extracts a concrete exception out of a ContinueWith result.
228 | /// </summary>
229 | public static Exception ExtractException(this Task task)
230 | {
231 | if (task.IsFaulted == false) return null;
232 | if (task.Exception != null)
233 | return task.Exception.Flatten();
234 |
235 | return new ApplicationException("Unknown exception occurred.");
236 | }
237 | }
238 | }
239 |
--------------------------------------------------------------------------------
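A minimal sketch of the Batch extension above (illustrative only, not part of the repository):

```csharp
using System;
using System.Linq;
using KafkaNet.Common;

class BatchExample
{
    static void Main()
    {
        // Splits ten items into batches of three: 3, 3, 3 and a final 1.
        foreach (var batch in Enumerable.Range(1, 10).Batch(3))
        {
            Console.WriteLine(string.Join(",", batch));
        }
    }
}
```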
/kafka-net-core/Common/KafkaMessagePacker.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.IO;
4 |
5 | namespace KafkaNet.Common
6 | {
7 | public class KafkaMessagePacker : IDisposable
8 | {
9 | private const int IntegerByteSize = 4;
10 | private readonly BigEndianBinaryWriter _stream;
11 |
12 | public KafkaMessagePacker()
13 | {
14 | _stream = new BigEndianBinaryWriter(new MemoryStream());
15 | Pack(IntegerByteSize); //pre-allocate space for buffer length
16 | }
17 |
18 | public KafkaMessagePacker Pack(byte value)
19 | {
20 | _stream.Write(value);
21 | return this;
22 | }
23 |
24 | public KafkaMessagePacker Pack(Int32 ints)
25 | {
26 | _stream.Write(ints);
27 | return this;
28 | }
29 |
30 | public KafkaMessagePacker Pack(Int16 ints)
31 | {
32 | _stream.Write(ints);
33 | return this;
34 | }
35 |
36 | public KafkaMessagePacker Pack(Int64 ints)
37 | {
38 | _stream.Write(ints);
39 | return this;
40 | }
41 |
42 | public KafkaMessagePacker Pack(byte[] buffer, StringPrefixEncoding encoding = StringPrefixEncoding.Int32)
43 | {
44 | _stream.Write(buffer, encoding);
45 | return this;
46 | }
47 |
48 | public KafkaMessagePacker Pack(string data, StringPrefixEncoding encoding = StringPrefixEncoding.Int32)
49 | {
50 | _stream.Write(data, encoding);
51 | return this;
52 | }
53 |
54 | public KafkaMessagePacker Pack(IEnumerable<byte[]> data, StringPrefixEncoding encoding = StringPrefixEncoding.Int32)
55 | {
56 | foreach (var item in data)
57 | {
58 | _stream.Write(item, encoding);
59 | }
60 |
61 | return this;
62 | }
63 |
64 | public byte[] Payload()
65 | {
66 | var buffer = new byte[_stream.BaseStream.Length];
67 | _stream.BaseStream.Position = 0;
68 | Pack((Int32)(_stream.BaseStream.Length - IntegerByteSize));
69 | _stream.BaseStream.Position = 0;
70 | _stream.BaseStream.Read(buffer, 0, (int)_stream.BaseStream.Length);
71 | return buffer;
72 | }
73 |
74 | public byte[] PayloadNoLength()
75 | {
76 | var payloadLength = _stream.BaseStream.Length - IntegerByteSize;
77 | var buffer = new byte[payloadLength];
78 | _stream.BaseStream.Position = IntegerByteSize;
79 | _stream.BaseStream.Read(buffer, 0, (int)payloadLength);
80 | return buffer;
81 | }
82 |
83 | public byte[] CrcPayload()
84 | {
85 | var buffer = new byte[_stream.BaseStream.Length];
86 |
87 | //copy the payload over
88 | _stream.BaseStream.Position = 0;
89 | _stream.BaseStream.Read(buffer, 0, (int)_stream.BaseStream.Length);
90 |
91 | //calculate the crc
92 | var crc = Crc32Provider.ComputeHash(buffer, IntegerByteSize, buffer.Length);
93 | buffer[0] = crc[0];
94 | buffer[1] = crc[1];
95 | buffer[2] = crc[2];
96 | buffer[3] = crc[3];
97 |
98 | return buffer;
99 | }
100 |
101 | public void Dispose()
102 | {
103 | using (_stream) { }
104 | }
105 | }
106 | }
--------------------------------------------------------------------------------
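A minimal sketch of building a length-prefixed payload with the packer above (illustrative only, not part of the repository; the packed values are placeholders):

```csharp
using System;
using KafkaNet.Common;

class PackerExample
{
    static void Main()
    {
        using (var packer = new KafkaMessagePacker())
        {
            byte[] payload = packer
                .Pack((Int16)1)                             // e.g. an api key
                .Pack(42)                                   // e.g. a correlation id
                .Pack("client", StringPrefixEncoding.Int16) // length-prefixed string
                .Payload();                                 // 4-byte length header + body

            Console.WriteLine("{0} bytes", payload.Length);
        }
    }
}
```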
/kafka-net-core/Common/ScheduledTimer.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Timers;
3 |
4 | namespace KafkaNet.Common
5 | {
6 | public enum ScheduledTimerStatus
7 | {
8 | /// <summary>
9 | /// Timer is stopped.
10 | /// </summary>
11 | Stopped,
12 |
13 | /// <summary>
14 | /// Timer is running.
15 | /// </summary>
16 | Running
17 | }
18 |
19 | public interface IScheduledTimer : IDisposable
20 | {
21 | /// <summary>
22 | /// Current running status of the timer.
23 | /// </summary>
24 | ScheduledTimerStatus Status { get; }
25 |
26 | /// <summary>
27 | /// Indicates if the timer is running.
28 | /// </summary>
29 | bool Enabled { get; }
30 |
31 | /// <summary>
32 | /// Set the time at which the scheduled action first runs.
33 | /// </summary>
34 | /// <param name="start">Start date and time for the timer.</param>
35 | /// <returns>Instance of ScheduledTimer for fluent configuration.</returns>
36 | /// <remarks>If no interval is set, the action will only run once.</remarks>
37 | IScheduledTimer StartingAt(DateTime start);
38 |
39 | /// <summary>
40 | /// Set the interval at which the scheduled action is executed.
41 | /// </summary>
42 | /// <param name="interval">Interval between executions of the action.</param>
43 | /// <returns>Instance of ScheduledTimer for fluent configuration.</returns>
44 | /// <remarks>If no start time is set, the interval starts when the timer is started.</remarks>
45 | IScheduledTimer Every(TimeSpan interval);
46 |
47 | /// <summary>
48 | /// Action to perform when the timer expires.
49 | /// </summary>
50 | IScheduledTimer Do(Action action);
51 |
52 | /// <summary>
53 | /// Sets the timer to execute and restart the timer without waiting for the Do method to finish.
54 | /// </summary>
55 | /// <returns>Instance of ScheduledTimer for fluent configuration.</returns>
56 | IScheduledTimer DontWait();
57 |
58 | /// <summary>
59 | /// Starts the timer.
60 | /// </summary>
61 | IScheduledTimer Begin();
62 |
63 | /// <summary>
64 | /// Stop the timer.
65 | /// </summary>
66 | IScheduledTimer End();
67 | }
68 |
69 | /// <summary>
70 | /// TODO there is a bug in this that sometimes calls the do function twice on startup
71 | /// Timer class which provides a fluent interface for scheduling tasks for threads to execute at some future point.
72 | ///
73 | /// Thanks go to Jeff Vanzella for this nifty little fluent class for scheduling tasks.
74 | /// </summary>
75 | public class ScheduledTimer : IScheduledTimer
76 | {
77 | private bool _disposed;
78 | private readonly Timer _timer;
79 | private TimeSpan? _interval;
80 | private DateTime? _timerStart;
81 | private Action _action;
82 | private bool _dontWait;
83 |
84 | /// <summary>
85 | /// Current running status of the timer.
86 | /// </summary>
87 | public ScheduledTimerStatus Status { get; private set; }
88 |
89 | /// <summary>
90 | /// Constructor.
91 | /// </summary>
92 | public ScheduledTimer()
93 | {
94 | _interval = null;
95 | _timerStart = null;
96 |
97 | _timer = new Timer();
98 | _timer.Elapsed += ReplicationStartupTimerElapsed;
99 | _timer.AutoReset = true;
100 |
101 | Status = ScheduledTimerStatus.Stopped;
102 | }
103 |
104 | private void WaitActionWrapper()
105 | {
106 | if (_action == null) return;
107 |
108 | try
109 | {
110 | _timer.Enabled = false;
111 | _action();
112 | }
113 | finally
114 | {
115 | if (_disposed == false)
116 | _timer.Enabled = true;
117 | }
118 | }
119 |
120 | private void ReplicationTimerElapsed(object sender, ElapsedEventArgs e)
121 | {
122 | if (_action == null) return;
123 |
124 | if (_dontWait)
125 | _action();
126 | else
127 | WaitActionWrapper();
128 | }
129 |
130 | private void ReplicationStartupTimerElapsed(object sender, ElapsedEventArgs e)
131 | {
132 | if (_interval.HasValue)
133 | {
134 | _timer.Stop();
135 |
136 | _timer.Elapsed -= ReplicationStartupTimerElapsed;
137 | _timer.Elapsed += ReplicationTimerElapsed;
138 |
139 | _timer.Interval = ProcessIntervalAndEnsureItIsGreaterThan0(_interval.Value);
140 |
141 | _timer.Start();
142 | }
143 | else
144 | {
145 | End();
146 | }
147 |
148 | ReplicationTimerElapsed(sender, e);
149 | }
150 |
151 | /// <summary>
152 | /// Indicates if the timer is running.
153 | /// </summary>
154 | public bool Enabled { get { return _timer.Enabled; } }
155 |
156 | /// <summary>
157 | /// Set the time to start the first execution of the scheduled task.
158 | /// </summary>
159 | /// <param name="start">Start date and time for the timer.</param>
160 | /// <returns>Instance of IScheduledTimer for fluent configuration.</returns>
161 | /// <remarks>If no start time is set, the interval starts when the timer is started.</remarks>
162 | public IScheduledTimer StartingAt(DateTime start)
163 | {
164 | _timerStart = start;
165 |
166 | _timer.Interval = ProcessIntervalAndEnsureItIsGreaterThan0(start - DateTime.Now);
167 |
168 | return this;
169 | }
170 |
171 | ///
172 | /// Performs application-defined tasks associated with freeing, releasing, or resetting unmanaged resources.
173 | ///
174 | public void Dispose()
175 | {
176 | if (Status == ScheduledTimerStatus.Running)
177 | {
178 | End();
179 | }
180 |
181 | using (_timer)
182 | {
183 | _disposed = true;
184 | }
185 | }
186 |
187 | ///
188 | /// Set the interval to wait between each execution of the task.
189 | ///
190 | /// Interval to wait between execution of tasks.
191 | /// Instance of IScheduledTimer for fluent configuration.
192 | /// If no interval is set, the schedule will only execute once.
193 | public IScheduledTimer Every(TimeSpan interval)
194 | {
195 | _interval = interval;
196 |
197 | if (!_timerStart.HasValue)
198 | {
199 | _timer.Interval = ProcessIntervalAndEnsureItIsGreaterThan0(_interval.Value);
200 | }
201 |
202 | return this;
203 | }
204 |
205 | ///
206 | /// Action to perform when the timer expires.
207 | ///
208 | public IScheduledTimer Do(Action action)
209 | {
210 | _action = action;
211 |
212 | return this;
213 | }
214 |
215 | ///
216 | /// Configures the timer to fire and restart its countdown without waiting for the Do action to finish.
217 | ///
218 | ///
219 | /// Setting this will start the countdown timer to the next execution immediately
220 | /// after the current execution is triggered. If the execution action takes longer than
221 | /// the timer interval, the execution task will stack and run concurrently.
222 | ///
223 | public IScheduledTimer DontWait()
224 | {
225 | _dontWait = true;
226 | return this;
227 | }
228 |
229 | private static double ProcessIntervalAndEnsureItIsGreaterThan0(TimeSpan interval)
230 | {
231 | var intervalInMilliseconds = interval.TotalMilliseconds;
232 |
233 | intervalInMilliseconds =
234 | (intervalInMilliseconds < 1)
235 | ? 1
236 | : intervalInMilliseconds;
237 |
238 | return intervalInMilliseconds;
239 | }
240 |
241 | ///
242 | /// Starts the timer.
243 | ///
244 | public IScheduledTimer Begin()
245 | {
246 | if (!_timerStart.HasValue)
247 | {
248 | StartingAt(DateTime.Now);
249 | }
250 |
251 | Status = ScheduledTimerStatus.Running;
252 |
253 | _timer.Enabled = true;
254 |
255 | return this;
256 | }
257 |
258 | ///
259 | /// Stop the timer.
260 | ///
261 | public IScheduledTimer End()
262 | {
263 | Status = ScheduledTimerStatus.Stopped;
264 | _timer.Enabled = false;
265 |
266 | return this;
267 | }
268 |
269 | ///
270 | /// Exposes the timer object for unit testing.
271 | ///
272 | public Timer TimerObject
273 | {
274 | get { return _timer; }
275 | }
276 | }
277 | }
278 |
--------------------------------------------------------------------------------
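A minimal usage sketch of the fluent ScheduledTimer API above. This is illustrative only: the start delay, interval, and console action are made-up values, and the KafkaNet.Common namespace is assumed from the folder layout. Note the TODO above about an occasional double fire on startup.

    using System;
    using KafkaNet.Common; // assumed namespace, per the Common folder

    class TimerExample
    {
        static void Main()
        {
            using (var timer = new ScheduledTimer())
            {
                timer.StartingAt(DateTime.Now.AddSeconds(5)) // first run in ~5 seconds
                     .Every(TimeSpan.FromSeconds(30))        // then every 30 seconds
                     .Do(() => Console.WriteLine("tick"))    // work to perform on each expiry
                     .Begin();                               // start the timer

                Console.ReadLine();                          // keep the process alive
                timer.End();                                 // stop scheduling
            }
        }
    }
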
/kafka-net-core/Consumer.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Concurrent;
3 | using System.Collections.Generic;
4 | using System.Linq;
5 | using System.Threading;
6 | using System.Threading.Tasks;
7 | using KafkaNet.Model;
8 | using KafkaNet.Protocol;
9 |
10 | namespace KafkaNet
11 | {
12 | ///
13 | /// Provides a basic consumer of one Topic across all partitions or over a given whitelist of partitions.
14 | ///
15 | /// TODO: provide automatic offset saving when the feature is available in 0.8.2
16 | /// https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetCommit/FetchAPI
17 | ///
18 | public class Consumer : IMetadataQueries, IDisposable
19 | {
20 | private readonly ConsumerOptions _options;
21 | private readonly BlockingCollection<Message> _fetchResponseQueue;
22 | private readonly CancellationTokenSource _disposeToken = new CancellationTokenSource();
23 | private readonly ConcurrentDictionary<int, Task> _partitionPollingIndex = new ConcurrentDictionary<int, Task>();
24 | private readonly ConcurrentDictionary<int, long> _partitionOffsetIndex = new ConcurrentDictionary<int, long>();
25 | private readonly IMetadataQueries _metadataQueries;
26 |
27 | private int _disposeCount;
28 | private int _ensureOneThread;
29 | private Topic _topic;
30 |
31 | public Consumer(ConsumerOptions options, params OffsetPosition[] positions)
32 | {
33 | _options = options;
34 | _fetchResponseQueue = new BlockingCollection<Message>(_options.ConsumerBufferSize);
35 | _metadataQueries = new MetadataQueries(_options.Router);
36 |
37 | SetOffsetPosition(positions);
38 | }
39 |
40 | ///
41 | /// Get the number of tasks created for consuming each partition.
42 | ///
43 | public int ConsumerTaskCount { get { return _partitionPollingIndex.Count; } }
44 |
45 | ///
46 | /// Returns a blocking enumerable of messages received from Kafka.
47 | ///
48 | /// Blocking enumerable of messages from Kafka.
49 | public IEnumerable<Message> Consume(CancellationToken? cancellationToken = null)
50 | {
51 | _options.Log.DebugFormat("Consumer: Beginning consumption of topic: {0}", _options.Topic);
52 | EnsurePartitionPollingThreads();
53 | return _fetchResponseQueue.GetConsumingEnumerable(cancellationToken ?? CancellationToken.None);
54 | }
55 |
56 | ///
57 | /// Force reset the offset position for a specific partition to a specific offset value.
58 | ///
59 | /// Collection of positions to reset to.
60 | public void SetOffsetPosition(params OffsetPosition[] positions)
61 | {
62 | foreach (var position in positions)
63 | {
64 | var temp = position;
65 | _partitionOffsetIndex.AddOrUpdate(position.PartitionId, i => temp.Offset, (i, l) => temp.Offset);
66 | }
67 | }
68 |
69 | ///
70 | /// Get the current running position (offset) for all consuming partitions.
71 | ///
72 | /// List of positions for each consumed partition.
73 | /// Will only return data if the consumer is actively consuming.
74 | public List<OffsetPosition> GetOffsetPosition()
75 | {
76 | return _partitionOffsetIndex.Select(x => new OffsetPosition { PartitionId = x.Key, Offset = x.Value }).ToList();
77 | }
78 |
79 | private void EnsurePartitionPollingThreads()
80 | {
81 | try
82 | {
83 | if (Interlocked.Increment(ref _ensureOneThread) == 1)
84 | {
85 | _options.Log.DebugFormat("Consumer: Refreshing partitions for topic: {0}", _options.Topic);
86 | var topic = _options.Router.GetTopicMetadata(_options.Topic);
87 | if (topic.Count <= 0) throw new ApplicationException(string.Format("Unable to get metadata for topic:{0}.", _options.Topic));
88 | _topic = topic.First();
89 |
90 | //create one thread per partition, if they are in the white list.
91 | foreach (var partition in _topic.Partitions)
92 | {
93 | var partitionId = partition.PartitionId;
94 | if (_options.PartitionWhitelist.Count == 0 || _options.PartitionWhitelist.Any(x => x == partitionId))
95 | {
96 | _partitionPollingIndex.AddOrUpdate(partitionId,
97 | i => ConsumeTopicPartitionAsync(_topic.Name, partitionId),
98 | (i, task) => task);
99 | }
100 | }
101 | }
102 | }
103 | catch (Exception ex)
104 | {
105 | _options.Log.ErrorFormat("Exception occurred trying to set up consumer for topic:{0}. Exception={1}", _options.Topic, ex);
106 | }
107 | finally
108 | {
109 | Interlocked.Decrement(ref _ensureOneThread);
110 | }
111 | }
112 |
113 | private Task ConsumeTopicPartitionAsync(string topic, int partitionId)
114 | {
115 | return Task.Run(async () =>
116 | {
117 | try
118 | {
119 | var bufferSizeHighWatermark = FetchRequest.DefaultBufferSize;
120 |
121 | _options.Log.DebugFormat("Consumer: Creating polling task for topic: {0} on partition: {1}", topic, partitionId);
122 | while (_disposeToken.IsCancellationRequested == false)
123 | {
124 | try
125 | {
126 | //get the current offset, or default to zero if not there.
127 | long offset = 0;
128 | _partitionOffsetIndex.AddOrUpdate(partitionId, i => offset, (i, currentOffset) => { offset = currentOffset; return currentOffset; });
129 |
130 | //build a fetch request for partition at offset
131 | var fetch = new Fetch
132 | {
133 | Topic = topic,
134 | PartitionId = partitionId,
135 | Offset = offset,
136 | MaxBytes = bufferSizeHighWatermark,
137 | };
138 |
139 | var fetches = new List<Fetch> { fetch };
140 |
141 | var fetchRequest = new FetchRequest
142 | {
143 | MaxWaitTime = (int)Math.Min((long)int.MaxValue, _options.MaxWaitTimeForMinimumBytes.TotalMilliseconds),
144 | MinBytes = _options.MinimumBytes,
145 | Fetches = fetches
146 | };
147 |
148 | //make request and post to queue
149 | var route = _options.Router.SelectBrokerRoute(topic, partitionId);
150 |
151 | var responses = await route.Connection.SendAsync(fetchRequest).ConfigureAwait(false);
152 |
153 | if (responses.Count > 0)
154 | {
155 | var response = responses.FirstOrDefault(); //we only asked for one response
156 |
157 | if (response != null && response.Messages.Count > 0)
158 | {
159 | HandleResponseErrors(fetch, response);
160 |
161 | foreach (var message in response.Messages)
162 | {
163 | _fetchResponseQueue.Add(message, _disposeToken.Token);
164 |
165 | if (_disposeToken.IsCancellationRequested) return;
166 | }
167 |
168 | var nextOffset = response.Messages.Max(x => x.Meta.Offset) + 1;
169 | _partitionOffsetIndex.AddOrUpdate(partitionId, i => nextOffset, (i, l) => nextOffset);
170 |
171 | // sleep is not needed if responses were received
172 | continue;
173 | }
174 | }
175 |
176 | //no message received from server; wait a while before we try another long poll
177 | Thread.Sleep(_options.BackoffInterval);
178 | }
179 | catch (BufferUnderRunException ex)
180 | {
181 | bufferSizeHighWatermark = (int)(ex.RequiredBufferSize * _options.FetchBufferMultiplier) + ex.MessageHeaderSize;
182 | _options.Log.InfoFormat("Buffer underrun. Increasing buffer size to: {0}", bufferSizeHighWatermark);
183 | }
184 | catch (OffsetOutOfRangeException ex)
185 | {
186 | //TODO this turned out really ugly. Need to fix this section.
187 | _options.Log.ErrorFormat(ex.Message);
188 | FixOffsetOutOfRangeExceptionAsync(ex.FetchRequest);
189 | }
190 | catch (InvalidMetadataException ex)
191 | {
192 | //refresh our metadata and ensure we are polling the correct partitions
193 | _options.Log.ErrorFormat(ex.Message);
194 | _options.Router.RefreshTopicMetadata(topic);
195 | EnsurePartitionPollingThreads();
196 | }
197 | catch (Exception ex)
198 | {
199 | _options.Log.ErrorFormat("Exception occurred while polling topic:{0} partition:{1}. Polling will continue. Exception={2}", topic, partitionId, ex);
200 | }
201 | }
202 | }
203 | finally
204 | {
205 | _options.Log.DebugFormat("Consumer: Disabling polling task for topic: {0} on partition: {1}", topic, partitionId);
206 | Task tempTask;
207 | _partitionPollingIndex.TryRemove(partitionId, out tempTask);
208 | }
209 | });
210 | }
211 |
212 | private void HandleResponseErrors(Fetch request, FetchResponse response)
213 | {
214 | switch ((ErrorResponseCode)response.Error)
215 | {
216 | case ErrorResponseCode.NoError:
217 | return;
218 | case ErrorResponseCode.OffsetOutOfRange:
219 | throw new OffsetOutOfRangeException("FetchResponse indicated we requested an offset that is out of range. Requested Offset:{0}", request.Offset) { FetchRequest = request };
220 | case ErrorResponseCode.BrokerNotAvailable:
221 | case ErrorResponseCode.ConsumerCoordinatorNotAvailableCode:
222 | case ErrorResponseCode.LeaderNotAvailable:
223 | case ErrorResponseCode.NotLeaderForPartition:
224 | throw new InvalidMetadataException("FetchResponse indicated we may have mismatched metadata. ErrorCode:{0}", response.Error) { ErrorCode = response.Error };
225 | default:
226 | throw new KafkaApplicationException("FetchResponse returned error condition. ErrorCode:{0}", response.Error) { ErrorCode = response.Error };
227 | }
228 | }
229 |
230 | private void FixOffsetOutOfRangeExceptionAsync(Fetch request)
231 | {
232 | _metadataQueries.GetTopicOffsetAsync(request.Topic)
233 | .ContinueWith(t =>
234 | {
235 | try
236 | {
237 | var offsets = t.Result.FirstOrDefault(x => x.PartitionId == request.PartitionId);
238 | if (offsets == null) return;
239 |
240 | if (offsets.Offsets.Min() > request.Offset)
241 | SetOffsetPosition(new OffsetPosition(request.PartitionId, offsets.Offsets.Min()));
242 |
243 | if (offsets.Offsets.Max() < request.Offset)
244 | SetOffsetPosition(new OffsetPosition(request.PartitionId, offsets.Offsets.Max()));
245 | }
246 | catch (Exception ex)
247 | {
248 | _options.Log.ErrorFormat("Failed to fix the offset out of range exception on topic:{0} partition:{1}. Polling will continue. Exception={2}",
249 | request.Topic, request.PartitionId, ex);
250 | }
251 | });
252 | }
253 |
254 | public Topic GetTopic(string topic)
255 | {
256 | return _metadataQueries.GetTopic(topic);
257 | }
258 |
259 | public Task<List<OffsetResponse>> GetTopicOffsetAsync(string topic, int maxOffsets = 2, int time = -1)
260 | {
261 | return _metadataQueries.GetTopicOffsetAsync(topic, maxOffsets, time);
262 | }
263 |
264 | public void Dispose()
265 | {
266 | if (Interlocked.Increment(ref _disposeCount) != 1) return;
267 |
268 | _options.Log.DebugFormat("Consumer: Disposing...");
269 | _disposeToken.Cancel();
270 |
271 | //wait for all threads to unwind
272 | foreach (var task in _partitionPollingIndex.Values.Where(task => task != null))
273 | {
274 | task.Wait(TimeSpan.FromSeconds(5));
275 | }
276 |
277 | using (_metadataQueries)
278 | using (_disposeToken)
279 | { }
280 | }
281 | }
282 | }
283 |
--------------------------------------------------------------------------------
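A minimal consume loop sketched from the Consumer API above. BrokerRouter is not shown in this section; the assumption that its constructor accepts a KafkaOptions instance follows the upstream kafka-net project, and the server URI and topic name are placeholders.

    using System;
    using KafkaNet;
    using KafkaNet.Model;

    class ConsumerExample
    {
        static void Main()
        {
            var options = new KafkaOptions(new Uri("http://localhost:9092"));
            using (var router = new BrokerRouter(options)) // ctor assumed, per upstream kafka-net
            using (var consumer = new Consumer(new ConsumerOptions("test-topic", router)))
            {
                // Consume() blocks and yields messages as they arrive from all partitions.
                foreach (var message in consumer.Consume())
                {
                    Console.WriteLine("Received message at offset {0}", message.Meta.Offset);
                }
            }
        }
    }
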
/kafka-net-core/Default/ConsoleLog.cs:
--------------------------------------------------------------------------------
1 | using System;
2 |
3 | namespace KafkaNet
4 | {
5 | ///
6 | /// This class simply logs all information out to the console. Useful for
7 | /// debug testing in console applications.
8 | ///
9 | public class ConsoleLog : IKafkaLog
10 | {
11 | public void DebugFormat(string format, params object[] args)
12 | {
13 | Console.WriteLine(format, args);
14 | }
15 |
16 | public void InfoFormat(string format, params object[] args)
17 | {
18 | Console.WriteLine(format, args);
19 | }
20 |
21 | public void WarnFormat(string format, params object[] args)
22 | {
23 | Console.WriteLine(format, args);
24 | }
25 |
26 | public void ErrorFormat(string format, params object[] args)
27 | {
28 | Console.WriteLine(format, args);
29 | }
30 |
31 | public void FatalFormat(string format, params object[] args)
32 | {
33 | Console.WriteLine(format, args);
34 | }
35 | }
36 | }
--------------------------------------------------------------------------------
/kafka-net-core/Default/DefaultKafkaConnectionFactory.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Linq;
3 | using System.Net;
4 | using System.Net.Sockets;
5 | using KafkaNet.Model;
6 | using KafkaNet.Protocol;
7 |
8 | namespace KafkaNet
9 | {
10 | public class DefaultKafkaConnectionFactory : IKafkaConnectionFactory
11 | {
12 | public IKafkaConnection Create(KafkaEndpoint endpoint, TimeSpan responseTimeoutMs, IKafkaLog log, TimeSpan? maximumReconnectionTimeout = null)
13 | {
14 | return new KafkaConnection(new KafkaTcpSocket(log, endpoint, maximumReconnectionTimeout), responseTimeoutMs, log);
15 | }
16 |
17 | public KafkaEndpoint Resolve(Uri kafkaAddress, IKafkaLog log)
18 | {
19 | var ipAddress = GetFirstAddress(kafkaAddress.Host, log);
20 | var ipEndpoint = new IPEndPoint(ipAddress, kafkaAddress.Port);
21 |
22 | var kafkaEndpoint = new KafkaEndpoint()
23 | {
24 | ServeUri = kafkaAddress,
25 | Endpoint = ipEndpoint
26 | };
27 |
28 | return kafkaEndpoint;
29 | }
30 |
31 |
32 | private static IPAddress GetFirstAddress(string hostname, IKafkaLog log)
33 | {
34 | try
35 | {
36 | //lookup the IP address from the provided host name
37 | var addresses = Dns.GetHostAddresses(hostname);
38 |
39 | if (addresses.Length > 0)
40 | {
41 | Array.ForEach(addresses, address => log.DebugFormat("Found address {0} for {1}", address, hostname));
42 |
43 | var selectedAddress = addresses.FirstOrDefault(item => item.AddressFamily == AddressFamily.InterNetwork) ?? addresses.First();
44 |
45 | log.DebugFormat("Using address {0} for {1}", selectedAddress, hostname);
46 |
47 | return selectedAddress;
48 | }
49 | }
50 | catch
51 | {
52 | throw new UnresolvedHostnameException("Could not resolve the following hostname: {0}", hostname);
53 | }
54 |
55 | throw new UnresolvedHostnameException("Could not resolve the following hostname: {0}", hostname);
56 | }
57 | }
58 | }
59 |
--------------------------------------------------------------------------------
/kafka-net-core/Default/DefaultPartitionSelector.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Concurrent;
3 | using System.Linq;
4 | using KafkaNet.Common;
5 | using KafkaNet.Model;
6 | using KafkaNet.Protocol;
7 |
8 | namespace KafkaNet
9 | {
10 | public class DefaultPartitionSelector : IPartitionSelector
11 | {
12 | private readonly ConcurrentDictionary<string, int> _roundRobinTracker = new ConcurrentDictionary<string, int>();
13 |
14 | public Partition Select(Topic topic, byte[] key)
15 | {
16 | if (topic == null) throw new ArgumentNullException("topic");
17 | if (topic.Partitions.Count <= 0) throw new ApplicationException(string.Format("Topic ({0}) has no partitions.", topic.Name));
18 |
19 | //route by key hash when a key is present, otherwise round robin
20 | var partitions = topic.Partitions;
21 | if (key == null)
22 | {
23 | //use round robin
24 | var paritionIndex = _roundRobinTracker.AddOrUpdate(topic.Name, p => 0, (s, i) =>
25 | {
26 | return ((i + 1) % partitions.Count);
27 | });
28 |
29 | return partitions[paritionIndex];
30 | }
31 |
32 | //use key hash
33 | var partitionId = Crc32Provider.Compute(key) % partitions.Count;
34 | var partition = partitions.FirstOrDefault(x => x.PartitionId == partitionId);
35 |
36 | if (partition == null)
37 | throw new InvalidPartitionException(string.Format("Hash function return partition id: {0}, but the available partitions are:{1}",
38 | partitionId, string.Join(",", partitions.Select(x => x.PartitionId))));
39 |
40 | return partition;
41 | }
42 | }
43 | }
--------------------------------------------------------------------------------
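A short sketch of the two selection modes implemented above: round robin when no key is supplied, CRC32 key hashing otherwise. The router argument is assumed to hold cached metadata (see the Consumer sketch), and Topic/Partition are assumed to live in KafkaNet.Protocol per the folder layout.

    using System.Linq;
    using System.Text;
    using KafkaNet;
    using KafkaNet.Protocol;

    static class SelectorExample
    {
        public static void Demonstrate(IBrokerRouter router)
        {
            var selector = new DefaultPartitionSelector();
            Topic topic = router.GetTopicMetadata("test-topic").First();

            // Null key: partitions are rotated round-robin per topic name.
            Partition next = selector.Select(topic, null);

            // Non-null key: Crc32Provider.Compute(key) % partitionCount picks a stable
            // partition, so the same key always routes to the same partition.
            Partition keyed = selector.Select(topic, Encoding.UTF8.GetBytes("user-42"));
        }
    }
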
/kafka-net-core/Default/DefaultTraceLog.cs:
--------------------------------------------------------------------------------
1 | using System.Diagnostics;
2 |
3 | namespace KafkaNet
4 | {
5 | ///
6 | /// This class simply logs all information out to the Trace log provided by Windows.
7 | /// The reason Trace is being used as the default is to remove external references from
8 | /// the base kafka-net package. A proper logging framework like log4net is recommended.
9 | ///
10 | public class DefaultTraceLog : IKafkaLog
11 | {
12 | public void DebugFormat(string format, params object[] args)
13 | {
14 | Trace.WriteLine(string.Format(format, args));
15 | }
16 |
17 | public void InfoFormat(string format, params object[] args)
18 | {
19 | Trace.WriteLine(string.Format(format, args));
20 | }
21 |
22 | public void WarnFormat(string format, params object[] args)
23 | {
24 | Trace.WriteLine(string.Format(format, args));
25 | }
26 |
27 | public void ErrorFormat(string format, params object[] args)
28 | {
29 | Trace.WriteLine(string.Format(format, args));
30 | }
31 |
32 | public void FatalFormat(string format, params object[] args)
33 | {
34 | Trace.WriteLine(string.Format(format, args));
35 | }
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/kafka-net-core/Interfaces/IBrokerRouter.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using KafkaNet.Protocol;
4 |
5 | namespace KafkaNet
6 | {
7 | public interface IBrokerRouter : IDisposable
8 | {
9 | ///
10 | /// Select a broker for a specific topic and partitionId.
11 | ///
12 | /// The topic name to select a broker for.
13 | /// The exact partition to select a broker for.
14 | /// A broker route for the given partition of the given topic.
15 | ///
16 | /// This function does not use any selector criteria. If the given partitionId does not exist an exception will be thrown.
17 | ///
18 | /// Thrown if the returned metadata for the given topic is invalid or missing.
19 | /// Thrown if the given partitionId does not exist for the given topic.
20 | /// Thrown if none of the Default Brokers can be contacted.
21 | BrokerRoute SelectBrokerRoute(string topic, int partitionId);
22 |
23 | ///
24 | /// Select a broker for a given topic using the IPartitionSelector function.
25 | ///
26 | /// The topic to retrieve a broker route for.
27 | /// The key used by the IPartitionSelector to collate to a consistent partition. Null value means key will be ignored in selection process.
28 | /// A broker route for the given topic.
29 | /// Thrown if the returned metadata for the given topic is invalid or missing.
30 | /// Thrown if none of the Default Brokers can be contacted.
31 | BrokerRoute SelectBrokerRoute(string topic, byte[] key = null);
32 |
33 | ///
34 | /// Returns Topic metadata for each topic requested.
35 | ///
36 | /// Collection of topics to request metadata for.
37 | /// List of Topics as provided by Kafka.
38 | /// The topic metadata will by default check the cache first and then request metadata from the server if it does not exist in cache.
39 | List<Topic> GetTopicMetadata(params string[] topics);
40 |
41 | ///
42 | /// Force a call to the kafka servers to refresh metadata for the given topics.
43 | ///
44 | /// List of topics to update metadata for.
45 | ///
46 | /// This method will initiate a call to the kafka servers and retrieve metadata for all given topics, updating the broker cache in the process.
47 | ///
48 | void RefreshTopicMetadata(params string[] topics);
49 | }
50 | }
--------------------------------------------------------------------------------
/kafka-net-core/Interfaces/IKafkaConnection.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Threading.Tasks;
4 | using KafkaNet.Model;
5 |
6 | namespace KafkaNet
7 | {
8 | public interface IKafkaConnection : IDisposable
9 | {
10 | ///
11 | /// The unique endpoint location of this connection.
12 | ///
13 | KafkaEndpoint Endpoint { get; }
14 |
15 | ///
16 | /// Value indicating the read polling thread is still active.
17 | ///
18 | bool ReadPolling { get; }
19 |
20 | ///
21 | /// Send raw payload data up to the connected endpoint.
22 | ///
23 | /// The raw data to send to the connected endpoint.
24 | /// Task representing the future success or failure of query.
25 | Task SendAsync(KafkaDataPayload payload);
26 |
27 | ///
28 | /// Send a specific IKafkaRequest to the connected endpoint.
29 | ///
30 | /// The type of the KafkaResponse expected from the request being sent.
31 | /// The KafkaRequest to send to the connected endpoint.
32 | /// Task representing the future responses from the sent request.
33 | Task<List<T>> SendAsync<T>(IKafkaRequest<T> request);
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
/kafka-net-core/Interfaces/IKafkaConnectionFactory.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet.Model;
2 | using System;
3 |
4 | namespace KafkaNet
5 | {
6 | public interface IKafkaConnectionFactory
7 | {
8 | ///
9 | /// Create a new KafkaConnection.
10 | ///
11 | /// The specific KafkaEndpoint of the server to connect to.
12 | /// The amount of time to wait for a message response to be received after sending a message to Kafka
13 | /// Logging interface used to record any log messages created by the connection.
14 | /// The maximum time to wait when backing off on reconnection attempts.
15 | /// IKafkaConnection initialized to connect to the given endpoint.
16 | IKafkaConnection Create(KafkaEndpoint endpoint, TimeSpan responseTimeoutMs, IKafkaLog log, TimeSpan? maximumReconnectionTimeout = null);
17 |
18 | ///
19 | /// Resolves a generic Uri into a uniquely identifiable KafkaEndpoint.
20 | ///
21 | /// The address to the kafka server to resolve.
22 | /// Logging interface used to record any log messages created by the Resolving process.
23 | /// KafkaEndpoint with resolved IP and Address.
24 | KafkaEndpoint Resolve(Uri kafkaAddress, IKafkaLog log);
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/kafka-net-core/Interfaces/IKafkaLog.cs:
--------------------------------------------------------------------------------
1 | namespace KafkaNet
2 | {
3 | public interface IKafkaLog
4 | {
5 | ///
6 | /// Record debug information using the String.Format syntax.
7 | ///
8 | /// Format string template. e.g. "Exception = {0}"
9 | /// Arguments which will fill the template string in order of appearance.
10 | void DebugFormat(string format, params object[] args);
11 | ///
12 | /// Record info information using the String.Format syntax.
13 | ///
14 | /// Format string template. e.g. "Exception = {0}"
15 | /// Arguments which will fill the template string in order of appearance.
16 | void InfoFormat(string format, params object[] args);
17 | ///
18 | /// Record warning information using the String.Format syntax.
19 | ///
20 | /// Format string template. e.g. "Exception = {0}"
21 | /// Arguments which will fill the template string in order of appearance.
22 | void WarnFormat(string format, params object[] args);
23 | ///
24 | /// Record error information using the String.Format syntax.
25 | ///
26 | /// Format string template. e.g. "Exception = {0}"
27 | /// Arguments which will fill the template string in order of appearance.
28 | void ErrorFormat(string format, params object[] args);
29 | ///
30 | /// Record fatal information using the String.Format syntax.
31 | ///
32 | /// Format string template. e.g. "Exception = {0}"
33 | /// Arguments which will fill the template string in order of appearance.
34 | void FatalFormat(string format, params object[] args);
35 | }
36 | }
37 |
--------------------------------------------------------------------------------
/kafka-net-core/Interfaces/IKafkaRequest.cs:
--------------------------------------------------------------------------------
1 | using System.Collections.Generic;
2 | using KafkaNet.Protocol;
3 |
4 | namespace KafkaNet
5 | {
6 | ///
7 | /// KafkaRequest represents a Kafka request message as an object which can Encode itself into the appropriate
8 | /// binary request and Decode any responses to that request.
9 | ///
10 | /// The type of the KafkaResponse expected back from the request.
11 | public interface IKafkaRequest<T>
12 | {
13 | ///
14 | /// Indicates this request should wait for a response from the broker
15 | ///
16 | bool ExpectResponse { get; }
17 | ///
18 | /// Descriptive name used to identify the source of this request.
19 | ///
20 | string ClientId { get; set; }
21 | ///
22 | /// Id which will be echoed back by Kafka to correlate responses to this request. Usually automatically assigned by driver.
23 | ///
24 | int CorrelationId { get; set; }
25 | ///
26 | /// Enum identifying the specific type of request message being represented.
27 | ///
28 | ApiKeyRequestType ApiKey { get; }
29 | ///
30 | /// Encode this request into the Kafka wire protocol.
31 | ///
32 | /// Byte[] representing the binary wire protocol of this request.
33 | KafkaDataPayload Encode();
34 | ///
35 | /// Decode a response payload from Kafka into an enumerable of T responses.
36 | ///
37 | /// Buffer data returned by Kafka servers.
38 | ///
39 | IEnumerable<T> Decode(byte[] payload);
40 | }
41 | }
--------------------------------------------------------------------------------
/kafka-net-core/Interfaces/IKafkaTcpSocket.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Threading;
3 | using System.Threading.Tasks;
4 | using KafkaNet.Model;
5 |
6 | namespace KafkaNet
7 | {
8 | public interface IKafkaTcpSocket : IDisposable
9 | {
10 | ///
11 | /// The IP endpoint to the server.
12 | ///
13 | KafkaEndpoint Endpoint { get; }
14 |
15 | ///
16 | /// Read a certain byte array size, returning only when all bytes have been received.
17 | ///
18 | /// The size in bytes to receive from server.
19 | /// Returns a byte[] array with the size of readSize.
20 | Task<byte[]> ReadAsync(int readSize);
21 |
22 | ///
23 | /// Read a certain byte array size, returning only when all bytes have been received.
24 | ///
25 | /// The size in bytes to receive from server.
26 | /// A cancellation token which will cancel the request.
27 | /// Returns a byte[] array with the size of readSize.
28 | Task<byte[]> ReadAsync(int readSize, CancellationToken cancellationToken);
29 |
30 | ///
31 | /// Convenience function to write full buffer data to the server.
32 | ///
33 | /// The buffer data to send.
34 | /// Returns a Task handle to the write operation with the size of written bytes.
35 | Task WriteAsync(KafkaDataPayload payload);
36 |
37 | ///
38 | /// Write the buffer data to the server.
39 | ///
40 | /// The buffer data to send.
41 | /// A cancellation token which will cancel the request.
42 | /// Returns a Task handle to the write operation with the size of written bytes.
43 | Task WriteAsync(KafkaDataPayload payload, CancellationToken cancellationToken);
44 | }
45 | }
46 |
--------------------------------------------------------------------------------
/kafka-net-core/Interfaces/IMetadataQueries.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet.Protocol;
2 | using System;
3 | using System.Collections.Generic;
4 | using System.Threading.Tasks;
5 | namespace KafkaNet
6 | {
7 | ///
8 | /// Contains common metadata query commands that are used by both a consumer and producer.
9 | ///
10 | interface IMetadataQueries : IDisposable
11 | {
12 | ///
13 | /// Get metadata on the given topic.
14 | ///
15 | /// The metadata on the requested topic.
16 | /// Topic object containing the metadata on the requested topic.
17 | Topic GetTopic(string topic);
18 |
19 | ///
20 | /// Get offsets for each partition from a given topic.
21 | ///
22 | /// Name of the topic to get offset information from.
23 | ///
24 | ///
25 | ///
26 | Task<List<OffsetResponse>> GetTopicOffsetAsync(string topic, int maxOffsets = 2, int time = -1);
27 | }
28 | }
29 |
--------------------------------------------------------------------------------
/kafka-net-core/Interfaces/IPartitionSelector.cs:
--------------------------------------------------------------------------------
1 | using KafkaNet.Protocol;
2 |
3 | namespace KafkaNet
4 | {
5 | public interface IPartitionSelector
6 | {
7 | ///
8 | /// Select the appropriate partition to post a message to, based on topic and key data.
9 | ///
10 | /// The topic to which the message will be sent.
11 | /// The data used to consistently route a message to a particular partition. Value can be null.
12 | /// The partition to send the message to.
13 | Partition Select(Topic topic, byte[] key);
14 | }
15 | }
--------------------------------------------------------------------------------
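A custom selector can be plugged in through KafkaOptions.PartitionSelector (see KafkaOptions below). The sketch here always routes to the lowest-numbered partition; it is illustrative only, not a recommended strategy.

    using System.Linq;
    using KafkaNet;
    using KafkaNet.Protocol;

    // Illustrative only: pin every message to the lowest-numbered partition.
    public class FirstPartitionSelector : IPartitionSelector
    {
        public Partition Select(Topic topic, byte[] key)
        {
            return topic.Partitions.OrderBy(p => p.PartitionId).First();
        }
    }

    // Wire-up: options.PartitionSelector = new FirstPartitionSelector();
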
/kafka-net-core/KafkaConnection.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Concurrent;
3 | using System.Collections.Generic;
4 | using System.Linq;
5 | using System.Threading;
6 | using System.Threading.Tasks;
7 | using KafkaNet.Common;
8 | using KafkaNet.Model;
9 | using KafkaNet.Protocol;
10 |
11 | namespace KafkaNet
12 | {
13 | ///
14 | /// KafkaConnection represents the lowest level TCP stream connection to a Kafka broker.
15 | /// The Send and Receive are separated into two disconnected paths and must be combined outside
16 | /// this class by the correlation ID contained within the returned message.
17 | ///
18 | /// The SendAsync function will return a Task and complete once the data has been sent to the outbound stream.
19 | /// The Read response is handled by a single thread polling the stream for data and completing
20 | /// the matching pending request, by correlation ID, when a response is received.
21 | ///
22 | public class KafkaConnection : IKafkaConnection
23 | {
24 | private const int DefaultResponseTimeoutMs = 60000;
25 |
26 | private readonly ConcurrentDictionary<int, AsyncRequestItem> _requestIndex = new ConcurrentDictionary<int, AsyncRequestItem>();
27 | private readonly TimeSpan _responseTimeoutMS;
28 | private readonly IKafkaLog _log;
29 | private readonly IKafkaTcpSocket _client;
30 | private readonly CancellationTokenSource _disposeToken = new CancellationTokenSource();
31 |
32 | private int _disposeCount = 0;
33 | private Task _connectionReadPollingTask = null;
34 | private int _ensureOneActiveReader;
35 | private int _correlationIdSeed;
36 |
37 | ///
38 | /// Initializes a new instance of the KafkaConnection class.
39 | ///
40 | /// Logging interface used to record any log messages created by the connection.
41 | /// The kafka socket initialized to the kafka server.
42 | /// The amount of time to wait for a message response to be received after sending a message to Kafka. Defaults to 60s.
43 | public KafkaConnection(IKafkaTcpSocket client, TimeSpan? responseTimeoutMs = null, IKafkaLog log = null)
44 | {
45 | _client = client;
46 | _log = log ?? new DefaultTraceLog();
47 | _responseTimeoutMS = responseTimeoutMs ?? TimeSpan.FromMilliseconds(DefaultResponseTimeoutMs);
48 |
49 | StartReadStreamPoller();
50 | }
51 |
52 | ///
53 | /// Indicates a thread is polling the stream for data to read.
54 | ///
55 | public bool ReadPolling
56 | {
57 | get { return _ensureOneActiveReader >= 1; }
58 | }
59 |
60 | ///
61 | /// Provides the unique ip/port endpoint for this connection
62 | ///
63 | public KafkaEndpoint Endpoint { get { return _client.Endpoint; } }
64 |
65 | ///
66 | /// Send raw byte[] payload to the kafka server with a task indicating upload is complete.
67 | ///
68 | /// kafka protocol formatted byte[] payload
69 | /// Task which signals the completion of the upload of data to the server.
70 | public Task SendAsync(KafkaDataPayload payload)
71 | {
72 | return _client.WriteAsync(payload);
73 | }
74 |
75 | ///
76 | /// Send raw byte[] payload to the kafka server with a task indicating upload is complete.
77 | ///
78 | /// kafka protocol formatted byte[] payload
79 | /// Cancellation token used to cancel the transfer.
80 | /// Task which signals the completion of the upload of data to the server.
81 | public Task SendAsync(KafkaDataPayload payload, CancellationToken token)
82 | {
83 | return _client.WriteAsync(payload, token);
84 | }
85 |
86 |
87 | ///
88 | /// Send kafka payload to server and receive a task event when response is received.
89 | ///
90 | /// A Kafka response object return by decode function.
91 | /// The IKafkaRequest to send to the kafka servers.
92 | ///
93 | public async Task<List<T>> SendAsync<T>(IKafkaRequest<T> request)
94 | {
95 | //assign unique correlationId
96 | request.CorrelationId = NextCorrelationId();
97 |
98 | //if response is expected, register a receive data task and send request
99 | if (request.ExpectResponse)
100 | {
101 | using (var asyncRequest = new AsyncRequestItem(request.CorrelationId))
102 | {
103 |
104 | try
105 | {
106 | AddAsyncRequestItemToResponseQueue(asyncRequest);
107 | await _client.WriteAsync(request.Encode())
108 | .ContinueWith(t => asyncRequest.MarkRequestAsSent(t.Exception, _responseTimeoutMS, TriggerMessageTimeout))
109 | .ConfigureAwait(false);
110 | }
111 | catch (OperationCanceledException)
112 | {
113 | TriggerMessageTimeout(asyncRequest);
114 | }
115 |
116 | var response = await asyncRequest.ReceiveTask.Task.ConfigureAwait(false);
117 |
118 | return request.Decode(response).ToList();
119 | }
120 | }
121 |
122 |
123 | //no response needed, just send
124 | await _client.WriteAsync(request.Encode()).ConfigureAwait(false);
125 | //TODO should this return a response of success for request?
126 | return new List<T>();
127 | }
128 |
129 | #region Equals Override...
130 | public override bool Equals(object obj)
131 | {
132 | if (ReferenceEquals(null, obj)) return false;
133 | if (ReferenceEquals(this, obj)) return true;
134 | if (obj.GetType() != this.GetType()) return false;
135 | return Equals((KafkaConnection)obj);
136 | }
137 |
138 | protected bool Equals(KafkaConnection other)
139 | {
140 | return Equals(_client.Endpoint, other.Endpoint);
141 | }
142 |
143 | public override int GetHashCode()
144 | {
145 | return (_client.Endpoint != null ? _client.Endpoint.GetHashCode() : 0);
146 | }
147 | #endregion
148 |
149 | private void StartReadStreamPoller()
150 | {
151 | //This thread will poll the receive stream for data, parse a message out
152 | //and correlate the message payload to its pending request
153 | _connectionReadPollingTask = Task.Run(async () =>
154 | {
155 | try
156 | {
157 | //only allow one reader to execute, dump out all other requests
158 | if (Interlocked.Increment(ref _ensureOneActiveReader) != 1) return;
159 |
160 | while (_disposeToken.IsCancellationRequested == false)
161 | {
162 | try
163 | {
164 | _log.DebugFormat("Awaiting message from: {0}", _client.Endpoint);
165 | var messageSizeResult = await _client.ReadAsync(4, _disposeToken.Token).ConfigureAwait(false);
166 | var messageSize = messageSizeResult.ToInt32();
167 |
168 | _log.DebugFormat("Received message of size: {0} From: {1}", messageSize, _client.Endpoint);
169 | var message = await _client.ReadAsync(messageSize, _disposeToken.Token).ConfigureAwait(false);
170 |
171 | CorrelatePayloadToRequest(message);
172 | }
173 | catch (Exception ex)
174 | {
175 | //don't record the exception if we are disposing
176 | if (_disposeToken.IsCancellationRequested == false)
177 | {
178 | //TODO being in sync with the byte order on read is important. What happens if this exception causes us to be out of sync?
179 | //record exception and continue to scan for data.
180 | _log.ErrorFormat("Exception occurred in polling read thread. Exception={0}", ex);
181 | }
182 | }
183 | }
184 | }
185 | finally
186 | {
187 | Interlocked.Decrement(ref _ensureOneActiveReader);
188 | _log.DebugFormat("Closed down connection to: {0}", _client.Endpoint);
189 | }
190 | });
191 | }
192 |
193 | private void CorrelatePayloadToRequest(byte[] payload)
194 | {
195 | var correlationId = payload.Take(4).ToArray().ToInt32();
196 | AsyncRequestItem asyncRequest;
197 | if (_requestIndex.TryRemove(correlationId, out asyncRequest))
198 | {
199 | asyncRequest.ReceiveTask.SetResult(payload);
200 | }
201 | else
202 | {
203 | _log.WarnFormat("Message response received with correlationId={0}, but did not exist in the request queue.", correlationId);
204 | }
205 | }
206 |
207 | private int NextCorrelationId()
208 | {
209 | var id = Interlocked.Increment(ref _correlationIdSeed);
210 | if (id > int.MaxValue - 100) //somewhere close to max reset.
211 | {
212 | Interlocked.Exchange(ref _correlationIdSeed, 0);
213 | }
214 | return id;
215 | }
216 |
217 | private void AddAsyncRequestItemToResponseQueue(AsyncRequestItem requestItem)
218 | {
219 | if (requestItem == null) return;
220 | if (_requestIndex.TryAdd(requestItem.CorrelationId, requestItem) == false)
221 | throw new ApplicationException("Failed to register request for async response.");
222 | }
223 |
224 | private void TriggerMessageTimeout(AsyncRequestItem asyncRequestItem)
225 | {
226 | if (asyncRequestItem == null) return;
227 |
228 | AsyncRequestItem request;
229 | _requestIndex.TryRemove(asyncRequestItem.CorrelationId, out request); //just remove it from the index
230 |
231 | if (_disposeToken.IsCancellationRequested)
232 | {
233 | asyncRequestItem.ReceiveTask.TrySetException(
234 | new ObjectDisposedException("The object is being disposed and the connection is closing."));
235 | }
236 | else
237 | {
238 | asyncRequestItem.ReceiveTask.TrySetException(new ResponseTimeoutException(
239 | string.Format("Timeout Expired. Client failed to receive a response from server after waiting {0}ms.",
240 | _responseTimeoutMS)));
241 | }
242 | }
243 |
244 | public void Dispose()
245 | {
246 | //skip multiple calls to dispose
247 | if (Interlocked.Increment(ref _disposeCount) != 1) return;
248 |
249 | _disposeToken.Cancel();
250 |
251 | if (_connectionReadPollingTask != null) _connectionReadPollingTask.Wait(TimeSpan.FromSeconds(1));
252 |
253 | using (_disposeToken)
254 | using (_client)
255 | {
256 |
257 | }
258 | }
259 |
260 | #region Class AsyncRequestItem...
261 | class AsyncRequestItem : IDisposable
262 | {
263 | private readonly CancellationTokenSource _cancellationTokenSource = new CancellationTokenSource();
264 |
265 | public AsyncRequestItem(int correlationId)
266 | {
267 | CorrelationId = correlationId;
268 | ReceiveTask = new TaskCompletionSource<byte[]>();
269 | }
270 |
271 | public int CorrelationId { get; private set; }
272 | public TaskCompletionSource<byte[]> ReceiveTask { get; private set; }
273 |
274 | public void MarkRequestAsSent(Exception ex, TimeSpan timeout, Action timeoutFunction)
275 | {
276 | if (ex != null)
277 | {
278 | ReceiveTask.TrySetException(ex);
279 | throw ex;
280 | }
281 |
282 | _cancellationTokenSource.CancelAfter(timeout);
283 | _cancellationTokenSource.Token.Register(() => timeoutFunction(this));
284 | }
285 |
286 |
287 | public void Dispose()
288 | {
289 | using (_cancellationTokenSource)
290 | {
291 |
292 | }
293 | }
294 | }
295 | #endregion
296 | }
297 |
298 |
299 | }
300 |
--------------------------------------------------------------------------------
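A sketch of one request/response round trip over a single connection, built with the DefaultKafkaConnectionFactory shown earlier. The URI is a placeholder, and MetadataRequest is assumed to implement IKafkaRequest<MetadataResponse> so that SendAsync can decode the reply.

    using System;
    using System.Collections.Generic;
    using System.Threading.Tasks;
    using KafkaNet;
    using KafkaNet.Protocol;

    class ConnectionExample
    {
        static async Task Main()
        {
            var log = new ConsoleLog();
            var factory = new DefaultKafkaConnectionFactory();
            var endpoint = factory.Resolve(new Uri("http://localhost:9092"), log);

            using (var connection = factory.Create(endpoint, TimeSpan.FromSeconds(60), log))
            {
                // SendAsync assigns a correlation id, writes the encoded request to the
                // socket, and completes when the read poller sees a response with that id.
                var responses = await connection.SendAsync(
                    new MetadataRequest { Topics = new List<string> { "test-topic" } });

                Console.WriteLine("Received {0} metadata response(s).", responses.Count);
            }
        }
    }
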
/kafka-net-core/KafkaMetadataProvider.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Linq;
4 | using System.Threading;
5 | using KafkaNet.Protocol;
6 |
7 | namespace KafkaNet
8 | {
9 | ///
10 | /// This provider blocks while it attempts to get the metadata configuration of the Kafka servers. If any retry errors occur it will
11 | /// continue to block the downstream call and repeatedly query Kafka until the retry errors subside. The repeat calls happen in
12 | /// a backoff manner, with each subsequent call waiting longer before a requery.
13 | ///
14 | /// Error Codes:
15 | /// LeaderNotAvailable = 5
16 | /// NotLeaderForPartition = 6
17 | /// ConsumerCoordinatorNotAvailableCode = 15
18 | /// BrokerId = -1
19 | ///
20 | /// Documentation:
21 | /// https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-MetadataResponse
22 | ///
23 | public class KafkaMetadataProvider : IDisposable
24 | {
25 | private const int BackoffMilliseconds = 100;
26 |
27 | private readonly IKafkaLog _log;
28 |
29 | private bool _interrupted;
30 |
31 | public KafkaMetadataProvider(IKafkaLog log)
32 | {
33 | _log = log;
34 | }
35 |
36 | ///
37 | /// Given a collection of server connections, query for the topic metadata.
38 | ///
39 | /// The server connections to query. Will cycle through the collection, starting at zero until a response is received.
40 | /// The collection of topics to get metadata for.
41 | /// MetadataResponse validated to be complete.
42 | public MetadataResponse Get(IKafkaConnection[] connections, IEnumerable<string> topics)
43 | {
44 | var request = new MetadataRequest { Topics = topics.ToList() };
45 | if (request.Topics.Count <= 0) return null;
46 |
47 | var performRetry = false;
48 | var retryAttempt = 0;
49 | MetadataResponse metadataResponse = null;
50 |
51 | do
52 | {
53 | performRetry = false;
54 | metadataResponse = GetMetadataResponse(connections, request);
55 | if (metadataResponse == null) return null;
56 |
57 | foreach (var validation in ValidateResponse(metadataResponse))
58 | {
59 | switch (validation.Status)
60 | {
61 | case ValidationResult.Retry:
62 | performRetry = true;
63 | _log.WarnFormat(validation.Message);
64 | break;
65 | case ValidationResult.Error:
66 | throw validation.Exception;
67 | }
68 | }
69 |
70 | BackoffOnRetry(++retryAttempt, performRetry);
71 |
72 | } while (_interrupted == false && performRetry);
73 |
74 | return metadataResponse;
75 | }
76 |
77 | private void BackoffOnRetry(int retryAttempt, bool performRetry)
78 | {
79 | if (performRetry && retryAttempt > 0)
80 | {
81 | var backoff = retryAttempt * retryAttempt * BackoffMilliseconds;
82 | _log.WarnFormat("Backing off metadata request retry. Waiting for {0}ms.", backoff);
83 | Thread.Sleep(TimeSpan.FromMilliseconds(backoff));
84 | }
85 | }
86 |
87 | private MetadataResponse GetMetadataResponse(IKafkaConnection[] connections, MetadataRequest request)
88 | {
89 | //try each default broker until we find one that is available
90 | foreach (var conn in connections)
91 | {
92 | try
93 | {
94 | //TODO remove blocking result here!
95 | var response = conn.SendAsync(request).Result;
96 | if (response != null && response.Count > 0)
97 | {
98 | return response.FirstOrDefault();
99 | }
100 | }
101 | catch (Exception ex)
102 | {
103 | _log.WarnFormat("Failed to contact Kafka server={0}. Trying next default server. Exception={1}", conn.Endpoint, ex);
104 | }
105 | }
106 |
107 | throw new ServerUnreachableException(
108 | "Unable to query for metadata from any of the default Kafka servers. At least one provided server must be available. Server list: {0}",
109 | string.Join(", ", connections.Select(x => x.ToString())));
110 | }
111 |
112 | private IEnumerable<MetadataValidationResult> ValidateResponse(MetadataResponse metadata)
113 | {
114 | foreach (var broker in metadata.Brokers)
115 | {
116 | yield return ValidateBroker(broker);
117 | }
118 |
119 | foreach (var topic in metadata.Topics)
120 | {
121 | yield return ValidateTopic(topic);
122 | }
123 | }
124 |
125 | private MetadataValidationResult ValidateBroker(Broker broker)
126 | {
127 | if (broker.BrokerId == -1)
128 | {
129 | return new MetadataValidationResult { Status = ValidationResult.Retry, ErrorCode = ErrorResponseCode.Unknown };
130 | }
131 |
132 | if (string.IsNullOrEmpty(broker.Host))
133 | {
134 | return new MetadataValidationResult
135 | {
136 | Status = ValidationResult.Error,
137 | Exception = new InvalidTopicMetadataException(ErrorResponseCode.NoError, "Broker missing host information.")
138 | };
139 | }
140 |
141 | if (broker.Port <= 0)
142 | {
143 | return new MetadataValidationResult
144 | {
145 | Status = ValidationResult.Error,
146 | Exception = new InvalidTopicMetadataException(ErrorResponseCode.NoError, "Broker missing port information.")
147 | };
148 | }
149 |
150 | return new MetadataValidationResult();
151 | }
152 |
153 | private MetadataValidationResult ValidateTopic(Topic topic)
154 | {
155 | try
156 | {
157 | var errorCode = (ErrorResponseCode)topic.ErrorCode;
158 |
159 | if (errorCode == ErrorResponseCode.NoError) return new MetadataValidationResult();
160 |
161 | switch (errorCode)
162 | {
163 | case ErrorResponseCode.LeaderNotAvailable:
164 | case ErrorResponseCode.OffsetsLoadInProgressCode:
165 | case ErrorResponseCode.ConsumerCoordinatorNotAvailableCode:
166 | return new MetadataValidationResult
167 | {
168 | Status = ValidationResult.Retry,
169 | ErrorCode = errorCode,
170 | Message = string.Format("Topic:{0} returned error code of {1}. Retrying.", topic.Name, errorCode)
171 | };
172 | }
173 |
174 | return new MetadataValidationResult
175 | {
176 | Status = ValidationResult.Error,
177 | ErrorCode = errorCode,
178 | Exception = new InvalidTopicMetadataException(errorCode, "Topic:{0} returned an error of {1}.", topic.Name, errorCode)
179 | };
180 | }
181 | catch
182 | {
183 | return new MetadataValidationResult
184 | {
185 | Status = ValidationResult.Error,
186 | ErrorCode = ErrorResponseCode.Unknown,
187 | Exception = new InvalidTopicMetadataException(ErrorResponseCode.Unknown, "Unknown error code returned in metadata response. ErrorCode: {0}", topic.ErrorCode)
188 | };
189 | }
190 | }
191 |
192 | public void Dispose()
193 | {
194 | _interrupted = true;
195 | }
196 | }
197 |
198 | public enum ValidationResult { Valid, Error, Retry }
199 | public class MetadataValidationResult
200 | {
201 | public ValidationResult Status { get; set; }
202 | public string Message { get; set; }
203 | public ErrorResponseCode ErrorCode { get; set; }
204 | public Exception Exception { get; set; }
205 |
206 | public MetadataValidationResult()
207 | {
208 | ErrorCode = ErrorResponseCode.NoError;
209 | Status = ValidationResult.Valid;
210 | Message = "";
211 | }
212 | }
213 | }
214 |
--------------------------------------------------------------------------------
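The retry wait computed in BackoffOnRetry above grows quadratically: the attempt number squared, times the 100 ms base. Restated in isolation, with the first few waits worked out:

    using System;

    static class BackoffMath
    {
        // attempt 1 -> 100 ms, attempt 2 -> 400 ms, attempt 3 -> 900 ms, attempt 4 -> 1.6 s.
        public static TimeSpan Backoff(int retryAttempt) =>
            TimeSpan.FromMilliseconds(retryAttempt * retryAttempt * 100);
    }
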
/kafka-net-core/MetadataQueries.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Linq;
4 | using System.Threading.Tasks;
5 | using KafkaNet.Protocol;
6 |
7 | namespace KafkaNet
8 | {
9 | ///
10 | /// This class provides a set of common queries that are useful for both the Consumer and Producer classes.
11 | ///
12 | public class MetadataQueries : IMetadataQueries
13 | {
14 | private readonly IBrokerRouter _brokerRouter;
15 |
16 | public MetadataQueries(IBrokerRouter brokerRouter)
17 | {
18 | _brokerRouter = brokerRouter;
19 | }
20 |
21 | ///
22 | /// Get offsets for each partition from a given topic.
23 | ///
24 | /// Name of the topic to get offset information from.
25 | ///
26 | ///
27 | ///
28 | public async Task<List<OffsetResponse>> GetTopicOffsetAsync(string topic, int maxOffsets = 2, int time = -1)
29 | {
30 | var topicMetadata = GetTopic(topic);
31 |
32 | //send the offset request to each partition leader
33 | var sendRequests = topicMetadata.Partitions
34 | .GroupBy(x => x.PartitionId)
35 | .Select(p =>
36 | {
37 | var route = _brokerRouter.SelectBrokerRoute(topic, p.Key);
38 | var request = new OffsetRequest
39 | {
40 | Offsets = new List<Offset>
41 | {
42 | new Offset
43 | {
44 | Topic = topic,
45 | PartitionId = p.Key,
46 | MaxOffsets = maxOffsets,
47 | Time = time
48 | }
49 | }
50 | };
51 |
52 | return route.Connection.SendAsync(request);
53 | }).ToArray();
54 |
55 | await Task.WhenAll(sendRequests).ConfigureAwait(false);
56 | return sendRequests.SelectMany(x => x.Result).ToList();
57 | }
58 |
59 | ///
60 | /// Get metadata on the given topic.
61 | ///
62 | /// The metadata on the requested topic.
63 | /// Topic object containing the metadata on the requested topic.
64 | public Topic GetTopic(string topic)
65 | {
66 | var response = _brokerRouter.GetTopicMetadata(topic);
67 |
68 | if (response.Count <= 0) throw new InvalidTopicMetadataException(ErrorResponseCode.NoError, "No metadata could be found for topic: {0}", topic);
69 |
70 | return response.First();
71 | }
72 |
73 | public void Dispose()
74 | {
75 | using (_brokerRouter) { }
76 | }
77 | }
78 | }
79 |
--------------------------------------------------------------------------------
/kafka-net-core/Model/BrokerRoute.cs:
--------------------------------------------------------------------------------
1 | namespace KafkaNet
2 | {
3 | public class BrokerRoute
4 | {
5 | public string Topic { get; set; }
6 | public int PartitionId { get; set; }
7 | public IKafkaConnection Connection { get; set; }
8 | public override string ToString()
9 | {
10 | return string.Format("{0} Topic:{1} PartitionId:{2}", Connection.Endpoint.ServeUri, Topic, PartitionId);
11 | }
12 |
13 | #region Equals Override...
14 | protected bool Equals(BrokerRoute other)
15 | {
16 | return string.Equals(Topic, other.Topic) && PartitionId == other.PartitionId;
17 | }
18 |
19 | public override int GetHashCode()
20 | {
21 | unchecked
22 | {
23 | return ((Topic != null ? Topic.GetHashCode() : 0) * 397) ^ PartitionId;
24 | }
25 | }
26 |
27 | public override bool Equals(object obj)
28 | {
29 | if (ReferenceEquals(null, obj)) return false;
30 | if (ReferenceEquals(this, obj)) return true;
31 | if (obj.GetType() != this.GetType()) return false;
32 | return Equals((BrokerRoute)obj);
33 | }
34 | #endregion
35 | }
36 | }
--------------------------------------------------------------------------------
/kafka-net-core/Model/ConsumerOptions.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using KafkaNet.Protocol;
4 |
5 | namespace KafkaNet.Model
6 | {
7 | public class ConsumerOptions
8 | {
9 | private const int DefaultMaxConsumerBufferSize = 50;
10 | private const int DefaultBackoffIntervalMS = 1000;
11 | private const double DefaulFetchBufferMultiplier = 1.5;
12 |
13 | ///
14 | /// The topic to consume messages from.
15 | ///
16 | public string Topic { get; set; }
17 | ///
18 | /// Whitelist of partitions to consume from. Empty list indicates all partitions.
19 | ///
20 | public List<int> PartitionWhitelist { get; set; }
21 | ///
22 | /// Log object to record operational messages.
23 | ///
24 | public IKafkaLog Log { get; set; }
25 | ///
26 | /// The broker router used to provide connection to each partition server.
27 | ///
28 | public IBrokerRouter Router { get; set; }
29 | ///
30 | /// The time in milliseconds between queries to look for any new partitions being created.
31 | ///
32 | public int TopicPartitionQueryTimeMs { get; set; }
33 | ///
34 | /// The size of the internal buffer queue which stores messages from Kafka.
35 | ///
36 | public int ConsumerBufferSize { get; set; }
37 | ///
38 | /// The interval for the consumer to sleep before trying to fetch the next message if the previous fetch received no messages.
39 | ///
40 | public TimeSpan BackoffInterval { get; set; }
41 | ///
42 | /// The max wait time is the maximum amount of time in milliseconds to block waiting if insufficient data is available at the time the request is issued.
43 | ///
44 | public TimeSpan MaxWaitTimeForMinimumBytes { get; set; }
45 | ///
46 | /// This is the minimum number of bytes of messages that must be available to give a response. If the client sets this to 0 the server will always respond immediately,
47 | /// however if there is no new data since their last request they will just get back empty message sets. If this is set to 1, the server will respond as soon as at least
48 | /// one partition has at least 1 byte of data or the specified timeout occurs. By setting higher values in combination with the timeout the consumer can tune for throughput
49 | /// and trade a little additional latency for reading only large chunks of data (e.g. setting MaxWaitTime to 100 ms and setting MinBytes to 64k would allow the server to wait
50 | /// up to 100ms to try to accumulate 64k of data before responding).
51 | ///
52 | public int MinimumBytes { get; set; }
53 |
54 | ///
55 | /// In the event of a buffer under run, this multiplier will allow padding the new buffer size.
56 | ///
57 | public double FetchBufferMultiplier { get; set; }
58 |
59 | public ConsumerOptions(string topic, IBrokerRouter router)
60 | {
61 | Topic = topic;
62 | Router = router;
63 | PartitionWhitelist = new List<int>();
64 | Log = new DefaultTraceLog();
65 | TopicPartitionQueryTimeMs = (int)TimeSpan.FromMinutes(15).TotalMilliseconds;
66 | ConsumerBufferSize = DefaultMaxConsumerBufferSize;
67 | BackoffInterval = TimeSpan.FromMilliseconds(DefaultBackoffIntervalMS);
68 | FetchBufferMultiplier = DefaulFetchBufferMultiplier;
69 | MaxWaitTimeForMinimumBytes = TimeSpan.FromMilliseconds(FetchRequest.DefaultMaxBlockingWaitTime);
70 | MinimumBytes = FetchRequest.DefaultMinBlockingByteBufferSize;
71 | }
72 | }
73 | }
--------------------------------------------------------------------------------
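A sketch of tuning the long-poll trade-off described in the MinimumBytes remarks above; 64 KB and 100 ms are the example values from that comment, and the router parameter is assumed to exist as in the Consumer sketch.

    using System;
    using System.Collections.Generic;
    using KafkaNet;
    using KafkaNet.Model;

    static class OptionsExample
    {
        public static ConsumerOptions Build(IBrokerRouter router)
        {
            return new ConsumerOptions("test-topic", router)
            {
                MinimumBytes = 64 * 1024,                                    // let the server accumulate up to 64 KB...
                MaxWaitTimeForMinimumBytes = TimeSpan.FromMilliseconds(100), // ...but block no longer than 100 ms
                PartitionWhitelist = new List<int> { 0, 1 },                 // poll only partitions 0 and 1
                BackoffInterval = TimeSpan.FromMilliseconds(500)             // sleep 500 ms after an empty fetch
            };
        }
    }
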
/kafka-net-core/Model/KafkaEndpoint.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Net;
3 |
4 | namespace KafkaNet.Model
5 | {
6 | public class KafkaEndpoint
7 | {
8 | public Uri ServeUri { get; set; }
9 | public IPEndPoint Endpoint { get; set; }
10 |
11 | protected bool Equals(KafkaEndpoint other)
12 | {
13 | return Equals(Endpoint, other.Endpoint);
14 | }
15 |
16 | public override int GetHashCode()
17 | {
18 | //calculated like this to ensure ports on same address sort in the desc order
19 | return (Endpoint != null ? Endpoint.Address.GetHashCode() + Endpoint.Port : 0);
20 | }
21 |
22 | public override bool Equals(object obj)
23 | {
24 | if (ReferenceEquals(null, obj)) return false;
25 | if (ReferenceEquals(this, obj)) return true;
26 | if (obj.GetType() != this.GetType()) return false;
27 | return Equals((KafkaEndpoint) obj);
28 | }
29 |
30 | public override string ToString()
31 | {
32 | return ServeUri.ToString();
33 | }
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
/kafka-net-core/Model/KafkaOptions.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Linq;
4 | using KafkaNet.Protocol;
5 |
6 | namespace KafkaNet.Model
7 | {
8 | public class KafkaOptions
9 | {
10 | private const int DefaultResponseTimeout = 60000;
11 |
12 | ///
13 | /// List of Uri connections to kafka servers. These are used to query for metadata from Kafka. More than one is recommended.
14 | ///
15 | public List KafkaServerUri { get; set; }
16 | ///
17 | /// Safely attempts to resolve endpoints from the KafkaServerUri, ignoreing all resolvable ones.
18 | ///
19 | public IEnumerable KafkaServerEndpoints
20 | {
21 | get
22 | {
23 | foreach (var uri in KafkaServerUri)
24 | {
25 | KafkaEndpoint endpoint = null;
26 | try
27 | {
28 | endpoint = KafkaConnectionFactory.Resolve(uri, Log);
29 | }
30 | catch (UnresolvedHostnameException ex)
31 | {
32 | Log.WarnFormat("Ignoring the following uri as it could not be resolved. Uri:{0} Exception:{1}", uri, ex);
33 | }
34 |
35 | if (endpoint != null) yield return endpoint;
36 | }
37 | }
38 | }
39 | /// <summary>
40 | /// Provides a factory for creating new kafka connections.
41 | /// </summary>
42 | public IKafkaConnectionFactory KafkaConnectionFactory { get; set; }
43 | /// <summary>
44 | /// Selector function for routing messages to partitions. Default is key/hash and round robin.
45 | /// </summary>
46 | public IPartitionSelector PartitionSelector { get; set; }
47 | /// <summary>
48 | /// Timeout length in milliseconds waiting for a response from kafka.
49 | /// </summary>
50 | public TimeSpan ResponseTimeoutMs { get; set; }
51 | /// <summary>
52 | /// Log object to record operational messages.
53 | /// </summary>
54 | public IKafkaLog Log { get; set; }
55 | /// <summary>
56 | /// The maximum time to wait when backing off on reconnection attempts.
57 | /// </summary>
58 | public TimeSpan? MaximumReconnectionTimeout { get; set; }
59 |
60 | public KafkaOptions(params Uri[] kafkaServerUri)
61 | {
62 | KafkaServerUri = kafkaServerUri.ToList();
63 | PartitionSelector = new DefaultPartitionSelector();
64 | Log = new DefaultTraceLog();
65 | KafkaConnectionFactory = new DefaultKafkaConnectionFactory();
66 | ResponseTimeoutMs = TimeSpan.FromMilliseconds(DefaultResponseTimeout);
67 | }
68 | }
69 | }
70 |
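// Editor's note: usage sketch, not part of the source tree. KafkaServerEndpoints
// resolves each configured Uri lazily and silently skips any host that cannot be
// resolved, logging a warning instead of throwing. Broker addresses are placeholders.
using System;
using KafkaNet.Model;

public static class KafkaOptionsExample
{
    public static void ListReachableBrokers()
    {
        var options = new KafkaOptions(
            new Uri("http://broker1:9092"),
            new Uri("http://broker2:9092")); // more than one uri is recommended

        foreach (var endpoint in options.KafkaServerEndpoints)
        {
            Console.WriteLine(endpoint); // only endpoints that resolved successfully
        }
    }
}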
--------------------------------------------------------------------------------
/kafka-net-core/Protocol/BaseRequest.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using KafkaNet.Common;
3 |
4 | namespace KafkaNet.Protocol
5 | {
6 | public abstract class BaseRequest
7 | {
8 | /// <summary>
9 | /// From Documentation:
10 | /// The replica id indicates the node id of the replica initiating this request. Normal client consumers should always specify this as -1 as they have no node id.
11 | /// Other brokers set this to be their own node id. The value -2 is accepted to allow a non-broker to issue fetch requests as if it were a replica broker for debugging purposes.
12 | ///
13 | /// Kafka Protocol implementation:
14 | /// https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
15 | /// </summary>
16 | protected const int ReplicaId = -1;
17 | protected const Int16 ApiVersion = 0;
18 | private string _clientId = "Kafka-Net";
19 | private int _correlationId = 1;
20 |
21 | /// <summary>
22 | /// Descriptive name of the source of the messages sent to kafka
23 | /// </summary>
24 | public string ClientId { get { return _clientId; } set { _clientId = value; } }
25 |
26 | /// <summary>
27 | /// Value supplied will be passed back in the response by the server unmodified.
28 | /// It is useful for matching request and response between the client and server.
29 | /// </summary>
30 | public int CorrelationId { get { return _correlationId; } set { _correlationId = value; } }
31 |
32 | /// <summary>
33 | /// Flag which tells the broker call to expect a response for this request.
34 | /// </summary>
35 | public virtual bool ExpectResponse { get { return true; } }
36 |
37 | /// <summary>
38 | /// Encode the common header for a kafka request.
39 | /// </summary>
40 | /// <returns>KafkaMessagePacker with the header populated</returns>
41 | /// <remarks>Format: (hhihs)</remarks>
42 | public static KafkaMessagePacker EncodeHeader<T>(IKafkaRequest<T> request)
43 | {
44 | return new KafkaMessagePacker()
45 | .Pack(((Int16)request.ApiKey))
46 | .Pack(ApiVersion)
47 | .Pack(request.CorrelationId)
48 | .Pack(request.ClientId, StringPrefixEncoding.Int16);
49 | }
50 | }
51 | }
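// Editor's note: illustrative aside, not part of the source tree. The (hhihs)
// remark above describes the common request header layout that EncodeHeader packs:
//   h:  Int16 ApiKey
//   h:  Int16 ApiVersion
//   i:  Int32 CorrelationId
//   hs: Int16 length-prefixed ClientId string
// For a request with the default ClientId "Kafka-Net" (9 bytes) the header
// therefore occupies 2 + 2 + 4 + (2 + 9) = 19 bytes, all written big-endian
// by KafkaMessagePacker.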
--------------------------------------------------------------------------------
/kafka-net-core/Protocol/Broker.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using KafkaNet.Common;
3 |
4 | namespace KafkaNet.Protocol
5 | {
6 | public class Broker
7 | {
8 | public int BrokerId { get; set; }
9 | public string Host { get; set; }
10 | public int Port { get; set; }
11 | public Uri Address { get { return new Uri(string.Format("http://{0}:{1}", Host, Port));} }
12 |
13 | public static Broker FromStream(BigEndianBinaryReader stream)
14 | {
15 | return new Broker
16 | {
17 | BrokerId = stream.ReadInt32(),
18 | Host = stream.ReadInt16String(),
19 | Port = stream.ReadInt32()
20 | };
21 | }
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/kafka-net-core/Protocol/ConsumerMetadataRequest.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using KafkaNet.Common;
4 |
5 | namespace KafkaNet.Protocol
6 | {
7 | /// <summary>
8 | /// https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetFetchRequest
9 | /// The offsets for a given consumer group are maintained by a specific broker called the offset coordinator. i.e., a consumer needs
10 | /// to issue its offset commit and fetch requests to this specific broker. It can discover the current offset coordinator by issuing a consumer metadata request.
11 | /// </summary>
12 | public class ConsumerMetadataRequest : BaseRequest, IKafkaRequest<ConsumerMetadataResponse>
13 | {
14 | public ApiKeyRequestType ApiKey { get { return ApiKeyRequestType.ConsumerMetadataRequest; } }
15 | public string ConsumerGroup { get; set; }
16 |
17 | public KafkaDataPayload Encode()
18 | {
19 | return EncodeConsumerMetadataRequest(this);
20 | }
21 |
22 |
23 | public IEnumerable<ConsumerMetadataResponse> Decode(byte[] payload)
24 | {
25 | return DecodeConsumerMetadataResponse(payload);
26 | }
27 |
28 | private KafkaDataPayload EncodeConsumerMetadataRequest(ConsumerMetadataRequest request)
29 | {
30 | using (var message = EncodeHeader(request).Pack(request.ConsumerGroup, StringPrefixEncoding.Int16))
31 | {
32 | return new KafkaDataPayload
33 | {
34 | Buffer = message.Payload(),
35 | CorrelationId = request.CorrelationId,
36 | ApiKey = ApiKey
37 | };
38 | }
39 | }
40 |
41 | private IEnumerable<ConsumerMetadataResponse> DecodeConsumerMetadataResponse(byte[] data)
42 | {
43 | using (var stream = new BigEndianBinaryReader(data))
44 | {
45 | var correlationId = stream.ReadInt32();
46 |
47 | var response = new ConsumerMetadataResponse
48 | {
49 | Error = stream.ReadInt16(),
50 | CoordinatorId = stream.ReadInt32(),
51 | CoordinatorHost = stream.ReadInt16String(),
52 | CoordinatorPort = stream.ReadInt32()
53 | };
54 |
55 | yield return response;
56 | }
57 | }
58 | }
59 |
60 | public class ConsumerMetadataResponse
61 | {
62 | /// <summary>
63 | /// Error code of the exception that occurred during the request. Zero if no error.
64 | /// </summary>
65 | public Int16 Error;
66 |
67 | public int CoordinatorId;
68 | public string CoordinatorHost;
69 | public int CoordinatorPort;
70 | }
71 | }
72 |
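// Editor's note: usage sketch, not in the source tree. A consumer discovers the
// offset coordinator for its group before committing or fetching offsets; the
// group name and the SendAsync round-trip over IKafkaConnection are assumptions
// for illustration.
using System.Linq;
using KafkaNet;
using KafkaNet.Protocol;

public static class CoordinatorDiscoveryExample
{
    public static ConsumerMetadataResponse FindCoordinator(IKafkaConnection connection)
    {
        var request = new ConsumerMetadataRequest { ConsumerGroup = "example-group" };
        var response = connection.SendAsync(request).Result.First();
        // response.CoordinatorHost/CoordinatorPort identify the broker that owns
        // this group's offsets; commit/fetch requests should be sent there.
        return response;
    }
}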
--------------------------------------------------------------------------------
/kafka-net-core/Protocol/FetchRequest.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Linq;
4 | using KafkaNet.Common;
5 |
6 | namespace KafkaNet.Protocol
7 | {
8 | public class FetchRequest : BaseRequest, IKafkaRequest<FetchResponse>
9 | {
10 | internal const int DefaultMinBlockingByteBufferSize = 4096;
11 | internal const int DefaultBufferSize = DefaultMinBlockingByteBufferSize * 8;
12 | internal const int DefaultMaxBlockingWaitTime = 5000;
13 |
14 | /// <summary>
15 | /// Indicates the type of kafka encoding this request is
16 | /// </summary>
17 | public ApiKeyRequestType ApiKey { get { return ApiKeyRequestType.Fetch; } }
18 | /// <summary>
19 | /// The max wait time is the maximum amount of time in milliseconds to block waiting if insufficient data is available at the time the request is issued.
20 | /// </summary>
21 | public int MaxWaitTime = DefaultMaxBlockingWaitTime;
22 | /// <summary>
23 | /// This is the minimum number of bytes of messages that must be available to give a response.
24 | /// If the client sets this to 0 the server will always respond immediately, however if there is no new data since their last request they will just get back empty message sets.
25 | /// If this is set to 1, the server will respond as soon as at least one partition has at least 1 byte of data or the specified timeout occurs.
26 | /// By setting higher values in combination with the timeout the consumer can tune for throughput and trade a little additional latency for reading only large chunks of data
27 | /// (e.g. setting MaxWaitTime to 100 ms and setting MinBytes to 64k would allow the server to wait up to 100ms to try to accumulate 64k of data before responding).
28 | /// </summary>
29 | public int MinBytes = DefaultMinBlockingByteBufferSize;
30 |
31 | public List<Fetch> Fetches { get; set; }
32 |
33 | public KafkaDataPayload Encode()
34 | {
35 | return EncodeFetchRequest(this);
36 | }
37 |
38 | public IEnumerable<FetchResponse> Decode(byte[] payload)
39 | {
40 | return DecodeFetchResponse(payload);
41 | }
42 |
43 | private KafkaDataPayload EncodeFetchRequest(FetchRequest request)
44 | {
45 | if (request.Fetches == null) request.Fetches = new List<Fetch>();
46 |
47 | using (var message = EncodeHeader(request))
48 | {
49 | var topicGroups = request.Fetches.GroupBy(x => x.Topic).ToList();
50 | message.Pack(ReplicaId)
51 | .Pack(request.MaxWaitTime)
52 | .Pack(request.MinBytes)
53 | .Pack(topicGroups.Count);
54 |
55 | foreach (var topicGroup in topicGroups)
56 | {
57 | var partitions = topicGroup.GroupBy(x => x.PartitionId).ToList();
58 | message.Pack(topicGroup.Key, StringPrefixEncoding.Int16)
59 | .Pack(partitions.Count);
60 |
61 | foreach (var partition in partitions)
62 | {
63 | foreach (var fetch in partition)
64 | {
65 | message.Pack(partition.Key)
66 | .Pack(fetch.Offset)
67 | .Pack(fetch.MaxBytes);
68 | }
69 | }
70 | }
71 |
72 | return new KafkaDataPayload
73 | {
74 | Buffer = message.Payload(),
75 | CorrelationId = request.CorrelationId,
76 | ApiKey = ApiKey
77 | };
78 | }
79 | }
80 |
81 | private IEnumerable<FetchResponse> DecodeFetchResponse(byte[] data)
82 | {
83 | using (var stream = new BigEndianBinaryReader(data))
84 | {
85 | var correlationId = stream.ReadInt32();
86 |
87 | var topicCount = stream.ReadInt32();
88 | for (int i = 0; i < topicCount; i++)
89 | {
90 | var topic = stream.ReadInt16String();
91 |
92 | var partitionCount = stream.ReadInt32();
93 | for (int j = 0; j < partitionCount; j++)
94 | {
95 | var partitionId = stream.ReadInt32();
96 | var response = new FetchResponse
97 | {
98 | Topic = topic,
99 | PartitionId = partitionId,
100 | Error = stream.ReadInt16(),
101 | HighWaterMark = stream.ReadInt64()
102 | };
103 | //note: don't use an object initializer here as it breaks the stream position.
104 | response.Messages = Message.DecodeMessageSet(stream.ReadIntPrefixedBytes())
105 | .Select(x => { x.Meta.PartitionId = partitionId; return x; })
106 | .ToList();
107 | yield return response;
108 | }
109 | }
110 | }
111 | }
112 | }
113 |
114 | public class Fetch
115 | {
116 | public Fetch()
117 | {
118 | MaxBytes = FetchRequest.DefaultMinBlockingByteBufferSize * 8;
119 | }
120 |
121 | /// <summary>
122 | /// The name of the topic.
123 | /// </summary>
124 | public string Topic { get; set; }
125 | /// <summary>
126 | /// The id of the partition the fetch is for.
127 | /// </summary>
128 | public int PartitionId { get; set; }
129 | /// <summary>
130 | /// The offset to begin this fetch from.
131 | /// </summary>
132 | public long Offset { get; set; }
133 | /// <summary>
134 | /// The maximum bytes to include in the message set for this partition. This helps bound the size of the response.
135 | /// </summary>
136 | public int MaxBytes { get; set; }
137 | }
138 |
139 | public class FetchResponse
140 | {
141 | /// <summary>
142 | /// The name of the topic this response entry is for.
143 | /// </summary>
144 | public string Topic { get; set; }
145 | /// <summary>
146 | /// The id of the partition this response is for.
147 | /// </summary>
148 | public int PartitionId { get; set; }
149 | /// <summary>
150 | /// Error code of the exception that occurred during the request. Zero if no error.
151 | /// </summary>
152 | public Int16 Error { get; set; }
153 | /// <summary>
154 | /// The offset at the end of the log for this partition. This can be used by the client to determine how many messages behind the end of the log they are.
155 | /// </summary>
156 | public long HighWaterMark { get; set; }
157 |
158 | public List<Message> Messages { get; set; }
159 |
160 | public FetchResponse()
161 | {
162 | Messages = new List<Message>();
163 | }
164 | }
165 | }
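// Editor's note: usage sketch, not part of the source tree, showing how a fetch
// for a single topic/partition is assembled; the topic name and offset are placeholders.
using System.Collections.Generic;
using KafkaNet.Protocol;

public static class FetchRequestExample
{
    public static FetchRequest ForPartition(long nextOffset)
    {
        return new FetchRequest
        {
            MaxWaitTime = 100,      // block at most 100 ms waiting for MinBytes
            MinBytes = 64 * 1024,   // ...or until 64 KB is available
            Fetches = new List<Fetch>
            {
                new Fetch { Topic = "example-topic", PartitionId = 0, Offset = nextOffset }
                // MaxBytes defaults to 32 KB (DefaultMinBlockingByteBufferSize * 8)
            }
        };
    }
}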
--------------------------------------------------------------------------------
/kafka-net-core/Protocol/Message.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Linq;
4 | using System.Security.Cryptography;
5 | using KafkaNet.Common;
6 |
7 | namespace KafkaNet.Protocol
8 | {
9 | /// <summary>
10 | /// Buffer represents a collection of messages to be posted to a specified Topic on a specified Partition.
11 | /// </summary>
12 | public class Payload
13 | {
14 | public Payload()
15 | {
16 | Codec = MessageCodec.CodecNone;
17 | }
18 |
19 | public string Topic { get; set; }
20 | public int Partition { get; set; }
21 | public MessageCodec Codec { get; set; }
22 | public List<Message> Messages { get; set; }
23 | }
24 |
25 | /// <summary>
26 | /// Message represents the data from a single event occurrence.
27 | /// </summary>
28 | public class Message
29 | {
30 | private const int MessageHeaderSize = 12;
31 | private const long InitialMessageOffset = 0;
32 |
33 | /// <summary>
34 | /// Metadata on source offset and partition location for this message.
35 | /// </summary>
36 | public MessageMetadata Meta { get; set; }
37 | /// <summary>
38 | /// This is a version id used to allow backwards compatible evolution of the message binary format. Reserved for future use.
39 | /// </summary>
40 | public byte MagicNumber { get; set; }
41 | /// <summary>
42 | /// Attribute value outside message body used for added codec/compression info.
43 | /// </summary>
44 | public byte Attribute { get; set; }
45 | /// <summary>
46 | /// Key value used for routing message to partitions.
47 | /// </summary>
48 | public byte[] Key { get; set; }
49 | /// <summary>
50 | /// The message body contents. Can contain a compressed message set.
51 | /// </summary>
52 | public byte[] Value { get; set; }
53 |
54 | /// <summary>
55 | /// Construct an empty message.
56 | /// </summary>
57 | public Message() { }
58 |
59 | /// <summary>
60 | /// Convenience constructor will encode both the key and message to byte streams.
61 | /// Most of the time a message will be string based.
62 | /// </summary>
63 | /// <param name="key">The key value for the message. Can be null.</param>
64 | /// <param name="value">The main content data of this message.</param>
65 | public Message(string value, string key = null)
66 | {
67 | Key = key == null ? null : key.ToBytes();
68 | Value = value.ToBytes();
69 | }
70 |
71 | /// <summary>
72 | /// Encodes a collection of messages into one byte[]. Encoded in order of list.
73 | /// </summary>
74 | /// <param name="messages">The collection of messages to encode together.</param>
75 | /// <returns>Encoded byte[] representing the collection of messages.</returns>
76 | public static byte[] EncodeMessageSet(IEnumerable<Message> messages)
77 | {
78 | using (var stream = new KafkaMessagePacker())
79 | {
80 | foreach (var message in messages)
81 | {
82 | stream.Pack(InitialMessageOffset)
83 | .Pack(EncodeMessage(message));
84 | }
85 |
86 | return stream.PayloadNoLength();
87 | }
88 | }
89 |
90 | /// <summary>
91 | /// Decode a byte[] that represents a collection of messages.
92 | /// </summary>
93 | /// <param name="messageSet">The byte[] encoded as a message set from kafka.</param>
94 | /// <returns>Enumerable representing stream of messages decoded from byte[]</returns>
95 | public static IEnumerable<Message> DecodeMessageSet(byte[] messageSet)
96 | {
97 | using (var stream = new BigEndianBinaryReader(messageSet))
98 | {
99 | while (stream.HasData)
100 | {
101 | //this checks that we have at least the minimum amount of data to retrieve a header
102 | if (stream.Available(MessageHeaderSize) == false)
103 | yield break;
104 |
105 | var offset = stream.ReadInt64();
106 | var messageSize = stream.ReadInt32();
107 |
108 | //if the message size is greater than the total payload, our max buffer is insufficient.
109 | if ((stream.Length - MessageHeaderSize) < messageSize)
110 | throw new BufferUnderRunException(MessageHeaderSize, messageSize);
111 |
112 | //if the stream does not have enough left in the payload, we got only a partial message
113 | if (stream.Available(messageSize) == false)
114 | yield break;
115 |
116 | foreach (var message in DecodeMessage(offset, stream.RawRead(messageSize)))
117 | {
118 | yield return message;
119 | }
120 | }
121 | }
122 | }
123 |
124 | /// <summary>
125 | /// Encodes a message object to byte[]
126 | /// </summary>
127 | /// <param name="message">Message data to encode.</param>
128 | /// <returns>Encoded byte[] representation of the message object.</returns>
129 | /// <remarks>
130 | /// Format:
131 | /// Crc (Int32), MagicByte (Byte), Attribute (Byte), Key (Byte[]), Value (Byte[])
132 | /// </remarks>
133 | public static byte[] EncodeMessage(Message message)
134 | {
135 | using(var stream = new KafkaMessagePacker())
136 | {
137 | return stream.Pack(message.MagicNumber)
138 | .Pack(message.Attribute)
139 | .Pack(message.Key)
140 | .Pack(message.Value)
141 | .CrcPayload();
142 | }
143 | }
144 |
145 | /// <summary>
146 | /// Decode messages from a payload and assign it a given kafka offset.
147 | /// </summary>
148 | /// <param name="offset">The offset representing the log entry from kafka of this message.</param>
149 | /// <param name="payload">The byte[] encoded as a message from kafka.</param>
150 | /// <returns>Enumerable representing stream of messages decoded from byte[].</returns>
151 | /// <remarks>The return type is an Enumerable as the message could be a compressed message set.</remarks>
152 | public static IEnumerable<Message> DecodeMessage(long offset, byte[] payload)
153 | {
154 | var crc = payload.Take(4).ToArray();
155 | using (var stream = new BigEndianBinaryReader(payload.Skip(4)))
156 | {
157 | if (crc.SequenceEqual(stream.CrcHash()) == false)
158 | throw new FailCrcCheckException("Buffer did not match CRC validation.");
159 |
160 | var message = new Message
161 | {
162 | Meta = new MessageMetadata { Offset = offset },
163 | MagicNumber = stream.ReadByte(),
164 | Attribute = stream.ReadByte(),
165 | Key = stream.ReadIntPrefixedBytes()
166 | };
167 |
168 | var codec = (MessageCodec)(ProtocolConstants.AttributeCodeMask & message.Attribute);
169 | switch (codec)
170 | {
171 | case MessageCodec.CodecNone:
172 | message.Value = stream.ReadIntPrefixedBytes();
173 | yield return message;
174 | break;
175 | case MessageCodec.CodecGzip:
176 | var gZipData = stream.ReadIntPrefixedBytes();
177 | foreach (var m in DecodeMessageSet(Compression.Unzip(gZipData)))
178 | {
179 | yield return m;
180 | }
181 | break;
182 | default:
183 | throw new NotSupportedException(string.Format("Codec type of {0} is not supported.", codec));
184 | }
185 | }
186 | }
187 | }
188 |
189 | /// <summary>
190 | /// Provides metadata about the message received from the FetchResponse
191 | /// </summary>
192 | /// <remarks>
193 | /// The purpose of this metadata is to allow client applications to track their own offset information about messages received from Kafka.
194 | /// </remarks>
195 |
196 | public class MessageMetadata
197 | {
198 | /// <summary>
199 | /// The log offset of this message as stored by the Kafka server.
200 | /// </summary>
201 | public long Offset { get; set; }
202 | /// <summary>
203 | /// The partition id this offset is from.
204 | /// </summary>
205 | public int PartitionId { get; set; }
206 | }
207 | }
208 |
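// Editor's note: sketch, not in the source tree. EncodeMessage/DecodeMessage are
// symmetric: the encoder CRCs the (magic, attribute, key, value) body, and the
// decoder verifies that CRC before yielding messages, throwing FailCrcCheckException
// on mismatch. DecodeMessageSet returns an IEnumerable because a single entry may
// expand into many messages when the gzip codec is used.
using System.Linq;
using KafkaNet.Protocol;

public static class MessageRoundTripExample
{
    public static Message RoundTrip()
    {
        var original = new Message("hello world", key: "route-key");
        var encoded = Message.EncodeMessageSet(new[] { original });
        // Offsets are assigned by the broker; EncodeMessageSet writes 0 placeholders.
        return Message.DecodeMessageSet(encoded).Single();
    }
}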
--------------------------------------------------------------------------------
/kafka-net-core/Protocol/MetadataRequest.cs:
--------------------------------------------------------------------------------
1 | using System.Collections.Generic;
2 | using System.Linq;
3 | using KafkaNet.Common;
4 | using KafkaNet.Model;
5 |
6 | namespace KafkaNet.Protocol
7 | {
8 | public class MetadataRequest : BaseRequest, IKafkaRequest<MetadataResponse>
9 | {
10 | /// <summary>
11 | /// Indicates the type of kafka encoding this request is
12 | /// </summary>
13 | public ApiKeyRequestType ApiKey { get { return ApiKeyRequestType.MetaData; } }
14 |
15 | /// <summary>
16 | /// The list of topics to get metadata for.
17 | /// </summary>
18 | public List<string> Topics { get; set; }
19 |
20 | public KafkaDataPayload Encode()
21 | {
22 | return EncodeMetadataRequest(this);
23 | }
24 |
25 | public IEnumerable<MetadataResponse> Decode(byte[] payload)
26 | {
27 | return new[] { DecodeMetadataResponse(payload) };
28 | }
29 |
30 | /// <summary>
31 | /// Encode a request for metadata about topic and broker information.
32 | /// </summary>
33 | /// <param name="request">The MetaDataRequest to encode.</param>
34 | /// <returns>Encoded byte[] representing the request.</returns>
35 | /// <remarks>Format: (PayloadSize), Header, ix(hs)</remarks>
36 | private KafkaDataPayload EncodeMetadataRequest(MetadataRequest request)
37 | {
38 | if (request.Topics == null) request.Topics = new List<string>();
39 |
40 | using (var message = EncodeHeader(request)
41 | .Pack(request.Topics.Count)
42 | .Pack(request.Topics, StringPrefixEncoding.Int16))
43 | {
44 | return new KafkaDataPayload
45 | {
46 | Buffer = message.Payload(),
47 | CorrelationId = request.CorrelationId,
48 | ApiKey = ApiKey
49 | };
50 | }
51 | }
52 |
53 | /// <summary>
54 | /// Decode the metadata response from kafka server.
55 | /// </summary>
56 | /// <param name="data"></param>
57 | /// <returns></returns>
58 | private MetadataResponse DecodeMetadataResponse(byte[] data)
59 | {
60 | using (var stream = new BigEndianBinaryReader(data))
61 | {
62 | var response = new MetadataResponse();
63 | response.CorrelationId = stream.ReadInt32();
64 |
65 | var brokerCount = stream.ReadInt32();
66 | for (var i = 0; i < brokerCount; i++)
67 | {
68 | response.Brokers.Add(Broker.FromStream(stream));
69 | }
70 |
71 | var topicCount = stream.ReadInt32();
72 | for (var i = 0; i < topicCount; i++)
73 | {
74 | response.Topics.Add(Topic.FromStream(stream));
75 | }
76 |
77 | return response;
78 | }
79 | }
80 |
81 | }
82 |
83 | public class MetadataResponse
84 | {
85 | public int CorrelationId { get; set; }
86 | public MetadataResponse()
87 | {
88 | Brokers = new List<Broker>();
89 | Topics = new List<Topic>();
90 | }
91 |
92 | public List<Broker> Brokers { get; set; }
93 | public List<Topic> Topics { get; set; }
94 | }
95 | }
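// Editor's note: usage sketch, not part of the source tree. An empty Topics list
// asks the broker for metadata on all topics; naming specific topics narrows the
// response to those topics' partition/leader layout. The topic name is a placeholder.
using System.Collections.Generic;
using KafkaNet.Protocol;

public static class MetadataRequestExample
{
    public static MetadataRequest ForTopic(string topic)
    {
        // The decoded MetadataResponse lists every Broker (id/host/port) plus each
        // Topic's partitions with their LeaderId, Replicas and in-sync replicas.
        return new MetadataRequest { Topics = new List<string> { topic } };
    }
}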
--------------------------------------------------------------------------------
/kafka-net-core/Protocol/OffsetCommitRequest.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Linq;
4 | using KafkaNet.Common;
5 |
6 | namespace KafkaNet.Protocol
7 | {
8 | /// <summary>
9 | /// Class that represents the api call to commit a specific set of offsets for a given topic. The offset is saved under the
10 | /// arbitrary ConsumerGroup name provided by the call.
11 | /// </summary>
12 | public class OffsetCommitRequest : BaseRequest, IKafkaRequest<OffsetCommitResponse>
13 | {
14 | public ApiKeyRequestType ApiKey { get { return ApiKeyRequestType.OffsetCommit; } }
15 | public string ConsumerGroup { get; set; }
16 | public List<OffsetCommit> OffsetCommits { get; set; }
17 |
18 | public KafkaDataPayload Encode()
19 | {
20 | return EncodeOffsetCommitRequest(this);
21 | }
22 |
23 | public IEnumerable<OffsetCommitResponse> Decode(byte[] payload)
24 | {
25 | return DecodeOffsetCommitResponse(payload);
26 | }
27 |
28 | private KafkaDataPayload EncodeOffsetCommitRequest(OffsetCommitRequest request)
29 | {
30 | if (request.OffsetCommits == null) request.OffsetCommits = new List<OffsetCommit>();
31 |
32 | using (var message = EncodeHeader(request).Pack(request.ConsumerGroup, StringPrefixEncoding.Int16))
33 | {
34 | var topicGroups = request.OffsetCommits.GroupBy(x => x.Topic).ToList();
35 | message.Pack(topicGroups.Count);
36 |
37 | foreach (var topicGroup in topicGroups)
38 | {
39 | var partitions = topicGroup.GroupBy(x => x.PartitionId).ToList();
40 | message.Pack(topicGroup.Key, StringPrefixEncoding.Int16)
41 | .Pack(partitions.Count);
42 |
43 | foreach (var partition in partitions)
44 | {
45 | foreach (var commit in partition)
46 | {
47 | message.Pack(partition.Key)
48 | .Pack(commit.Offset)
49 | .Pack(commit.TimeStamp)
50 | .Pack(commit.Metadata, StringPrefixEncoding.Int16);
51 | }
52 | }
53 | }
54 |
55 | return new KafkaDataPayload
56 | {
57 | Buffer = message.Payload(),
58 | CorrelationId = request.CorrelationId,
59 | ApiKey = ApiKey
60 | };
61 | }
62 | }
63 |
64 | private IEnumerable<OffsetCommitResponse> DecodeOffsetCommitResponse(byte[] data)
65 | {
66 | using (var stream = new BigEndianBinaryReader(data))
67 | {
68 | var correlationId = stream.ReadInt32();
69 |
70 | var topicCount = stream.ReadInt32();
71 | for (int i = 0; i < topicCount; i++)
72 | {
73 | var topic = stream.ReadInt16String();
74 |
75 | var partitionCount = stream.ReadInt32();
76 | for (int j = 0; j < partitionCount; j++)
77 | {
78 | var response = new OffsetCommitResponse()
79 | {
80 | Topic = topic,
81 | PartitionId = stream.ReadInt32(),
82 | Error = stream.ReadInt16()
83 | };
84 |
85 | yield return response;
86 | }
87 | }
88 | }
89 | }
90 | }
91 |
92 | public class OffsetCommit
93 | {
94 | /// <summary>
95 | /// The topic the offset came from.
96 | /// </summary>
97 | public string Topic { get; set; }
98 | /// <summary>
99 | /// The partition the offset came from.
100 | /// </summary>
101 | public int PartitionId { get; set; }
102 | /// <summary>
103 | /// The offset number to commit as completed.
104 | /// </summary>
105 | public long Offset { get; set; }
106 | /// <summary>
107 | /// If the time stamp field is set to -1, then the broker sets the time stamp to the receive time before committing the offset.
108 | /// </summary>
109 | public long TimeStamp { get; set; }
110 | /// <summary>
111 | /// Descriptive metadata about this commit.
112 | /// </summary>
113 | public string Metadata { get; set; }
114 |
115 | public OffsetCommit()
116 | {
117 | TimeStamp = -1;
118 | }
119 |
120 | }
121 |
122 | public class OffsetCommitResponse
123 | {
124 | /// <summary>
125 | /// The name of the topic this response entry is for.
126 | /// </summary>
127 | public string Topic;
128 | /// <summary>
129 | /// The id of the partition this response is for.
130 | /// </summary>
131 | public Int32 PartitionId;
132 | /// <summary>
133 | /// Error code of the exception that occurred during the request. Zero if no error.
134 | /// </summary>
135 | public Int16 Error;
136 | }
137 | }
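// Editor's note: sketch, not in the source tree. TimeStamp defaults to -1, which
// instructs the broker to stamp the commit with its own receive time; the group
// and topic names are placeholders.
using System.Collections.Generic;
using KafkaNet.Protocol;

public static class OffsetCommitExample
{
    public static OffsetCommitRequest CommitPosition(long processedUpTo)
    {
        return new OffsetCommitRequest
        {
            ConsumerGroup = "example-group",
            OffsetCommits = new List<OffsetCommit>
            {
                new OffsetCommit { Topic = "example-topic", PartitionId = 0, Offset = processedUpTo }
            }
        };
        // The saved position can later be read back with an OffsetFetchRequest
        // for the same ConsumerGroup, topic and partition.
    }
}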
--------------------------------------------------------------------------------
/kafka-net-core/Protocol/OffsetFetchRequest.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Linq;
4 |
5 | using KafkaNet.Common;
6 |
7 | namespace KafkaNet.Protocol
8 | {
9 | /// <summary>
10 | /// Class that represents both the request and the response from a kafka server of requesting a stored offset value
11 | /// for a given consumer group. Essentially this part of the api allows a user to save/load a given offset position
12 | /// under any arbitrary name.
13 | /// </summary>
14 | public class OffsetFetchRequest : BaseRequest, IKafkaRequest<OffsetFetchResponse>
15 | {
16 | public ApiKeyRequestType ApiKey { get { return ApiKeyRequestType.OffsetFetch; } }
17 | public string ConsumerGroup { get; set; }
18 | public List<OffsetFetch> Topics { get; set; }
19 |
20 | public KafkaDataPayload Encode()
21 | {
22 | return EncodeOffsetFetchRequest(this);
23 | }
24 |
25 | protected KafkaDataPayload EncodeOffsetFetchRequest(OffsetFetchRequest request)
26 | {
27 | if (request.Topics == null) request.Topics = new List<OffsetFetch>();
28 |
29 | using (var message = EncodeHeader(request))
30 | {
31 | var topicGroups = request.Topics.GroupBy(x => x.Topic).ToList();
32 |
33 | message.Pack(ConsumerGroup, StringPrefixEncoding.Int16)
34 | .Pack(topicGroups.Count);
35 |
36 | foreach (var topicGroup in topicGroups)
37 | {
38 | var partitions = topicGroup.GroupBy(x => x.PartitionId).ToList();
39 | message.Pack(topicGroup.Key, StringPrefixEncoding.Int16)
40 | .Pack(partitions.Count);
41 |
42 | foreach (var partition in partitions)
43 | {
44 | foreach (var offset in partition)
45 | {
46 | message.Pack(offset.PartitionId);
47 | }
48 | }
49 | }
50 |
51 | return new KafkaDataPayload
52 | {
53 | Buffer = message.Payload(),
54 | CorrelationId = request.CorrelationId,
55 | ApiKey = ApiKey
56 | };
57 | }
58 | }
59 |
60 | public IEnumerable<OffsetFetchResponse> Decode(byte[] payload)
61 | {
62 | return DecodeOffsetFetchResponse(payload);
63 | }
64 |
65 |
66 | protected IEnumerable<OffsetFetchResponse> DecodeOffsetFetchResponse(byte[] data)
67 | {
68 | using (var stream = new BigEndianBinaryReader(data))
69 | {
70 | var correlationId = stream.ReadInt32();
71 |
72 | var topicCount = stream.ReadInt32();
73 | for (int i = 0; i < topicCount; i++)
74 | {
75 | var topic = stream.ReadInt16String();
76 |
77 | var partitionCount = stream.ReadInt32();
78 | for (int j = 0; j < partitionCount; j++)
79 | {
80 | var response = new OffsetFetchResponse()
81 | {
82 | Topic = topic,
83 | PartitionId = stream.ReadInt32(),
84 | Offset = stream.ReadInt64(),
85 | MetaData = stream.ReadInt16String(),
86 | Error = stream.ReadInt16()
87 | };
88 | yield return response;
89 | }
90 | }
91 | }
92 | }
93 |
94 | }
95 |
96 | public class OffsetFetch
97 | {
98 | /// <summary>
99 | /// The topic the offset came from.
100 | /// </summary>
101 | public string Topic { get; set; }
102 | /// <summary>
103 | /// The partition the offset came from.
104 | /// </summary>
105 | public int PartitionId { get; set; }
106 | }
107 |
108 | public class OffsetFetchResponse
109 | {
110 | /// <summary>
111 | /// The name of the topic this response entry is for.
112 | /// </summary>
113 | public string Topic;
114 | /// <summary>
115 | /// The id of the partition this response is for.
116 | /// </summary>
117 | public Int32 PartitionId;
118 | /// <summary>
119 | /// The offset position saved to the server.
120 | /// </summary>
121 | public Int64 Offset;
122 | /// <summary>
123 | /// Any arbitrary metadata stored during a CommitRequest.
124 | /// </summary>
125 | public string MetaData;
126 | /// <summary>
127 | /// Error code of the exception that occurred during the request. Zero if no error.
128 | /// </summary>
129 | public Int16 Error;
130 |
131 | public override string ToString()
132 | {
133 | return string.Format("[OffsetFetchResponse TopicName={0}, PartitionID={1}, Offset={2}, MetaData={3}, ErrorCode={4}]", Topic, PartitionId, Offset, MetaData, Error);
134 | }
135 |
136 | }
137 | }
138 |
--------------------------------------------------------------------------------
/kafka-net-core/Protocol/OffsetRequest.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Linq;
4 | using KafkaNet.Common;
5 |
6 | namespace KafkaNet.Protocol
7 | {
8 | /// <summary>
9 | /// A funky Protocol for requesting the starting offset of each segment for the requested partition
10 | /// </summary>
11 | public class OffsetRequest : BaseRequest, IKafkaRequest<OffsetResponse>
12 | {
13 | public ApiKeyRequestType ApiKey { get { return ApiKeyRequestType.Offset; } }
14 | public List<Offset> Offsets { get; set; }
15 |
16 | public KafkaDataPayload Encode()
17 | {
18 | return EncodeOffsetRequest(this);
19 | }
20 |
21 | public IEnumerable<OffsetResponse> Decode(byte[] payload)
22 | {
23 | return DecodeOffsetResponse(payload);
24 | }
25 |
26 | private KafkaDataPayload EncodeOffsetRequest(OffsetRequest request)
27 | {
28 | if (request.Offsets == null) request.Offsets = new List<Offset>();
29 | using (var message = EncodeHeader(request))
30 | {
31 | var topicGroups = request.Offsets.GroupBy(x => x.Topic).ToList();
32 | message.Pack(ReplicaId)
33 | .Pack(topicGroups.Count);
34 |
35 | foreach (var topicGroup in topicGroups)
36 | {
37 | var partitions = topicGroup.GroupBy(x => x.PartitionId).ToList();
38 | message.Pack(topicGroup.Key, StringPrefixEncoding.Int16)
39 | .Pack(partitions.Count);
40 |
41 | foreach (var partition in partitions)
42 | {
43 | foreach (var offset in partition)
44 | {
45 | message.Pack(partition.Key)
46 | .Pack(offset.Time)
47 | .Pack(offset.MaxOffsets);
48 | }
49 | }
50 | }
51 |
52 | return new KafkaDataPayload
53 | {
54 | Buffer = message.Payload(),
55 | CorrelationId = request.CorrelationId,
56 | ApiKey = ApiKey
57 | };
58 | }
59 | }
60 |
61 |
62 | private IEnumerable<OffsetResponse> DecodeOffsetResponse(byte[] data)
63 | {
64 | using (var stream = new BigEndianBinaryReader(data))
65 | {
66 | var correlationId = stream.ReadInt32();
67 |
68 | var topicCount = stream.ReadInt32();
69 | for (int i = 0; i < topicCount; i++)
70 | {
71 | var topic = stream.ReadInt16String();
72 |
73 | var partitionCount = stream.ReadInt32();
74 | for (int j = 0; j < partitionCount; j++)
75 | {
76 | var response = new OffsetResponse()
77 | {
78 | Topic = topic,
79 | PartitionId = stream.ReadInt32(),
80 | Error = stream.ReadInt16(),
81 | Offsets = new List<long>()
82 | };
83 | var offsetCount = stream.ReadInt32();
84 | for (int k = 0; k < offsetCount; k++)
85 | {
86 | response.Offsets.Add(stream.ReadInt64());
87 | }
88 |
89 | yield return response;
90 | }
91 | }
92 | }
93 | }
94 | }
95 |
96 | public class Offset
97 | {
98 | public Offset()
99 | {
100 | Time = -1;
101 | MaxOffsets = 1;
102 | }
103 | public string Topic { get; set; }
104 | public int PartitionId { get; set; }
105 | /// <summary>
106 | /// Used to ask for all messages before a certain time (ms). There are two special values.
107 | /// Specify -1 to receive the latest offsets and -2 to receive the earliest available offset.
108 | /// Note that because offsets are pulled in descending order, asking for the earliest offset will always return you a single element.
109 | /// </summary>
110 | public long Time { get; set; }
111 | public int MaxOffsets { get; set; }
112 | }
113 |
114 | public class OffsetResponse
115 | {
116 | public string Topic { get; set; }
117 | public int PartitionId { get; set; }
118 | public Int16 Error { get; set; }
119 | public List<long> Offsets { get; set; }
120 | }
121 |
122 | public class OffsetPosition
123 | {
124 | public OffsetPosition() { }
125 | public OffsetPosition(int partitionId, long offset)
126 | {
127 | PartitionId = partitionId;
128 | Offset = offset;
129 | }
130 | public int PartitionId { get; set; }
131 | public long Offset { get; set; }
132 |
133 | public override string ToString()
134 | {
135 | return string.Format("PartitionId:{0}, Offset:{1}", PartitionId, Offset);
136 | }
137 |
138 | public override bool Equals(object obj)
139 | {
140 | if (ReferenceEquals(null, obj)) return false;
141 | if (ReferenceEquals(this, obj)) return true;
142 | if (obj.GetType() != this.GetType()) return false;
143 | return Equals((OffsetPosition)obj);
144 | }
145 |
146 | protected bool Equals(OffsetPosition other)
147 | {
148 | return PartitionId == other.PartitionId && Offset == other.Offset;
149 | }
150 |
151 | public override int GetHashCode()
152 | {
153 | unchecked
154 | {
155 | return (PartitionId * 397) ^ Offset.GetHashCode();
156 | }
157 | }
158 | }
159 | }
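// Editor's note: sketch, not part of the source tree, illustrating the two special
// Time values documented above: -1 asks for the latest offset, -2 for the earliest
// still-available offset. The topic name is a placeholder.
using System.Collections.Generic;
using KafkaNet.Protocol;

public static class OffsetRequestExample
{
    public static OffsetRequest LatestAndEarliest()
    {
        return new OffsetRequest
        {
            Offsets = new List<Offset>
            {
                new Offset { Topic = "example-topic", PartitionId = 0, Time = -1 }, // latest
                new Offset { Topic = "example-topic", PartitionId = 0, Time = -2 }  // earliest
            }
        };
    }
}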
--------------------------------------------------------------------------------
/kafka-net-core/Protocol/ProduceRequest.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Linq;
4 | using System.Threading;
5 | using KafkaNet.Common;
6 | using KafkaNet.Statistics;
7 |
8 | namespace KafkaNet.Protocol
9 | {
10 | public class ProduceRequest : BaseRequest, IKafkaRequest<ProduceResponse>
11 | {
12 | /// <summary>
13 | /// Provide a hint to the broker call not to expect a response for requests without Acks.
14 | /// </summary>
15 | public override bool ExpectResponse { get { return Acks != 0; } }
16 | /// <summary>
17 | /// Indicates the type of kafka encoding this request is.
18 | /// </summary>
19 | public ApiKeyRequestType ApiKey { get { return ApiKeyRequestType.Produce; } }
20 | /// <summary>
21 | /// Time kafka will wait for the requested ack level before returning.
22 | /// </summary>
23 | public int TimeoutMS = 1000;
24 | /// <summary>
25 | /// Level of ack required by kafka. 0 immediate, 1 written to leader, 2+ replicas synced, -1 all replicas
26 | /// </summary>
27 | public Int16 Acks = 1;
28 | /// <summary>
29 | /// Collection of payloads to post to kafka
30 | /// </summary>
31 | public List<Payload> Payload = new List<Payload>();
32 |
33 |
34 | public KafkaDataPayload Encode()
35 | {
36 | return EncodeProduceRequest(this);
37 | }
38 |
39 | public IEnumerable<ProduceResponse> Decode(byte[] payload)
40 | {
41 | return DecodeProduceResponse(payload);
42 | }
43 |
44 | #region Protocol...
45 | private KafkaDataPayload EncodeProduceRequest(ProduceRequest request)
46 | {
47 | int totalCompressedBytes = 0;
48 | if (request.Payload == null) request.Payload = new List<Payload>();
49 |
50 | var groupedPayloads = (from p in request.Payload
51 | group p by new
52 | {
53 | p.Topic,
54 | p.Partition,
55 | p.Codec
56 | } into tpc
57 | select tpc).ToList();
58 |
59 | using (var message = EncodeHeader(request)
60 | .Pack(request.Acks)
61 | .Pack(request.TimeoutMS)
62 | .Pack(groupedPayloads.Count))
63 | {
64 | foreach (var groupedPayload in groupedPayloads)
65 | {
66 | var payloads = groupedPayload.ToList();
67 | message.Pack(groupedPayload.Key.Topic, StringPrefixEncoding.Int16)
68 | .Pack(payloads.Count)
69 | .Pack(groupedPayload.Key.Partition);
70 |
71 | switch (groupedPayload.Key.Codec)
72 | {
73 |
74 | case MessageCodec.CodecNone:
75 | message.Pack(Message.EncodeMessageSet(payloads.SelectMany(x => x.Messages)));
76 | break;
77 | case MessageCodec.CodecGzip:
78 | var compressedBytes = CreateGzipCompressedMessage(payloads.SelectMany(x => x.Messages));
79 | Interlocked.Add(ref totalCompressedBytes, compressedBytes.CompressedAmount);
80 | message.Pack(Message.EncodeMessageSet(new[] { compressedBytes.CompressedMessage }));
81 | break;
82 | default:
83 | throw new NotSupportedException(string.Format("Codec type of {0} is not supported.", groupedPayload.Key.Codec));
84 | }
85 | }
86 |
87 | var result = new KafkaDataPayload
88 | {
89 | Buffer = message.Payload(),
90 | CorrelationId = request.CorrelationId,
91 | MessageCount = request.Payload.Sum(x => x.Messages.Count)
92 | };
93 | StatisticsTracker.RecordProduceRequest(result.MessageCount, result.Buffer.Length, totalCompressedBytes);
94 | return result;
95 | }
96 | }
97 |
98 | private CompressedMessageResult CreateGzipCompressedMessage(IEnumerable<Message> messages)
99 | {
100 | var messageSet = Message.EncodeMessageSet(messages);
101 |
102 | var gZipBytes = Compression.Zip(messageSet);
103 |
104 | var compressedMessage = new Message
105 | {
106 | Attribute = (byte)(0x00 | (ProtocolConstants.AttributeCodeMask & (byte)MessageCodec.CodecGzip)),
107 | Value = gZipBytes
108 | };
109 |
110 | return new CompressedMessageResult
111 | {
112 | CompressedAmount = messageSet.Length - compressedMessage.Value.Length,
113 | CompressedMessage = compressedMessage
114 | };
115 | }
116 |
117 | private IEnumerable<ProduceResponse> DecodeProduceResponse(byte[] data)
118 | {
119 | using (var stream = new BigEndianBinaryReader(data))
120 | {
121 | var correlationId = stream.ReadInt32();
122 |
123 | var topicCount = stream.ReadInt32();
124 | for (int i = 0; i < topicCount; i++)
125 | {
126 | var topic = stream.ReadInt16String();
127 |
128 | var partitionCount = stream.ReadInt32();
129 | for (int j = 0; j < partitionCount; j++)
130 | {
131 | var response = new ProduceResponse()
132 | {
133 | Topic = topic,
134 | PartitionId = stream.ReadInt32(),
135 | Error = stream.ReadInt16(),
136 | Offset = stream.ReadInt64()
137 | };
138 |
139 | yield return response;
140 | }
141 | }
142 | }
143 | }
144 | #endregion
145 | }
146 |
147 | class CompressedMessageResult
148 | {
149 | public int CompressedAmount { get; set; }
150 | public Message CompressedMessage { get; set; }
151 | }
152 |
153 | public class ProduceResponse
154 | {
155 | /// <summary>
156 | /// The topic the offset came from.
157 | /// </summary>
158 | public string Topic { get; set; }
159 | /// <summary>
160 | /// The partition the offset came from.
161 | /// </summary>
162 | public int PartitionId { get; set; }
163 | /// <summary>
164 | /// Error response code. 0 is success.
165 | /// </summary>
166 | public Int16 Error { get; set; }
167 | /// <summary>
168 | /// The offset number to commit as completed.
169 | /// </summary>
170 | public long Offset { get; set; }
171 |
172 | public override bool Equals(object obj)
173 | {
174 | if (ReferenceEquals(null, obj)) return false;
175 | if (ReferenceEquals(this, obj)) return true;
176 | if (obj.GetType() != this.GetType()) return false;
177 | return Equals((ProduceResponse)obj);
178 | }
179 |
180 | protected bool Equals(ProduceResponse other)
181 | {
182 | return string.Equals(Topic, other.Topic) && PartitionId == other.PartitionId && Error == other.Error && Offset == other.Offset;
183 | }
184 |
185 | public override int GetHashCode()
186 | {
187 | unchecked
188 | {
189 | int hashCode = (Topic != null ? Topic.GetHashCode() : 0);
190 | hashCode = (hashCode * 397) ^ PartitionId;
191 | hashCode = (hashCode * 397) ^ Error.GetHashCode();
192 | hashCode = (hashCode * 397) ^ Offset.GetHashCode();
193 | return hashCode;
194 | }
195 | }
196 | }
197 | }
198 |
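// Editor's note: usage sketch, not in the source tree. Acks=0 also flips
// ExpectResponse to false, so the connection will not wait for a broker reply;
// Acks=1 (the default) waits for the partition leader's write. Names are placeholders.
using System.Collections.Generic;
using KafkaNet.Protocol;

public static class ProduceRequestExample
{
    public static ProduceRequest Build()
    {
        return new ProduceRequest
        {
            Acks = 1,          // wait for the leader; -1 would wait for all replicas
            TimeoutMS = 1000,  // how long the broker may wait to satisfy Acks
            Payload = new List<Payload>
            {
                new Payload
                {
                    Topic = "example-topic",
                    Partition = 0,
                    Codec = MessageCodec.CodecGzip, // batch is gzip-compressed on encode
                    Messages = new List<Message> { new Message("hello") }
                }
            }
        };
    }
}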
--------------------------------------------------------------------------------
/kafka-net-core/Protocol/Protocol.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.IO;
3 | using System.IO.Compression;
4 |
5 | namespace KafkaNet.Protocol
6 | {
7 | /// <summary>
8 | /// Extension methods which allow compression of byte arrays
9 | /// </summary>
10 | public static class Compression
11 | {
12 | public static byte[] Zip(byte[] bytes)
13 | {
14 | using (var destination = new MemoryStream())
15 | using (var gzip = new GZipStream(destination, CompressionLevel.Fastest, false))
16 | {
17 | gzip.Write(bytes, 0, bytes.Length);
18 | gzip.Flush();
19 | gzip.Close();
20 | return destination.ToArray();
21 | }
22 | }
23 |
24 | public static byte[] Unzip(byte[] bytes)
25 | {
26 | using (var source = new MemoryStream(bytes))
27 | using (var destination = new MemoryStream())
28 | using (var gzip = new GZipStream(source, CompressionMode.Decompress, false))
29 | {
30 | gzip.CopyTo(destination);
31 | gzip.Flush();
32 | gzip.Close();
33 | return destination.ToArray();
34 | }
35 | }
36 | }
37 |
38 | /// <summary>
39 | /// Enumeration of numeric codes that the ApiKey in the request can take for each request type.
40 | /// </summary>
41 | public enum ApiKeyRequestType
42 | {
43 | Produce = 0,
44 | Fetch = 1,
45 | Offset = 2,
46 | MetaData = 3,
47 | OffsetCommit = 8,
48 | OffsetFetch = 9,
49 | ConsumerMetadataRequest = 10
50 | }
51 |
52 | /// <summary>
53 | /// Enumeration of error codes that might be returned from a Kafka server
54 | /// </summary>
55 | public enum ErrorResponseCode : short
56 | {
57 | /// <summary>
58 | /// No error--it worked!
59 | /// </summary>
60 | NoError = 0,
61 |
62 | /// <summary>
63 | /// An unexpected server error
64 | /// </summary>
65 | Unknown = -1,
66 |
67 | /// <summary>
68 | /// The requested offset is outside the range of offsets maintained by the server for the given topic/partition.
69 | /// </summary>
70 | OffsetOutOfRange = 1,
71 |
72 | /// <summary>
73 | /// This indicates that a message's contents do not match its CRC
74 | /// </summary>
75 | InvalidMessage = 2,
76 |
77 | /// <summary>
78 | /// This request is for a topic or partition that does not exist on this broker.
79 | /// </summary>
80 | UnknownTopicOrPartition = 3,
81 |
82 | /// <summary>
83 | /// The message has a negative size
84 | /// </summary>
85 | InvalidMessageSize = 4,
86 |
87 | /// <summary>
88 | /// This error is thrown if we are in the middle of a leadership election and there is currently no leader for this partition and hence it is unavailable for writes.
89 | /// </summary>
90 | LeaderNotAvailable = 5,
91 |
92 | /// <summary>
93 | /// This error is thrown if the client attempts to send messages to a replica that is not the leader for some partition. It indicates that the client's metadata is out of date.
94 | /// </summary>
95 | NotLeaderForPartition = 6,
96 |
97 | /// <summary>
98 | /// This error is thrown if the request exceeds the user-specified time limit in the request.
99 | /// </summary>
100 | RequestTimedOut = 7,
101 |
102 | /// <summary>
103 | /// This is not a client facing error and is used only internally by intra-cluster broker communication.
104 | /// </summary>
105 | BrokerNotAvailable = 8,
106 |
107 | /// <summary>
108 | /// If replica is expected on a broker, but is not.
109 | /// </summary>
110 | ReplicaNotAvailable = 9,
111 |
112 | /// <summary>
113 | /// The server has a configurable maximum message size to avoid unbounded memory allocation. This error is thrown if the client attempts to produce a message larger than this maximum.
114 | /// </summary>
115 | MessageSizeTooLarge = 10,
116 |
117 | /// <summary>
118 | /// Internal error code for broker-to-broker communication.
119 | /// </summary>
120 | StaleControllerEpochCode = 11,
121 |
122 | /// <summary>
123 | /// If you specify a string larger than the configured maximum for offset metadata
124 | /// </summary>
125 | OffsetMetadataTooLargeCode = 12,
126 |
127 | /// <summary>
128 | /// The broker returns this error code for an offset fetch request if it is still loading offsets (after a leader change for that offsets topic partition).
129 | /// </summary>
130 | OffsetsLoadInProgressCode = 14,
131 |
132 | /// <summary>
133 | /// The broker returns this error code for consumer metadata requests or offset commit requests if the offsets topic has not yet been created.
134 | /// </summary>
135 | ConsumerCoordinatorNotAvailableCode = 15,
136 |
137 | /// <summary>
138 | /// The broker returns this error code if it receives an offset fetch or commit request for a consumer group that it is not a coordinator for.
139 | /// </summary>
140 | NotCoordinatorForConsumerCode = 16
141 | }
142 |
143 | /// <summary>
144 | /// Protocol specific constants
145 | /// </summary>
146 | public struct ProtocolConstants
147 | {
148 | /// <summary>
149 | /// The lowest 2 bits contain the compression codec used for the message. The other bits should be set to 0.
150 | /// </summary>
151 | public static byte AttributeCodeMask = 0x03;
151 | public static byte AttributeCodeMask = 0x03;
152 | }
153 |
154 | /// <summary>
155 | /// Enumeration which specifies the compression type of messages
156 | /// </summary>
157 | public enum MessageCodec
158 | {
159 | CodecNone = 0x00,
160 | CodecGzip = 0x01,
161 | CodecSnappy = 0x02
162 | }
163 |
164 | #region Exceptions...
165 | public class FailCrcCheckException : ApplicationException
166 | {
167 | public FailCrcCheckException(string message, params object[] args) : base(string.Format(message, args)) { }
168 | }
169 |
170 | public class ResponseTimeoutException : ApplicationException
171 | {
172 | public ResponseTimeoutException(string message, params object[] args) : base(string.Format(message, args)) { }
173 | }
174 |
175 | public class InvalidPartitionException : ApplicationException
176 | {
177 | public InvalidPartitionException(string message, params object[] args) : base(string.Format(message, args)) { }
178 | }
179 |
180 | public class ServerDisconnectedException : ApplicationException
181 | {
182 | public ServerDisconnectedException(string message, params object[] args) : base(string.Format(message, args)) { }
183 | }
184 |
185 | public class ServerUnreachableException : ApplicationException
186 | {
187 | public ServerUnreachableException(string message, params object[] args) : base(string.Format(message, args)) { }
188 | }
189 |
190 | public class InvalidTopicMetadataException : ApplicationException
191 | {
192 | public InvalidTopicMetadataException(ErrorResponseCode code, string message, params object[] args)
193 | : base(string.Format(message, args))
194 | {
195 | ErrorResponseCode = code;
196 | }
197 | public ErrorResponseCode ErrorResponseCode { get; private set; }
198 | }
199 |
200 | public class LeaderNotFoundException : ApplicationException
201 | {
202 | public LeaderNotFoundException(string message, params object[] args) : base(string.Format(message, args)) { }
203 | }
204 |
205 | public class UnresolvedHostnameException : ApplicationException
206 | {
207 | public UnresolvedHostnameException(string message, params object[] args) : base(string.Format(message, args)) { }
208 | }
209 |
210 | public class InvalidMetadataException : ApplicationException
211 | {
212 | public int ErrorCode { get; set; }
213 | public InvalidMetadataException(string message, params object[] args) : base(string.Format(message, args)) { }
214 | }
215 |
216 | public class OffsetOutOfRangeException : ApplicationException
217 | {
218 | public Fetch FetchRequest { get; set; }
219 | public OffsetOutOfRangeException(string message, params object[] args) : base(string.Format(message, args)) { }
220 | }
221 |
222 | public class BufferUnderRunException : ApplicationException
223 | {
224 | public int MessageHeaderSize { get; set; }
225 | public int RequiredBufferSize { get; set; }
226 |
227 | public BufferUnderRunException(int messageHeaderSize, int requiredBufferSize)
228 | : base("The size of the message from Kafka exceeds the provided buffer size.")
229 | {
230 | MessageHeaderSize = messageHeaderSize;
231 | RequiredBufferSize = requiredBufferSize;
232 | }
233 | }
234 |
235 | public class KafkaApplicationException : ApplicationException
236 | {
237 | public int ErrorCode { get; set; }
238 | public KafkaApplicationException(string message, params object[] args) : base(string.Format(message, args)) { }
239 | }
240 | #endregion
241 |
242 |
243 | }
244 |
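// Editor's note: sketch, not part of the source tree. Compression.Zip/Unzip are
// exact inverses, and the codec of a message is recovered by masking its attribute
// byte with ProtocolConstants.AttributeCodeMask (only the lowest 2 bits are used).
using System.Linq;
using System.Text;
using KafkaNet.Protocol;

public static class CompressionExample
{
    public static bool RoundTrips()
    {
        var original = Encoding.UTF8.GetBytes("some payload worth compressing");
        var restored = Compression.Unzip(Compression.Zip(original));
        var codec = (MessageCodec)(ProtocolConstants.AttributeCodeMask & 0x01); // CodecGzip
        return restored.SequenceEqual(original) && codec == MessageCodec.CodecGzip;
    }
}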
--------------------------------------------------------------------------------
/kafka-net-core/Protocol/Topic.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using KafkaNet.Common;
4 |
5 | namespace KafkaNet.Protocol
6 | {
7 | public class Topic
8 | {
9 | public Int16 ErrorCode { get; set; }
10 | public string Name { get; set; }
11 | public List<Partition> Partitions { get; set; }
12 |
13 | public static Topic FromStream(BigEndianBinaryReader stream)
14 | {
15 | var topic = new Topic
16 | {
17 | ErrorCode = stream.ReadInt16(),
18 | Name = stream.ReadInt16String(),
19 | Partitions = new List<Partition>()
20 | };
21 |
22 | var numPartitions = stream.ReadInt32();
23 | for (int i = 0; i < numPartitions; i++)
24 | {
25 | topic.Partitions.Add(Partition.FromStream(stream));
26 | }
27 |
28 | return topic;
29 | }
30 | }
31 |
32 | public class Partition
33 | {
34 | /// <summary>
35 | /// Error code. 0 indicates no error occurred.
36 | /// </summary>
37 | public Int16 ErrorCode { get; set; }
38 | /// <summary>
39 | /// The Id of the partition that this metadata describes.
40 | /// </summary>
41 | public int PartitionId { get; set; }
42 | /// <summary>
43 | /// The node id for the kafka broker currently acting as leader for this partition. If no leader exists because we are in the middle of a leader election this id will be -1.
44 | /// </summary>
45 | public int LeaderId { get; set; }
46 | /// <summary>
47 | /// The set of alive nodes that currently act as slaves for the leader of this partition.
48 | /// </summary>
49 | public List<int> Replicas { get; set; }
50 | /// <summary>
51 | /// The subset of the replicas that are "caught up" to the leader
52 | /// </summary>
53 | public List<int> Isrs { get; set; }
54 |
55 | public static Partition FromStream(BigEndianBinaryReader stream)
56 | {
57 | var partition = new Partition {
58 | ErrorCode = stream.ReadInt16(),
59 | PartitionId = stream.ReadInt32(),
60 | LeaderId = stream.ReadInt32(),
61 | Replicas = new List<int>(),
62 | Isrs = new List<int>()
63 | };
64 |
65 | var numReplicas = stream.ReadInt32();
66 | for (int i = 0; i < numReplicas; i++)
67 | {
68 | partition.Replicas.Add(stream.ReadInt32());
69 | }
70 |
71 | var numIsr = stream.ReadInt32();
72 | for (int i = 0; i < numIsr; i++)
73 | {
74 | partition.Isrs.Add(stream.ReadInt32());
75 | }
76 |
77 | return partition;
78 | }
79 |
80 | protected bool Equals(Partition other)
81 | {
82 | return PartitionId == other.PartitionId;
83 | }
84 |
85 | public override int GetHashCode()
86 | {
87 | return PartitionId;
88 | }
89 |
90 | public override bool Equals(object obj)
91 | {
92 | if (ReferenceEquals(null, obj)) return false;
93 | if (ReferenceEquals(this, obj)) return true;
94 | if (obj.GetType() != this.GetType()) return false;
95 | return Equals((Partition) obj);
96 | }
97 | }
98 |
99 | }
100 |
--------------------------------------------------------------------------------
/kafka-net-core/Statistics/StatisticsTracker.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Concurrent;
3 | using System.Collections.Generic;
4 | using System.Linq;
5 | using System.Threading;
6 | using KafkaNet.Common;
7 | using KafkaNet.Model;
8 |
9 | namespace KafkaNet.Statistics
10 | {
11 | /// <summary>
12 | /// Statistics tracker uses circular buffers to capture a maximum set of current statistics.
13 | /// </summary>
14 | public static class StatisticsTracker
15 | {
16 | public static event Action<StatisticsSummary> OnStatisticsHeartbeat;
17 |
18 | private static readonly IScheduledTimer HeartbeatTimer;
19 | private static readonly Gauges Gauges = new Gauges();
20 | private static readonly ConcurrentCircularBuffer<ProduceRequestStatistic> ProduceRequestStatistics = new ConcurrentCircularBuffer<ProduceRequestStatistic>(500);
21 | private static readonly ConcurrentCircularBuffer<NetworkWriteStatistic> CompletedNetworkWriteStatistics = new ConcurrentCircularBuffer<NetworkWriteStatistic>(500);
22 | private static readonly ConcurrentDictionary<int, NetworkWriteStatistic> NetworkWriteQueuedIndex = new ConcurrentDictionary<int, NetworkWriteStatistic>();
23 |
24 | static StatisticsTracker()
25 | {
26 | HeartbeatTimer = new ScheduledTimer()
27 | .StartingAt(DateTime.Now)
28 | .Every(TimeSpan.FromSeconds(5))
29 | .Do(HeartBeatAction)
30 | .Begin();
31 | }
32 |
33 | private static void HeartBeatAction()
34 | {
35 | if (OnStatisticsHeartbeat != null)
36 | {
37 | OnStatisticsHeartbeat(new StatisticsSummary(ProduceRequestStatistics.ToList(),
38 | NetworkWriteQueuedIndex.Values.ToList(),
39 | CompletedNetworkWriteStatistics.ToList(),
40 | Gauges));
41 | }
42 | }
43 |
44 | public static void RecordProduceRequest(int messageCount, int payloadBytes, int compressedBytes)
45 | {
46 | ProduceRequestStatistics.Enqueue(new ProduceRequestStatistic(messageCount, payloadBytes, compressedBytes));
47 | }
48 |
49 | public static void IncrementGauge(StatisticGauge gauge)
50 | {
51 | switch (gauge)
52 | {
53 | case StatisticGauge.ActiveReadOperation:
54 | Interlocked.Increment(ref Gauges.ActiveReadOperation);
55 | break;
56 | case StatisticGauge.ActiveWriteOperation:
57 | Interlocked.Increment(ref Gauges.ActiveWriteOperation);
58 | break;
59 | case StatisticGauge.QueuedWriteOperation:
60 | Interlocked.Increment(ref Gauges.QueuedWriteOperation);
61 | break;
62 | }
63 | }
64 |
65 | public static void DecrementGauge(StatisticGauge gauge)
66 | {
67 | switch (gauge)
68 | {
69 | case StatisticGauge.ActiveReadOperation:
70 | Interlocked.Decrement(ref Gauges.ActiveReadOperation);
71 | break;
72 | case StatisticGauge.ActiveWriteOperation:
73 | Interlocked.Decrement(ref Gauges.ActiveWriteOperation);
74 | break;
75 | case StatisticGauge.QueuedWriteOperation:
76 | Interlocked.Decrement(ref Gauges.QueuedWriteOperation);
77 | break;
78 | }
79 | }
80 |
81 | public static void QueueNetworkWrite(KafkaEndpoint endpoint, KafkaDataPayload payload)
82 | {
83 | if (payload.TrackPayload == false) return;
84 |
85 | var stat = new NetworkWriteStatistic(endpoint, payload);
86 | NetworkWriteQueuedIndex.TryAdd(payload.CorrelationId, stat);
87 | Interlocked.Increment(ref Gauges.QueuedWriteOperation);
88 | }
89 |
90 | public static void CompleteNetworkWrite(KafkaDataPayload payload, long milliseconds, bool failed)
91 | {
92 | if (payload.TrackPayload == false) return;
93 |
94 | NetworkWriteStatistic stat;
95 | if (NetworkWriteQueuedIndex.TryRemove(payload.CorrelationId, out stat))
96 | {
97 | stat.SetCompleted(milliseconds, failed);
98 | CompletedNetworkWriteStatistics.Enqueue(stat);
99 | }
100 | Interlocked.Decrement(ref Gauges.QueuedWriteOperation);
101 | }
102 | }
103 |
104 | public enum StatisticGauge
105 | {
106 | QueuedWriteOperation,
107 | ActiveWriteOperation,
108 | ActiveReadOperation
109 | }
110 |
111 | public class StatisticsSummary
112 | {
113 | public ProduceRequestSummary ProduceRequestSummary { get; private set; }
114 | public List<NetworkWriteSummary> NetworkWriteSummaries { get; private set; }
115 |
116 | public List<ProduceRequestStatistic> ProduceRequestStatistics { get; private set; }
117 | public List<NetworkWriteStatistic> CompletedNetworkWriteStatistics { get; private set; }
118 | public List<NetworkWriteStatistic> QueuedNetworkWriteStatistics { get; private set; }
119 | public Gauges Gauges { get; private set; }
120 |
121 | public StatisticsSummary(List<ProduceRequestStatistic> produceRequestStatistics,
122 | List<NetworkWriteStatistic> queuedWrites,
123 | List<NetworkWriteStatistic> completedWrites,
124 | Gauges gauges)
125 | {
126 | ProduceRequestStatistics = produceRequestStatistics;
127 | QueuedNetworkWriteStatistics = queuedWrites;
128 | CompletedNetworkWriteStatistics = completedWrites;
129 | Gauges = gauges;
130 |
131 |
132 | if (queuedWrites.Count > 0 || completedWrites.Count > 0)
133 | {
134 | var queuedSummary = queuedWrites.GroupBy(x => x.Endpoint)
135 | .Select(e => new
136 | {
137 | Endpoint = e.Key,
138 | QueuedSummary = new NetworkQueueSummary
139 | {
140 | SampleSize = e.Count(),
141 | OldestBatchInQueue = e.Max(x => x.TotalDuration),
142 | BytesQueued = e.Sum(x => x.Payload.Buffer.Length),
143 | QueuedMessages = e.Sum(x => x.Payload.MessageCount),
144 | QueuedBatchCount = Gauges.QueuedWriteOperation,
145 | }
146 | }).ToList();
147 |
148 | var networkWriteSampleTimespan = completedWrites.Count <= 0 ? TimeSpan.FromMilliseconds(0) : DateTime.UtcNow - completedWrites.Min(x => x.CreatedOnUtc);
149 | var completedSummary = completedWrites.GroupBy(x => x.Endpoint)
150 | .Select(e =>
151 | new
152 | {
153 | Endpoint = e.Key,
154 | CompletedSummary = new NetworkTcpSummary
155 | {
156 | MessagesPerSecond = (int)(e.Sum(x => x.Payload.MessageCount) /
157 | networkWriteSampleTimespan.TotalSeconds),
158 | MessagesLastBatch = e.OrderByDescending(x => x.CompletedOnUtc).Select(x => x.Payload.MessageCount).FirstOrDefault(),
159 | MaxMessagesPerSecond = e.Max(x => x.Payload.MessageCount),
160 | BytesPerSecond = (int)(e.Sum(x => x.Payload.Buffer.Length) /
161 | networkWriteSampleTimespan.TotalSeconds),
162 | AverageWriteDuration = TimeSpan.FromMilliseconds(e.Sum(x => x.WriteDuration.TotalMilliseconds) /
163 | e.Count()),
164 | AverageTotalDuration = TimeSpan.FromMilliseconds(e.Sum(x => x.TotalDuration.TotalMilliseconds) /
165 | e.Count()),
166 | SampleSize = e.Count()
167 | }
168 | }
169 | ).ToList();
170 |
171 | NetworkWriteSummaries = new List<NetworkWriteSummary>();
172 | var endpoints = queuedSummary.Select(x => x.Endpoint).Union(completedSummary.Select(x => x.Endpoint));
173 | foreach (var endpoint in endpoints)
174 | {
175 | NetworkWriteSummaries.Add(new NetworkWriteSummary
176 | {
177 | Endpoint = endpoint,
178 | QueueSummary = queuedSummary.Where(x => x.Endpoint.Equals(endpoint)).Select(x => x.QueuedSummary).FirstOrDefault(),
179 | TcpSummary = completedSummary.Where(x => x.Endpoint.Equals(endpoint)).Select(x => x.CompletedSummary).FirstOrDefault()
180 | });
181 | }
182 | }
183 | else
184 | {
185 | NetworkWriteSummaries = new List<NetworkWriteSummary>();
186 | }
187 |
188 | if (ProduceRequestStatistics.Count > 0)
189 | {
190 | var produceRequestSampleTimespan = DateTime.UtcNow -
191 | ProduceRequestStatistics.Min(x => x.CreatedOnUtc);
192 |
193 | ProduceRequestSummary = new ProduceRequestSummary
194 | {
195 | SampleSize = ProduceRequestStatistics.Count,
196 | MessageCount = ProduceRequestStatistics.Sum(s => s.MessageCount),
197 | MessageBytesPerSecond = (int)
198 | (ProduceRequestStatistics.Sum(s => s.MessageBytes) / produceRequestSampleTimespan.TotalSeconds),
199 | PayloadBytesPerSecond = (int)
200 | (ProduceRequestStatistics.Sum(s => s.PayloadBytes) / produceRequestSampleTimespan.TotalSeconds),
201 | CompressedBytesPerSecond = (int)
202 | (ProduceRequestStatistics.Sum(s => s.CompressedBytes) / produceRequestSampleTimespan.TotalSeconds),
203 | AverageCompressionRatio =
204 | Math.Round(ProduceRequestStatistics.Sum(s => s.CompressionRatio) / ProduceRequestStatistics.Count, 4),
205 | MessagesPerSecond = (int)
206 | (ProduceRequestStatistics.Sum(x => x.MessageCount) / produceRequestSampleTimespan.TotalSeconds)
207 | };
208 | }
209 | else
210 | {
211 | ProduceRequestSummary = new ProduceRequestSummary();
212 | }
213 | }
214 | }
215 |
216 | public class Gauges
217 | {
218 | public int ActiveWriteOperation;
219 | public int ActiveReadOperation;
220 | public int QueuedWriteOperation;
221 | }
222 |
223 | public class NetworkWriteStatistic
224 | {
225 | public DateTime CreatedOnUtc { get; private set; }
226 | public DateTime CompletedOnUtc { get; private set; }
227 | public bool IsCompleted { get; private set; }
228 | public bool IsFailed { get; private set; }
229 | public KafkaEndpoint Endpoint { get; private set; }
230 | public KafkaDataPayload Payload { get; private set; }
231 | public TimeSpan TotalDuration { get { return (IsCompleted ? CompletedOnUtc : DateTime.UtcNow) - CreatedOnUtc; } }
232 | public TimeSpan WriteDuration { get; private set; }
233 |
234 | public NetworkWriteStatistic(KafkaEndpoint endpoint, KafkaDataPayload payload)
235 | {
236 | CreatedOnUtc = DateTime.UtcNow;
237 | Endpoint = endpoint;
238 | Payload = payload;
239 | }
240 |
241 | public void SetCompleted(long milliseconds, bool failedFlag)
242 | {
243 | IsCompleted = true;
244 | IsFailed = failedFlag;
245 | CompletedOnUtc = DateTime.UtcNow;
246 | WriteDuration = TimeSpan.FromMilliseconds(milliseconds);
247 | }
248 |
249 | public void SetSuccess(bool failed) // note: despite the name, this only records the failure flag; SetCompleted marks completion
250 | {
251 | IsFailed = failed;
252 | }
253 | }
254 |
255 | public class NetworkWriteSummary
256 | {
257 | public KafkaEndpoint Endpoint;
258 |
259 | public NetworkTcpSummary TcpSummary = new NetworkTcpSummary();
260 | public NetworkQueueSummary QueueSummary = new NetworkQueueSummary();
261 | }
262 |
263 | public class NetworkQueueSummary
264 | {
265 | public int BytesQueued;
266 | public double KilobytesQueued { get { return MathHelper.ConvertToKilobytes(BytesQueued); } }
267 | public TimeSpan OldestBatchInQueue { get; set; }
268 | public int QueuedMessages { get; set; }
269 | public int QueuedBatchCount;
270 | public int SampleSize { get; set; }
271 | }
272 |
273 | public class NetworkTcpSummary
274 | {
275 | public int MessagesPerSecond;
276 | public int MaxMessagesPerSecond;
277 | public int BytesPerSecond;
278 | public TimeSpan AverageWriteDuration;
279 | public double KilobytesPerSecond { get { return MathHelper.ConvertToKilobytes(BytesPerSecond); } }
280 | public TimeSpan AverageTotalDuration { get; set; }
281 | public int SampleSize { get; set; }
282 | public int MessagesLastBatch { get; set; }
283 | }
284 |
285 | public class ProduceRequestSummary
286 | {
287 | public int SampleSize;
288 | public int MessageCount;
289 | public int MessagesPerSecond;
290 | public int MessageBytesPerSecond;
291 | public double MessageKilobytesPerSecond { get { return MathHelper.ConvertToKilobytes(MessageBytesPerSecond); } }
292 | public int PayloadBytesPerSecond;
293 | public double PayloadKilobytesPerSecond { get { return MathHelper.ConvertToKilobytes(PayloadBytesPerSecond); } }
294 | public int CompressedBytesPerSecond;
295 | public double CompressedKilobytesPerSecond { get { return MathHelper.ConvertToKilobytes(CompressedBytesPerSecond); } }
296 | public double AverageCompressionRatio;
297 | }
298 |
299 | public class ProduceRequestStatistic
300 | {
301 | public DateTime CreatedOnUtc { get; private set; }
302 | public int MessageCount { get; private set; }
303 | public int MessageBytes { get; private set; }
304 | public int PayloadBytes { get; private set; }
305 | public int CompressedBytes { get; private set; }
306 | public double CompressionRatio { get; private set; }
307 |
308 | public ProduceRequestStatistic(int messageCount, int payloadBytes, int compressedBytes)
309 | {
310 | CreatedOnUtc = DateTime.UtcNow;
311 | MessageCount = messageCount;
312 | MessageBytes = payloadBytes + compressedBytes;
313 | PayloadBytes = payloadBytes;
314 | CompressedBytes = compressedBytes;
315 |
316 | CompressionRatio = MessageBytes == 0 ? 0 : Math.Round((double)compressedBytes / MessageBytes, 4);
317 | }
318 |
319 |
320 | }
321 |
322 | public static class MathHelper
323 | {
324 | public static double ConvertToMegabytes(int bytes)
325 | {
326 | if (bytes == 0) return 0;
327 | return Math.Round((double)bytes / 1048576, 4);
328 | }
329 |
330 | public static double ConvertToKilobytes(int bytes)
331 | {
332 | if (bytes == 0) return 0;
333 | return Math.Round((double)bytes / 1024, 4); // binary kilobytes, consistent with ConvertToMegabytes above
334 | }
335 | }
336 | }
337 |
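338 | // --------------------------------------------------------------------------
339 | // Editor's sketch, not part of the original file: one plausible socket write
340 | // path that exercises the trackers above. Hedge: the namespace mirrors the
341 | // upstream kafka-net sources this fork derives from; IncrementGauge (assumed
342 | // to mirror DecrementGauge above) and IKafkaTcpSocket.WriteAsync are assumed
343 | // APIs. Treat this as illustration, not the library's canonical write path.
344 | // --------------------------------------------------------------------------
345 | namespace KafkaNet.Statistics
346 | {
347 | using System.Diagnostics;
348 | using System.Threading.Tasks;
349 | using KafkaNet.Model; // assumed home of KafkaEndpoint
350 | 
351 | public static class StatisticsTrackerUsageSketch
352 | {
353 | // Queue the payload, time the send, and always unwind the gauges in the
354 | // finally block so ActiveWriteOperation and QueuedWriteOperation balance.
355 | public static async Task TrackedSendAsync(IKafkaTcpSocket socket, KafkaEndpoint endpoint, KafkaDataPayload payload)
356 | {
357 | StatisticsTracker.QueueNetworkWrite(endpoint, payload); // no-op when payload.TrackPayload is false
358 | StatisticsTracker.IncrementGauge(StatisticGauge.ActiveWriteOperation);
359 | var timer = Stopwatch.StartNew();
360 | var failed = false;
361 | try
362 | {
363 | await socket.WriteAsync(payload); // assumed to return a Task
364 | }
365 | catch
366 | {
367 | failed = true;
368 | throw;
369 | }
370 | finally
371 | {
372 | StatisticsTracker.DecrementGauge(StatisticGauge.ActiveWriteOperation);
373 | StatisticsTracker.CompleteNetworkWrite(payload, timer.ElapsedMilliseconds, failed);
374 | }
375 | }
376 | }
377 | }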
--------------------------------------------------------------------------------
/kafka-net-core/kafka-net-core.csproj:
--------------------------------------------------------------------------------
1 | <Project Sdk="Microsoft.NET.Sdk">
2 | 
3 | <PropertyGroup>
4 | <TargetFramework>netstandard2.0</TargetFramework>
5 | <RootNamespace>kafka_net_core</RootNamespace>
6 | <Version>1.1.0</Version>
7 | <PackageTags>kafka net-core kafka-client net-standard kafka-netcore kafka-netstandard</PackageTags>
8 | <PackageReleaseNotes>change target framework to netstandard 2</PackageReleaseNotes>
9 | <PackageProjectUrl>https://github.com/snmslavk/kafka-net-core</PackageProjectUrl>
10 | <PackageLicenseUrl>https://github.com/snmslavk/kafka-net-core/blob/master/LICENSE</PackageLicenseUrl>
11 | <RepositoryUrl>https://github.com/snmslavk/kafka-net-core</RepositoryUrl>
12 | <Authors>Viacheslav Avsenev</Authors>
13 | </PropertyGroup>
14 | 
15 | </Project>
16 | 
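17 | <!-- Editor's note, not part of the original file: a consuming project would
18 | pull the published package in with a PackageReference along these lines. The
19 | package id here is inferred from the repository name and the Version value
20 | above; the id actually published to NuGet may differ.
21 | 
22 | <ItemGroup>
23 | <PackageReference Include="kafka-net-core" Version="1.1.0" />
24 | </ItemGroup>
25 | -->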
--------------------------------------------------------------------------------