├── .gitignore ├── .travis.yml ├── LICENSE ├── OWNERS ├── README.md ├── appveyor.yml ├── buildAndTest.sh ├── kafka-sharp ├── kafka-sharp-net45.sln ├── kafka-sharp-netstd.sln ├── kafka-sharp.UTest │ ├── AsyncCountdownEvent.cs │ ├── Kafka.UTest.csproj │ ├── Mocks.cs │ ├── Properties │ │ └── AssemblyInfo.cs │ ├── TestBatching.cs │ ├── TestClient.cs │ ├── TestCluster.cs │ ├── TestCommon.cs │ ├── TestConnection.cs │ ├── TestConsumer.cs │ ├── TestConsumerGroup.cs │ ├── TestGeneral.cs │ ├── TestKafkaConsumer.cs │ ├── TestMessageKeyPartitionSelection.cs │ ├── TestNode.cs.cs │ ├── TestPartitioner.cs │ ├── TestRouter.cs │ ├── TestRoutingTable.cs │ ├── TestSerialization.cs │ ├── TestVarIntConverter.cs │ ├── kafka.UTest.netstandard.csproj │ └── packages.config ├── kafka-sharp │ ├── Batching │ │ ├── Accumulator.cs │ │ ├── BatchStrategy.cs │ │ └── Grouping.cs │ ├── Cluster │ │ ├── Cluster.cs │ │ ├── Node.cs │ │ ├── Pools.cs │ │ ├── ProduceMessage.cs │ │ └── TimeoutScheduler.cs │ ├── Common │ │ ├── ActionBlockTaskScheduler.cs │ │ ├── BigEndianConverter.cs │ │ ├── Crc32.cs │ │ ├── MemoryStreamExtensions.cs │ │ ├── Pool.cs │ │ ├── ReusableMemoryStream.cs │ │ ├── Timestamp.cs │ │ └── VarIntConverter.cs │ ├── Kafka.csproj │ ├── Kafka.netstandard.csproj │ ├── Kafka.nuspec │ ├── Network │ │ └── Connection.cs │ ├── Properties │ │ └── AssemblyInfo.cs │ ├── Protocol │ │ ├── Basics.cs │ │ ├── CommonRequest.cs │ │ ├── CommonResponse.cs │ │ ├── ConsumerGroupRequests.cs │ │ ├── ConsumerGroupResponses.cs │ │ ├── DefaultSerialization.cs │ │ ├── Errors.cs │ │ ├── FetchRequest.cs │ │ ├── FetchResponse.cs │ │ ├── GroupCoordinationRequests.cs │ │ ├── GroupCoordinationResponses.cs │ │ ├── KafkaLz4.cs │ │ ├── Message.cs │ │ ├── Metadata.cs │ │ ├── MetadataResponse.cs │ │ ├── OffsetRequest.cs │ │ ├── OffsetResponse.cs │ │ ├── ProduceRequest.cs │ │ ├── ProduceResponse.cs │ │ ├── ProtocolException.cs │ │ ├── RecordBatch.cs │ │ ├── TopicData.cs │ │ └── TopicRequest.cs │ ├── Public │ │ ├── ClusterClient.cs │ │ ├── Configuration.cs │ │ ├── Exceptions.cs │ │ ├── ILogger.cs │ │ ├── KafkaConsumer.cs │ │ ├── KafkaProducer.cs │ │ ├── KafkaRecord.cs │ │ ├── Loggers │ │ │ ├── ConsoleLogger.cs │ │ │ └── DevNullLogger.cs │ │ ├── PartitionSelectionConfig.cs │ │ ├── Serialization.cs │ │ └── Statistics.cs │ ├── Routing │ │ ├── ConsumerGroup.cs │ │ ├── ConsumerRouter.cs │ │ ├── PartitionSelection │ │ │ ├── IPartitionSelection.cs │ │ │ ├── MessageKeyPartitionSelection.cs │ │ │ ├── PartitionSelector.cs │ │ │ └── RoundRobinPartitionSelection.cs │ │ ├── ProducerRouter.cs │ │ └── RoutingTable.cs │ └── packages.config └── sample-kafka-sharp │ ├── App.config │ ├── Program.cs │ ├── Properties │ └── AssemblyInfo.cs │ ├── packages.config │ ├── sample-kafka-sharp.csproj │ └── sample-kafka-sharp.netstandard.csproj └── scripts ├── make-nuget-package.bat └── presubmit.sh /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | #ignore thumbnails created by windows 3 | Thumbs.db 4 | #Ignore files build by Visual Studio 5 | *.obj 6 | *.exe 7 | *.pdb 8 | *.user 9 | *.aps 10 | *.pch 11 | *.vspscc 12 | *.psess 13 | *_i.c 14 | *_p.c 15 | *.ncb 16 | *.suo 17 | *.tlb 18 | *.tlh 19 | *.bak 20 | *.cache 21 | *.ilk 22 | *.log 23 | *.vsp 24 | [Bb]in 25 | [Dd]ebug*/ 26 | *.lib 27 | *.sbr 28 | obj/ 29 | [Rr]elease*/ 30 | _ReSharper*/ 31 | [Tt]est[Rr]esult* 32 | log.txt 33 | packages 34 | vagrant/files 35 | /Build.proj 36 | /src/Properties/AssemblyInfoGenerated.cs 37 | nuget/*.nupkg 38 | .vs/ 39 | *.lock.json 40 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: csharp 2 | env: 3 | - CLI_VERSION=latest 4 | sudo: required 5 | dist: trusty 6 | mono: 7 | - latest 8 | dotnet: 1.0.4 9 | os: 10 | - linux 11 | script: 12 | - ./buildAndTest.sh 13 | -------------------------------------------------------------------------------- /OWNERS: -------------------------------------------------------------------------------- 1 | inherited: true 2 | owners: 3 | - webscale-team@criteo.com 4 | -------------------------------------------------------------------------------- /appveyor.yml: -------------------------------------------------------------------------------- 1 | version: '{build}' 2 | pull_requests: 3 | do_not_increment_build_number: true 4 | nuget: 5 | disable_publish_on_pr: true 6 | configuration: 7 | - Release 8 | artifacts: 9 | - path: .\artifacts\*.nupkg 10 | name: NuGet 11 | environment: 12 | DOTNET_CLI_TELEMETRY_OPTOUT: 1 13 | before_build: 14 | - appveyor-retry dotnet restore "kafka-sharp/kafka-sharp-netstd.sln" --verbosity Minimal 15 | build_script: 16 | - dotnet build "kafka-sharp/kafka-sharp-netstd.sln" -c %CONFIGURATION% 17 | after_build: 18 | - cmd: IF "%APPVEYOR_REPO_TAG%" == "true" (dotnet pack /p:PackageVersion="%APPVEYOR_REPO_TAG_NAME%" "kafka-sharp/kafka-sharp/Kafka.netstandard.csproj" -c %CONFIGURATION% --no-build -o %APPVEYOR_BUILD_FOLDER%\artifacts) 19 | test_script: 20 | - dotnet test "kafka-sharp/kafka-sharp.UTest/kafka.UTest.netstandard.csproj" -c %CONFIGURATION% 21 | cache: 22 | - '%USERPROFILE%\.nuget\packages' 23 | deploy: 24 | - provider: NuGet 25 | api_key: 26 | secure: zPPFd3VfauVBkWzlhniGLohV/zJzSCR7EzY/xgfGKE+vddJY3HmFHPDHRI14k47I 27 | on: 28 | branch: master 29 | appveyor_repo_tag: true 30 | -------------------------------------------------------------------------------- /buildAndTest.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | function check_availability() { 5 | binary=$1 6 | which $binary 2>&1 > /dev/null 7 | if [ $? -ne 0 ]; then 8 | echo "$binary could not be found in PATH" 9 | exit 1 10 | fi 11 | } 12 | 13 | check_availability "dotnet" 14 | 15 | solutionName=kafka-sharp-netstd.sln 16 | testProject=kafka-sharp.UTest/kafka.UTest.netstandard.csproj 17 | 18 | cd kafka-sharp 19 | dotnet restore $solutionName 20 | dotnet build $solutionName 21 | dotnet test $testProject -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp-net45.sln: -------------------------------------------------------------------------------- 1 |  2 | Microsoft Visual Studio Solution File, Format Version 12.00 3 | # Visual Studio 14 4 | VisualStudioVersion = 14.0.25420.1 5 | MinimumVisualStudioVersion = 10.0.40219.1 6 | Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Kafka", "kafka-sharp\Kafka.csproj", "{C2E6D32F-69D0-41B4-A1EA-9ADE27A99B5E}" 7 | EndProject 8 | Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Kafka.UTest", "kafka-sharp.UTest\Kafka.UTest.csproj", "{316DEA8D-E7D4-48C3-BFD1-9186956FD43B}" 9 | EndProject 10 | Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "sample-kafka-sharp", "sample-kafka-sharp\sample-kafka-sharp.csproj", "{322B004A-B8B1-4D33-9CC6-2BA67267A4F4}" 11 | EndProject 12 | Global 13 | GlobalSection(SolutionConfigurationPlatforms) = preSolution 14 | Debug|Any CPU = Debug|Any CPU 15 | Release|Any CPU = Release|Any CPU 16 | EndGlobalSection 17 | GlobalSection(ProjectConfigurationPlatforms) = postSolution 18 | {C2E6D32F-69D0-41B4-A1EA-9ADE27A99B5E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU 19 | {C2E6D32F-69D0-41B4-A1EA-9ADE27A99B5E}.Debug|Any CPU.Build.0 = Debug|Any CPU 20 | {C2E6D32F-69D0-41B4-A1EA-9ADE27A99B5E}.Release|Any CPU.ActiveCfg = Release|Any CPU 21 | {C2E6D32F-69D0-41B4-A1EA-9ADE27A99B5E}.Release|Any CPU.Build.0 = Release|Any CPU 22 | {316DEA8D-E7D4-48C3-BFD1-9186956FD43B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU 23 | {316DEA8D-E7D4-48C3-BFD1-9186956FD43B}.Debug|Any CPU.Build.0 = Debug|Any CPU 24 | {316DEA8D-E7D4-48C3-BFD1-9186956FD43B}.Release|Any CPU.ActiveCfg = Release|Any CPU 25 | {316DEA8D-E7D4-48C3-BFD1-9186956FD43B}.Release|Any CPU.Build.0 = Release|Any CPU 26 | {322B004A-B8B1-4D33-9CC6-2BA67267A4F4}.Debug|Any CPU.ActiveCfg = Debug|Any CPU 27 | {322B004A-B8B1-4D33-9CC6-2BA67267A4F4}.Debug|Any CPU.Build.0 = Debug|Any CPU 28 | {322B004A-B8B1-4D33-9CC6-2BA67267A4F4}.Release|Any CPU.ActiveCfg = Release|Any CPU 29 | {322B004A-B8B1-4D33-9CC6-2BA67267A4F4}.Release|Any CPU.Build.0 = Release|Any CPU 30 | EndGlobalSection 31 | GlobalSection(SolutionProperties) = preSolution 32 | HideSolutionNode = FALSE 33 | EndGlobalSection 34 | EndGlobal 35 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp-netstd.sln: -------------------------------------------------------------------------------- 1 |  2 | Microsoft Visual Studio Solution File, Format Version 12.00 3 | # Visual Studio 15 4 | VisualStudioVersion = 15.0.26430.6 5 | MinimumVisualStudioVersion = 10.0.40219.1 6 | Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Kafka.netstandard", "kafka-sharp\Kafka.netstandard.csproj", "{04C50BEA-1E10-4FBD-9341-BBC185A27A9D}" 7 | EndProject 8 | Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "kafka.UTest.netstandard", "kafka-sharp.UTest\kafka.UTest.netstandard.csproj", "{9F335136-1C65-4BD4-BFEE-E1710C9AD883}" 9 | EndProject 10 | Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "sample-kafka-sharp.netstandard", "sample-kafka-sharp\sample-kafka-sharp.netstandard.csproj", "{CF8DE5DE-FEE7-4FC5-99B1-536A11FDE1F1}" 11 | EndProject 12 | Global 13 | GlobalSection(SolutionConfigurationPlatforms) = preSolution 14 | Debug|Any CPU = Debug|Any CPU 15 | Release|Any CPU = Release|Any CPU 16 | EndGlobalSection 17 | GlobalSection(ProjectConfigurationPlatforms) = postSolution 18 | {04C50BEA-1E10-4FBD-9341-BBC185A27A9D}.Debug|Any CPU.ActiveCfg = Debug|Any CPU 19 | {04C50BEA-1E10-4FBD-9341-BBC185A27A9D}.Debug|Any CPU.Build.0 = Debug|Any CPU 20 | {04C50BEA-1E10-4FBD-9341-BBC185A27A9D}.Release|Any CPU.ActiveCfg = Release|Any CPU 21 | {04C50BEA-1E10-4FBD-9341-BBC185A27A9D}.Release|Any CPU.Build.0 = Release|Any CPU 22 | {9F335136-1C65-4BD4-BFEE-E1710C9AD883}.Debug|Any CPU.ActiveCfg = Debug|Any CPU 23 | {9F335136-1C65-4BD4-BFEE-E1710C9AD883}.Debug|Any CPU.Build.0 = Debug|Any CPU 24 | {9F335136-1C65-4BD4-BFEE-E1710C9AD883}.Release|Any CPU.ActiveCfg = Release|Any CPU 25 | {9F335136-1C65-4BD4-BFEE-E1710C9AD883}.Release|Any CPU.Build.0 = Release|Any CPU 26 | {CF8DE5DE-FEE7-4FC5-99B1-536A11FDE1F1}.Debug|Any CPU.ActiveCfg = Debug|Any CPU 27 | {CF8DE5DE-FEE7-4FC5-99B1-536A11FDE1F1}.Debug|Any CPU.Build.0 = Debug|Any CPU 28 | {CF8DE5DE-FEE7-4FC5-99B1-536A11FDE1F1}.Release|Any CPU.ActiveCfg = Release|Any CPU 29 | {CF8DE5DE-FEE7-4FC5-99B1-536A11FDE1F1}.Release|Any CPU.Build.0 = Release|Any CPU 30 | EndGlobalSection 31 | GlobalSection(SolutionProperties) = preSolution 32 | HideSolutionNode = FALSE 33 | EndGlobalSection 34 | EndGlobal 35 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp.UTest/AsyncCountdownEvent.cs: -------------------------------------------------------------------------------- 1 | using System.Threading; 2 | using System.Threading.Tasks; 3 | 4 | 5 | namespace tests_kafka_sharp 6 | { 7 | public sealed class AsyncCountdownEvent 8 | { 9 | private readonly TaskCompletionSource _tcs; 10 | 11 | private int _count; 12 | 13 | public AsyncCountdownEvent(int count) 14 | { 15 | _tcs = new TaskCompletionSource(); 16 | _count = count; 17 | } 18 | 19 | public Task WaitAsync() 20 | { 21 | return _tcs.Task; 22 | } 23 | 24 | public void Signal() 25 | { 26 | var count = Interlocked.Decrement(ref _count); 27 | if (count == 0) 28 | _tcs.SetResult(true); 29 | } 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp.UTest/Kafka.UTest.csproj: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Debug 7 | AnyCPU 8 | {316DEA8D-E7D4-48C3-BFD1-9186956FD43B} 9 | Library 10 | Properties 11 | tests_kafka_sharp 12 | Kafka.UTest 13 | v4.7.1 14 | 512 15 | 16 | 17 | 18 | true 19 | full 20 | false 21 | bin\Debug\ 22 | DEBUG;TRACE 23 | prompt 24 | 4 25 | 26 | 27 | pdbonly 28 | true 29 | bin\Release\ 30 | TRACE 31 | prompt 32 | 4 33 | 34 | 35 | 36 | ..\packages\Castle.Core.4.0.0\lib\net45\Castle.Core.dll 37 | True 38 | 39 | 40 | ..\packages\Moq.4.7.10\lib\net45\Moq.dll 41 | True 42 | 43 | 44 | ..\packages\NUnit.3.7.0\lib\net45\nunit.framework.dll 45 | True 46 | 47 | 48 | 49 | 50 | ..\packages\System.Reactive.Core.3.1.1\lib\net45\System.Reactive.Core.dll 51 | True 52 | 53 | 54 | ..\packages\System.Reactive.Interfaces.3.1.1\lib\net45\System.Reactive.Interfaces.dll 55 | True 56 | 57 | 58 | ..\packages\System.Reactive.Linq.3.1.1\lib\net45\System.Reactive.Linq.dll 59 | True 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | ..\packages\System.ValueTuple.4.4.0\lib\net47\System.ValueTuple.dll 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | {c2e6d32f-69d0-41b4-a1ea-9ade27a99b5e} 95 | kafka-sharp 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}. 108 | 109 | 110 | 111 | 118 | 119 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp.UTest/Properties/AssemblyInfo.cs: -------------------------------------------------------------------------------- 1 | using System.Reflection; 2 | using System.Runtime.InteropServices; 3 | 4 | // General Information about an assembly is controlled through the following 5 | // set of attributes. Change these attribute values to modify the information 6 | // associated with an assembly. 7 | [assembly: AssemblyTitle("tests-kafka-sharp")] 8 | [assembly: AssemblyDescription("")] 9 | [assembly: AssemblyConfiguration("")] 10 | [assembly: AssemblyCompany("Criteo")] 11 | [assembly: AssemblyProduct("tests-kafka-sharp")] 12 | [assembly: AssemblyCopyright("Copyright © Criteo 2015")] 13 | [assembly: AssemblyTrademark("")] 14 | [assembly: AssemblyCulture("")] 15 | 16 | // Setting ComVisible to false makes the types in this assembly not visible 17 | // to COM components. If you need to access a type in this assembly from 18 | // COM, set the ComVisible attribute to true on that type. 19 | [assembly: ComVisible(false)] 20 | 21 | // The following GUID is for the ID of the typelib if this project is exposed to COM 22 | [assembly: Guid("275536a6-ff39-4726-aaa7-768351625897")] 23 | 24 | // Version information for an assembly consists of the following four values: 25 | // 26 | // Major Version 27 | // Minor Version 28 | // Build Number 29 | // Revision 30 | // 31 | // You can specify all the values or you can default the Build and Revision Numbers 32 | // by using the '*' as shown below: 33 | // [assembly: AssemblyVersion("1.0.*")] 34 | [assembly: AssemblyVersion("1.0.0.0")] 35 | [assembly: AssemblyFileVersion("1.0.0.0")] 36 | 37 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp.UTest/TestCommon.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.Collections.Generic; 3 | using System.Linq; 4 | using System.Reactive.Concurrency; 5 | using System.Text; 6 | using Kafka.Cluster; 7 | using Kafka.Common; 8 | using Kafka.Public; 9 | using NUnit.Framework; 10 | 11 | namespace tests_kafka_sharp 12 | { 13 | [TestFixture] 14 | class TestCommon 15 | { 16 | [Test] 17 | public void TestReusableMemoryStream() 18 | { 19 | var pool = new Pools(new Statistics(), logger: null); 20 | pool.InitRequestsBuffersPool(); 21 | using (var stream = pool.RequestsBuffersPool.Reserve()) 22 | { 23 | Assert.AreEqual(0, stream.Length); 24 | Assert.AreEqual(0, stream.Position); 25 | Assert.IsTrue(stream.CanWrite); 26 | Assert.IsTrue(stream.CanRead); 27 | 28 | var b = Encoding.UTF8.GetBytes("I see dead beef people"); 29 | stream.Write(b, 0, b.Length); 30 | } 31 | 32 | var s = pool.RequestsBuffersPool.Reserve(); 33 | s.Dispose(); 34 | Assert.AreSame(s, pool.RequestsBuffersPool.Reserve()); 35 | } 36 | 37 | class Item 38 | { 39 | public int Value; 40 | } 41 | 42 | [Test] 43 | public void TestPool() 44 | { 45 | int over = 0; 46 | var pool = new Pool(6, () => new Item(), (i, b) => 47 | { 48 | i.Value = 0; 49 | if (!b) ++over; 50 | }); 51 | 52 | var item = pool.Reserve(); 53 | Assert.IsNotNull(item); 54 | 55 | item.Value = 10; 56 | pool.Release(item); 57 | Assert.That(item.Value, Is.EqualTo(0)); 58 | Assert.That(pool.Watermark, Is.EqualTo(1)); 59 | 60 | var items = Enumerable.Range(0, 10).Select(i => pool.Reserve()).ToList(); 61 | foreach (var i in items) 62 | { 63 | pool.Release(i); 64 | } 65 | Assert.That(pool.Watermark, Is.EqualTo(6)); 66 | Assert.That(over, Is.EqualTo(4)); 67 | 68 | Assert.That(() => pool.Release(null), Throws.Nothing); 69 | Assert.That(() => new Pool(() => new Item(), null), Throws.InstanceOf()); 70 | Assert.That(() => new Pool(null, (i, _) => i.Value = 0), Throws.InstanceOf()); 71 | } 72 | 73 | [Test] 74 | public void TestTimestamp() 75 | { 76 | Assert.AreEqual(new DateTime(1970, 1, 1, 0, 0, 0, 0, DateTimeKind.Utc), Timestamp.Epoch); 77 | 78 | var now = DateTime.UtcNow; 79 | var datenow = new DateTime(now.Year, now.Month, now.Day, now.Hour, now.Minute, now.Second, now.Millisecond, DateTimeKind.Utc); 80 | 81 | Assert.AreEqual(datenow, Timestamp.FromUnixTimestamp(Timestamp.ToUnixTimestamp(now))); 82 | } 83 | } 84 | } -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp.UTest/TestKafkaConsumer.cs: -------------------------------------------------------------------------------- 1 | namespace tests_kafka_sharp 2 | { 3 | using System.Collections.Generic; 4 | using System.Linq; 5 | using System.Reactive.Linq; 6 | 7 | using Kafka.Public; 8 | 9 | using Moq; 10 | 11 | using NUnit.Framework; 12 | 13 | public class TestKafkaConsumer 14 | { 15 | [Test] 16 | public void RaisesPartitionsAssignedEvent() 17 | { 18 | var clusterClientStub = CreateClusterClientStub(); 19 | 20 | using (var sut = new KafkaConsumer("ANYTOPIC", clusterClientStub.Object)) 21 | { 22 | var assignments = new Dictionary>(); 23 | 24 | var eventRisen = false; 25 | 26 | sut.PartitionsAssigned += x => eventRisen = true; 27 | 28 | clusterClientStub.Raise(x => x.PartitionsAssigned += null, assignments); 29 | 30 | Assert.That(eventRisen, Is.True); 31 | } 32 | } 33 | 34 | [Test] 35 | public void RaisesPartitionsRevokedEvent() 36 | { 37 | var clusterClientStub = CreateClusterClientStub(); 38 | using (var sut = new KafkaConsumer("ANYTOPIC", clusterClientStub.Object)) 39 | { 40 | var eventRisen = false; 41 | 42 | sut.PartitionsRevoked += () => eventRisen = true; 43 | 44 | clusterClientStub.Raise(x => x.PartitionsRevoked += null); 45 | 46 | Assert.That(eventRisen, Is.True); 47 | } 48 | } 49 | 50 | private Mock CreateClusterClientStub() 51 | { 52 | var stub = new Mock(); 53 | 54 | stub.SetupGet(x => x.Messages).Returns(Observable.Empty()); 55 | 56 | return stub; 57 | } 58 | } 59 | } -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp.UTest/TestMessageKeyPartitionSelection.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.Collections.Generic; 3 | using Kafka.Cluster; 4 | using Kafka.Protocol; 5 | using Kafka.Public; 6 | using Kafka.Routing; 7 | using Kafka.Routing.PartitionSelection; 8 | using Moq; 9 | using NUnit.Framework; 10 | 11 | namespace tests_kafka_sharp 12 | { 13 | internal class TestMessageKeyPartitionSelection 14 | { 15 | private static readonly ISerializer Serializer = new StringSerializer(); 16 | private static readonly RoundRobinPartitionSelection RoundRobinPartitionSelection = new RoundRobinPartitionSelection(); 17 | 18 | [Test] 19 | public void Test_MessageKeyPartitionSelection_Is_Consistent() 20 | { 21 | var nodeMock = new NodeMock(); 22 | var partitions = new[] 23 | { 24 | new Partition {Id = 0, Leader = nodeMock}, 25 | new Partition {Id = 1, Leader = nodeMock}, 26 | new Partition {Id = 2, Leader = nodeMock}, 27 | }; 28 | var partitionStrategy = new MessageKeyPartitionSelection(Serializer, RoundRobinPartitionSelection, Mock.Of()); 29 | var partitioner = new PartitionSelector(partitionStrategy); 30 | var message1 = ProduceMessage.New(string.Empty, Partitions.Any, new Message { Key = "ThisIsMyKey" }, new DateTime()); 31 | var message2 = ProduceMessage.New(string.Empty, Partitions.Any, new Message { Key = "ThisIsMyOtherKey" }, new DateTime()); 32 | 33 | var expectedPartition1 = partitioner.GetPartition(message1, partitions); 34 | var expectedPartition2 = partitioner.GetPartition(message2, partitions); 35 | for (var i = 0; i < 300; i++) 36 | { 37 | var currentPartition1 = partitioner.GetPartition(message1, partitions); 38 | var currentPartition2 = partitioner.GetPartition(message2, partitions); 39 | Assert.AreEqual(expectedPartition1.Id, currentPartition1.Id); 40 | Assert.AreEqual(expectedPartition2.Id, currentPartition2.Id); 41 | } 42 | } 43 | 44 | [Test] 45 | public void Test_MessageKeyPartitionSelection_Fallbacks_To_RoundRobin_If_MessageKey_Null() 46 | { 47 | var nodeMock = new NodeMock(); 48 | var partitions = new[] 49 | { 50 | new Partition {Id = 0, Leader = nodeMock}, 51 | new Partition {Id = 1, Leader = nodeMock}, 52 | new Partition {Id = 2, Leader = nodeMock}, 53 | }; 54 | var partitionStrategy = new MessageKeyPartitionSelection(Serializer, RoundRobinPartitionSelection, Mock.Of()); 55 | var partitioner = new PartitionSelector(partitionStrategy); 56 | var message = ProduceMessage.New(string.Empty, Partitions.Any, new Message { Key = null }, new DateTime()); 57 | 58 | var partition = partitioner.GetPartition(message, partitions); 59 | Assert.IsTrue(partition.Id != Partition.None.Id); 60 | } 61 | 62 | [TestCase(0)] 63 | [TestCase(1)] 64 | [TestCase(2)] 65 | public void Test_MessageKeyPartitionSelection_Fallbacks_To_RoundRobin_If_Partition_Blacklisted(int partitionIdBlacklisted) 66 | { 67 | var nodeMock = new NodeMock(); 68 | var partitions = new[] 69 | { 70 | new Partition {Id = 0, Leader = nodeMock}, 71 | new Partition {Id = 1, Leader = nodeMock}, 72 | new Partition {Id = 2, Leader = nodeMock}, 73 | }; 74 | var blacklistedPartitions = new Dictionary { { partitionIdBlacklisted, DateTime.MaxValue } }; 75 | var partitionStrategy = new MessageKeyPartitionSelection(Serializer, RoundRobinPartitionSelection, Mock.Of()); 76 | var partitioner = new PartitionSelector(partitionStrategy); 77 | var message = ProduceMessage.New(string.Empty, Partitions.Any, new Message { Key = "ThisIsMyKey" }, new DateTime()); 78 | 79 | for (var i = 0; i < 300; i++) 80 | { 81 | var partition = partitioner.GetPartition(message, partitions, blacklistedPartitions); 82 | Assert.IsTrue(partition.Id != Partition.None.Id); 83 | Assert.IsTrue(partition.Id != partitionIdBlacklisted); 84 | } 85 | } 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp.UTest/TestPartitioner.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.Collections.Generic; 3 | using System.Linq; 4 | using Kafka.Cluster; 5 | using Kafka.Protocol; 6 | using Kafka.Public; 7 | using Kafka.Routing; 8 | using Kafka.Routing.PartitionSelection; 9 | using NUnit.Framework; 10 | 11 | namespace tests_kafka_sharp 12 | { 13 | [TestFixture] 14 | class TestPartitioner 15 | { 16 | [Test] 17 | [TestCase(1)] 18 | [TestCase(2)] 19 | [TestCase(0)] 20 | [TestCase(-1)] 21 | [TestCase(42)] 22 | public void TestRoundRobinPartitionAssign(int delay) 23 | { 24 | var nodeMock = new NodeMock(); 25 | var partitions = new[] 26 | { 27 | new Partition {Id = 0, Leader = nodeMock}, 28 | new Partition {Id = 1, Leader = nodeMock}, 29 | new Partition {Id = 2, Leader = nodeMock}, 30 | new Partition {Id = 3, Leader = nodeMock}, 31 | new Partition {Id = 4, Leader = nodeMock}, 32 | }; 33 | var partitionStrategy = new RoundRobinPartitionSelection(delay); 34 | var partitioner = new PartitionSelector(partitionStrategy); 35 | delay = delay <= 0 ? 1 : delay; 36 | foreach (var partition in partitions) 37 | { 38 | for (var j = 0; j < delay; ++j) 39 | { 40 | Assert.AreEqual(partition.Id, partitioner 41 | .GetPartition(ProduceMessage.New(string.Empty, Partitions.Any, new Message(), new DateTime()), partitions) 42 | .Id); 43 | } 44 | } 45 | } 46 | 47 | [Test] 48 | public void TestRoundRobinPartitionAssignNoPartitionReturnsNone() 49 | { 50 | var partitions = new Partition[0]; 51 | var partitionStrategy = new RoundRobinPartitionSelection(); 52 | var partitioner = new PartitionSelector(partitionStrategy); 53 | Assert.AreEqual(0, Partition.None.CompareTo(partitioner.GetPartition( 54 | ProduceMessage.New(string.Empty, Partitions.Any, new Message(), new DateTime()), partitions))); 55 | } 56 | 57 | [Test] 58 | public void TestFilter() 59 | { 60 | var nodeMock = new NodeMock(); 61 | var partitions = new[] 62 | { 63 | new Partition {Id = 0, Leader = nodeMock}, 64 | new Partition {Id = 1, Leader = nodeMock}, 65 | new Partition {Id = 2, Leader = nodeMock}, 66 | new Partition {Id = 3, Leader = nodeMock}, 67 | new Partition {Id = 4, Leader = nodeMock}, 68 | }; 69 | var filter = new Dictionary(); 70 | filter[0] = DateTime.UtcNow; 71 | filter[2] = DateTime.UtcNow; 72 | filter[4] = DateTime.UtcNow; 73 | var partitionStrategy = new RoundRobinPartitionSelection(); 74 | var partitioner = new PartitionSelector(partitionStrategy); 75 | 76 | var partition = partitioner.GetPartition(ProduceMessage.New(string.Empty, Partitions.Any, new Message(), new DateTime()),partitions, filter); 77 | Assert.AreEqual(1, partition.Id); 78 | 79 | partition = partitioner.GetPartition(ProduceMessage.New(string.Empty, Partitions.Any, new Message(), new DateTime()), partitions, filter); 80 | Assert.AreEqual(3, partition.Id); 81 | 82 | partition = partitioner.GetPartition(ProduceMessage.New(string.Empty, Partitions.Any, new Message(), new DateTime()), partitions, filter); 83 | Assert.AreEqual(1, partition.Id); 84 | } 85 | 86 | /// 87 | /// Make sure the round-robin threshold is reset when a partition is blacklisted half-way 88 | /// 89 | [Test] 90 | public void TestRobinPartitionAssignWhenFiltered() 91 | { 92 | var nodeMock = new NodeMock(); 93 | 94 | var partitions = new[] 95 | { 96 | new Partition { Id = 1, Leader = nodeMock }, 97 | new Partition { Id = 2, Leader = nodeMock }, 98 | new Partition { Id = 3, Leader = nodeMock }, 99 | }; 100 | 101 | var filter = new Dictionary(); 102 | 103 | int delay = partitions.Length + 2; 104 | 105 | var partitionStrategy = new RoundRobinPartitionSelection(delay); 106 | var partitioner = new PartitionSelector(partitionStrategy); 107 | 108 | var partition = partitioner.GetPartition(ProduceMessage.New(string.Empty, Partitions.Any, new Message(), new DateTime()), partitions, filter); 109 | 110 | Assert.AreEqual(1, partition.Id); 111 | 112 | filter.Add(1, DateTime.UtcNow); 113 | 114 | var batch = GetPartitions(delay, partitioner, partitions, filter); 115 | 116 | Assert.AreEqual(delay, batch.Count); 117 | Assert.IsTrue(batch.All(p => p.Id == 2), "The round-robin threshold wasn't properly reset after previous partition was blacklisted"); 118 | } 119 | 120 | /// 121 | /// Regression test: in some cases, the PartitionSelector could fail to find an available partition 122 | /// if current partition was blacklisted and delay was greater than the number of partition 123 | /// 124 | [Test] 125 | public void TestFilterWithHighDelay() 126 | { 127 | var nodeMock = new NodeMock(); 128 | 129 | var partitions = new[] 130 | { 131 | new Partition { Id = 1, Leader = nodeMock }, 132 | new Partition { Id = 2, Leader = nodeMock }, 133 | new Partition { Id = 3, Leader = nodeMock } 134 | }; 135 | 136 | var filter = new Dictionary { { 2, DateTime.UtcNow } }; 137 | 138 | // Pick a delay greater than the number of partitions 139 | int delay = partitions.Length + 2; 140 | 141 | var partitionStrategy = new RoundRobinPartitionSelection(delay); 142 | var partitioner = new PartitionSelector(partitionStrategy); 143 | 144 | var firstBatch = GetPartitions(delay, partitioner, partitions, filter); 145 | 146 | Assert.AreEqual(delay, firstBatch.Count); 147 | Assert.IsTrue(firstBatch.All(p => p.Id == 1)); 148 | 149 | var secondBatch = GetPartitions(delay, partitioner, partitions, filter); 150 | 151 | Assert.AreEqual(delay, secondBatch.Count); 152 | Assert.IsTrue(secondBatch.All(p => p.Id == 3)); 153 | } 154 | 155 | /// 156 | /// Test what happens when startSeed is high enough that _next will get bigger than int.MaxValue 157 | /// This is purely meant as regression testing, as this condition caused issues in previous versions of the code 158 | /// 159 | [Test] 160 | public void TestOverflow() 161 | { 162 | var nodeMock = new NodeMock(); 163 | 164 | var partitions = Enumerable.Range(0, 10).Select(i => new Partition { Id = i, Leader = nodeMock }).ToArray(); 165 | 166 | var partitionStrategy = new RoundRobinPartitionSelection(delay: 1, startSeed: int.MaxValue); 167 | var partitioner = new PartitionSelector(partitionStrategy); 168 | 169 | var batch = GetPartitions(partitions.Length, partitioner, partitions, null); 170 | 171 | var ids = batch.Select(p => p.Id).ToArray(); 172 | 173 | var expectedIds = new[] { 7, 8, 9, 0, 1, 2, 3, 4, 5, 6 }; 174 | 175 | Assert.IsTrue(expectedIds.SequenceEqual(ids)); 176 | } 177 | 178 | [Test] 179 | [TestCase(0, 1)] 180 | [TestCase(1, 1)] 181 | [TestCase(1, 5)] 182 | [TestCase(42, 1)] 183 | [TestCase(42, 2)] 184 | public void TestRoundRobinPartitionWithStartSeed(int startSeed, int delay) 185 | { 186 | var nodeMock = new NodeMock(); 187 | var partitions = new[] 188 | { 189 | new Partition {Id = 0, Leader = nodeMock}, 190 | new Partition {Id = 1, Leader = nodeMock}, 191 | new Partition {Id = 2, Leader = nodeMock}, 192 | new Partition {Id = 3, Leader = nodeMock}, 193 | new Partition {Id = 4, Leader = nodeMock}, 194 | }; 195 | var partitionStrategy = new RoundRobinPartitionSelection(delay: delay, startSeed: startSeed); 196 | var partitioner = new PartitionSelector(partitionStrategy); 197 | foreach (var partition in partitions) 198 | { 199 | for (var j = 0; j < delay; ++j) 200 | { 201 | Assert.AreEqual((partition.Id + startSeed) % partitions.Length, partitioner.GetPartition( 202 | ProduceMessage.New(string.Empty, Partitions.Any, new Message(), new DateTime()), partitions).Id); 203 | } 204 | } 205 | } 206 | 207 | private static List GetPartitions(int count, PartitionSelector partitioner, Partition[] partitions, Dictionary filter) 208 | { 209 | var result = new List(count); 210 | 211 | for (int i = 0; i < count; i++) 212 | { 213 | result.Add(partitioner.GetPartition(ProduceMessage.New(string.Empty, Partitions.Any, new Message(), new DateTime()), partitions, filter)); 214 | } 215 | 216 | return result; 217 | } 218 | } 219 | } 220 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp.UTest/TestRoutingTable.cs: -------------------------------------------------------------------------------- 1 | using System.Collections.Generic; 2 | using Kafka.Routing; 3 | using NUnit.Framework; 4 | 5 | namespace tests_kafka_sharp 6 | { 7 | [TestFixture] 8 | class TestRoutingTable 9 | { 10 | [Test] 11 | public void TestRoutingTableReturnsPartitions() 12 | { 13 | var node = new NodeMock(); 14 | var routes = new Dictionary 15 | { 16 | {"test1p", new[] {new Partition {Id = 0, Leader = node}}}, 17 | }; 18 | var routingTable = new RoutingTable(routes); 19 | 20 | var partitions = routingTable.GetPartitions("test1p"); 21 | Assert.AreEqual(1, partitions.Length); 22 | Assert.AreEqual(0, partitions[0].Id); 23 | Assert.AreSame(node, partitions[0].Leader); 24 | } 25 | 26 | [Test] 27 | public void TestGetNodeForPartition() 28 | { 29 | var node = new NodeMock(); 30 | var node2 = new NodeMock(); 31 | var node3 = new NodeMock(); 32 | var routes = new Dictionary 33 | { 34 | {"test2p", new[] {new Partition {Id = 1, Leader = node}, new Partition {Id = 2, Leader = node2}, new Partition {Id = 3, Leader = node3}}}, 35 | }; 36 | var rt = new RoutingTable(routes); 37 | Assert.AreSame(node, rt.GetLeaderForPartition("test2p", 1)); 38 | Assert.AreSame(node2, rt.GetLeaderForPartition("test2p", 2)); 39 | Assert.AreSame(node3, rt.GetLeaderForPartition("test2p", 3)); 40 | Assert.IsNull(rt.GetLeaderForPartition("test2p", 8)); 41 | Assert.IsNull(rt.GetLeaderForPartition("test2poulpe", 8423)); 42 | } 43 | 44 | [Test] 45 | public void TestRoutingTableReturnsEmptyForAbsentTopic() 46 | { 47 | var node = new NodeMock(); 48 | var routes = new Dictionary 49 | { 50 | {"test1p", new[] {new Partition {Id = 0, Leader = node}}}, 51 | }; 52 | var routingTable = new RoutingTable(routes); 53 | 54 | Assert.Less(0, routingTable.GetPartitions("test1p").Length); 55 | Assert.AreEqual(0, routingTable.GetPartitions("tortemoque").Length); 56 | } 57 | 58 | [Test] 59 | public void TestSignalDeadNode() 60 | { 61 | var node = new NodeMock("n1"); 62 | var routes = new Dictionary 63 | { 64 | {"test1p", new[] {new Partition {Id = 0, Leader = node}}}, 65 | {"test2p", new[] {new Partition {Id = 1, Leader = new NodeMock("n2")}, new Partition {Id = 2, Leader = node}, new Partition {Id = 3, Leader = new NodeMock("n3")}}}, 66 | }; 67 | var routingTable = new RoutingTable(routes); 68 | 69 | Assert.AreEqual(1, routingTable.GetPartitions("test1p").Length); 70 | Assert.AreEqual(3, routingTable.GetPartitions("test2p").Length); 71 | 72 | routingTable = new RoutingTable(routingTable, node); 73 | 74 | Assert.AreEqual(0, routingTable.GetPartitions("test1p").Length); 75 | Assert.AreEqual(2, routingTable.GetPartitions("test2p").Length); 76 | } 77 | 78 | [Test] 79 | public void TestFilterMinInSync() 80 | { 81 | var node = new NodeMock(); 82 | var routes = new Dictionary 83 | { 84 | {"test1p", new[] {new Partition {Id = 0, Leader = node}}}, 85 | {"test2p", new[] {new Partition {Id = 1, Leader = new NodeMock(), NbIsr = 1}, new Partition {Id = 2, Leader = node}, new Partition {Id = 3, Leader = new NodeMock()}}}, 86 | }; 87 | var routingTable = new RoutingTable(routes); 88 | 89 | Assert.AreEqual(1, routingTable.GetPartitions("test1p").Length); 90 | Assert.AreEqual(3, routingTable.GetPartitions("test2p").Length); 91 | 92 | routingTable = new RoutingTable(routingTable, 1); 93 | 94 | Assert.AreEqual(0, routingTable.GetPartitions("test1p").Length); 95 | Assert.AreEqual(1, routingTable.GetPartitions("test2p").Length); 96 | } 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp.UTest/TestVarIntConverter.cs: -------------------------------------------------------------------------------- 1 | using System.Collections.Generic; 2 | using System.IO; 3 | using NUnit.Framework; 4 | using Kafka.Common; 5 | using System; 6 | 7 | namespace tests_kafka_sharp 8 | { 9 | [TestFixture] 10 | internal class TestVarIntConverter 11 | { 12 | private static readonly IDictionary VarIntNumbers = new Dictionary 13 | { 14 | [0L] = new byte[] { 0x00 }, 15 | [1L] = new byte[] { 0x02 }, 16 | [-1L] = new byte[] { 0x01 }, 17 | [-64] = new byte[] { 0x7f }, 18 | [0x7f] = new byte[] {0xfe, 0x01 }, 19 | [0xff] = new byte[] {0xfe, 0x03 }, 20 | [short.MinValue] = new byte[] { 0xff, 0xff, 0x03 }, 21 | [short.MaxValue] = new byte[] { 0xfe, 0xff, 0x03 }, 22 | [short.MaxValue + 1L] = new byte[] { 0x80, 0x80, 0x04 }, 23 | [int.MinValue] = new byte[] { 0xff, 0xff, 0xff, 0xff, 0x0f }, 24 | [int.MaxValue] = new byte[] { 0xfe, 0xff, 0xff, 0xff, 0x0f }, 25 | [int.MaxValue + 1L] = new byte[] { 0x80, 0x80, 0x80, 0x80, 0x10 }, 26 | [long.MinValue] = new byte[] { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01 }, 27 | [long.MaxValue] = new byte[] { 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01 } 28 | }; 29 | 30 | [Test] 31 | public void TestSizeOfVarInt() 32 | { 33 | var buffer = new byte[255]; 34 | var previousPosition = 0L; 35 | using (var stream = new MemoryStream(buffer)) 36 | { 37 | foreach (var value in VarIntNumbers.Keys) 38 | { 39 | VarIntConverter.Write(stream, value); 40 | Assert.AreEqual(stream.Position - previousPosition, VarIntConverter.SizeOfVarInt(value)); 41 | previousPosition = stream.Position; 42 | } 43 | } 44 | } 45 | 46 | [Test] 47 | [TestCase(new byte[] { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 })] 48 | [TestCase(new byte[] { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x02 })] 49 | public void TestReadInt64Overflows(byte[] value) 50 | { 51 | Assert.Throws(() => VarIntConverter.ReadAsInt64(new MemoryStream(value))); 52 | } 53 | 54 | [Test] 55 | public void TestReadInt64() 56 | { 57 | RunReadIntTestFor(VarIntConverter.ReadAsInt64, long.MinValue, long.MaxValue); 58 | } 59 | 60 | [Test] 61 | public void TestReadInt32() 62 | { 63 | RunReadIntTestFor(VarIntConverter.ReadAsInt32, int.MinValue, int.MaxValue); 64 | } 65 | 66 | [Test] 67 | public void TestReadInt16() 68 | { 69 | RunReadIntTestFor(VarIntConverter.ReadAsInt16, short.MinValue, short.MaxValue); 70 | } 71 | 72 | [Test] 73 | public void TestReadByte() 74 | { 75 | RunReadIntTestFor(VarIntConverter.ReadAsByte, byte.MinValue, byte.MaxValue, true); 76 | } 77 | 78 | [Test] 79 | public void TestReadBool() 80 | { 81 | Assert.IsFalse(VarIntConverter.ReadAsBool(new MemoryStream(new byte[] { 0x00 }))); 82 | 83 | Assert.IsTrue(VarIntConverter.ReadAsBool(new MemoryStream(new byte[] { 0x01 }))); 84 | Assert.IsTrue(VarIntConverter.ReadAsBool(new MemoryStream(new byte[] { 0x02 }))); 85 | Assert.IsTrue(VarIntConverter.ReadAsBool(new MemoryStream(new byte[] { 0x80, 0x01 }))); 86 | } 87 | 88 | private void RunReadIntTestFor(Func readFunc, TInteger minValue, 89 | TInteger maxValue) where TInteger : IComparable => 90 | RunReadIntTestFor(readFunc, minValue, maxValue, true); 91 | 92 | private void RunReadIntTestFor(Func readFunc, TInteger minValue, TInteger maxValue, bool unsigned) 93 | where TInteger: IComparable 94 | { 95 | foreach (KeyValuePair entry in VarIntNumbers) 96 | { 97 | if (unsigned && entry.Key < 0) 98 | { 99 | continue; 100 | } 101 | 102 | // If the value is out of the range of the tested type, an OverflowException must be thrown. 103 | if (Convert.ToInt64(minValue).CompareTo(entry.Key) <= 0 && 104 | entry.Key.CompareTo(Convert.ToInt64(maxValue)) <= 0) 105 | { 106 | Assert.AreEqual(entry.Key, readFunc(new MemoryStream(entry.Value))); 107 | } 108 | else 109 | { 110 | Assert.Throws(() => readFunc(new MemoryStream(entry.Value))); 111 | } 112 | } 113 | } 114 | 115 | [Test] 116 | public void TestWriteInt64() 117 | { 118 | foreach (KeyValuePair entry in VarIntNumbers) 119 | { 120 | // 64 bits fit in ceil(64/7) = 10 bytes 121 | var actual = new byte[10]; 122 | VarIntConverter.Write(new MemoryStream(actual), entry.Key); 123 | AssertVarIntAreEqual(entry.Value, actual); 124 | } 125 | } 126 | 127 | [Test] 128 | public void TestWriteInt32() 129 | { 130 | foreach (KeyValuePair entry in VarIntNumbers) 131 | { 132 | if (entry.Key < int.MinValue || entry.Key > int.MaxValue) 133 | { 134 | continue; 135 | } 136 | 137 | var actual = new byte[5]; 138 | VarIntConverter.Write(new MemoryStream(actual), (int)entry.Key); 139 | AssertVarIntAreEqual(entry.Value, actual); 140 | } 141 | } 142 | 143 | [Test] 144 | public void TestWriteInt16() 145 | { 146 | foreach (KeyValuePair entry in VarIntNumbers) 147 | { 148 | if (entry.Key < short.MinValue || entry.Key > short.MaxValue) 149 | { 150 | continue; 151 | } 152 | 153 | var actual = new byte[3]; 154 | VarIntConverter.Write(new MemoryStream(actual), (short)entry.Key); 155 | AssertVarIntAreEqual(entry.Value, actual); 156 | } 157 | } 158 | 159 | [Test] 160 | public void TestWrite() 161 | { 162 | foreach (KeyValuePair entry in VarIntNumbers) 163 | { 164 | if (entry.Key < byte.MinValue || entry.Key > byte.MaxValue) 165 | { 166 | continue; 167 | } 168 | 169 | var actual = new byte[2]; 170 | VarIntConverter.Write(new MemoryStream(actual), (byte)entry.Key); 171 | AssertVarIntAreEqual(entry.Value, actual); 172 | } 173 | } 174 | 175 | [Test] 176 | public void TestWriteBool() 177 | { 178 | var actual = new byte[4]; 179 | VarIntConverter.Write(new MemoryStream(actual), false); 180 | AssertVarIntAreEqual(new byte[] { 0x00 }, actual); 181 | 182 | VarIntConverter.Write(new MemoryStream(actual), true); 183 | AssertVarIntAreEqual(new byte[] { 0x02 }, actual); /* zigzag(1) = 0x02 */ 184 | } 185 | 186 | [Test] 187 | public void TestAssertVarIntEqual() 188 | { 189 | // This test ensures that our assertVarIntEqual is working as expected. 190 | AssertVarIntAreEqual(new byte[0], new byte[0]); 191 | AssertVarIntAreEqual(new byte[] {0x01}, new byte[] {0x01}); 192 | AssertVarIntAreEqual(new byte[] { 0x81, 0x01 }, new byte[] { 0x81, 0x01 }); 193 | AssertVarIntAreEqual(new byte[] { 0x01 }, new byte[] { 0x01, 0x00 }); 194 | AssertVarIntAreEqual(new byte[] { 0x01, 0xAF }, new byte[] { 0x01 }); 195 | 196 | Assert.Throws(() => AssertVarIntAreEqual( 197 | new byte[] {0x01}, new byte[] {0x02})); 198 | Assert.Throws(() => AssertVarIntAreEqual( 199 | new byte[] { 0x81, 0x03 }, new byte[] { 0x81, 0x01 })); 200 | Assert.Throws(() => AssertVarIntAreEqual( 201 | new byte[] { 0x81, 0x01 }, new byte[] { 0x81, 0x81 })); 202 | Assert.Throws(() => AssertVarIntAreEqual( 203 | new byte[0], new byte[] { 0x02 })); 204 | 205 | } 206 | /// 207 | /// Compare two VarInt values in the given buffers. The buffers can be larger than the VarInt 208 | /// (ie: trailing bytes in both arrays are allowed). 209 | /// 210 | /// Throws a NUnit.Framework.AssertionException if the values don't match. 211 | /// 212 | /// Expected value 213 | /// Actual value 214 | public void AssertVarIntAreEqual(byte[] expected, byte[] actual) 215 | { 216 | if (expected.Length == 0 && actual.Length == 0) 217 | { 218 | return; 219 | } 220 | 221 | var length = Math.Min(expected.Length, actual.Length); 222 | var areEqual = false; 223 | var lastByteFound = false; 224 | int i; 225 | 226 | for(i = 0; i < length; ++i) 227 | { 228 | lastByteFound = (expected[i] & 0x80) == 0; 229 | areEqual = expected[i] == actual[i]; 230 | 231 | if(!areEqual || lastByteFound) 232 | { 233 | break; 234 | } 235 | 236 | } 237 | 238 | if(!areEqual || !lastByteFound) 239 | { 240 | throw new AssertionException( 241 | $"VarInt are not equal at byte {i}, " + 242 | $"expected: [{string.Join(", ", expected)}], " + 243 | $"actual: [{string.Join(", ", actual)}]"); 244 | } 245 | } 246 | } 247 | } 248 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp.UTest/kafka.UTest.netstandard.csproj: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | netcoreapp1.1 5 | kafka-sharp.UTest 6 | kafka-sharp.UTest 7 | true 8 | false 9 | false 10 | false 11 | false 12 | false 13 | false 14 | false 15 | false 16 | exe 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | $(DefineConstants);NET_CORE 37 | 38 | 39 | 40 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp.UTest/packages.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Batching/BatchStrategy.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using System; 5 | using Kafka.Cluster; 6 | 7 | namespace Kafka.Batching 8 | { 9 | interface IBatchStrategy : IDisposable 10 | { 11 | bool Send(INode node, TMessage message); 12 | } 13 | } -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Batching/Grouping.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using System; 5 | using System.Collections; 6 | using System.Collections.Generic; 7 | using System.Linq; 8 | using Kafka.Common; 9 | 10 | namespace Kafka.Batching 11 | { 12 | class Grouping : IGrouping, IDisposable 13 | { 14 | private static readonly Pool> _pool = 15 | new Pool>( 16 | 16384, // Hard coded security to avoid memory explosion if you're doing someting wrong 17 | () => new Grouping(), 18 | (g, _) => g.Clear()); 19 | 20 | private readonly List _data = new List(); 21 | 22 | public void Clear() 23 | { 24 | Key = default(TKey); 25 | _data.Clear(); 26 | } 27 | 28 | public static Grouping New(TKey key) 29 | { 30 | var g = _pool.Reserve(); 31 | g.Key = key; 32 | return g; 33 | } 34 | 35 | public void Add(TData data) 36 | { 37 | _data.Add(data); 38 | } 39 | 40 | private Grouping() 41 | { 42 | } 43 | 44 | #region IGrouping Members 45 | 46 | public TKey Key { get; private set; } 47 | 48 | #endregion 49 | 50 | #region IEnumerable Members 51 | 52 | public System.Collections.Generic.IEnumerator GetEnumerator() 53 | { 54 | return _data.GetEnumerator(); 55 | } 56 | 57 | #endregion 58 | 59 | #region IEnumerable Members 60 | 61 | System.Collections.IEnumerator System.Collections.IEnumerable.GetEnumerator() 62 | { 63 | return _data.GetEnumerator(); 64 | } 65 | 66 | #endregion 67 | 68 | #region IDisposable Members 69 | 70 | public void Dispose() 71 | { 72 | _pool.Release(this); 73 | } 74 | 75 | #endregion 76 | } 77 | 78 | class Grouping : IGrouping>, IDisposable 79 | { 80 | private static readonly Pool> _pool = new Pool>( 81 | 16384, // Hard coded security to avoid memory explosion if you're doing someting wrong 82 | () => new Grouping(), 83 | (g, _) => g.Clear()); 84 | 85 | private readonly Dictionary> _key2Groupings = new Dictionary>(); 86 | 87 | public static Grouping New(TKey1 key1) 88 | { 89 | var g = _pool.Reserve(); 90 | g.Key = key1; 91 | return g; 92 | } 93 | 94 | private void Clear() 95 | { 96 | Key = default(TKey1); 97 | foreach (var g in _key2Groupings.Values) 98 | { 99 | g.Dispose(); 100 | } 101 | _key2Groupings.Clear(); 102 | } 103 | 104 | public void Add(TKey2 key2, TData data) 105 | { 106 | Grouping key2Grouping; 107 | if (!_key2Groupings.TryGetValue(key2, out key2Grouping)) 108 | { 109 | key2Grouping = Grouping.New(key2); 110 | _key2Groupings[key2] = key2Grouping; 111 | } 112 | key2Grouping.Add(data); 113 | } 114 | 115 | #region IGrouping> Members 116 | 117 | public TKey1 Key { get; private set; } 118 | 119 | #endregion 120 | 121 | #region IEnumerable> Members 122 | 123 | public IEnumerator> GetEnumerator() 124 | { 125 | return _key2Groupings.Values.GetEnumerator(); 126 | } 127 | 128 | #endregion 129 | 130 | #region IEnumerable Members 131 | 132 | IEnumerator IEnumerable.GetEnumerator() 133 | { 134 | return _key2Groupings.Values.GetEnumerator(); 135 | } 136 | 137 | #endregion 138 | 139 | #region IDisposable Members 140 | 141 | public void Dispose() 142 | { 143 | _pool.Release(this); 144 | } 145 | 146 | #endregion 147 | } 148 | } -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Cluster/Pools.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using Kafka.Common; 5 | using Kafka.Public; 6 | 7 | namespace Kafka.Cluster 8 | { 9 | /// 10 | /// A utility class used to configure and manage internally used object pools. 11 | /// 12 | class Pools 13 | { 14 | private readonly IStatistics _stats; 15 | private readonly ILogger _logger; 16 | 17 | public Pools(IStatistics stats, ILogger logger) 18 | { 19 | _stats = stats; 20 | _logger = logger; 21 | } 22 | 23 | public Pool SocketBuffersPool { get; private set; } 24 | 25 | public void InitSocketBuffersPool(int buffersLength) 26 | { 27 | SocketBuffersPool = new Pool( 28 | () => 29 | { 30 | _stats.UpdateSocketBuffers(1); 31 | return new byte[buffersLength]; 32 | }, (s, b) => { }); 33 | } 34 | 35 | public Pool MessageBuffersPool { get; private set; } 36 | 37 | public void InitMessageBuffersPool(int limit, int maxChunkSize) 38 | { 39 | MessageBuffersPool = new Pool( 40 | limit, 41 | () => 42 | { 43 | _stats.UpdateMessageBuffers(1); 44 | return new ReusableMemoryStream(MessageBuffersPool, _logger); 45 | }, 46 | (b, reused) => 47 | { 48 | if (!reused) 49 | { 50 | _stats.UpdateMessageBuffers(-1); 51 | } 52 | else 53 | { 54 | b.SetLength(0); 55 | if (b.Capacity > maxChunkSize) 56 | { 57 | b.Capacity = maxChunkSize; 58 | } 59 | } 60 | }); 61 | } 62 | 63 | public Pool RequestsBuffersPool { get; private set; } 64 | 65 | public void InitRequestsBuffersPool() 66 | { 67 | RequestsBuffersPool = new Pool( 68 | () => 69 | { 70 | _stats.UpdateRequestsBuffers(1); 71 | return new ReusableMemoryStream(RequestsBuffersPool, _logger); 72 | }, 73 | (b, reused) => 74 | { 75 | if (!reused) 76 | { 77 | _stats.UpdateRequestsBuffers(-1); 78 | } 79 | else 80 | { 81 | b.SetLength(0); 82 | } 83 | }); 84 | } 85 | } 86 | } -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Cluster/ProduceMessage.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using System; 5 | using Kafka.Protocol; 6 | 7 | namespace Kafka.Cluster 8 | { 9 | /// 10 | /// Encapsulation of a message sent to Kafka brokers. 11 | /// 12 | struct ProduceMessage 13 | { 14 | public string Topic; 15 | public Message Message; 16 | public DateTime ExpirationDate; 17 | public int RequiredPartition; 18 | public int Partition; 19 | public int Retried; 20 | 21 | public static ProduceMessage New(string topic, int partition, Message message, DateTime expirationDate) 22 | { 23 | return new ProduceMessage 24 | { 25 | Topic = topic, 26 | Partition = partition, 27 | RequiredPartition = partition, 28 | Message = message, 29 | ExpirationDate = expirationDate 30 | }; 31 | } 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Cluster/TimeoutScheduler.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using System; 5 | using System.Collections.Generic; 6 | using System.Threading; 7 | 8 | namespace Kafka.Cluster 9 | { 10 | /// 11 | /// Handle timeout checks for nodes. 12 | /// 13 | class TimeoutScheduler : IDisposable 14 | { 15 | private readonly Dictionary _checkers = new Dictionary(); 16 | private readonly Timer _timer; 17 | 18 | // Scheduler that does nothing, useful for tests 19 | public TimeoutScheduler() 20 | { 21 | } 22 | 23 | /// 24 | /// Scheduler that checks timeout every periodMs milliseconds. 25 | /// 26 | /// 27 | public TimeoutScheduler(int periodMs) 28 | { 29 | _timer = new Timer(_ => Check(), null, periodMs, periodMs); 30 | } 31 | 32 | public void Register(INode node, Action checker) 33 | { 34 | lock (_checkers) 35 | { 36 | _checkers.Add(node, checker); 37 | } 38 | } 39 | 40 | public void Unregister(INode node) 41 | { 42 | lock (_checkers) 43 | { 44 | _checkers.Remove(node); 45 | } 46 | } 47 | 48 | public void Check() 49 | { 50 | lock (_checkers) 51 | { 52 | foreach (var checker in _checkers.Values) 53 | { 54 | checker(); 55 | } 56 | } 57 | } 58 | 59 | public void Dispose() 60 | { 61 | if (_timer != null) 62 | { 63 | _timer.Dispose(); 64 | } 65 | } 66 | } 67 | } -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Common/ActionBlockTaskScheduler.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using System; 5 | using System.Collections.Generic; 6 | using System.Linq; 7 | using System.Threading; 8 | using System.Threading.Tasks; 9 | using System.Threading.Tasks.Dataflow; 10 | 11 | namespace Kafka.Common 12 | { 13 | /// 14 | /// A TaskScheduler that use an ActionBlock to dispatch tasks. This is a simple way 15 | /// to quickly set up a TaskScheduler with limited concurrency while still using 16 | /// .NET Threadpool threads. 17 | /// 18 | public class ActionBlockTaskScheduler : TaskScheduler 19 | { 20 | private readonly int _dop; 21 | private readonly ActionBlock _pool; 22 | #if DEBUG 23 | private readonly HashSet _tasks = new HashSet(); 24 | #endif 25 | 26 | public ActionBlockTaskScheduler(int dop) 27 | { 28 | _pool = new ActionBlock(a => a(), new ExecutionDataflowBlockOptions {MaxDegreeOfParallelism = dop}); 29 | _dop = dop; 30 | } 31 | 32 | protected override IEnumerable GetScheduledTasks() 33 | { 34 | #if DEBUG 35 | bool lockTaken = false; 36 | try 37 | { 38 | Monitor.TryEnter(_tasks, ref lockTaken); 39 | return _tasks.ToArray(); 40 | } 41 | finally 42 | { 43 | if (lockTaken) 44 | Monitor.Exit(_tasks); 45 | } 46 | #else 47 | yield break; 48 | #endif 49 | } 50 | 51 | protected override void QueueTask(Task task) 52 | { 53 | #if DEBUG 54 | lock (_tasks) 55 | { 56 | _tasks.Add(task); 57 | } 58 | _pool.Post(() => 59 | { 60 | lock (_tasks) 61 | { 62 | _tasks.Remove(task); 63 | } 64 | TryExecuteTask(task); 65 | }); 66 | #else 67 | _pool.Post(() => TryExecuteTask(task)); 68 | #endif 69 | } 70 | 71 | protected override bool TryExecuteTaskInline(Task task, bool taskWasPreviouslyQueued) 72 | { 73 | return !taskWasPreviouslyQueued && TryExecuteTask(task); 74 | } 75 | 76 | public override int MaximumConcurrencyLevel 77 | { 78 | get { return _dop; } 79 | } 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Common/BigEndianConverter.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using System; 5 | using System.IO; 6 | 7 | namespace Kafka.Common 8 | { 9 | /// 10 | /// 11 | /// Originally comes from kafka4net project: https://github.com/ntent-ad/kafka4net 12 | /// (https://github.com/ntent-ad/kafka4net/blob/master/src/Utils/BigEndianConverter.cs) 13 | /// 14 | /// License was Apache 2: http://www.apache.org/licenses/LICENSE-2.0 15 | /// 16 | /// 17 | internal static class BigEndianConverter 18 | { 19 | public static int ReadInt32(MemoryStream s) 20 | { 21 | if (s.Position + 4 > s.Length) 22 | throw new Exception(string.Format("ReadInt32 needs 4 bytes but got only {0}", s.Length - s.Position)); 23 | return s.ReadByte() << 3*8 | s.ReadByte() << 2*8 | s.ReadByte() << 8 | s.ReadByte(); 24 | } 25 | 26 | public static short ReadInt16(MemoryStream s) 27 | { 28 | if (s.Position + 2 > s.Length) 29 | throw new Exception(string.Format("ReadInt16 needs 2 bytes but got only {0}", s.Length - s.Position)); 30 | return (short) ((s.ReadByte() << 8) | s.ReadByte()); 31 | } 32 | 33 | public static long ReadInt64(MemoryStream stream) 34 | { 35 | if (stream.Position + 8 > stream.Length) 36 | throw new Exception(string.Format("ReadInt64 needs 8 bytes but got only {0}", 37 | stream.Length - stream.Position)); 38 | 39 | var res = 0L; 40 | for (int i = 0; i < 8; i++) 41 | res = res << 8 | (uint) stream.ReadByte(); 42 | return res; 43 | } 44 | 45 | public static void Write(MemoryStream stream, long i) 46 | { 47 | ulong ui = (ulong) i; 48 | for (int j = 7; j >= 0; j--) 49 | stream.WriteByte((byte) (ui >> j*8 & 0xff)); 50 | } 51 | 52 | public static void Write(MemoryStream stream, int i) 53 | { 54 | WriteByte(stream, i >> 8*3); 55 | WriteByte(stream, i >> 8*2); 56 | WriteByte(stream, i >> 8); 57 | WriteByte(stream, i); 58 | } 59 | 60 | public static void Write(MemoryStream stream, short i) 61 | { 62 | WriteByte(stream, i >> 8); 63 | WriteByte(stream, i); 64 | } 65 | 66 | public static void WriteByte(MemoryStream stream, int i) 67 | { 68 | stream.WriteByte((byte) (i & 0xff)); 69 | } 70 | 71 | public static void Write(byte[] buff, int i) 72 | { 73 | Write(buff, i, 0); 74 | } 75 | 76 | public static void Write(byte[] buff, int i, int offset) 77 | { 78 | buff[offset + 0] = (byte)(i >> 8 * 3); 79 | buff[offset + 1] = (byte)((i & 0xff0000) >> 8 * 2); 80 | buff[offset + 2] = (byte)((i & 0xff00) >> 8); 81 | buff[offset + 3] = (byte)(i & 0xff); 82 | } 83 | 84 | public static int ToInt32(byte[] buff) 85 | { 86 | return ToInt32(buff, 0); 87 | } 88 | 89 | public static int ToInt32(byte[] buff, int offset) 90 | { 91 | return (buff[offset] << 8*3) | (buff[offset + 1] << 8*2) | (buff[offset + 2] << 8) | buff[offset + 3]; 92 | } 93 | } 94 | } 95 | 96 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Common/Crc32.cs: -------------------------------------------------------------------------------- 1 | // Copyright (c) Damien Guard. All rights reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 3 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 4 | // Originally published at http://damieng.com/blog/2006/08/08/calculating_crc32_in_c_and_net 5 | 6 | // The original code has been stripped of all non used parts and adapted to our use. 7 | 8 | 9 | using Kafka.Protocol; 10 | 11 | namespace Kafka.Common 12 | { 13 | /// 14 | /// Implements a 32-bit CRC hash algorithm. 15 | /// 16 | internal class Crc32 17 | { 18 | public const uint DefaultPolynomial = 0xedb88320u; 19 | public const uint DefaultSeed = 0xffffffffu; 20 | 21 | public const uint CastagnoliPolynomial = 0x82F63B78u; 22 | public const uint CastagnoliSeed = DefaultSeed; 23 | 24 | public uint Polynomial { get; private set; } 25 | public uint Seed { get; private set; } 26 | 27 | private uint[] Table; 28 | 29 | private static readonly Crc32 DefaultCrc32; 30 | private static readonly Crc32 CastagnoliCrc32; 31 | 32 | /// 33 | /// Compute the CRC-32 of the byte sequence using IEEE standard polynomial values. 34 | /// This is the regular "internet" CRC. 35 | /// 36 | /// byte stream 37 | /// start offset of the byte sequence 38 | /// size of the byte sequence 39 | /// 40 | public static uint Compute(ReusableMemoryStream stream, long start, long size) => 41 | DefaultCrc32.ComputeForStream(stream, start, size); 42 | 43 | /// 44 | /// Compute the CRC-32 of the byte sequence using Castagnoli polynomial values. 45 | /// This alternate CRC-32 is often used to compute the CRC as it is often yield 46 | /// better chances to detect errors in larger payloads. 47 | /// 48 | /// In particular, it is used to compute the CRC of a RecordBatch in newer versions 49 | /// of the Kafka protocol. 50 | /// 51 | /// byte stream 52 | /// start offset of the byte sequence 53 | /// size of the byte sequence 54 | /// 55 | public static uint ComputeCastagnoli(ReusableMemoryStream stream, long start, long size) => 56 | CastagnoliCrc32.ComputeForStream(stream, start, size); 57 | 58 | private Crc32(uint polynomial, uint seed) 59 | { 60 | Polynomial = polynomial; 61 | Seed = seed; 62 | InitializeTable(); 63 | } 64 | 65 | static Crc32() 66 | { 67 | DefaultCrc32 = new Crc32(DefaultPolynomial, DefaultSeed); 68 | CastagnoliCrc32 = new Crc32(CastagnoliPolynomial, CastagnoliSeed); 69 | } 70 | 71 | private uint ComputeForStream(ReusableMemoryStream stream, long start, long size) 72 | { 73 | var crc = Seed; 74 | 75 | var buffer = stream.GetBuffer(); 76 | for (var i = start; i < start + size; ++i) 77 | crc = (crc >> 8) ^ Table[buffer[i] ^ crc & 0xff]; 78 | 79 | return ~crc; 80 | } 81 | 82 | private void InitializeTable() 83 | { 84 | Table = new uint[256]; 85 | for (var i = 0; i < 256; i++) 86 | { 87 | var entry = (uint)i; 88 | for (var j = 0; j < 8; j++) 89 | if ((entry & 1) == 1) 90 | entry = (entry >> 1) ^ Polynomial; 91 | else 92 | entry = entry >> 1; 93 | Table[i] = entry; 94 | } 95 | } 96 | 97 | internal static void CheckCrcCastagnoli(int crc, ReusableMemoryStream stream, long crcStartPos, long crcLength = -1) 98 | { 99 | CheckCrc(CastagnoliCrc32, crc, stream, crcStartPos, crcLength); 100 | } 101 | 102 | internal static void CheckCrc(int crc, ReusableMemoryStream stream, long crcStartPos) 103 | { 104 | CheckCrc(DefaultCrc32, crc, stream, crcStartPos); 105 | } 106 | 107 | private static void CheckCrc(Crc32 crcAlgo, int crc, ReusableMemoryStream stream, long crcStartPos, long crcLength = -1) 108 | { 109 | var length = crcLength == -1 ? stream.Position - crcStartPos : crcLength; 110 | var computedCrc = (int)crcAlgo.ComputeForStream(stream, crcStartPos, length); 111 | if (computedCrc != crc) 112 | { 113 | throw new CrcException( 114 | string.Format("Corrupt message: CRC32 does not match. Calculated {0} but got {1}", computedCrc, 115 | crc)); 116 | } 117 | } 118 | } 119 | } 120 | 121 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Common/MemoryStreamExtensions.cs: -------------------------------------------------------------------------------- 1 | #if NET_CORE 2 | using System; 3 | using System.IO; 4 | 5 | namespace Kafka.Common 6 | { 7 | internal static class MemoryStreamExtensions 8 | { 9 | public static byte[] GetBuffer(this MemoryStream memoryStream) 10 | { 11 | ArraySegment arraySegment; 12 | if (!memoryStream.TryGetBuffer(out arraySegment)) 13 | { 14 | // Shouldn't happen since we are never constructing a memory stream with "publiclyVisible" set to false 15 | throw new NotSupportedException(); 16 | } 17 | 18 | // Don't care about the offset since we are never providing an array and specifying an origin index != 0 19 | return arraySegment.Array; 20 | } 21 | } 22 | } 23 | #endif -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Common/Pool.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using System; 5 | using System.Collections.Concurrent; 6 | using System.Threading; 7 | 8 | namespace Kafka.Common 9 | { 10 | /// 11 | /// Manage a pool of T. 12 | /// 13 | internal class Pool where T : class 14 | { 15 | private readonly ConcurrentQueue _pool = new ConcurrentQueue(); 16 | private readonly Func _constructor; 17 | private readonly Action _clearAction; 18 | private readonly int _limit; 19 | private int _watermark; 20 | 21 | public int Watermark 22 | { 23 | get { return _watermark; } 24 | } 25 | 26 | public Pool(Func constructor, Action clearAction) : this(0, constructor, clearAction) 27 | { 28 | } 29 | 30 | public Pool(int limit, Func constructor, Action clearAction) 31 | { 32 | if (constructor == null) 33 | { 34 | throw new ArgumentNullException("constructor"); 35 | } 36 | if (clearAction == null) 37 | { 38 | throw new ArgumentNullException("clearAction"); 39 | } 40 | _limit = limit; 41 | _constructor = constructor; 42 | _clearAction = clearAction; 43 | } 44 | 45 | public T Reserve() 46 | { 47 | T item; 48 | if (!_pool.TryDequeue(out item)) 49 | { 50 | return _constructor(); 51 | } 52 | 53 | Interlocked.Decrement(ref _watermark); 54 | return item; 55 | } 56 | 57 | public void Release(T item) 58 | { 59 | if (item == null) return; 60 | if (_limit > 0 && Interlocked.Increment(ref _watermark) > _limit) 61 | { 62 | _clearAction(item, false); 63 | Interlocked.Decrement(ref _watermark); 64 | } 65 | else 66 | { 67 | _clearAction(item, true); 68 | _pool.Enqueue(item); 69 | } 70 | } 71 | } 72 | } -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Common/ReusableMemoryStream.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using System; 5 | using System.IO; 6 | using System.Threading; 7 | using Kafka.Public; 8 | 9 | namespace Kafka.Common 10 | { 11 | /// 12 | /// Implement a pool of MemoryStream allowing for recycling 13 | /// of underlying buffers. This kills two birds with one stone: 14 | /// we can minimize MemoryStream/buffers creation when (de)serialing requests/responses 15 | /// and we can minimize the number of buffers passed to the network layers. 16 | /// 17 | class ReusableMemoryStream : MemoryStream, ISizedMemorySerializable, IDisposable 18 | { 19 | private static int _nextId; 20 | private readonly int _id; // Useful to track leaks while debugging 21 | private readonly Pool _myPool; 22 | public readonly ILogger Logger; 23 | 24 | public ReusableMemoryStream(Pool myPool, ILogger logger = null) 25 | { 26 | _id = Interlocked.Increment(ref _nextId); 27 | _myPool = myPool; 28 | Logger = logger; 29 | } 30 | 31 | public Pool Pool 32 | { 33 | get 34 | { 35 | return _myPool; 36 | } 37 | } 38 | 39 | internal byte this[int index] 40 | { 41 | get { return this.GetBuffer()[index]; } 42 | set { this.GetBuffer()[index] = value; } 43 | } 44 | 45 | public new void Dispose() 46 | { 47 | (this as IDisposable).Dispose(); 48 | } 49 | 50 | void IDisposable.Dispose() 51 | { 52 | if (_myPool != null) 53 | { 54 | _myPool.Release(this); 55 | } 56 | } 57 | 58 | public void Serialize(MemoryStream toStream) 59 | { 60 | byte[] array = this.GetBuffer(); 61 | int length = (int) Length; 62 | toStream.Write(array, 0, length); 63 | } 64 | 65 | public long SerializedSize() 66 | { 67 | return this.Length; 68 | } 69 | } 70 | 71 | static class ReusableExtentions 72 | { 73 | // CopyTo allocates a temporary buffer. As we're already pooling buffers, 74 | // we might as well provide a CopyTo that makes use of that instead of 75 | // using Stream.CopyTo (which allocates a buffer of its own, even if it 76 | // does so efficiently). 77 | public static void ReusableCopyTo(this Stream input, Stream destination, ReusableMemoryStream tmpBuffer) 78 | { 79 | tmpBuffer.SetLength(81920); 80 | var buffer = tmpBuffer.GetBuffer(); 81 | int read; 82 | while ((read = input.Read(buffer, 0, buffer.Length)) != 0) 83 | { 84 | destination.Write(buffer, 0, read); 85 | } 86 | } 87 | } 88 | } -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Common/Timestamp.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.Collections.Generic; 3 | using System.Linq; 4 | using System.Text; 5 | using System.Threading.Tasks; 6 | 7 | namespace Kafka.Common 8 | { 9 | static class Timestamp 10 | { 11 | public static DateTime Epoch 12 | { 13 | get { return new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc); } 14 | } 15 | 16 | public static long Now { get { return ToUnixTimestamp(DateTime.UtcNow); } } 17 | 18 | public static long ToUnixTimestamp(DateTime datetime) 19 | { 20 | return (long)datetime.ToUniversalTime().Subtract(Epoch).TotalMilliseconds; 21 | } 22 | 23 | public static DateTime FromUnixTimestamp(long timestamp) 24 | { 25 | return Epoch.Add(TimeSpan.FromMilliseconds(timestamp)); 26 | } 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Common/VarIntConverter.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using System; 5 | using System.IO; 6 | 7 | namespace Kafka.Common 8 | { 9 | /// 10 | /// Implement encoding and decoding for VarInt format. 11 | /// Values are encoded in ZigZag format (a format which allows to keep small 12 | /// binary values for negative integers). 13 | /// 14 | /// When reading a VarInt, an OverflowError is thrown if the value doesn't fit 15 | /// in the expected return type. 16 | /// 17 | /// Note: we only deal with signed types, since Kafka is implemented in 18 | /// Java, which doesn't have unsigned integers. 19 | /// 20 | /// See: https://developers.google.com/protocol-buffers/docs/encoding#varints 21 | /// 22 | internal static class VarIntConverter 23 | { 24 | private static ulong ToZigZag(long i) 25 | { 26 | return unchecked((ulong)((i << 1) ^ (i >> 63))); 27 | } 28 | 29 | /// 30 | /// Returns the minimum required size in bytes to store the given value as a VarInt. 31 | /// 32 | /// value to be represented as a VarInt 33 | /// 34 | public static int SizeOfVarInt(long value) 35 | { 36 | var asZigZag = ToZigZag(value); 37 | int nbBytes = 1; 38 | while ((asZigZag & 0xffffffffffffff80L) != 0L) 39 | { 40 | nbBytes += 1; 41 | asZigZag = asZigZag >> 7; 42 | } 43 | 44 | return nbBytes; 45 | } 46 | 47 | public static void Write(MemoryStream stream, long i) 48 | { 49 | var asZigZag = ToZigZag(i); 50 | 51 | // value & 1111 1111 ... 1000 0000 will zero the last 7 bytes, 52 | // if the result is zero, it means we only have those last 7 bytes 53 | // to write. 54 | while((asZigZag & 0xffffffffffffff80L) != 0L) 55 | { 56 | // keep only the 7 most significant bytes: 57 | // value = (value & 0111 1111) 58 | // and add a 1 in the most significant bit of the byte, meaning 59 | // it's not the last byte of the VarInt: 60 | // value = (value | 1000 0000) 61 | stream.WriteByte((byte)((asZigZag & 0x7f) | 0x80)); 62 | // Shift the 7 bits we just wrote to the stream and continue: 63 | asZigZag = asZigZag >> 7; 64 | } 65 | stream.WriteByte((byte)asZigZag); 66 | } 67 | 68 | public static void Write(MemoryStream stream, int i) 69 | { 70 | Write(stream, (long)i); 71 | } 72 | 73 | public static void Write(MemoryStream stream, short i) 74 | { 75 | Write(stream, (long)i); 76 | } 77 | 78 | public static void Write(MemoryStream stream, byte i) 79 | { 80 | Write(stream, (long)i); 81 | } 82 | 83 | public static void Write(MemoryStream stream, bool i) 84 | { 85 | Write(stream, i ? 1L : 0L); 86 | } 87 | 88 | public static long ReadAsInt64(MemoryStream stream) 89 | { 90 | ulong asZigZag = 0L; // Result value 91 | int i = 0; // Number of bits written 92 | long b; // Byte read 93 | 94 | // Check if the 8th bit of the byte is 1, meaning there will be more to read: 95 | // b & 1000 0000 96 | while (((b = stream.ReadByte()) & 0x80) != 0) { 97 | // Take the 7 bits of the byte we want to add and insert them at the 98 | // right location (offset i) 99 | asZigZag |= (ulong)(b & 0x7f) << i; 100 | i += 7; 101 | if (i > 63) 102 | throw new OverflowException(); 103 | } 104 | 105 | if (i == 63 && b != 0x01) 106 | { 107 | // We read 63 bits, we can only read one more (the most significant bit, MSB), 108 | // or it means that the VarInt can't fit in a long. 109 | // If the bit to read was 0, we would not have read it (as it's the MSB), thus, it must be 1. 110 | throw new OverflowException(); 111 | } 112 | 113 | asZigZag |= (ulong)b << i; 114 | 115 | // The value is signed 116 | if ((asZigZag & 0x1) == 0x1) 117 | { 118 | return (-1 * ((long)(asZigZag >> 1) + 1)); 119 | } 120 | 121 | 122 | return (long)(asZigZag >> 1); 123 | } 124 | 125 | public static int ReadAsInt32(MemoryStream stream) 126 | { 127 | return checked((int)ReadAsInt64(stream)); 128 | } 129 | 130 | public static short ReadAsInt16(MemoryStream stream) 131 | { 132 | return checked((short)ReadAsInt64(stream)); 133 | } 134 | 135 | public static byte ReadAsByte(MemoryStream stream) 136 | { 137 | return checked((byte)ReadAsInt64(stream)); 138 | } 139 | 140 | public static bool ReadAsBool(MemoryStream stream) 141 | { 142 | return ReadAsInt64(stream) != 0; 143 | } 144 | } 145 | } 146 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Kafka.csproj: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Debug 6 | AnyCPU 7 | {C2E6D32F-69D0-41B4-A1EA-9ADE27A99B5E} 8 | Library 9 | Properties 10 | Kafka 11 | Kafka 12 | v4.7.1 13 | 512 14 | 15 | 16 | 17 | true 18 | full 19 | false 20 | bin\Debug\ 21 | DEBUG;TRACE 22 | prompt 23 | 4 24 | 25 | 26 | pdbonly 27 | true 28 | bin\Release\ 29 | TRACE 30 | prompt 31 | 4 32 | 33 | 34 | 35 | ..\packages\Crc32C.NET.1.0.5.0\lib\net20\Crc32C.NET.dll 36 | True 37 | 38 | 39 | ..\packages\lz4net.1.0.15.93\lib\net4-client\LZ4.dll 40 | True 41 | 42 | 43 | ..\packages\Snappy.NET.1.1.1.8\lib\net45\Snappy.NET.dll 44 | True 45 | 46 | 47 | 48 | 49 | ..\packages\System.Reactive.Core.3.1.1\lib\net45\System.Reactive.Core.dll 50 | True 51 | 52 | 53 | ..\packages\System.Reactive.Interfaces.3.1.1\lib\net45\System.Reactive.Interfaces.dll 54 | True 55 | 56 | 57 | ..\packages\System.Reactive.Linq.3.1.1\lib\net45\System.Reactive.Linq.dll 58 | True 59 | 60 | 61 | 62 | ..\packages\Microsoft.Tpl.Dataflow.4.5.24\lib\portable-net45+win8+wpa81\System.Threading.Tasks.Dataflow.dll 63 | True 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | 126 | 127 | 128 | 129 | 130 | 131 | 132 | 133 | 134 | 135 | 136 | 137 | 138 | 139 | 146 | 147 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Kafka.netstandard.csproj: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | netstandard1.6 5 | kafka-sharp 6 | High Performance .NET Kafka Client 7 | kafka-sharp 8 | Kafka;kafka-sharp;Criteo 9 | https://github.com/criteo/kafka-sharp 10 | LICENSE 11 | false 12 | false 13 | false 14 | false 15 | false 16 | false 17 | false 18 | false 19 | kafka-sharp 20 | Criteo 21 | Criteo 22 | Criteo 23 | High Performance .NET Kafka Client 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | $(DefineConstants);NET_CORE 43 | 44 | 45 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Kafka.nuspec: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | kafka-sharp 5 | $version$ 6 | A C# Kafka driver 7 | A .NET implementation of the Apache Kafka client side protocol geared toward performance (both throughput and memory wise). It is especially suited for scenarios where applications are streaming a large number of messages across a fair number of topics. 8 | Criteo 9 | $copyright$ 10 | https://github.com/criteo/kafka-sharp 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Properties/AssemblyInfo.cs: -------------------------------------------------------------------------------- 1 | using System.Reflection; 2 | using System.Runtime.CompilerServices; 3 | using System.Runtime.InteropServices; 4 | 5 | // General Information about an assembly is controlled through the following 6 | // set of attributes. Change these attribute values to modify the information 7 | // associated with an assembly. 8 | [assembly: AssemblyTitle("kafka-sharp")] 9 | [assembly: AssemblyDescription("")] 10 | [assembly: AssemblyConfiguration("")] 11 | [assembly: AssemblyCompany("Criteo")] 12 | [assembly: AssemblyProduct("kafka-sharp")] 13 | [assembly: AssemblyCopyright("Copyright © Criteo 2015")] 14 | [assembly: AssemblyTrademark("")] 15 | [assembly: AssemblyCulture("")] 16 | 17 | // Setting ComVisible to false makes the types in this assembly not visible 18 | // to COM components. If you need to access a type in this assembly from 19 | // COM, set the ComVisible attribute to true on that type. 20 | [assembly: ComVisible(false)] 21 | 22 | // The following GUID is for the ID of the typelib if this project is exposed to COM 23 | [assembly: Guid("e6692333-8e76-4b7b-b3f6-5fd5c20cb741")] 24 | 25 | // Version information for an assembly consists of the following four values: 26 | // 27 | // Major Version 28 | // Minor Version 29 | // Build Number 30 | // Revision 31 | // 32 | // You can specify all the values or you can default the Build and Revision Numbers 33 | // by using the '*' as shown below: 34 | // [assembly: AssemblyVersion("1.0.*")] 35 | [assembly: AssemblyVersion("1.0.0.0")] 36 | [assembly: AssemblyFileVersion("1.0.0.0")] 37 | 38 | // For testing purpose 39 | [assembly: InternalsVisibleTo("Kafka.UTest")] 40 | #if NET_CORE 41 | [assembly: InternalsVisibleTo("kafka-sharp.UTest")] // dotnet core considers the directory name when building the assembly 42 | #endif 43 | [assembly: InternalsVisibleTo("DynamicProxyGenAssembly2")] 44 | 45 | 46 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Protocol/CommonRequest.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using Kafka.Common; 5 | 6 | namespace Kafka.Protocol 7 | { 8 | // Convenience interface to avoid code duplication. 9 | // quite ugly from OO perspective but struct cannot inherit in C#. 10 | interface ISerializableRequest 11 | { 12 | ReusableMemoryStream Serialize(ReusableMemoryStream target, int correlationId, byte[] clientId, object extra, Basics.ApiVersion version); 13 | void SerializeBody(ReusableMemoryStream stream, object extra, Basics.ApiVersion version); 14 | } 15 | 16 | // Convenience class to avoid code duplication. We cannot 17 | // use inheritance with structs so we resort to this dumb trick 18 | // and ugly ISerializableRequest. 19 | static class CommonRequest 20 | { 21 | public static ReusableMemoryStream Serialize(ReusableMemoryStream stream, TRequest request, int correlationId, byte[] clientId, 22 | Basics.ApiKey apiKey, Basics.ApiVersion apiVersion, object extra) where TRequest : ISerializableRequest 23 | { 24 | Basics.WriteRequestHeader(stream, correlationId, apiKey, apiVersion, clientId); 25 | request.SerializeBody(stream, extra, apiVersion); 26 | Basics.WriteMessageLength(stream); 27 | stream.Position = 0; 28 | return stream; 29 | } 30 | } 31 | } -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Protocol/CommonResponse.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using System; 5 | using Kafka.Common; 6 | 7 | namespace Kafka.Protocol 8 | { 9 | internal struct CommonResponse : IMemoryStreamSerializable where TPartitionData : IMemoryStreamSerializable, new() 10 | { 11 | public TopicData[] TopicsResponse; 12 | 13 | public void Serialize(ReusableMemoryStream stream, object extra, Basics.ApiVersion version) 14 | { 15 | throw new NotImplementedException(); 16 | } 17 | 18 | public void Deserialize(ReusableMemoryStream stream, object extra, Basics.ApiVersion version) 19 | { 20 | TopicsResponse = Basics.DeserializeArrayExtra>(stream, extra, version); 21 | } 22 | } 23 | } -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Protocol/ConsumerGroupRequests.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using System; 5 | using System.Collections.Generic; 6 | using System.Linq; 7 | using System.Linq.Expressions; 8 | using Kafka.Common; 9 | 10 | namespace Kafka.Protocol 11 | { 12 | 13 | #region JoinConsumerGroupRequest 14 | 15 | struct ConsumerGroupProtocolMetadata : IMemoryStreamSerializable 16 | { 17 | public short Version; 18 | public IEnumerable Subscription; 19 | public byte[] UserData; 20 | 21 | #region Serialization 22 | 23 | public void Serialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 24 | { 25 | BigEndianConverter.Write(stream, Version); 26 | Basics.WriteArray(stream, Subscription ?? Enumerable.Empty(), Basics.SerializeString); 27 | Basics.SerializeBytes(stream, UserData); 28 | } 29 | 30 | #region Deserialization (for test) 31 | 32 | public void Deserialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 33 | { 34 | Version = BigEndianConverter.ReadInt16(stream); 35 | Subscription = Basics.DeserializeArray(stream, Basics.DeserializeString); 36 | UserData = Basics.DeserializeBytes(stream); 37 | } 38 | 39 | #endregion 40 | 41 | #endregion 42 | } 43 | 44 | struct ConsumerGroupProtocol : IMemoryStreamSerializable 45 | { 46 | public string ProtocolName; 47 | public ConsumerGroupProtocolMetadata ProtocolMetadata; 48 | 49 | #region Serialization 50 | 51 | public void Serialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 52 | { 53 | Basics.SerializeString(stream, ProtocolName); 54 | var pm = ProtocolMetadata; 55 | Basics.WriteWithSize(stream, s => pm.Serialize(s, null, Basics.ApiVersion.Ignored)); 56 | } 57 | 58 | #region Deserialization (for test) 59 | 60 | public void Deserialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 61 | { 62 | ProtocolName = Basics.DeserializeString(stream); 63 | BigEndianConverter.ReadInt32(stream); 64 | ProtocolMetadata = new ConsumerGroupProtocolMetadata(); 65 | ProtocolMetadata.Deserialize(stream, null, Basics.ApiVersion.Ignored); 66 | } 67 | 68 | #endregion 69 | 70 | #endregion 71 | } 72 | 73 | class JoinConsumerGroupRequest : ISerializableRequest 74 | { 75 | public string GroupId; 76 | public int SessionTimeout; 77 | public int RebalanceTimeout; 78 | public string MemberId; 79 | public IEnumerable Subscription; 80 | 81 | public ReusableMemoryStream Serialize(ReusableMemoryStream target, int correlationId, byte[] clientId, 82 | object extra, Basics.ApiVersion version) 83 | { 84 | return CommonRequest.Serialize(target, this, correlationId, clientId, Basics.ApiKey.JoinGroupRequest, 85 | version, null); 86 | } 87 | 88 | public void SerializeBody(ReusableMemoryStream stream, object extra, Basics.ApiVersion version) 89 | { 90 | Basics.SerializeString(stream, GroupId); 91 | BigEndianConverter.Write(stream, SessionTimeout); 92 | if (version > Basics.ApiVersion.V0) 93 | { 94 | BigEndianConverter.Write(stream, RebalanceTimeout); 95 | } 96 | Basics.SerializeString(stream, MemberId); 97 | Basics.SerializeString(stream, "consumer"); 98 | var metadata = new[] // Only one protocol is supported 99 | { 100 | new ConsumerGroupProtocol 101 | { 102 | ProtocolName = "kafka-sharp-consumer", 103 | ProtocolMetadata = 104 | new ConsumerGroupProtocolMetadata { Version = 0, Subscription = Subscription, UserData = null, } 105 | } 106 | }; 107 | Basics.WriteArray(stream, metadata, (s, d) => d.Serialize(s, null, Basics.ApiVersion.Ignored)); 108 | } 109 | } 110 | 111 | #endregion 112 | 113 | #region SyncConsumerGroupRequest 114 | 115 | struct PartitionAssignment : IMemoryStreamSerializable 116 | { 117 | public int Partition; 118 | 119 | public void Serialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 120 | { 121 | BigEndianConverter.Write(stream, Partition); 122 | } 123 | 124 | public void Deserialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 125 | { 126 | Partition = BigEndianConverter.ReadInt32(stream); 127 | } 128 | } 129 | 130 | struct ConsumerGroupMemberAssignment : IMemoryStreamSerializable 131 | { 132 | public short Version; 133 | public IEnumerable> PartitionAssignments; 134 | public byte[] UserData; 135 | 136 | public void Serialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 137 | { 138 | BigEndianConverter.Write(stream, Version); 139 | Basics.WriteArray(stream, PartitionAssignments, Basics.ApiVersion.Ignored); 140 | Basics.SerializeBytes(stream, UserData); 141 | } 142 | 143 | public void Deserialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 144 | { 145 | Version = BigEndianConverter.ReadInt16(stream); 146 | PartitionAssignments = Basics.DeserializeArray>(stream); 147 | UserData = Basics.DeserializeBytes(stream); 148 | } 149 | } 150 | 151 | struct ConsumerGroupAssignment : IMemoryStreamSerializable 152 | { 153 | public string MemberId; 154 | public ConsumerGroupMemberAssignment MemberAssignment; 155 | 156 | public void Serialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 157 | { 158 | Basics.SerializeString(stream, MemberId); 159 | var ma = MemberAssignment; 160 | Basics.WriteWithSize(stream, s => ma.Serialize(s, null, Basics.ApiVersion.Ignored)); 161 | } 162 | 163 | public void Deserialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 164 | { 165 | MemberId = Basics.DeserializeString(stream); 166 | MemberAssignment = new ConsumerGroupMemberAssignment 167 | { 168 | PartitionAssignments = Enumerable.Empty>() 169 | }; 170 | if (BigEndianConverter.ReadInt32(stream) > 0) 171 | { 172 | MemberAssignment.Deserialize(stream, null, Basics.ApiVersion.Ignored); 173 | } 174 | } 175 | } 176 | 177 | class SyncConsumerGroupRequest : ISerializableRequest 178 | { 179 | public string GroupId; 180 | public int GenerationId; 181 | public string MemberId; 182 | public IEnumerable GroupAssignment; 183 | 184 | public ReusableMemoryStream Serialize(ReusableMemoryStream target, int correlationId, byte[] clientId, 185 | object _, Basics.ApiVersion __) 186 | { 187 | return CommonRequest.Serialize(target, this, correlationId, clientId, Basics.ApiKey.SyncGroupRequest, 188 | Basics.ApiVersion.V0, null); 189 | } 190 | 191 | public void SerializeBody(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 192 | { 193 | Basics.SerializeString(stream, GroupId); 194 | BigEndianConverter.Write(stream, GenerationId); 195 | Basics.SerializeString(stream, MemberId); 196 | Basics.WriteArray(stream, GroupAssignment); 197 | } 198 | } 199 | 200 | #endregion 201 | 202 | #region HeartbeatRequest 203 | 204 | class HeartbeatRequest : ISerializableRequest 205 | { 206 | public string GroupId; 207 | public int GenerationId; 208 | public string MemberId; 209 | 210 | public ReusableMemoryStream Serialize(ReusableMemoryStream target, int correlationId, byte[] clientId, object _, Basics.ApiVersion __) 211 | { 212 | return CommonRequest.Serialize(target, this, correlationId, clientId, Basics.ApiKey.HeartbeatRequest, 213 | Basics.ApiVersion.V0, null); 214 | } 215 | 216 | public void SerializeBody(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 217 | { 218 | Basics.SerializeString(stream, GroupId); 219 | BigEndianConverter.Write(stream, GenerationId); 220 | Basics.SerializeString(stream, MemberId); 221 | } 222 | } 223 | 224 | #endregion 225 | 226 | #region LeaveGroupRequest 227 | 228 | class LeaveGroupRequest : ISerializableRequest 229 | { 230 | public string GroupId; 231 | public string MemberId; 232 | 233 | public ReusableMemoryStream Serialize(ReusableMemoryStream target, int correlationId, byte[] clientId, object _, Basics.ApiVersion __) 234 | { 235 | return CommonRequest.Serialize(target, this, correlationId, clientId, Basics.ApiKey.LeaveGroupRequest, 236 | Basics.ApiVersion.V0, null); 237 | } 238 | 239 | public void SerializeBody(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 240 | { 241 | Basics.SerializeString(stream, GroupId); 242 | Basics.SerializeString(stream, MemberId); 243 | } 244 | } 245 | 246 | #endregion 247 | } -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Protocol/ConsumerGroupResponses.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using System.Linq; 5 | using Kafka.Common; 6 | 7 | namespace Kafka.Protocol 8 | { 9 | 10 | #region JoinConsumerGroupResponse 11 | 12 | struct GroupMember : IMemoryStreamSerializable 13 | { 14 | public string MemberId; 15 | public ConsumerGroupProtocolMetadata Metadata; 16 | 17 | #region Serialization 18 | 19 | #region Serialization (for test) 20 | 21 | public void Serialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 22 | { 23 | Basics.SerializeString(stream, MemberId); 24 | var pm = Metadata; 25 | Basics.WriteWithSize(stream, s => pm.Serialize(s, null, Basics.ApiVersion.Ignored)); 26 | } 27 | 28 | #endregion 29 | 30 | public void Deserialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 31 | { 32 | MemberId = Basics.DeserializeString(stream); 33 | BigEndianConverter.ReadInt32(stream); 34 | Metadata = new ConsumerGroupProtocolMetadata(); 35 | Metadata.Deserialize(stream, null, Basics.ApiVersion.Ignored); 36 | } 37 | 38 | #endregion 39 | } 40 | 41 | struct JoinConsumerGroupResponse : IMemoryStreamSerializable 42 | { 43 | public ErrorCode ErrorCode; 44 | public int GenerationId; 45 | public string GroupProtocol; 46 | public string LeaderId; 47 | public string MemberId; 48 | public GroupMember[] GroupMembers; 49 | 50 | #region Serialization 51 | 52 | #region Serialization (for test) 53 | 54 | public void Serialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 55 | { 56 | BigEndianConverter.Write(stream, (short) ErrorCode); 57 | BigEndianConverter.Write(stream, GenerationId); 58 | Basics.SerializeString(stream, GroupProtocol); 59 | Basics.SerializeString(stream, LeaderId); 60 | Basics.SerializeString(stream, MemberId); 61 | Basics.WriteArray(stream, GroupMembers, (s, m) => m.Serialize(s, null, Basics.ApiVersion.Ignored)); 62 | } 63 | 64 | #endregion 65 | 66 | public void Deserialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 67 | { 68 | ErrorCode = (ErrorCode) BigEndianConverter.ReadInt16(stream); 69 | GenerationId = BigEndianConverter.ReadInt32(stream); 70 | GroupProtocol = Basics.DeserializeString(stream); 71 | LeaderId = Basics.DeserializeString(stream); 72 | MemberId = Basics.DeserializeString(stream); 73 | GroupMembers = Basics.DeserializeArray(stream); 74 | } 75 | 76 | #endregion 77 | } 78 | 79 | #endregion 80 | 81 | #region SyncConsumerGroupResponse 82 | 83 | struct SyncConsumerGroupResponse : IMemoryStreamSerializable 84 | { 85 | public ErrorCode ErrorCode; 86 | public ConsumerGroupMemberAssignment MemberAssignment; 87 | 88 | public void Serialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 89 | { 90 | BigEndianConverter.Write(stream, (short) ErrorCode); 91 | var ma = MemberAssignment; 92 | Basics.WriteWithSize(stream, s => ma.Serialize(s, null, Basics.ApiVersion.Ignored)); 93 | } 94 | 95 | public void Deserialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 96 | { 97 | ErrorCode = (ErrorCode) BigEndianConverter.ReadInt16(stream); 98 | MemberAssignment = new ConsumerGroupMemberAssignment 99 | { 100 | PartitionAssignments = Enumerable.Empty>() 101 | }; 102 | if (BigEndianConverter.ReadInt32(stream) > 0) 103 | { 104 | MemberAssignment.Deserialize(stream, null, Basics.ApiVersion.Ignored); 105 | } 106 | } 107 | } 108 | 109 | #endregion 110 | 111 | #region HeartbeatResponse / LeaveGroupResponse 112 | 113 | struct SimpleResponse : IMemoryStreamSerializable 114 | { 115 | public ErrorCode ErrorCode; 116 | 117 | public void Serialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 118 | { 119 | BigEndianConverter.Write(stream, (short) ErrorCode); 120 | } 121 | 122 | public void Deserialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 123 | { 124 | ErrorCode = (ErrorCode) BigEndianConverter.ReadInt16(stream); 125 | } 126 | } 127 | 128 | #endregion 129 | } 130 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Protocol/DefaultSerialization.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using System; 5 | using System.IO; 6 | using Kafka.Public; 7 | 8 | namespace Kafka.Protocol 9 | { 10 | /// 11 | /// A default message serializer/deserializer that assumes the 12 | /// serialized objects are byte arrays. 13 | /// 14 | sealed class ByteArraySerialization : ISerializer, IDeserializer 15 | { 16 | public int Serialize(object input, MemoryStream toStream) 17 | { 18 | var toSerialize = input as byte[]; 19 | if (toSerialize == null) 20 | { 21 | throw new ArgumentException("Input cannot be converted to byte[]", "input"); 22 | } 23 | 24 | toStream.Write(toSerialize, 0, toSerialize.Length); 25 | return toSerialize.Length; 26 | } 27 | 28 | public object Deserialize(MemoryStream fromStream, int length) 29 | { 30 | var output = new byte[length]; 31 | fromStream.Read(output, 0, length); 32 | return output; 33 | } 34 | 35 | // Not constructible 36 | private ByteArraySerialization() 37 | { 38 | } 39 | 40 | /// 41 | /// The only one instance of this type. 42 | /// 43 | public static readonly ISerializer DefaultSerializer = new ByteArraySerialization(); 44 | public static readonly IDeserializer DefaultDeserializer = DefaultSerializer as IDeserializer; 45 | } 46 | } -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Protocol/Errors.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | namespace Kafka.Protocol 5 | { 6 | static class Error 7 | { 8 | public static bool IsPartitionOkForClients(ErrorCode code) 9 | { 10 | return code == ErrorCode.NoError 11 | || code == ErrorCode.ReplicaNotAvailable; 12 | } 13 | 14 | public static bool IsPartitionErrorRecoverableForProducer(ErrorCode code) 15 | { 16 | switch (code) 17 | { 18 | case ErrorCode.LeaderNotAvailable: 19 | case ErrorCode.NotLeaderForPartition: 20 | case ErrorCode.RequestTimedOut: 21 | case ErrorCode.UnknownTopicOrPartition: 22 | case ErrorCode.InvalidMessage: 23 | case ErrorCode.InvalidMessageSize: 24 | case ErrorCode.NotEnoughReplicas: 25 | case ErrorCode.NotEnoughReplicasAfterAppend: 26 | return true; 27 | 28 | default: 29 | return false; 30 | } 31 | } 32 | } 33 | 34 | enum ErrorCode : short 35 | { 36 | ///No error--it worked! 37 | NoError = 0, 38 | 39 | ///An unexpected server error 40 | Unknown = -1, 41 | 42 | ///The requested offset is outside the range of offsets maintained by the server for the given 43 | /// topic/partition. 44 | OffsetOutOfRange = 1, 45 | 46 | ///This indicates that a message contents does not match its CRC 47 | InvalidMessage = 2, 48 | 49 | ///This request is for a topic or partition that does not exist on this broker. 50 | UnknownTopicOrPartition = 3, 51 | 52 | ///The message has a negative size 53 | InvalidMessageSize = 4, 54 | 55 | ///This error is thrown if we are in the middle of a leadership election and there is currently 56 | /// no leader for this partition and hence it is unavailable for writes. 57 | LeaderNotAvailable = 5, 58 | 59 | ///This error is thrown if the client attempts to send messages to a replica that is not 60 | /// the leader for some partition. It indicates that the clients metadata is out of date. 61 | NotLeaderForPartition = 6, 62 | 63 | ///This error is thrown if the request exceeds the user-specified time limit in the request 64 | RequestTimedOut = 7, 65 | 66 | ///This is not a client facing error and is used only internally by intra-cluster 67 | /// broker communication 68 | BrokerNotAvailable = 8, 69 | 70 | ///Unused 71 | ReplicaNotAvailable = 9, 72 | 73 | ///The server has a configurable maximum message size to avoid unbounded memory allocation. 74 | /// This error is thrown if the client attempt to produce a message larger than this maximum. 75 | MessageSizeTooLarge = 10, 76 | 77 | ///Internal error code for broker-to-broker communication 78 | StaleControllerEpoch = 11, 79 | 80 | ///If you specify a string larger than configured maximum for offset metadata 81 | OffsetMetadataTooLarge = 12, 82 | 83 | /// The broker returns this error code for an offset fetch request if it is still loading offsets (after a leader change for that offsets topic partition) 84 | OffsetsLoadInProgress = 14, 85 | 86 | /// The broker returns this error code for consumer metadata requests or offset commit requests 87 | /// if the offsets topic has not yet been created 88 | ConsumerCoordinatorNotAvailable = 15, 89 | 90 | /// The broker returns this error code if it receives an offset fetch or commit request for a consumer group that it is not a coordinator for 91 | NotCoordinatorForConsumer = 16, 92 | 93 | /// 94 | /// For a request which attempts to access an invalid topic (e.g. one which has an illegal name), or if an attempt is made to write to an internal topic (such as the consumer offsets topic). 95 | /// 96 | InvalidTopic = 17, 97 | 98 | /// 99 | /// If a message batch in a produce request exceeds the maximum configured segment size. 100 | /// 101 | MessageSetSizeTooLarge = 18, 102 | 103 | /// 104 | /// Returned from a produce request when the number of in-sync replicas is lower than the configured minimum and requiredAcks is -1. 105 | /// 106 | NotEnoughReplicas = 19, 107 | 108 | /// 109 | /// Returned from a produce request when the message was written to the log, but with fewer in-sync replicas than required. 110 | /// 111 | NotEnoughReplicasAfterAppend = 20, 112 | 113 | /// Returned from a produce request if the requested requiredAcks is invalid (anything other than -1, 1, or 0). 114 | InvalidRequiredAcks = 21, 115 | 116 | /// 117 | /// Returned from group membership requests (such as heartbeats) when the generation id provided in the request is not the current generation. 118 | /// 119 | IllegalGeneration = 22, 120 | 121 | /// 122 | /// Returned in join group when the member provides a protocol type or set of protocols which is not compatible with the current group. 123 | /// 124 | InconsistentGroupProtocol = 23, 125 | 126 | /// 127 | /// Returned in join group when the groupId is empty or null. 128 | /// 129 | InvalidGroupId = 24, 130 | 131 | /// 132 | /// Returned from group requests (offset commits/fetches, heartbeats, etc) when the memberId is not in the current generation. 133 | /// 134 | UnknownMemberId = 25, 135 | 136 | /// 137 | /// Return in join group when the requested session timeout is outside of the allowed range on the broker 138 | /// 139 | InvalidSessionTimeout = 26, 140 | 141 | /// 142 | /// Returned in heartbeat requests when the coordinator has begun rebalancing the group. This indicates to the client that it should rejoin the group. 143 | /// 144 | RebalanceInProgress = 27, 145 | 146 | /// 147 | /// This error indicates that an offset commit was rejected because of oversize metadata. 148 | /// 149 | InvalidCommitOffsetSize = 28, 150 | 151 | /// 152 | /// Returned by the broker when the client is not authorized to access the requested topic. 153 | /// 154 | TopicAuthorizationFailed = 29, 155 | 156 | /// 157 | /// Returned by the broker when the client is not authorized to access a particular groupId. 158 | /// 159 | GroupAuthorizationFailed = 30, 160 | 161 | /// 162 | /// Returned by the broker when the client is not authorized to use an inter-broker or administrative API. 163 | /// 164 | ClusterAuthorizationFailed = 31, 165 | 166 | /// 167 | /// The timestamp of the message is out of acceptable range. 168 | /// 169 | InvalidTimestamp = 32, 170 | 171 | /// 172 | /// The broker does not support the requested SASL mechanism. 173 | /// 174 | UnsupportedSaslMechanism = 33, 175 | 176 | /// 177 | /// Request is not valid given the current SASL state. 178 | /// 179 | IllegalSaslState = 34, 180 | 181 | /// 182 | /// The version of API is not supported. 183 | /// 184 | UnsupportedVersion = 35, 185 | 186 | /// 187 | /// 188 | /// 189 | TopicAlreadyExists = 36, 190 | 191 | /// 192 | /// Number of partitions is invalid. 193 | /// 194 | InvalidPartitions = 37, 195 | 196 | /// 197 | /// Replication-factor is invalid. 198 | /// 199 | InvalidReplicationFactor = 38, 200 | 201 | /// 202 | /// Replica assignment is invalid. 203 | /// 204 | InvalidReplicaAssignment = 39, 205 | 206 | /// 207 | /// Configuration is invalid. 208 | /// 209 | InvalidConfig = 40, 210 | 211 | /// 212 | /// This is not the correct controller for this cluster. 213 | /// 214 | NotController = 41, 215 | 216 | /// 217 | /// This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details. 218 | /// 219 | InvalidRequest = 42, 220 | 221 | /// 222 | /// The message format version on the broker does not support the request. 223 | /// 224 | UnsupportedForMessageFormat = 43, 225 | 226 | /// 227 | /// Request parameters do not satisfy the configured policy. 228 | /// 229 | PolicyViolation = 44, 230 | 231 | // Local error, not from brokers 232 | LocalError = -42, 233 | 234 | /// 235 | /// Deserialization on one message failed (bad Crc, magic number or compression) 236 | /// 237 | DeserializationError = -43, 238 | } 239 | } 240 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Protocol/FetchRequest.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using System.Collections.Generic; 5 | using Kafka.Common; 6 | 7 | namespace Kafka.Protocol 8 | { 9 | struct FetchRequest : ISerializableRequest 10 | { 11 | public int MaxWaitTime; 12 | public int MinBytes; 13 | public int MaxBytes; 14 | public Basics.IsolationLevel IsolationLevel; 15 | public IEnumerable> TopicsData; 16 | 17 | #region Serialization 18 | 19 | public ReusableMemoryStream Serialize(ReusableMemoryStream target, int correlationId, byte[] clientId, object extra, Basics.ApiVersion version) 20 | { 21 | return CommonRequest.Serialize(target, this, correlationId, clientId, Basics.ApiKey.FetchRequest, version, extra); 22 | } 23 | 24 | public void SerializeBody(ReusableMemoryStream stream, object extra, Basics.ApiVersion version) 25 | { 26 | stream.Write(Basics.MinusOne32, 0, 4); // ReplicaId, non clients that are not a broker must use -1 27 | BigEndianConverter.Write(stream, MaxWaitTime); 28 | BigEndianConverter.Write(stream, MinBytes); 29 | if (version >= Basics.ApiVersion.V3) 30 | { 31 | BigEndianConverter.Write(stream, MaxBytes); 32 | } 33 | if (version >= Basics.ApiVersion.V4) 34 | { 35 | stream.WriteByte((byte) IsolationLevel); 36 | } 37 | Basics.WriteArray(stream, TopicsData, extra, version); 38 | } 39 | 40 | #endregion 41 | } 42 | 43 | struct FetchPartitionData : IMemoryStreamSerializable 44 | { 45 | public int Partition; 46 | public int MaxBytes; 47 | public long FetchOffset; 48 | public long LogStartOffset; // Required by the protocol, but will always be zero in our case (i.e. we are consumers, not brokers) 49 | 50 | #region Serialization 51 | 52 | public void Serialize(ReusableMemoryStream stream, object _, Basics.ApiVersion version) 53 | { 54 | BigEndianConverter.Write(stream, Partition); 55 | BigEndianConverter.Write(stream, FetchOffset); 56 | BigEndianConverter.Write(stream, MaxBytes); 57 | if (version >= Basics.ApiVersion.V5) 58 | { 59 | stream.Write(Basics.Zero64, 0, 8); // log_start_offset is 0 for consumer, only used by follower. 60 | } 61 | } 62 | 63 | // Used only in tests 64 | public void Deserialize(ReusableMemoryStream stream, object _, Basics.ApiVersion version) 65 | { 66 | Partition = BigEndianConverter.ReadInt32(stream); 67 | FetchOffset = BigEndianConverter.ReadInt64(stream); 68 | MaxBytes = BigEndianConverter.ReadInt32(stream); 69 | if (version >= Basics.ApiVersion.V5) 70 | { 71 | LogStartOffset = BigEndianConverter.ReadInt64(stream); 72 | } 73 | } 74 | 75 | #endregion 76 | } 77 | } 78 | 79 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Protocol/GroupCoordinationRequests.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using System.Collections.Generic; 5 | using Kafka.Common; 6 | namespace Kafka.Protocol 7 | { 8 | #region GroupCoordinator 9 | 10 | class GroupCoordinatorRequest : ISerializableRequest 11 | { 12 | public string GroupId; 13 | 14 | public ReusableMemoryStream Serialize(ReusableMemoryStream target, int correlationId, byte[] clientId, 15 | object _, Basics.ApiVersion __) 16 | { 17 | return CommonRequest.Serialize(target, this, correlationId, clientId, Basics.ApiKey.GroupCoordinatorRequest, 18 | Basics.ApiVersion.V0, null); 19 | } 20 | 21 | public void SerializeBody(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 22 | { 23 | Basics.SerializeString(stream, GroupId); 24 | } 25 | } 26 | 27 | #endregion 28 | 29 | #region OffsetCommit 30 | 31 | struct OffsetCommitPartitionData : IMemoryStreamSerializable 32 | { 33 | public int Partition; 34 | public long Offset; 35 | public string Metadata; 36 | 37 | public void Serialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 38 | { 39 | BigEndianConverter.Write(stream, Partition); 40 | BigEndianConverter.Write(stream, Offset); 41 | Basics.SerializeString(stream, Metadata); 42 | } 43 | 44 | public void Deserialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 45 | { 46 | Partition = BigEndianConverter.ReadInt32(stream); 47 | Offset = BigEndianConverter.ReadInt64(stream); 48 | Metadata = Basics.DeserializeString(stream); 49 | } 50 | } 51 | 52 | class OffsetCommitRequest : ISerializableRequest 53 | { 54 | public string ConsumerGroupId; 55 | public int ConsumerGroupGenerationId; 56 | public string ConsumerId; 57 | public long RetentionTime; 58 | public IEnumerable> TopicsData; 59 | 60 | public ReusableMemoryStream Serialize(ReusableMemoryStream target, int correlationId, byte[] clientId, object _, Basics.ApiVersion __) 61 | { 62 | return CommonRequest.Serialize(target, this, correlationId, clientId, Basics.ApiKey.OffsetCommitRequest, 63 | Basics.ApiVersion.V2, null); 64 | } 65 | 66 | public void SerializeBody(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 67 | { 68 | Basics.SerializeString(stream, ConsumerGroupId); 69 | BigEndianConverter.Write(stream, ConsumerGroupGenerationId); 70 | Basics.SerializeString(stream, ConsumerId); 71 | BigEndianConverter.Write(stream, RetentionTime); 72 | Basics.WriteArray(stream, TopicsData); 73 | } 74 | } 75 | 76 | class OffsetFetchRequest : ISerializableRequest 77 | { 78 | public string ConsumerGroupId; 79 | public IEnumerable> TopicsData; 80 | 81 | public ReusableMemoryStream Serialize(ReusableMemoryStream target, int correlationId, byte[] clientId, object _, Basics.ApiVersion __) 82 | { 83 | return CommonRequest.Serialize(target, this, correlationId, clientId, Basics.ApiKey.OffsetFetchRequest, 84 | Basics.ApiVersion.V1, null); 85 | } 86 | 87 | public void SerializeBody(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 88 | { 89 | Basics.SerializeString(stream, ConsumerGroupId); 90 | Basics.WriteArray(stream, TopicsData); 91 | } 92 | } 93 | 94 | #endregion 95 | } 96 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Protocol/GroupCoordinationResponses.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using Kafka.Common; 5 | 6 | namespace Kafka.Protocol 7 | { 8 | struct GroupCoordinatorResponse : IMemoryStreamSerializable 9 | { 10 | public ErrorCode ErrorCode; 11 | public int CoordinatorId; 12 | public string CoordinatorHost; 13 | public int CoordinatorPort; 14 | 15 | public void Serialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 16 | { 17 | BigEndianConverter.Write(stream, (short) ErrorCode); 18 | BigEndianConverter.Write(stream, CoordinatorId); 19 | Basics.SerializeString(stream, CoordinatorHost); 20 | BigEndianConverter.Write(stream, CoordinatorPort); 21 | } 22 | 23 | public void Deserialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 24 | { 25 | ErrorCode = (ErrorCode) BigEndianConverter.ReadInt16(stream); 26 | CoordinatorId = BigEndianConverter.ReadInt32(stream); 27 | CoordinatorHost = Basics.DeserializeString(stream); 28 | CoordinatorPort = BigEndianConverter.ReadInt32(stream); 29 | } 30 | } 31 | 32 | struct PartitionCommitData : IMemoryStreamSerializable 33 | { 34 | public int Partition; 35 | public ErrorCode ErrorCode; 36 | 37 | public void Serialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 38 | { 39 | BigEndianConverter.Write(stream, Partition); 40 | BigEndianConverter.Write(stream, (short) ErrorCode); 41 | } 42 | 43 | public void Deserialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 44 | { 45 | Partition = BigEndianConverter.ReadInt32(stream); 46 | ErrorCode = (ErrorCode) BigEndianConverter.ReadInt16(stream); 47 | } 48 | } 49 | 50 | struct PartitionOffsetData : IMemoryStreamSerializable 51 | { 52 | public int Partition; 53 | public long Offset; 54 | public string Metadata; 55 | public ErrorCode ErrorCode; 56 | 57 | public void Serialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 58 | { 59 | BigEndianConverter.Write(stream, Partition); 60 | BigEndianConverter.Write(stream, Offset); 61 | Basics.SerializeString(stream, Metadata); 62 | BigEndianConverter.Write(stream, (short) ErrorCode); 63 | } 64 | 65 | public void Deserialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 66 | { 67 | Partition = BigEndianConverter.ReadInt32(stream); 68 | Offset = BigEndianConverter.ReadInt64(stream); 69 | Metadata = Basics.DeserializeString(stream); 70 | ErrorCode = (ErrorCode) BigEndianConverter.ReadInt16(stream); 71 | } 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Protocol/KafkaLz4.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using System.IO; 5 | using Kafka.Common; 6 | using LZ4; 7 | 8 | namespace Kafka.Protocol 9 | { 10 | /// 11 | /// Compression/decompression of data following the LZ4 frame format: 12 | /// https://github.com/lz4/lz4/wiki/lz4_Frame_format.md 13 | /// 14 | /// Kafka uses version 1, no block checksum, no block dependency and 64Kb blocks. 15 | /// 16 | /// 17 | static class KafkaLz4 18 | { 19 | /// 20 | /// Harcoded frame descriptor (configuration is static). 21 | /// 22 | private static readonly byte[] FrameDescriptor = 23 | { 24 | 0x04, 0x22, 0x4D, 0x18, // magic number 25 | 1 << 6 | 1 << 5, // version 1 + block independance 26 | 1 << 6, // 64Kb blocks 27 | 0x82 // second byte of 32bits xxhash checksum of the two previous bytes (see: https://asecuritysite.com/encryption/xxHash and hash "`@" :) ) 28 | }; 29 | 30 | private static readonly int[] MaxBlockSizes = {0, 0, 0, 0, 64*1024, 256*1024, 1024*1024, 4*1024*1024}; 31 | 32 | private static readonly int MaxCompressedSize; 33 | 34 | private const int BLOCK_SIZE = 64 * 1024; 35 | 36 | static KafkaLz4() 37 | { 38 | MaxCompressedSize = LZ4Codec.MaximumOutputLength(BLOCK_SIZE); 39 | } 40 | 41 | public static void Compress(ReusableMemoryStream target, byte[] body, int count) 42 | { 43 | target.Write(FrameDescriptor, 0, FrameDescriptor.Length); 44 | 45 | // Blocks 46 | var left = count; 47 | while (left >= BLOCK_SIZE) 48 | { 49 | BlockCompress(target, body, count - left, BLOCK_SIZE); 50 | left -= BLOCK_SIZE; 51 | } 52 | 53 | // Last block if any 54 | if (left > 0) 55 | { 56 | BlockCompress(target, body, count - left, left); 57 | } 58 | 59 | // EndMark 60 | target.Write(Basics.Zero32, 0, Basics.Zero32.Length); 61 | target.SetLength(target.Position); 62 | } 63 | 64 | private static void BlockCompress(ReusableMemoryStream target, byte[] body, int offset, int count) 65 | { 66 | var position = (int) target.Position; 67 | target.SetLength(target.Length + MaxCompressedSize + 4); 68 | target.Position = position + 4; 69 | 70 | var size = LZ4Codec.Encode(body, offset, count, target.GetBuffer(), position + 4, MaxCompressedSize); 71 | 72 | if (size >= count) 73 | { 74 | // Do not compress block 75 | // => set block header highest bit to 1 to mark no compression 76 | LittleEndianWriteUInt32((uint)(count | 1 << 31), target.GetBuffer(), position); 77 | 78 | // Write uncompressed data 79 | target.Write(body, offset, count); 80 | } 81 | else 82 | { 83 | LittleEndianWriteUInt32((uint)size, target.GetBuffer(), position); 84 | 85 | // compressed data is already written, just set the position 86 | target.Position += size; 87 | } 88 | target.SetLength(target.Position); 89 | } 90 | 91 | public static void Uncompress(ReusableMemoryStream target, byte[] body, int offset) 92 | { 93 | // 1. Check magic number 94 | var magic = LittleEndianReadUInt32(body, offset); 95 | if (magic != 0x184D2204) 96 | { 97 | throw new InvalidDataException("Incorrect LZ4 magic number."); 98 | } 99 | 100 | // 2. FLG 101 | var flg = body[offset + 4]; 102 | if (flg >> 6 != 1) // version 103 | { 104 | throw new InvalidDataException("Invalid LZ4 version."); 105 | } 106 | 107 | var hasBlockChecksum = (flg >> 4 & 1) != 0; 108 | var hasContentSize = (flg >> 3 & 1) != 0; 109 | var hasContentChecksum = (flg >> 2 & 1) != 0; // we don't care anyway 110 | 111 | // 3. BD 112 | var bd = body[offset + 5]; 113 | var maxBlockSize = MaxBlockSizes[(bd >> 4) & 7]; 114 | 115 | // 4. Let's decompress! 116 | var dataStartIdx = offset + 4 + (hasContentSize ? 11 : 3); 117 | uint walked; 118 | while ((walked = UncompressBlock(target, body, dataStartIdx, hasBlockChecksum, maxBlockSize)) > 0) 119 | { 120 | dataStartIdx += (int) walked; 121 | } 122 | } 123 | 124 | private static uint UncompressBlock(ReusableMemoryStream target, byte[] body, int dataIndex, bool hasChecksum, int blockSize) 125 | { 126 | var blockHeader = LittleEndianReadUInt32(body, dataIndex); 127 | if (blockHeader == 0) // last frame 128 | { 129 | return 0; 130 | } 131 | 132 | var size = blockHeader & 0x7FFFFFFF; 133 | if ((blockHeader & 0x80000000) == 0) // compressed data 134 | { 135 | target.SetLength(target.Length + blockSize); 136 | var dsize = LZ4Codec.Decode(body, dataIndex + 4, (int) size, target.GetBuffer(), (int) target.Position, blockSize); 137 | if (dsize < blockSize) 138 | { 139 | target.SetLength(target.Length - blockSize + dsize); 140 | } 141 | target.Position = target.Length; 142 | } 143 | else // uncompressed data 144 | { 145 | target.Write(body, dataIndex + 4, (int) size); 146 | } 147 | 148 | return size + 4 + (hasChecksum ? 4u : 0); 149 | } 150 | 151 | private static void LittleEndianWriteUInt32(uint u, byte[] target, int offset) 152 | { 153 | target[offset + 0] = (byte) (u >> 8*0 & 0xff); 154 | target[offset + 1] = (byte) (u >> 8*1 & 0xff); 155 | target[offset + 2] = (byte) (u >> 8*2 & 0xff); 156 | target[offset + 3] = (byte) (u >> 8*3 & 0xff); 157 | } 158 | 159 | private static uint LittleEndianReadUInt32(byte[] source, int offset) 160 | { 161 | return 162 | (uint) 163 | (source[offset + 3] << 3*8 | source[offset + 2] << 2*8 | source[offset + 1] << 8 | source[offset]); 164 | } 165 | } 166 | } -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Protocol/Message.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using System; 5 | using System.Collections.Generic; 6 | using Kafka.Cluster; 7 | using Kafka.Common; 8 | using Kafka.Public; 9 | 10 | namespace Kafka.Protocol 11 | { 12 | internal enum MessageVersion 13 | { 14 | V0 = 0, 15 | V1 = 1, 16 | // Message V2 is implemented in RecordBatch and shall not be used here. 17 | } 18 | 19 | internal struct Message 20 | { 21 | public object Key; 22 | public object Value; 23 | public ICollection Headers; 24 | public long TimeStamp; 25 | 26 | // Visible for tests 27 | internal ReusableMemoryStream SerializedKeyValue; 28 | 29 | private const int MinimumValidSizeForSerializedKeyValue = 2 * 4; // At least 4 bytes for key size and 4 bytes for value size 30 | 31 | public void SerializeKeyValue(ReusableMemoryStream target, Tuple serializers, Compatibility compatibility) 32 | { 33 | SerializedKeyValue = target; 34 | if (Basics.GetApiVersion(Node.RequestType.BatchedProduce, compatibility) >= Basics.ApiVersion.V3) 35 | { 36 | DoSerializeKeyValueAsRecord(SerializedKeyValue, serializers); 37 | } 38 | else 39 | { 40 | DoSerializeKeyValue(SerializedKeyValue, serializers); 41 | } 42 | Value = null; 43 | } 44 | 45 | public void ReleaseSerializedKeyValue() 46 | { 47 | // Make sure that the buffer cannot be disposed twice (not good for buffer pooling) 48 | SerializedKeyValue?.Dispose(); 49 | SerializedKeyValue = null; 50 | } 51 | 52 | public void Serialize(ReusableMemoryStream stream, CompressionCodec compressionCodec, 53 | Tuple serializers, MessageVersion msgVersion) 54 | { 55 | var crcPos = stream.Position; 56 | stream.Write(Basics.MinusOne32, 0, 4); // crc placeholder 57 | var bodyPos = stream.Position; 58 | 59 | // V0 message format 60 | if (msgVersion == MessageVersion.V0) 61 | { 62 | stream.WriteByte(0); // magic byte 63 | stream.WriteByte((byte) compressionCodec); // attributes 64 | } 65 | else // V1 message format 66 | { 67 | stream.WriteByte(1); // magic byte 68 | stream.WriteByte((byte) compressionCodec); // attributes 69 | BigEndianConverter.Write(stream, TimeStamp); 70 | } 71 | 72 | if (SerializedKeyValue != null) 73 | { 74 | if (SerializedKeyValue.Length < MinimumValidSizeForSerializedKeyValue) 75 | { 76 | HandleInvalidSerializedKeyValue(stream); 77 | } 78 | else 79 | { 80 | stream.Write(SerializedKeyValue.GetBuffer(), 0, (int) SerializedKeyValue.Length); 81 | } 82 | } 83 | else 84 | { 85 | DoSerializeKeyValue(stream, serializers); 86 | } 87 | 88 | // update crc 89 | var crc = Crc32.Compute(stream, bodyPos, stream.Position - bodyPos); 90 | var curPos = stream.Position; 91 | stream.Position = crcPos; 92 | BigEndianConverter.Write(stream, (int) crc); 93 | stream.Position = curPos; 94 | } 95 | 96 | private void HandleInvalidSerializedKeyValue(ReusableMemoryStream stream) 97 | { 98 | stream.Logger?.LogError("Invalid SerializedKeyValue. Length is only " + SerializedKeyValue.Length 99 | + " bytes. Message cannot be serialized : " + SerializedKeyValue.GetBuffer()); 100 | 101 | // Simulate an empty key & message to not send a corrupted message 102 | stream.Write(Basics.MinusOne32, 0, 4); 103 | stream.Write(Basics.MinusOne32, 0, 4); 104 | } 105 | 106 | private void DoSerializeKeyValueAsRecord(ReusableMemoryStream stream, Tuple serializers) 107 | { 108 | Basics.WriteObject(stream, Key, serializers.Item1); 109 | Basics.WriteObject(stream, Value, serializers.Item2); 110 | } 111 | 112 | private void DoSerializeKeyValue(ReusableMemoryStream stream, Tuple serializers) 113 | { 114 | if (Key == null) 115 | { 116 | stream.Write(Basics.MinusOne32, 0, 4); 117 | } 118 | else 119 | { 120 | SerializeObject(stream, serializers.Item1, Key); 121 | } 122 | 123 | if (Value == null) 124 | { 125 | stream.Write(Basics.MinusOne32, 0, 4); 126 | } 127 | else 128 | { 129 | SerializeObject(stream, serializers.Item2, Value); 130 | } 131 | } 132 | 133 | private static void SerializeObject(ReusableMemoryStream stream, ISerializer serializer, object theValue) 134 | { 135 | // byte[] are just copied 136 | if (theValue is byte[] bytes) 137 | { 138 | BigEndianConverter.Write(stream, bytes.Length); 139 | stream.Write(bytes, 0, bytes.Length); 140 | } 141 | else 142 | { 143 | Basics.WriteWithSize(stream, theValue, serializer, SerializerWrite); 144 | } 145 | } 146 | 147 | private static void SerializerWrite(ReusableMemoryStream stream, object m, ISerializer ser) 148 | { 149 | if (m is IMemorySerializable serializable) 150 | { 151 | serializable.Serialize(stream); 152 | } 153 | else 154 | { 155 | ser.Serialize(m, stream); 156 | } 157 | } 158 | } 159 | } 160 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Protocol/Metadata.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using Kafka.Common; 5 | 6 | namespace Kafka.Protocol 7 | { 8 | class BrokerMeta : IMemoryStreamSerializable 9 | { 10 | public string Host; 11 | public int Port; 12 | public int Id; 13 | 14 | public override string ToString() 15 | { 16 | return string.Format("(Id:{0} Host:{1} Port:{2})", Id, Host, Port); 17 | } 18 | 19 | public void Deserialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 20 | { 21 | Id = BigEndianConverter.ReadInt32(stream); 22 | Host = Basics.DeserializeString(stream); 23 | Port = BigEndianConverter.ReadInt32(stream); 24 | } 25 | 26 | // Used only in tests 27 | public void Serialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 28 | { 29 | BigEndianConverter.Write(stream, Id); 30 | Basics.SerializeString(stream, Host); 31 | BigEndianConverter.Write(stream, Port); 32 | } 33 | } 34 | 35 | class TopicMeta : IMemoryStreamSerializable 36 | { 37 | public ErrorCode ErrorCode; 38 | public string TopicName; 39 | public PartitionMeta[] Partitions; 40 | 41 | public void Deserialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 42 | { 43 | ErrorCode = (ErrorCode) BigEndianConverter.ReadInt16(stream); 44 | TopicName = Basics.DeserializeString(stream); 45 | Partitions = Basics.DeserializeArray(stream); 46 | } 47 | 48 | // Used only in tests 49 | public void Serialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 50 | { 51 | BigEndianConverter.Write(stream, (short) ErrorCode); 52 | Basics.SerializeString(stream, TopicName); 53 | Basics.WriteArray(stream, Partitions); 54 | } 55 | } 56 | 57 | class PartitionMeta : IMemoryStreamSerializable 58 | { 59 | public ErrorCode ErrorCode; 60 | public int Id; 61 | public int Leader; 62 | public int[] Replicas; 63 | public int[] Isr; 64 | 65 | public void Deserialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 66 | { 67 | ErrorCode = (ErrorCode) BigEndianConverter.ReadInt16(stream); 68 | Id = BigEndianConverter.ReadInt32(stream); 69 | Leader = BigEndianConverter.ReadInt32(stream); 70 | Replicas = Basics.DeserializeArray(stream, BigEndianConverter.ReadInt32); 71 | Isr = Basics.DeserializeArray(stream, BigEndianConverter.ReadInt32); 72 | } 73 | 74 | // Used only in tests 75 | public void Serialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 76 | { 77 | BigEndianConverter.Write(stream, (short) ErrorCode); 78 | BigEndianConverter.Write(stream, Id); 79 | BigEndianConverter.Write(stream, Leader); 80 | Basics.WriteArray(stream, Replicas, BigEndianConverter.Write); 81 | Basics.WriteArray(stream, Isr, BigEndianConverter.Write); 82 | } 83 | } 84 | 85 | 86 | } 87 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Protocol/MetadataResponse.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using Kafka.Common; 5 | 6 | namespace Kafka.Protocol 7 | { 8 | class MetadataResponse 9 | { 10 | public BrokerMeta[] BrokersMeta; 11 | public TopicMeta[] TopicsMeta; 12 | 13 | public static MetadataResponse Deserialize(ReusableMemoryStream stream, object noextra) 14 | { 15 | return new MetadataResponse 16 | { 17 | BrokersMeta = Basics.DeserializeArray(stream), 18 | TopicsMeta = Basics.DeserializeArray(stream) 19 | }; 20 | } 21 | 22 | // Used only in tests 23 | public void Serialize(ReusableMemoryStream stream, object noextra) 24 | { 25 | Basics.WriteArray(stream, BrokersMeta, noextra, Basics.ApiVersion.Ignored); 26 | Basics.WriteArray(stream, TopicsMeta, noextra, Basics.ApiVersion.Ignored); 27 | } 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Protocol/OffsetRequest.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using System.Collections.Generic; 5 | using Kafka.Common; 6 | 7 | namespace Kafka.Protocol 8 | { 9 | struct OffsetRequest : ISerializableRequest 10 | { 11 | public IEnumerable> TopicsData; 12 | 13 | #region Serialization 14 | 15 | public ReusableMemoryStream Serialize(ReusableMemoryStream target, int correlationId, byte[] clientId, object _, Basics.ApiVersion version) 16 | { 17 | return CommonRequest.Serialize(target, this, correlationId, clientId, Basics.ApiKey.OffsetRequest, version, null); 18 | } 19 | 20 | public void SerializeBody(ReusableMemoryStream stream, object _, Basics.ApiVersion version) 21 | { 22 | stream.Write(Basics.MinusOne32, 0, 4); // ReplicaId, non clients that are not a broker must use -1 23 | Basics.WriteArray(stream, TopicsData, version); 24 | } 25 | 26 | #endregion 27 | } 28 | 29 | struct OffsetPartitionData : IMemoryStreamSerializable 30 | { 31 | public int Partition; 32 | public int MaxNumberOfOffsets; 33 | public long Time; 34 | 35 | #region Serialization 36 | 37 | public void Serialize(ReusableMemoryStream stream, object _, Basics.ApiVersion version) 38 | { 39 | BigEndianConverter.Write(stream, Partition); 40 | BigEndianConverter.Write(stream, Time); 41 | if (version == Basics.ApiVersion.V0) 42 | { 43 | BigEndianConverter.Write(stream, MaxNumberOfOffsets); 44 | } 45 | } 46 | 47 | public void Deserialize(ReusableMemoryStream stream, object _, Basics.ApiVersion version) 48 | { 49 | Partition = BigEndianConverter.ReadInt32(stream); 50 | Time = BigEndianConverter.ReadInt64(stream); 51 | if (version == Basics.ApiVersion.V0) 52 | { 53 | MaxNumberOfOffsets = BigEndianConverter.ReadInt32(stream); 54 | } 55 | } 56 | 57 | #endregion 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Protocol/OffsetResponse.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using Kafka.Common; 5 | 6 | namespace Kafka.Protocol 7 | { 8 | struct OffsetPartitionResponse : IMemoryStreamSerializable 9 | { 10 | public int Partition; 11 | public ErrorCode ErrorCode; 12 | public long Timestamp; 13 | public long[] Offsets; 14 | 15 | #region IMemoryStreamSerializable Members 16 | 17 | // Used only in tests 18 | public void Serialize(ReusableMemoryStream stream, object noextra, Basics.ApiVersion version) 19 | { 20 | BigEndianConverter.Write(stream, Partition); 21 | BigEndianConverter.Write(stream, (short) ErrorCode); 22 | if (version > Basics.ApiVersion.V0) 23 | { 24 | BigEndianConverter.Write(stream, Timestamp); 25 | BigEndianConverter.Write(stream, Offsets[0]); 26 | } 27 | else 28 | { 29 | Basics.WriteArray(stream, Offsets, BigEndianConverter.Write); 30 | } 31 | } 32 | 33 | public void Deserialize(ReusableMemoryStream stream, object noextra, Basics.ApiVersion version) 34 | { 35 | Partition = BigEndianConverter.ReadInt32(stream); 36 | ErrorCode = (ErrorCode) BigEndianConverter.ReadInt16(stream); 37 | Timestamp = -1; 38 | if (version > Basics.ApiVersion.V0) 39 | { 40 | Timestamp = BigEndianConverter.ReadInt64(stream); 41 | Offsets = new long[1]; 42 | Offsets[0] = BigEndianConverter.ReadInt64(stream); 43 | } 44 | else 45 | { 46 | Offsets = Basics.DeserializeArray(stream, BigEndianConverter.ReadInt64); 47 | } 48 | } 49 | 50 | #endregion 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Protocol/ProduceRequest.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using System; 5 | using System.Collections.Generic; 6 | using System.Linq; 7 | using Kafka.Common; 8 | using Kafka.Public; 9 | 10 | namespace Kafka.Protocol 11 | { 12 | using Serializers = Tuple; 13 | 14 | class ProduceRequest : ISerializableRequest 15 | { 16 | public IEnumerable> TopicsData; 17 | public int Timeout; 18 | public short RequiredAcks; 19 | public string TransactionalID; 20 | 21 | #region Serialization 22 | 23 | public ReusableMemoryStream Serialize(ReusableMemoryStream target, int correlationId, byte[] clientId, object extra, Basics.ApiVersion version) 24 | { 25 | return CommonRequest.Serialize(target, this, correlationId, clientId, Basics.ApiKey.ProduceRequest, version, extra); 26 | } 27 | 28 | public void SerializeBody(ReusableMemoryStream stream, object extra, Basics.ApiVersion version) 29 | { 30 | if (version >= Basics.ApiVersion.V3) 31 | { 32 | Basics.SerializeString(stream, TransactionalID); 33 | } 34 | BigEndianConverter.Write(stream, RequiredAcks); 35 | BigEndianConverter.Write(stream, Timeout); 36 | Basics.WriteArray(stream, TopicsData, extra, version); 37 | } 38 | 39 | #endregion 40 | } 41 | 42 | class PartitionData : IMemoryStreamSerializable 43 | { 44 | public IEnumerable Messages; 45 | public int Partition; 46 | public CompressionCodec CompressionCodec; 47 | 48 | #region Serialization 49 | 50 | struct SerializationInfo 51 | { 52 | public Serializers Serializers; 53 | public CompressionCodec CompressionCodec; 54 | public MessageVersion MessageVersion; 55 | } 56 | 57 | public void Serialize(ReusableMemoryStream stream, object extra, Basics.ApiVersion version) 58 | { 59 | BigEndianConverter.Write(stream, Partition); 60 | 61 | if (version >= Basics.ApiVersion.V3) 62 | { 63 | ISerializer keySerializer = null, valueSerializer = null; 64 | if (extra is Serializers asSerializers) 65 | { 66 | keySerializer = asSerializers.Item1; 67 | valueSerializer = asSerializers.Item2; 68 | } 69 | 70 | SerializeRecordBatch(stream, keySerializer, valueSerializer); 71 | } 72 | else 73 | { 74 | SerializeMessageSet(stream, extra as Serializers, version >= Basics.ApiVersion.V2 ? MessageVersion.V1 : MessageVersion.V0); 75 | } 76 | } 77 | 78 | private void SerializeRecordBatch(ReusableMemoryStream stream, ISerializer keySerializer, 79 | ISerializer valueSerializer) 80 | { 81 | // Starting Produce request V3, messages are encoded in the new RecordBatch. 82 | var batch = new RecordBatch 83 | { 84 | CompressionCodec = CompressionCodec, 85 | Records = Messages.Select(message => 86 | { 87 | SerializeMessageIfNotSized(ref message, keySerializer, valueSerializer, stream.Pool); 88 | return new Record 89 | { 90 | Key = message.Key, 91 | Value = message.Value, 92 | Headers = message.Headers, 93 | Timestamp = message.TimeStamp, 94 | // If the serializer is not compatible, we already resolved this 95 | // previously, so it's ok if the cast returns null 96 | KeySerializer = keySerializer as ISizableSerializer, 97 | ValueSerializer = valueSerializer as ISizableSerializer, 98 | SerializedKeyValue = message.SerializedKeyValue, 99 | }; 100 | }), 101 | }; 102 | 103 | Basics.WriteWithSize(stream, batch.Serialize); 104 | } 105 | 106 | private static void SerializeMessageIfNotSized(ref Message msg, ISerializer keySerializer, ISerializer valueSerializer, Pool pool) 107 | { 108 | if (msg.SerializedKeyValue == null 109 | && (Basics.SizeOfSerializedObject(msg.Key, keySerializer as ISizableSerializer) == Basics.UnknownSize 110 | || Basics.SizeOfSerializedObject(msg.Value, valueSerializer as ISizableSerializer) == Basics.UnknownSize)) 111 | { 112 | msg.SerializeKeyValue(pool.Reserve(), new Serializers(keySerializer, valueSerializer), Compatibility.V0_11_0); 113 | } 114 | } 115 | 116 | private void SerializeMessageSet(ReusableMemoryStream stream, Serializers serializers, MessageVersion version) 117 | { 118 | Basics.WriteWithSize(stream, Messages, 119 | new SerializationInfo 120 | { 121 | Serializers = serializers, 122 | CompressionCodec = CompressionCodec, 123 | MessageVersion = version 124 | }, SerializeMessages); 125 | } 126 | 127 | private static void SerializeMessagesUncompressed(ReusableMemoryStream stream, IEnumerable messages, 128 | Serializers serializers, MessageVersion msgVersion) 129 | { 130 | long offset = 0; 131 | foreach (var message in messages) 132 | { 133 | // We always set offsets starting from 0 and increasing by one for each consecutive message. 134 | // This is because in compressed messages, when message format is V1, the brokers 135 | // will follow this format on disk. You're expected to do the same if you want to 136 | // avoid offset reassignment and message recompression. 137 | // When message format is V0, brokers will rewrite the offsets anyway 138 | // so we use the same scheme in all cases. 139 | BigEndianConverter.Write(stream, offset++); 140 | Basics.WriteWithSize(stream, message, 141 | new SerializationInfo 142 | { 143 | CompressionCodec = CompressionCodec.None, 144 | Serializers = serializers, 145 | MessageVersion = msgVersion 146 | }, SerializeMessageWithCodec); 147 | } 148 | } 149 | 150 | // Dumb trick to minimize closure allocations 151 | private static readonly Action, SerializationInfo> SerializeMessages = 152 | _SerializeMessages; 153 | 154 | // Dumb trick to minimize closure allocations 155 | private static readonly Action SerializeMessageWithCodec = 156 | _SerializeMessageWithCodec; 157 | 158 | private static void _SerializeMessages(ReusableMemoryStream stream, IEnumerable messages, SerializationInfo info) 159 | { 160 | if (info.CompressionCodec != CompressionCodec.None) 161 | { 162 | stream.Write(Basics.Zero64, 0, 8); 163 | using (var msgsetStream = stream.Pool.Reserve()) 164 | { 165 | SerializeMessagesUncompressed(msgsetStream, messages, info.Serializers, info.MessageVersion); 166 | 167 | using (var compressed = stream.Pool.Reserve()) 168 | { 169 | Basics.CompressStream(msgsetStream, compressed, info.CompressionCodec); 170 | 171 | var m = new Message 172 | { 173 | Value = compressed, 174 | TimeStamp = Timestamp.Now 175 | }; 176 | Basics.WriteWithSize(stream, m, 177 | new SerializationInfo 178 | { 179 | Serializers = SerializationConfig.ByteArraySerializers, 180 | CompressionCodec = info.CompressionCodec, 181 | MessageVersion = info.MessageVersion 182 | }, SerializeMessageWithCodec); 183 | } 184 | } 185 | } 186 | else 187 | { 188 | SerializeMessagesUncompressed(stream, messages, info.Serializers, info.MessageVersion); 189 | } 190 | } 191 | 192 | private static void _SerializeMessageWithCodec(ReusableMemoryStream stream, Message message, SerializationInfo info) 193 | { 194 | message.Serialize(stream, info.CompressionCodec, info.Serializers, info.MessageVersion); 195 | } 196 | 197 | public void Deserialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 198 | { 199 | throw new NotImplementedException(); 200 | } 201 | 202 | #endregion 203 | } 204 | } 205 | 206 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Protocol/ProduceResponse.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using Kafka.Common; 5 | 6 | namespace Kafka.Protocol 7 | { 8 | struct ProduceResponse : IMemoryStreamSerializable 9 | { 10 | public int ThrottleTime; 11 | public CommonResponse ProducePartitionResponse; 12 | 13 | public void Serialize(ReusableMemoryStream stream, object extra, Basics.ApiVersion version) 14 | { 15 | ProducePartitionResponse.Serialize(stream, extra, version); 16 | if (version > Basics.ApiVersion.V0) 17 | { 18 | BigEndianConverter.Write(stream, ThrottleTime); 19 | } 20 | } 21 | 22 | public void Deserialize(ReusableMemoryStream stream, object extra, Basics.ApiVersion version) 23 | { 24 | ProducePartitionResponse.Deserialize(stream, extra, version); 25 | if (version > Basics.ApiVersion.V0) 26 | { 27 | ThrottleTime = BigEndianConverter.ReadInt32(stream); 28 | } 29 | } 30 | } 31 | 32 | struct ProducePartitionResponse : IMemoryStreamSerializable 33 | { 34 | public int Partition; 35 | public ErrorCode ErrorCode; 36 | public long Offset; 37 | public long Timestamp; 38 | public long LogStartOffset; 39 | 40 | // Used only in tests 41 | public void Serialize(ReusableMemoryStream stream, object _, Basics.ApiVersion version) 42 | { 43 | BigEndianConverter.Write(stream, Partition); 44 | BigEndianConverter.Write(stream, (short) ErrorCode); 45 | BigEndianConverter.Write(stream, Offset); 46 | if (version >= Basics.ApiVersion.V2) 47 | { 48 | BigEndianConverter.Write(stream, Timestamp); 49 | } 50 | if (version >= Basics.ApiVersion.V5) 51 | { 52 | BigEndianConverter.Write(stream, LogStartOffset); 53 | } 54 | } 55 | 56 | public void Deserialize(ReusableMemoryStream stream, object _, Basics.ApiVersion version) 57 | { 58 | Partition = BigEndianConverter.ReadInt32(stream); 59 | ErrorCode = (ErrorCode) BigEndianConverter.ReadInt16(stream); 60 | Offset = BigEndianConverter.ReadInt64(stream); 61 | if (version >= Basics.ApiVersion.V2) 62 | { 63 | Timestamp = BigEndianConverter.ReadInt64(stream); 64 | } 65 | if (version >= Basics.ApiVersion.V5) 66 | { 67 | LogStartOffset = BigEndianConverter.ReadInt64(stream); 68 | } 69 | } 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Protocol/ProtocolException.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using Kafka.Public; 3 | 4 | namespace Kafka.Protocol 5 | { 6 | class ProtocolException : Exception 7 | { 8 | // Fields to identify the faulty message (useful in case the exception stops a batch) 9 | internal string Topic; 10 | internal int Partition; 11 | 12 | public ProtocolException(string message) : base(message) 13 | { 14 | } 15 | 16 | public ProtocolException(string message, Exception innerException) : base(message, innerException) 17 | { 18 | } 19 | } 20 | 21 | // Crc mismatch 22 | class CrcException : ProtocolException 23 | { 24 | public CrcException(string message) 25 | : base(message) 26 | { 27 | } 28 | } 29 | 30 | // Wrong compressed data 31 | class UncompressException : ProtocolException 32 | { 33 | public CompressionCodec Codec { get; internal set; } 34 | 35 | public UncompressException(string message, CompressionCodec codec, Exception ex) 36 | : base(message, ex) 37 | { 38 | Codec = codec; 39 | } 40 | } 41 | 42 | // Wrong message version 43 | class UnsupportedMagicByteVersion : ProtocolException 44 | { 45 | public UnsupportedMagicByteVersion(byte badMagic, string supported) 46 | : base($"Unsupported magic byte version: {badMagic}, only {supported} is supported") 47 | { 48 | } 49 | } 50 | } -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Protocol/TopicData.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using System.Collections.Generic; 5 | using Kafka.Common; 6 | using Kafka.Public; 7 | 8 | namespace Kafka.Protocol 9 | { 10 | struct TopicData : IMemoryStreamSerializable where TPartitionData : IMemoryStreamSerializable, new() 11 | { 12 | public string TopicName; 13 | public IEnumerable PartitionsData; 14 | 15 | #region Serialization 16 | 17 | public void Serialize(ReusableMemoryStream stream, object extra, Basics.ApiVersion version) 18 | { 19 | Basics.SerializeString(stream, TopicName); 20 | object pdExtra = null; 21 | if (extra != null) 22 | { 23 | var config = extra as SerializationConfig; 24 | pdExtra = config.GetSerializersForTopic(TopicName); 25 | } 26 | Basics.WriteArray(stream, PartitionsData, pdExtra, version); 27 | } 28 | 29 | public void Deserialize(ReusableMemoryStream stream, object extra, Basics.ApiVersion version) 30 | { 31 | TopicName = Basics.DeserializeString(stream); 32 | var count = BigEndianConverter.ReadInt32(stream); 33 | var array = new TPartitionData[count]; 34 | object pdExtra = null; 35 | if (extra != null) 36 | { 37 | var config = extra as SerializationConfig; 38 | pdExtra = config.GetDeserializersForTopic(TopicName); 39 | } 40 | try 41 | { 42 | for (int i = 0; i < count; ++i) 43 | { 44 | array[i] = new TPartitionData(); 45 | array[i].Deserialize(stream, pdExtra, version); 46 | } 47 | PartitionsData = array; 48 | } 49 | catch (ProtocolException pEx) 50 | { 51 | pEx.Topic = TopicName; 52 | throw; 53 | } 54 | } 55 | 56 | #endregion 57 | } 58 | } -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Protocol/TopicRequest.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using Kafka.Common; 5 | 6 | namespace Kafka.Protocol 7 | { 8 | struct TopicRequest : ISerializableRequest 9 | { 10 | public string[] Topics; 11 | 12 | #region Serialization 13 | 14 | public ReusableMemoryStream Serialize(ReusableMemoryStream target, int correlationId, byte[] clientId, object _, Basics.ApiVersion __) 15 | { 16 | return CommonRequest.Serialize(target, this, correlationId, clientId, Basics.ApiKey.MetadataRequest, Basics.ApiVersion.V0, null); 17 | } 18 | 19 | public void SerializeBody(ReusableMemoryStream stream, object _, Basics.ApiVersion __) 20 | { 21 | if (Topics == null || Topics.Length == 0) 22 | { 23 | stream.Write(Basics.Zero32, 0, 4); 24 | } 25 | else 26 | { 27 | BigEndianConverter.Write(stream, Topics.Length); 28 | foreach (var t in Topics) 29 | Basics.SerializeString(stream, t); 30 | } 31 | } 32 | 33 | #endregion 34 | } 35 | } -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Public/Exceptions.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using System; 5 | 6 | namespace Kafka.Public 7 | { 8 | public class KafkaGroupCoordinatorAuthorizationException : Exception 9 | { 10 | public string GroupId { get; internal set; } 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Public/ILogger.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | namespace Kafka.Public 5 | { 6 | public interface ILogger 7 | { 8 | void LogInformation(string message); 9 | void LogWarning(string message); 10 | void LogError(string message); 11 | void LogDebug(string message); 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Public/KafkaRecord.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using System; 5 | 6 | namespace Kafka.Public 7 | { 8 | /// 9 | /// Partition Id magic values 10 | /// 11 | public static class Partitions 12 | { 13 | public const int None = -1; 14 | public const int All = -2; 15 | public const int Any = -3; 16 | 17 | /// 18 | /// Format partition in a printable way 19 | /// 20 | public static string Format(int partition) 21 | { 22 | switch (partition) 23 | { 24 | case None: 25 | return "none"; 26 | case All: 27 | return "all"; 28 | case Any: 29 | return "any"; 30 | default: 31 | return partition.ToString(); 32 | } 33 | } 34 | } 35 | 36 | /// 37 | /// A typed Kafka record. This is a struct because it just encapsulates 38 | /// a RawKafkaRecord. 39 | /// 40 | /// Key type, this must be a reference type 41 | /// Value type, this must be a reference type 42 | public struct KafkaRecord where TKey : class where TValue : class 43 | { 44 | internal RawKafkaRecord Record { get; set; } 45 | 46 | /// 47 | /// The topic of the record. 48 | /// 49 | public string Topic 50 | { 51 | get { return Record.Topic; } 52 | } 53 | 54 | /// 55 | /// The key part of the message. Will be null if there is 56 | /// no key (which is often the case). 57 | /// 58 | public TKey Key 59 | { 60 | get { return Record.Key as TKey; } 61 | } 62 | 63 | /// 64 | /// The value part of the message. 65 | /// 66 | public TValue Value 67 | { 68 | get { return Record.Value as TValue; } 69 | } 70 | 71 | /// 72 | /// The offset of the message in its partition. You may use this 73 | /// to save the state of what you have read. 74 | /// 75 | public long Offset 76 | { 77 | get { return Record.Offset; } 78 | } 79 | 80 | /// 81 | /// The distance to the end of partition offset. 82 | /// 83 | public long Lag { get { return Record.Lag; } } 84 | 85 | /// 86 | /// The partition the message belongs to inside its topic. 87 | /// 88 | public int Partition 89 | { 90 | get { return Record.Partition; } 91 | } 92 | 93 | /// 94 | /// Timestamp of the message. If using 0.8.2 compatibility mode, this 95 | /// is always set to Epoch (1970/01/01 00:00:00 UTC). 96 | /// 97 | public DateTime Timestamp { get { return Record.Timestamp; } } 98 | } 99 | 100 | /// 101 | /// A Kafka record, as got from consuming a topic. This is 102 | /// what is returned by the consumer. 103 | /// 104 | public class RawKafkaRecord 105 | { 106 | public RawKafkaRecord() { } 107 | 108 | // For testing purpose 109 | public RawKafkaRecord(string topic = null, object key = null, object value = null, long offset = 0, 110 | long lag = 0, int partition = 0, DateTime? timestamp = null) 111 | { 112 | Topic = topic; 113 | Key = key; 114 | Value = value; 115 | Offset = offset; 116 | Lag = lag; 117 | Partition = partition; 118 | Timestamp = timestamp.GetValueOrDefault(DateTime.MinValue); 119 | } 120 | 121 | /// 122 | /// The topic of the record. 123 | /// 124 | public string Topic { get; internal set; } 125 | 126 | /// 127 | /// The key part of the message. Will be null if there is 128 | /// no key (which is often the case). 129 | /// 130 | public object Key { get; internal set; } 131 | 132 | /// 133 | /// The value part of the message. 134 | /// 135 | public object Value { get; internal set; } 136 | 137 | /// 138 | /// The offset of the message in its partition. You may use this 139 | /// to save the state of what you have read. 140 | /// 141 | public long Offset { get; internal set; } 142 | 143 | /// 144 | /// The distance to the end of partition offset. 145 | /// 146 | public long Lag { get; internal set; } 147 | 148 | /// 149 | /// The partition the message belongs to inside its topic. 150 | /// 151 | public int Partition { get; internal set; } 152 | 153 | /// 154 | /// Timestamp of the message. If using 0.8.2 compatibility mode, this 155 | /// is always set to Epoch (1970/01/01 00:00:00 UTC). 156 | /// 157 | public DateTime Timestamp { get; internal set; } 158 | } 159 | 160 | /// 161 | /// Header in a record, as got from consuming a topic. 162 | /// An header is only available for records encoded using the "Message format" V3 163 | /// or more (starting kafka 0.11). 164 | /// 165 | public struct KafkaRecordHeader 166 | { 167 | /// 168 | /// Key of the header. 169 | /// 170 | public string Key { get; set; } 171 | 172 | /// 173 | /// Raw value of the header. 174 | /// 175 | public byte[] Value { get; set; } 176 | 177 | /// 178 | /// Throws an exception if the values of the objects are invalid. 179 | /// In particular, both the key and value must not be null. 180 | /// 181 | public void Validate() 182 | { 183 | if (Key == null) 184 | { 185 | throw new ArgumentNullException(nameof(Key)); 186 | } 187 | 188 | if (Value == null) 189 | { 190 | throw new ArgumentNullException(nameof(Value)); 191 | } 192 | } 193 | } 194 | } 195 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Public/Loggers/ConsoleLogger.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.Collections.Generic; 3 | using System.Linq; 4 | using System.Text; 5 | using System.Threading.Tasks; 6 | 7 | namespace Kafka.Public.Loggers 8 | { 9 | public class ConsoleLogger : ILogger 10 | { 11 | private static void LogHeader(string header) 12 | { 13 | Console.Write("[{0:yyyy-MM-dd HH:mm:ss}] {1} ", DateTime.UtcNow, header); 14 | } 15 | 16 | public void LogInformation(string message) 17 | { 18 | LogHeader("INFO"); 19 | Console.WriteLine(message); 20 | } 21 | 22 | public void LogWarning(string message) 23 | { 24 | Console.ForegroundColor = ConsoleColor.Yellow; 25 | LogHeader("WARNING"); 26 | Console.ResetColor(); 27 | Console.WriteLine(message); 28 | } 29 | 30 | public void LogError(string message) 31 | { 32 | Console.ForegroundColor = ConsoleColor.Red; 33 | LogHeader("ERROR"); 34 | Console.ResetColor(); 35 | Console.WriteLine(message); 36 | } 37 | 38 | public void LogDebug(string message) 39 | { 40 | LogHeader("DEBUG"); 41 | Console.WriteLine(message); 42 | } 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Public/Loggers/DevNullLogger.cs: -------------------------------------------------------------------------------- 1 | namespace Kafka.Public.Loggers 2 | { 3 | public class DevNullLogger : ILogger 4 | { 5 | public void LogInformation(string message) 6 | { 7 | } 8 | 9 | public void LogWarning(string message) 10 | { 11 | } 12 | 13 | public void LogError(string message) 14 | { 15 | } 16 | 17 | public void LogDebug(string message) 18 | { 19 | } 20 | } 21 | } -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Public/PartitionSelectionConfig.cs: -------------------------------------------------------------------------------- 1 | using System.Collections.Generic; 2 | using Kafka.Routing.PartitionSelection; 3 | 4 | namespace Kafka.Public 5 | { 6 | public enum PartitionSelectionStrategy 7 | { 8 | /// 9 | /// Round Robin between all available partitions. 10 | /// 11 | RoundRobin, 12 | 13 | /// 14 | /// Choose the partition depending on the message's key. It guarantees that given the same 15 | /// number of available partitions, messages with the same key will go to the same partition. 16 | /// If the message's key is null, it fall backs to the round robin strategy. 17 | /// If the chosen partition is blacklisted, messages to that partition will actually be sent 18 | /// to another partition using the round robin strategy. 19 | /// 20 | MessageKey, 21 | } 22 | 23 | public sealed class PartitionSelectionConfig 24 | { 25 | private readonly Dictionary _selectionStrategyByTopic; 26 | private PartitionSelectionStrategy _defaultStrategy = PartitionSelectionStrategy.RoundRobin; 27 | 28 | public PartitionSelectionConfig() 29 | { 30 | _selectionStrategyByTopic = new Dictionary(); 31 | } 32 | 33 | public void SetDefaultPartitionSelectionStrategy(PartitionSelectionStrategy strategy) 34 | { 35 | _defaultStrategy = strategy; 36 | } 37 | 38 | public void SetPartitionSelectionStrategyForTopic(string topic, PartitionSelectionStrategy strategy) 39 | { 40 | _selectionStrategyByTopic.Add(topic, strategy); 41 | } 42 | 43 | internal IPartitionSelection GetPartitionSelectionForTopic(string topic, int delay, int startSeed, 44 | ISerializer keySerializer, ILogger logger) 45 | { 46 | if (!_selectionStrategyByTopic.TryGetValue(topic, out var selectionStrategy)) 47 | { 48 | selectionStrategy = _defaultStrategy; 49 | } 50 | 51 | var roundRobinSelection = new RoundRobinPartitionSelection(delay, startSeed); 52 | if (selectionStrategy == PartitionSelectionStrategy.RoundRobin) 53 | { 54 | return roundRobinSelection; 55 | } 56 | 57 | return new MessageKeyPartitionSelection(keySerializer, roundRobinSelection, logger); 58 | } 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Routing/PartitionSelection/IPartitionSelection.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.Collections.Generic; 3 | using Kafka.Cluster; 4 | 5 | namespace Kafka.Routing.PartitionSelection 6 | { 7 | /// 8 | /// A class responsible of selecting a partition to send a kafka message. 9 | /// 10 | internal interface IPartitionSelection 11 | { 12 | /// 13 | /// Select a partition to send the message to, taking into account that some partitions might 14 | /// be temporary blacklisted. 15 | /// 16 | /// Encapsulation of a message sent to Kafka brokers. It contains additional 17 | /// information like the RequiredPartition that indicates we want to target a specific partition 18 | /// The list of all available partitions 19 | /// Dictionary of partition ids that are temporary blacklisted 20 | /// The partition the message will be sent to 21 | Partition GetPartition(ProduceMessage produceMessage, Partition[] partitions, IReadOnlyDictionary blacklist); 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Routing/PartitionSelection/MessageKeyPartitionSelection.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.Collections.Generic; 3 | using Kafka.Cluster; 4 | using Kafka.Common; 5 | using Kafka.Public; 6 | using Kafka.Public.Loggers; 7 | 8 | namespace Kafka.Routing.PartitionSelection 9 | { 10 | internal class MessageKeyPartitionSelection : IPartitionSelection 11 | { 12 | private const int LogSamplingPercentage = 99; 13 | 14 | private readonly ISerializer _keySerializer; 15 | private readonly IPartitionSelection _roundRobinSelection; 16 | private readonly ILogger _logger; 17 | private readonly Pool _messageKeyBuffersPool; 18 | private readonly Random _rnd; 19 | 20 | public MessageKeyPartitionSelection(ISerializer keySerializer, IPartitionSelection roundRobinSelection, 21 | ILogger logger) 22 | { 23 | _keySerializer = keySerializer; 24 | _roundRobinSelection = roundRobinSelection; 25 | _logger = logger; 26 | _rnd = new Random(); 27 | _messageKeyBuffersPool = new Pool( 28 | limit: 100, 29 | constructor: () => new ReusableMemoryStream(_messageKeyBuffersPool), 30 | clearAction: (stream, reused) => 31 | { 32 | if (reused) 33 | stream.SetLength(0); 34 | }); 35 | } 36 | 37 | public Partition GetPartition(ProduceMessage produceMessage, Partition[] partitions, IReadOnlyDictionary blacklist) 38 | { 39 | var partitionId = GetPartitionIdFromKey(produceMessage.Message.Key, partitions.Length); 40 | if (partitionId != Partition.None.Id && !blacklist.ContainsKey(partitionId)) 41 | { 42 | return partitions[partitionId]; 43 | } 44 | 45 | return _roundRobinSelection.GetPartition(produceMessage, partitions, blacklist); 46 | } 47 | 48 | /// 49 | /// Compute the partition id given the message key and the number of partitions 50 | /// Do a simple Crc32 of the key, modulo the number of partitions. This is the exact same 51 | /// algorithm in librdkafka 52 | /// 53 | private int GetPartitionIdFromKey(object messageKey, int partitionsLength) 54 | { 55 | if (messageKey != null) 56 | { 57 | var memoryStream = _messageKeyBuffersPool.Reserve(); 58 | _keySerializer.Serialize(messageKey, memoryStream); 59 | var partitionId = Crc32.Compute(memoryStream, 0, memoryStream.Length) % partitionsLength; 60 | _messageKeyBuffersPool.Release(memoryStream); 61 | return (int) partitionId; 62 | } 63 | 64 | if (_rnd.Next(0, 100) >= LogSamplingPercentage) 65 | { 66 | _logger.LogError($"{typeof(MessageKeyPartitionSelection)} cannot determine partition as message's " 67 | + "key is null. Falling back to round robin selection"); 68 | } 69 | 70 | return Partition.None.Id; 71 | } 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Routing/PartitionSelection/PartitionSelector.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using System; 5 | using System.Collections.Generic; 6 | using Kafka.Cluster; 7 | using Kafka.Public; 8 | 9 | namespace Kafka.Routing.PartitionSelection 10 | { 11 | /// 12 | /// Select a partition from an array of Partition. 13 | /// 14 | internal class PartitionSelector 15 | { 16 | private readonly IPartitionSelection _partitionSelection; 17 | private static readonly IReadOnlyDictionary EmptyBlackList = new Dictionary(); 18 | 19 | public PartitionSelector(IPartitionSelection partitionSelection) 20 | { 21 | _partitionSelection = partitionSelection; 22 | } 23 | 24 | /// 25 | /// Get the partition the message will be sent to. 26 | /// Actual selection strategy depends on the IPartitionSelection implementation 27 | /// that was chosen for this topic. 28 | /// 29 | /// The ProduceMessage to send 30 | /// List of all available partitions 31 | /// Dictionary of partition ids that are currently blacklisted 32 | /// 33 | public Partition GetPartition(ProduceMessage produceMessage, Partition[] partitions, IReadOnlyDictionary blacklist = null) 34 | { 35 | blacklist = blacklist ?? EmptyBlackList; 36 | 37 | switch (produceMessage.RequiredPartition) 38 | { 39 | case Partitions.None: 40 | return Partition.None; 41 | 42 | case Partitions.Any: 43 | case Partitions.All: 44 | return _partitionSelection.GetPartition(produceMessage, partitions, blacklist); 45 | 46 | default: 47 | var found = Array.BinarySearch(partitions, new Partition { Id = produceMessage.RequiredPartition }); 48 | return found >= 0 ? partitions[found] : Partition.None; 49 | } 50 | } 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Routing/PartitionSelection/RoundRobinPartitionSelection.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.Collections.Generic; 3 | using Kafka.Cluster; 4 | 5 | namespace Kafka.Routing.PartitionSelection 6 | { 7 | internal class RoundRobinPartitionSelection : IPartitionSelection 8 | { 9 | private long _next; 10 | private int _cursor; 11 | private readonly int _delay; 12 | 13 | public RoundRobinPartitionSelection(int delay = 1, int startSeed = 0) 14 | { 15 | _delay = delay <= 0 ? 1 : delay; 16 | _next = startSeed <= 0 ? 0L : startSeed; 17 | } 18 | 19 | public Partition GetPartition(ProduceMessage produceMessage, Partition[] partitions, IReadOnlyDictionary blacklist) 20 | { 21 | return GetRandomPartition(partitions, blacklist); 22 | } 23 | 24 | private Partition GetRandomPartition(Partition[] partitions, IReadOnlyDictionary blacklist) 25 | { 26 | for (var retryCount = 0; retryCount < partitions.Length; retryCount++) 27 | { 28 | var partition = GetActivePartition(partitions); 29 | 30 | if (blacklist.ContainsKey(partition.Id)) 31 | { 32 | SelectNextPartition(); 33 | continue; 34 | } 35 | 36 | _cursor++; 37 | 38 | if (_cursor >= _delay) 39 | { 40 | // Round-robin threshold met, switch to next partition for the next call 41 | SelectNextPartition(); 42 | } 43 | 44 | return partition; 45 | } 46 | 47 | return Partition.None; 48 | } 49 | 50 | private Partition GetActivePartition(Partition[] partitions) 51 | { 52 | return partitions[_next % partitions.Length]; 53 | } 54 | 55 | private void SelectNextPartition() 56 | { 57 | _next++; 58 | 59 | // Reset the hit count for current partition 60 | _cursor = 0; 61 | } 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/Routing/RoutingTable.cs: -------------------------------------------------------------------------------- 1 | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 2 | // You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 3 | 4 | using System; 5 | using System.Collections.Generic; 6 | using System.Linq; 7 | using Kafka.Cluster; 8 | using Kafka.Public; 9 | 10 | namespace Kafka.Routing 11 | { 12 | struct Partition : IComparable 13 | { 14 | public int Id { get; set; } 15 | public INode Leader { get; set; } 16 | public int NbIsr { get; set; } 17 | 18 | public static Partition None = new Partition {Id = Partitions.None}; 19 | 20 | public int CompareTo(Partition other) 21 | { 22 | return Id - other.Id; 23 | } 24 | } 25 | 26 | class RoutingTable 27 | { 28 | private readonly Dictionary _routes; 29 | private static readonly Partition[] NullPartition = new Partition[0]; 30 | 31 | /// 32 | /// Default 33 | /// 34 | public RoutingTable() 35 | { 36 | _routes = new Dictionary(); 37 | } 38 | 39 | /// 40 | /// Initialize from map topic -> partitions 41 | /// 42 | /// 43 | public RoutingTable(Dictionary routes) 44 | { 45 | _routes = new Dictionary(routes); 46 | LastRefreshed = DateTime.UtcNow; 47 | } 48 | 49 | /// 50 | /// Initialize from existing routing table 51 | /// than a given value 52 | /// 53 | /// 54 | public RoutingTable(RoutingTable fromTable) 55 | { 56 | _routes = new Dictionary(fromTable._routes); 57 | LastRefreshed = fromTable.LastRefreshed; 58 | } 59 | 60 | /// 61 | /// Initialize from existing routing table, removing partitions with dead nodes 62 | /// 63 | /// 64 | /// 65 | public RoutingTable(RoutingTable fromTable, INode deadNode) 66 | { 67 | _routes = new Dictionary(); 68 | var tmp = new List(); 69 | foreach (var kv in fromTable._routes) 70 | { 71 | tmp.AddRange(kv.Value.Where(partition => !Equals(partition.Leader, deadNode))); 72 | _routes.Add(kv.Key, tmp.ToArray()); 73 | tmp.Clear(); 74 | } 75 | LastRefreshed = fromTable.LastRefreshed; 76 | } 77 | 78 | /// 79 | /// Initialize from existing routing table, removing partitions with NbIsr less 80 | /// than a given value 81 | /// 82 | /// 83 | /// 84 | public RoutingTable(RoutingTable fromTable, int minIsr) 85 | { 86 | _routes = new Dictionary(); 87 | var tmp = new List(); 88 | foreach (var kv in fromTable._routes) 89 | { 90 | tmp.AddRange(kv.Value.Where(partition => partition.NbIsr >= minIsr)); 91 | _routes.Add(kv.Key, tmp.ToArray()); 92 | tmp.Clear(); 93 | } 94 | LastRefreshed = fromTable.LastRefreshed; 95 | } 96 | 97 | /// 98 | /// Returns the arrays of partitions for a given topic 99 | /// 100 | /// 101 | /// 102 | public Partition[] GetPartitions(string topic) 103 | { 104 | Partition[] partitions; 105 | _routes.TryGetValue(topic, out partitions); 106 | return partitions ?? NullPartition; 107 | } 108 | 109 | /// 110 | /// Returns the leader for a given topic / partition. 111 | /// 112 | /// 113 | /// 114 | /// 115 | public INode GetLeaderForPartition(string topic, int partition) 116 | { 117 | var partitions = GetPartitions(topic); 118 | int index = Array.BinarySearch(partitions, new Partition { Id = partition }); 119 | return index >= 0 ? partitions[index].Leader : null; 120 | } 121 | 122 | public DateTime LastRefreshed { get; internal set; } 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /kafka-sharp/kafka-sharp/packages.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | -------------------------------------------------------------------------------- /kafka-sharp/sample-kafka-sharp/App.config: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | -------------------------------------------------------------------------------- /kafka-sharp/sample-kafka-sharp/Properties/AssemblyInfo.cs: -------------------------------------------------------------------------------- 1 | using System.Reflection; 2 | using System.Runtime.CompilerServices; 3 | using System.Runtime.InteropServices; 4 | 5 | // General Information about an assembly is controlled through the following 6 | // set of attributes. Change these attribute values to modify the information 7 | // associated with an assembly. 8 | [assembly: AssemblyTitle("sample-kafka-sharp")] 9 | [assembly: AssemblyDescription("")] 10 | [assembly: AssemblyConfiguration("")] 11 | [assembly: AssemblyCompany("Criteo")] 12 | [assembly: AssemblyProduct("sample-kafka-sharp")] 13 | [assembly: AssemblyCopyright("Copyright © Criteo 2015")] 14 | [assembly: AssemblyTrademark("")] 15 | [assembly: AssemblyCulture("")] 16 | 17 | // Setting ComVisible to false makes the types in this assembly not visible 18 | // to COM components. If you need to access a type in this assembly from 19 | // COM, set the ComVisible attribute to true on that type. 20 | [assembly: ComVisible(false)] 21 | 22 | // The following GUID is for the ID of the typelib if this project is exposed to COM 23 | [assembly: Guid("8ad2f840-0e42-4a3f-828b-b525974fc523")] 24 | 25 | // Version information for an assembly consists of the following four values: 26 | // 27 | // Major Version 28 | // Minor Version 29 | // Build Number 30 | // Revision 31 | // 32 | // You can specify all the values or you can default the Build and Revision Numbers 33 | // by using the '*' as shown below: 34 | // [assembly: AssemblyVersion("1.0.*")] 35 | [assembly: AssemblyVersion("1.0.0.0")] 36 | [assembly: AssemblyFileVersion("1.0.0.0")] 37 | -------------------------------------------------------------------------------- /kafka-sharp/sample-kafka-sharp/packages.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /kafka-sharp/sample-kafka-sharp/sample-kafka-sharp.csproj: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Debug 6 | AnyCPU 7 | {322B004A-B8B1-4D33-9CC6-2BA67267A4F4} 8 | Exe 9 | Properties 10 | sample_kafka_sharp 11 | sample-kafka-sharp 12 | v4.7.1 13 | 512 14 | 15 | 16 | 17 | true 18 | full 19 | false 20 | bin\Debug\ 21 | DEBUG;TRACE 22 | prompt 23 | 4 24 | 25 | 26 | pdbonly 27 | true 28 | bin\Release\ 29 | TRACE 30 | prompt 31 | 4 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | ..\packages\System.Reactive.Core.3.1.1\lib\net45\System.Reactive.Core.dll 41 | True 42 | 43 | 44 | ..\packages\System.Reactive.Interfaces.3.1.1\lib\net45\System.Reactive.Interfaces.dll 45 | True 46 | 47 | 48 | ..\packages\System.Reactive.Linq.3.1.1\lib\net45\System.Reactive.Linq.dll 49 | True 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | {c2e6d32f-69d0-41b4-a1ea-9ade27a99b5e} 69 | kafka-sharp 70 | 71 | 72 | 73 | 80 | 81 | -------------------------------------------------------------------------------- /kafka-sharp/sample-kafka-sharp/sample-kafka-sharp.netstandard.csproj: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | netcoreapp1.1 5 | sample-kafka-sharp 6 | sample-kafka-sharp 7 | true 8 | false 9 | false 10 | false 11 | false 12 | false 13 | false 14 | false 15 | false 16 | exe 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | $(DefineConstants);NET_CORE 31 | 32 | 33 | 34 | -------------------------------------------------------------------------------- /scripts/make-nuget-package.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | 3 | REM 4 | REM Build and package kafka-sharp as a NuGet package. 5 | REM 6 | REM You will need to have nuget.exe in your path. If you don't already have it, 7 | REM download it from https://dist.nuget.org/index.html (Windows x86 Commandline). 8 | REM 9 | 10 | REM The project.json file from the .NET Core Visual Studio project interfers with the nuget packaging step. 11 | REM Get it out of the way temporarily by renaming it. 12 | ren ..\kafka-sharp\kafka-sharp\project.json project.json.RENAMED 13 | 14 | REM Build and package kafka-sharp. 15 | nuget.exe pack ..\kafka-sharp\kafka-sharp\Kafka.csproj -Verbosity detailed -Build -Prop Configuration=Release -IncludeReferencedProjects -OutputDirectory ..\nuget 16 | 17 | REM Revert the renaming step performed earlier. 18 | ren ..\kafka-sharp\kafka-sharp\project.json.RENAMED project.json 19 | 20 | pause 21 | -------------------------------------------------------------------------------- /scripts/presubmit.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Download Kafka binaries and extract them 4 | if [ $1 = 'begin' ] 5 | then 6 | wget -nv -O kafka.tgz http://filer.criteo.prod/remote_files/kafka/kafka_2.10-0.8.2.1.tgz 7 | tar zxf kafka.tgz 8 | rm kafka.tgz 9 | fi 10 | 11 | --------------------------------------------------------------------------------