├── .dockerignore ├── .env ├── .gitignore ├── .vscode ├── launch.json └── tasks.json ├── Client ├── Client.csproj ├── CloudEvent.cs ├── Dockerfile ├── Program.cs ├── Properties │ └── launchSettings.json ├── appsettings.Development.json └── appsettings.json ├── README.md ├── Workflow ├── Activities │ ├── AlwaysFailActivity.cs │ ├── FastActivity.cs │ ├── HelloActivity.cs │ ├── NoOpActivity.cs │ ├── NotifyCompensateActivity.cs │ ├── RaiseEventActivity.cs │ ├── SlowActivity.cs │ └── VerySlowActivity.cs ├── CloudEvent.cs ├── Dockerfile ├── Program.cs ├── Properties │ └── launchSettings.json ├── Workflow.csproj ├── Workflows │ ├── ConstrainedWorkflow.cs │ ├── ExternalSystemWorkflow.cs │ ├── FanOutWorkflow.cs │ ├── MonitorWorkflow.cs │ └── ThrottleWorkflow.cs ├── appsettings.Development.json └── appsettings.json ├── components ├── pubsub-kafka.yaml └── statestore-pg-v2.yaml ├── compose-1-1.yml ├── compose-10-3.yml ├── compose-5-3.yml ├── compose-only-dependencies.yml ├── compose.debug-workflow-app.yml ├── dapr-config └── config.yml ├── dapr-workflow-examples.sln ├── deploy.yaml └── multirun.yaml /.dockerignore: -------------------------------------------------------------------------------- 1 | **/.classpath 2 | **/.dockerignore 3 | **/.env 4 | **/.git 5 | **/.gitignore 6 | **/.project 7 | **/.settings 8 | **/.toolstarget 9 | **/.vs 10 | **/.vscode 11 | **/*.*proj.user 12 | **/*.dbmdl 13 | **/*.jfm 14 | **/bin 15 | **/charts 16 | **/docker-compose* 17 | **/compose* 18 | **/Dockerfile* 19 | **/node_modules 20 | **/npm-debug.log 21 | **/obj 22 | **/secrets.dev.yaml 23 | **/values.dev.yaml 24 | LICENSE 25 | README.md 26 | -------------------------------------------------------------------------------- /.env: -------------------------------------------------------------------------------- 1 | DAPR_RUNTIME_VERSION=1.15.4 2 | DAPR_SCHEDULER_VERSION=1.15.4 3 | DAPR_PLACEMENT_VERSION=1.15.4 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ## Ignore Visual Studio temporary files, build results, and 2 | ## files generated by popular Visual Studio add-ons. 
3 | ## 4 | ## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore 5 | 6 | # User-specific files 7 | *.rsuser 8 | *.suo 9 | *.user 10 | *.userosscache 11 | *.sln.docstates 12 | 13 | # User-specific files (MonoDevelop/Xamarin Studio) 14 | *.userprefs 15 | 16 | # Mono auto generated files 17 | mono_crash.* 18 | 19 | # Build results 20 | [Dd]ebug/ 21 | [Dd]ebugPublic/ 22 | [Rr]elease/ 23 | [Rr]eleases/ 24 | x64/ 25 | x86/ 26 | [Aa][Rr][Mm]/ 27 | [Aa][Rr][Mm]64/ 28 | bld/ 29 | [Bb]in/ 30 | [Oo]bj/ 31 | [Ll]og/ 32 | [Ll]ogs/ 33 | 34 | # Visual Studio 2015/2017 cache/options directory 35 | .vs/ 36 | # Uncomment if you have tasks that create the project's static files in wwwroot 37 | #wwwroot/ 38 | 39 | # Visual Studio 2017 auto generated files 40 | Generated\ Files/ 41 | 42 | # MSTest test Results 43 | [Tt]est[Rr]esult*/ 44 | [Bb]uild[Ll]og.* 45 | 46 | # NUnit 47 | *.VisualState.xml 48 | TestResult.xml 49 | nunit-*.xml 50 | 51 | # Build Results of an ATL Project 52 | [Dd]ebugPS/ 53 | [Rr]eleasePS/ 54 | dlldata.c 55 | 56 | # Benchmark Results 57 | BenchmarkDotNet.Artifacts/ 58 | 59 | # .NET Core 60 | project.lock.json 61 | project.fragment.lock.json 62 | artifacts/ 63 | 64 | # StyleCop 65 | StyleCopReport.xml 66 | 67 | # Files built by Visual Studio 68 | *_i.c 69 | *_p.c 70 | *_h.h 71 | *.ilk 72 | *.meta 73 | *.obj 74 | *.iobj 75 | *.pch 76 | *.pdb 77 | *.ipdb 78 | *.pgc 79 | *.pgd 80 | *.rsp 81 | *.sbr 82 | *.tlb 83 | *.tli 84 | *.tlh 85 | *.tmp 86 | *.tmp_proj 87 | *_wpftmp.csproj 88 | *.log 89 | *.vspscc 90 | *.vssscc 91 | .builds 92 | *.pidb 93 | *.svclog 94 | *.scc 95 | 96 | # Chutzpah Test files 97 | _Chutzpah* 98 | 99 | # Visual C++ cache files 100 | ipch/ 101 | *.aps 102 | *.ncb 103 | *.opendb 104 | *.opensdf 105 | *.sdf 106 | *.cachefile 107 | *.VC.db 108 | *.VC.VC.opendb 109 | 110 | # Visual Studio profiler 111 | *.psess 112 | *.vsp 113 | *.vspx 114 | *.sap 115 | 116 | # Visual Studio Trace Files 117 | *.e2e 118 | 119 | # TFS 2012 Local Workspace 120 | $tf/ 121 | 122 | # Guidance Automation Toolkit 123 | *.gpState 124 | 125 | # ReSharper is a .NET coding add-in 126 | _ReSharper*/ 127 | *.[Rr]e[Ss]harper 128 | *.DotSettings.user 129 | 130 | # TeamCity is a build add-in 131 | _TeamCity* 132 | 133 | # DotCover is a Code Coverage Tool 134 | *.dotCover 135 | 136 | # AxoCover is a Code Coverage Tool 137 | .axoCover/* 138 | !.axoCover/settings.json 139 | 140 | # Visual Studio code coverage results 141 | *.coverage 142 | *.coveragexml 143 | 144 | # NCrunch 145 | _NCrunch_* 146 | .*crunch*.local.xml 147 | nCrunchTemp_* 148 | 149 | # MightyMoose 150 | *.mm.* 151 | AutoTest.Net/ 152 | 153 | # Web workbench (sass) 154 | .sass-cache/ 155 | 156 | # Installshield output folder 157 | [Ee]xpress/ 158 | 159 | # DocProject is a documentation generator add-in 160 | DocProject/buildhelp/ 161 | DocProject/Help/*.HxT 162 | DocProject/Help/*.HxC 163 | DocProject/Help/*.hhc 164 | DocProject/Help/*.hhk 165 | DocProject/Help/*.hhp 166 | DocProject/Help/Html2 167 | DocProject/Help/html 168 | 169 | # Click-Once directory 170 | publish/ 171 | 172 | # Publish Web Output 173 | *.[Pp]ublish.xml 174 | *.azurePubxml 175 | # Note: Comment the next line if you want to checkin your web deploy settings, 176 | # but database connection strings (with potential passwords) will be unencrypted 177 | *.pubxml 178 | *.publishproj 179 | 180 | # Microsoft Azure Web App publish settings. 
Comment the next line if you want to 181 | # checkin your Azure Web App publish settings, but sensitive information contained 182 | # in these scripts will be unencrypted 183 | PublishScripts/ 184 | 185 | # NuGet Packages 186 | *.nupkg 187 | # NuGet Symbol Packages 188 | *.snupkg 189 | # The packages folder can be ignored because of Package Restore 190 | **/[Pp]ackages/* 191 | # except build/, which is used as an MSBuild target. 192 | !**/[Pp]ackages/build/ 193 | # Uncomment if necessary however generally it will be regenerated when needed 194 | #!**/[Pp]ackages/repositories.config 195 | # NuGet v3's project.json files produces more ignorable files 196 | *.nuget.props 197 | *.nuget.targets 198 | 199 | # Microsoft Azure Build Output 200 | csx/ 201 | *.build.csdef 202 | 203 | # Microsoft Azure Emulator 204 | ecf/ 205 | rcf/ 206 | 207 | # Windows Store app package directories and files 208 | AppPackages/ 209 | BundleArtifacts/ 210 | Package.StoreAssociation.xml 211 | _pkginfo.txt 212 | *.appx 213 | *.appxbundle 214 | *.appxupload 215 | 216 | # Visual Studio cache files 217 | # files ending in .cache can be ignored 218 | *.[Cc]ache 219 | # but keep track of directories ending in .cache 220 | !?*.[Cc]ache/ 221 | 222 | # Others 223 | ClientBin/ 224 | ~$* 225 | *~ 226 | *.dbmdl 227 | *.dbproj.schemaview 228 | *.jfm 229 | *.pfx 230 | *.publishsettings 231 | orleans.codegen.cs 232 | 233 | # Including strong name files can present a security risk 234 | # (https://github.com/github/gitignore/pull/2483#issue-259490424) 235 | #*.snk 236 | 237 | # Since there are multiple workflows, uncomment next line to ignore bower_components 238 | # (https://github.com/github/gitignore/pull/1529#issuecomment-104372622) 239 | #bower_components/ 240 | 241 | # RIA/Silverlight projects 242 | Generated_Code/ 243 | 244 | # Backup & report files from converting an old project file 245 | # to a newer Visual Studio version. Backup files are not needed, 246 | # because we have git ;-) 247 | _UpgradeReport_Files/ 248 | Backup*/ 249 | UpgradeLog*.XML 250 | UpgradeLog*.htm 251 | ServiceFabricBackup/ 252 | *.rptproj.bak 253 | 254 | # SQL Server files 255 | *.mdf 256 | *.ldf 257 | *.ndf 258 | 259 | # Business Intelligence projects 260 | *.rdl.data 261 | *.bim.layout 262 | *.bim_*.settings 263 | *.rptproj.rsuser 264 | *- [Bb]ackup.rdl 265 | *- [Bb]ackup ([0-9]).rdl 266 | *- [Bb]ackup ([0-9][0-9]).rdl 267 | 268 | # Microsoft Fakes 269 | FakesAssemblies/ 270 | 271 | # GhostDoc plugin setting file 272 | *.GhostDoc.xml 273 | 274 | # Node.js Tools for Visual Studio 275 | .ntvs_analysis.dat 276 | node_modules/ 277 | 278 | # Visual Studio 6 build log 279 | *.plg 280 | 281 | # Visual Studio 6 workspace options file 282 | *.opt 283 | 284 | # Visual Studio 6 auto-generated workspace file (contains which files were open etc.) 
285 | *.vbw 286 | 287 | # Visual Studio LightSwitch build output 288 | **/*.HTMLClient/GeneratedArtifacts 289 | **/*.DesktopClient/GeneratedArtifacts 290 | **/*.DesktopClient/ModelManifest.xml 291 | **/*.Server/GeneratedArtifacts 292 | **/*.Server/ModelManifest.xml 293 | _Pvt_Extensions 294 | 295 | # Paket dependency manager 296 | .paket/paket.exe 297 | paket-files/ 298 | 299 | # FAKE - F# Make 300 | .fake/ 301 | 302 | # CodeRush personal settings 303 | .cr/personal 304 | 305 | # Python Tools for Visual Studio (PTVS) 306 | __pycache__/ 307 | *.pyc 308 | 309 | # Cake - Uncomment if you are using it 310 | # tools/** 311 | # !tools/packages.config 312 | 313 | # Tabs Studio 314 | *.tss 315 | 316 | # Telerik's JustMock configuration file 317 | *.jmconfig 318 | 319 | # BizTalk build output 320 | *.btp.cs 321 | *.btm.cs 322 | *.odx.cs 323 | *.xsd.cs 324 | 325 | # OpenCover UI analysis results 326 | OpenCover/ 327 | 328 | # Azure Stream Analytics local run output 329 | ASALocalRun/ 330 | 331 | # MSBuild Binary and Structured Log 332 | *.binlog 333 | 334 | # NVidia Nsight GPU debugger configuration file 335 | *.nvuser 336 | 337 | # MFractors (Xamarin productivity tool) working folder 338 | .mfractor/ 339 | 340 | # Local History for Visual Studio 341 | .localhistory/ 342 | 343 | # BeatPulse healthcheck temp database 344 | healthchecksdb 345 | 346 | # Backup folder for Package Reference Convert tool in Visual Studio 2017 347 | MigrationBackup/ 348 | 349 | # Ionide (cross platform F# VS Code tools) working folder 350 | .ionide/ 351 | /dapr_scheduler 352 | 353 | .DS_store -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "0.2.0", 3 | "configurations": [ 4 | { 5 | "name": "Debug workflow app", 6 | "type": "coreclr", 7 | "request": "launch", 8 | "preLaunchTask": "build", 9 | "program": "${workspaceFolder}/Workflow/bin/Debug/net6.0/Workflow.dll", 10 | "args": [], 11 | "cwd": "${workspaceFolder}", 12 | "stopAtEntry": false, 13 | "env": { 14 | "ASPNETCORE_ENVIRONMENT": "Development", 15 | "ASPNETCORE_URLS": "http://localhost:5111", 16 | "DAPR_HTTP_PORT": "3500", 17 | "DAPR_GRPC_PORT": "50001" 18 | }, 19 | "sourceFileMap": { 20 | "/Views": "${workspaceFolder}/Views" 21 | } 22 | }, 23 | { 24 | "name": "Docker .NET Attach (Preview)", 25 | "type": "docker", 26 | "request": "attach", 27 | "platform": "netCore", 28 | "sourceFileMap": { 29 | "/src": "${workspaceFolder}" 30 | } 31 | }, 32 | { 33 | "name": ".NET Core Attach", 34 | "type": "coreclr", 35 | "request": "attach" 36 | }, 37 | { 38 | "name": "Docker .NET Launch", 39 | "type": "docker", 40 | "request": "launch", 41 | "preLaunchTask": "docker-run: debug", 42 | "netCore": { 43 | "appProject": "${workspaceFolder}/Workflow/Workflow.csproj" 44 | } 45 | } 46 | ] 47 | } -------------------------------------------------------------------------------- /.vscode/tasks.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "2.0.0", 3 | "tasks": [ 4 | { 5 | "label": "build", 6 | "command": "dotnet", 7 | "type": "process", 8 | "args": [ 9 | "build", 10 | "${workspaceFolder}/Workflow/Workflow.csproj", 11 | "/property:GenerateFullPaths=true", 12 | "/consoleloggerparameters:NoSummary" 13 | ], 14 | "problemMatcher": "$msCompile" 15 | }, 16 | { 17 | "label": "publish", 18 | "command": "dotnet", 19 | "type": "process", 20 | "args": [ 21 | "publish", 22 | 
"${workspaceFolder}/Workflow/Workflow.csproj", 23 | "/property:GenerateFullPaths=true", 24 | "/consoleloggerparameters:NoSummary" 25 | ], 26 | "problemMatcher": "$msCompile" 27 | }, 28 | { 29 | "label": "watch", 30 | "command": "dotnet", 31 | "type": "process", 32 | "args": [ 33 | "watch", 34 | "run", 35 | "--project", 36 | "${workspaceFolder}/Workflow/Workflow.csproj" 37 | ], 38 | "problemMatcher": "$msCompile" 39 | }, 40 | { 41 | "label": "bridge-to-kubernetes.resource", 42 | "type": "bridge-to-kubernetes.resource", 43 | "resource": "workflow-dapr", 44 | "resourceType": "service", 45 | "ports": [ 46 | 5111 47 | ], 48 | "targetCluster": "docker-desktop", 49 | "targetNamespace": "default", 50 | "useKubernetesServiceEnvironmentVariables": false 51 | }, 52 | { 53 | "label": "bridge-to-kubernetes.compound", 54 | "dependsOn": [ 55 | "bridge-to-kubernetes.resource", 56 | "build" 57 | ], 58 | "dependsOrder": "sequence" 59 | }, 60 | { 61 | "type": "docker-build", 62 | "label": "docker-build: debug", 63 | "dependsOn": [ 64 | "build" 65 | ], 66 | "dockerBuild": { 67 | "tag": "daprworkflowexamples:dev", 68 | "target": "base", 69 | "dockerfile": "${workspaceFolder}/Workflow/Dockerfile", 70 | "context": "${workspaceFolder}", 71 | "pull": true 72 | }, 73 | "netCore": { 74 | "appProject": "${workspaceFolder}/Workflow/Workflow.csproj" 75 | } 76 | }, 77 | { 78 | "type": "docker-build", 79 | "label": "docker-build: release", 80 | "dependsOn": [ 81 | "build" 82 | ], 83 | "dockerBuild": { 84 | "tag": "daprworkflowexamples:latest", 85 | "dockerfile": "${workspaceFolder}/Workflow/Dockerfile", 86 | "context": "${workspaceFolder}", 87 | "pull": true 88 | }, 89 | "netCore": { 90 | "appProject": "${workspaceFolder}/Workflow/Workflow.csproj" 91 | } 92 | }, 93 | { 94 | "type": "docker-run", 95 | "label": "docker-run: debug", 96 | "dependsOn": [ 97 | "docker-build: debug" 98 | ], 99 | "dockerRun": {}, 100 | "netCore": { 101 | "appProject": "${workspaceFolder}/Workflow/Workflow.csproj", 102 | "enableDebugging": true 103 | } 104 | }, 105 | { 106 | "type": "docker-run", 107 | "label": "docker-run: release", 108 | "dependsOn": [ 109 | "docker-build: release" 110 | ], 111 | "dockerRun": {}, 112 | "netCore": { 113 | "appProject": "${workspaceFolder}/Workflow/Workflow.csproj" 114 | } 115 | } 116 | ] 117 | } -------------------------------------------------------------------------------- /Client/Client.csproj: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | net6.0 5 | enable 6 | enable 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | -------------------------------------------------------------------------------- /Client/CloudEvent.cs: -------------------------------------------------------------------------------- 1 | using System.Text.Json.Serialization; 2 | using System.Net.Mime; 3 | 4 | namespace Workflow; 5 | 6 | public class CustomCloudEvent : Dapr.CloudEvent 7 | { 8 | public CustomCloudEvent(TData data) : base(data) 9 | { 10 | 11 | } 12 | 13 | [JsonPropertyName("id")] 14 | public string Id { get; init; } 15 | 16 | [JsonPropertyName("specversion")] 17 | public string Specversion { get; init; } 18 | 19 | [JsonPropertyName("my-custom-property")] 20 | public string MyCustomProperty { get ;init; } 21 | } -------------------------------------------------------------------------------- /Client/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM mcr.microsoft.com/dotnet/aspnet:6.0 AS base 2 | WORKDIR /app 3 | EXPOSE 5111 4 | 5 | 
ENV ASPNETCORE_URLS=http://+:5111 6 | 7 | # Creates a non-root user with an explicit UID and adds permission to access the /app folder 8 | # For more info, please refer to https://aka.ms/vscode-docker-dotnet-configure-containers 9 | RUN adduser -u 5678 --disabled-password --gecos "" appuser && chown -R appuser /app 10 | USER appuser 11 | 12 | FROM mcr.microsoft.com/dotnet/sdk:6.0 AS build 13 | WORKDIR /src 14 | COPY ["Client/Client.csproj", "Client/"] 15 | RUN dotnet restore "Client/Client.csproj" 16 | COPY . . 17 | WORKDIR "/src/Client" 18 | RUN dotnet build "Client.csproj" -c Release -o /app/build 19 | 20 | FROM build AS publish 21 | RUN dotnet publish "Client.csproj" -c Release -o /app/publish /p:UseAppHost=false 22 | 23 | FROM base AS final 24 | WORKDIR /app 25 | COPY --from=publish /app/publish . 26 | ENTRYPOINT ["dotnet", "Client.dll"] 27 | -------------------------------------------------------------------------------- /Client/Program.cs: -------------------------------------------------------------------------------- 1 | using Dapr; 2 | using Dapr.Client; 3 | using Workflow; 4 | 5 | var builder = WebApplication.CreateBuilder(args); 6 | 7 | builder.Services.AddDaprClient(); 8 | 9 | // Add services to the container. 10 | builder.Services.AddEndpointsApiExplorer(); 11 | builder.Services.AddSwaggerGen(); 12 | 13 | var app = builder.Build(); 14 | 15 | // Configure the HTTP request pipeline. 16 | if (app.Environment.IsDevelopment()) 17 | { 18 | app.UseSwagger(); 19 | app.UseSwaggerUI(); 20 | } 21 | 22 | app.MapPost("/health", async () => 23 | { 24 | 25 | app.Logger.LogInformation("Hello from Client!"); 26 | 27 | return "Hello from Client!!"; 28 | }); 29 | 30 | app.MapPost("/start/monitor-workflow", async (DaprClient daprClient, string runId, int? count, bool? async, int? sleep, string? abortHint) => 31 | { 32 | if (!count.HasValue || count.Value < 1) 33 | count = 1; 34 | 35 | if (!sleep.HasValue) 36 | sleep = 0; 37 | 38 | var results = new List<StartWorkflowResponse>(); 39 | 40 | var cts = new CancellationTokenSource(); 41 | 42 | var options = new ParallelOptions() { MaxDegreeOfParallelism = 50, CancellationToken = cts.Token }; 43 | 44 | await Parallel.ForEachAsync(Enumerable.Range(0, count.Value), options, async (index, token) => 45 | { 46 | var request = new StartWorkflowRequest 47 | { 48 | Id = $"{index}-{runId}", 49 | Sleep = sleep.Value, 50 | AbortHint = abortHint 51 | }; 52 | 53 | var metadata = new Dictionary<string, string> 54 | { 55 | { "cloudevent.id", request.Id }, 56 | { "cloudevent.type", "Continue As New"} , 57 | { "my-custom-property", "foo" }, 58 | { "partitionKey", Guid.NewGuid().ToString() } 59 | }; 60 | 61 | if (async.HasValue && async.Value == true) 62 | { 63 | await daprClient.PublishEventAsync("kafka-pubsub", "monitor-workflow", request, metadata, cts.Token); 64 | } 65 | else 66 | { 67 | var wrappedRequest = new CustomCloudEvent<StartWorkflowRequest>(request) 68 | { 69 | Id = request.Id, 70 | }; 71 | await daprClient.InvokeMethodAsync<CustomCloudEvent<StartWorkflowRequest>, StartWorkflowResponse>("workflow-a", "monitor-workflow", wrappedRequest, cts.Token); 72 | } 73 | app.Logger.LogInformation("start Id: {0}", request.Id); 74 | 75 | results.Add(new StartWorkflowResponse { Index = index, Id = request.Id }); 76 | }); 77 | return results; 78 | }).Produces<List<StartWorkflowResponse>>(); 79 | 80 |
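// Note the two dispatch paths above: with async=true the request is published to the kafka-pubsub
// topic and picked up by the Workflow App's topic subscription; otherwise the request is wrapped in
// a CustomCloudEvent and sent straight to the workflow-a app over Dapr service invocation.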
81 | app.MapPost("/start-raise-event-workflow", async (DaprClient daprClient, string runId, int? count, bool? failOnTimeout, int? sleep, string? abortHint) => 82 | { 83 | if (!count.HasValue || count.Value < 1) 84 | count = 1; 85 | 86 | if (!sleep.HasValue) 87 | sleep = 0; 88 | 89 | if (!failOnTimeout.HasValue) 90 | failOnTimeout = false; 91 | 92 | var results = new List<StartWorkflowResponse>(); 93 | 94 | var cts = new CancellationTokenSource(); 95 | var options = new ParallelOptions() { MaxDegreeOfParallelism = 50, CancellationToken = cts.Token }; 96 | await Parallel.ForEachAsync(Enumerable.Range(0, count.Value), options, async (index, token) => 97 | { 98 | var request = new StartWorkflowRequest 99 | { 100 | Id = $"{index}-{runId}", 101 | Sleep = sleep.Value, 102 | AbortHint = abortHint, 103 | FailOnTimeout = failOnTimeout.Value 104 | }; 105 | 106 | await daprClient.PublishEventAsync("kafka-pubsub", "start-raise-event-workflow", request, cts.Token); 107 | 108 | app.Logger.LogInformation("start-raise-event-workflow Id: {0}", request.Id); 109 | 110 | results.Add(new StartWorkflowResponse { Index = index, Id = request.Id }); 111 | }); 112 | return results; 113 | }).Produces<List<StartWorkflowResponse>>(); 114 | 115 | 116 | app.MapPost("/start/fanout-workflow", async (DaprClient daprClient, string runId, int? count, bool? async, int? sleep, string? abortHint) => 117 | { 118 | if (!count.HasValue || count.Value < 1) 119 | count = 1; 120 | 121 | if (!sleep.HasValue) 122 | sleep = 0; 123 | 124 | var results = new List<StartWorkflowResponse>(); 125 | 126 | var cts = new CancellationTokenSource(); 127 | 128 | var options = new ParallelOptions() { MaxDegreeOfParallelism = 50, CancellationToken = cts.Token }; 129 | 130 | await Parallel.ForEachAsync(Enumerable.Range(0, count.Value), options, async (index, token) => 131 | { 132 | var request = new StartWorkflowRequest 133 | { 134 | Id = $"{index}-{runId}", 135 | Sleep = sleep.Value, 136 | AbortHint = abortHint 137 | }; 138 | 139 | if (async.HasValue && async.Value == true) 140 | await daprClient.PublishEventAsync("kafka-pubsub", "fanout-workflow", request, cts.Token); 141 | else 142 | { 143 | var wrappedRequest = new CustomCloudEvent<StartWorkflowRequest>(request) 144 | { 145 | Id = request.Id, 146 | }; 147 | await daprClient.InvokeMethodAsync<CustomCloudEvent<StartWorkflowRequest>, StartWorkflowResponse>("workflow-a", "fanout-workflow", wrappedRequest, cts.Token); 148 | } 149 | app.Logger.LogInformation("start Id: {0}", request.Id); 150 | 151 | results.Add(new StartWorkflowResponse { Index = index, Id = request.Id }); 152 | }); 153 | return results; 154 | }).Produces<List<StartWorkflowResponse>>(); 155 | 156 | 157 | app.MapPost("/start/schedule-job", async (DaprClient daprClient, string runId, int? count, bool? async, int? sleep, string?
abortHint) => 158 | { 159 | if (!count.HasValue || count.Value < 1) 160 | count = 1; 161 | 162 | if (!sleep.HasValue) 163 | sleep = 0; 164 | 165 | var results = new List<StartWorkflowResponse>(); 166 | 167 | var cts = new CancellationTokenSource(); 168 | 169 | var options = new ParallelOptions() { MaxDegreeOfParallelism = 50, CancellationToken = cts.Token }; 170 | 171 | await Parallel.ForEachAsync(Enumerable.Range(0, count.Value), options, async (index, token) => 172 | { 173 | var request = new StartWorkflowRequest 174 | { 175 | Id = $"{index}-{runId}", 176 | Sleep = sleep.Value, 177 | AbortHint = abortHint 178 | }; 179 | 180 | if (async.HasValue && async.Value == true) 181 | await daprClient.PublishEventAsync("kafka-pubsub", "schedule-job", request, cts.Token); 182 | else 183 | { 184 | var wrappedRequest = new CustomCloudEvent<StartWorkflowRequest>(request) 185 | { 186 | Id = request.Id, 187 | }; 188 | await daprClient.InvokeMethodAsync<CustomCloudEvent<StartWorkflowRequest>, StartWorkflowResponse>("workflow-a", "fanout-workflow", wrappedRequest, cts.Token); 189 | } 190 | app.Logger.LogInformation("start Id: {0}", request.Id); 191 | 192 | results.Add(new StartWorkflowResponse { Index = index, Id = request.Id }); 193 | }); 194 | return results; 195 | }).Produces<List<StartWorkflowResponse>>(); 196 | 197 | 198 | 199 | app.Run(); 200 | 201 | public class StartWorkflowRequest 202 | { 203 | public string Id { get; set; } 204 | public bool FailOnTimeout { get; set; } 205 | public int Sleep { get; set; } 206 | public string AbortHint { get; set; } 207 | } 208 | 209 | public class StartWorkflowResponse 210 | { 211 | public int Index { get; set; } 212 | public string Id { get; set; } 213 | } 214 | 215 | public class RaiseEvent<T> 216 | { 217 | public string InstanceId { get; set; } 218 | public string EventName { get; set; } 219 | public T EventData { get; set; } 220 | } 221 | -------------------------------------------------------------------------------- /Client/Properties/launchSettings.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://json.schemastore.org/launchsettings.json", 3 | "iisSettings": { 4 | "windowsAuthentication": false, 5 | "anonymousAuthentication": true, 6 | "iisExpress": { 7 | "applicationUrl": "http://localhost:50807", 8 | "sslPort": 44394 9 | } 10 | }, 11 | "profiles": { 12 | "WorkflowApi": { 13 | "commandName": "Project", 14 | "dotnetRunMessages": true, 15 | "launchBrowser": true, 16 | "launchUrl": "swagger", 17 | "applicationUrl": "https://localhost:7223;http://localhost:5111", 18 | "environmentVariables": { 19 | "ASPNETCORE_ENVIRONMENT": "Development" 20 | } 21 | }, 22 | "IIS Express": { 23 | "commandName": "IISExpress", 24 | "launchBrowser": true, 25 | "launchUrl": "swagger", 26 | "environmentVariables": { 27 | "ASPNETCORE_ENVIRONMENT": "Development" 28 | } 29 | } 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /Client/appsettings.Development.json: -------------------------------------------------------------------------------- 1 | { 2 | "Logging": { 3 | "LogLevel": { 4 | "Default": "Information", 5 | "Microsoft.AspNetCore": "Warning", 6 | "System.Net.Http.HttpClient": "Warning" 7 | } 8 | } 9 | } -------------------------------------------------------------------------------- /Client/appsettings.json: -------------------------------------------------------------------------------- 1 | { 2 | "Logging": { 3 | "LogLevel": { 4 | "Default": "Information", 5 | "Microsoft.AspNetCore": "Warning", 6 | "System.Net.Http.HttpClient": "Warning" 7 | } 8 | }, 9 | "AllowedHosts": "*"
10 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Dapr Workflow Testing 2 | 3 | > [!IMPORTANT] 4 | > This repo is **purely** for load testing the Dapr Workflows runtime. 5 | > It is **not** an example of what _good_ looks like! Most of the code is junk, written to meet the end goal of testing the Workflows runtime. 6 | 7 | In the past, this has flushed out many concurrency issues in the underlying durabletask-go library, which have subsequently been addressed in newer versions of the Dapr runtime. 8 | 9 | The Dapr runtime version for all compose files is specified in the `.env` file. 10 | 11 | --- 12 | 13 | ### Run with 5 instances of the Workflow App, and 3 instances of the scheduler service 14 | 15 | 1. `docker compose -f compose-5-3.yml build` 16 | 2. `docker compose -f compose-5-3.yml up` 17 | 18 | 19 | ### Run a simple monitor pattern workflow 20 | 21 | This will create many workflow instances, randomly distributed across the Workflow App instances. 22 | 23 | Run the workflows by making a POST request to: 24 | 25 | ```http://localhost:5112/start/monitor-workflow?runId={runId}&count=1000&async=true``` 26 | 27 | - Where `{runId}` is a unique value, e.g. a UUID/GUID. 28 | - Increase/decrease the number of workflows created by changing the `count` parameter. 29 | 30 | ### Run a simple fan-out & fan-in pattern workflow 31 | 32 | This will create many workflow instances, randomly distributed across the Workflow App instances. 33 | 34 | Run the workflows by making a POST request to: 35 | 36 | ```http://localhost:5112/start/fanout-workflow?runId={runId}&count=1000&async=true``` 37 | 38 | - Where `{runId}` is a unique value, e.g. a UUID/GUID. 39 | - Increase/decrease the number of workflows created by changing the `count` parameter.
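Progress for a run can be checked via the Workflow App's `/status-batch` endpoint, which aggregates the runtime status of every instance created for that run (a sketch, assuming the `5113:5111` host port mapping that the compose files give the first Workflow App instance):

```http://localhost:5113/status-batch?runId={runId}&count=1000```

- Add `show_running=true` to also dump the state of any instances that are still running.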
40 | -------------------------------------------------------------------------------- /Workflow/Activities/AlwaysFailActivity.cs: -------------------------------------------------------------------------------- 1 | using Dapr.Workflow; 2 | using Microsoft.Extensions.Logging; 3 | 4 | namespace WorkflowConsoleApp.Activities 5 | { 6 | public class AlwaysFailActivity : WorkflowActivity<Notification, object?> 7 | { 8 | readonly ILogger logger; 9 | 10 | public AlwaysFailActivity(ILoggerFactory loggerFactory) 11 | { 12 | this.logger = loggerFactory.CreateLogger<AlwaysFailActivity>(); 13 | } 14 | 15 | public override Task<object?> RunAsync(WorkflowActivityContext context, Notification notification) 16 | { 17 | throw new Exception($"throwing random failure : {notification.Message}"); 18 | 19 | return Task.FromResult<object?>(null); 20 | } 21 | } 22 | } -------------------------------------------------------------------------------- /Workflow/Activities/FastActivity.cs: -------------------------------------------------------------------------------- 1 | using Dapr.Workflow; 2 | 3 | namespace WorkflowConsoleApp.Activities 4 | { 5 | public record Notification(string Message, Guid[]? Data = default); 6 | 7 | public class FastActivity : WorkflowActivity<Notification, Guid[]> 8 | { 9 | readonly ILogger logger; 10 | 11 | public FastActivity(ILoggerFactory loggerFactory) 12 | { 13 | this.logger = loggerFactory.CreateLogger<FastActivity>(); 14 | } 15 | 16 | public override async Task<Guid[]> RunAsync(WorkflowActivityContext context, Notification notification) 17 | { 18 | this.logger.LogInformation(notification.Message); 19 | 20 | return Enumerable.Range(0, 1).Select(_ => Guid.NewGuid()).ToArray(); 21 | } 22 | } 23 | } -------------------------------------------------------------------------------- /Workflow/Activities/HelloActivity.cs: -------------------------------------------------------------------------------- 1 | using Dapr.Workflow; 2 | 3 | namespace WorkflowConsoleApp.Activities 4 | { 5 | public class HelloActivity : WorkflowActivity<string, string> 6 | { 7 | readonly ILogger logger; 8 | 9 | public HelloActivity(ILoggerFactory loggerFactory) 10 | { 11 | this.logger = loggerFactory.CreateLogger<HelloActivity>(); 12 | } 13 | 14 | public override Task<string> RunAsync(WorkflowActivityContext context, string input) 15 | { 16 | this.logger.LogInformation(input); 17 | 18 | return Task.FromResult($"hello, {input}"); 19 | } 20 | } 21 | } -------------------------------------------------------------------------------- /Workflow/Activities/NoOpActivity.cs: -------------------------------------------------------------------------------- 1 | using Dapr.Workflow; 2 | 3 | namespace WorkflowConsoleApp.Activities 4 | { 5 | public class NoOpActivity : WorkflowActivity<Notification, bool> 6 | { 7 | readonly ILogger logger; 8 | 9 | public NoOpActivity(ILoggerFactory loggerFactory) 10 | { 11 | this.logger = loggerFactory.CreateLogger<NoOpActivity>(); 12 | } 13 | 14 | public override async Task<bool> RunAsync(WorkflowActivityContext context, Notification notification) 15 | { 16 | this.logger.LogInformation(notification.Message); 17 | 18 | return true; 19 | } 20 | } 21 | } -------------------------------------------------------------------------------- /Workflow/Activities/NotifyCompensateActivity.cs: -------------------------------------------------------------------------------- 1 | using Dapr.Workflow; 2 | using Microsoft.Extensions.Logging; 3 | 4 | namespace WorkflowConsoleApp.Activities 5 | { 6 | public class NotifyCompensateActivity : WorkflowActivity<Notification, object?> 7 | { 8 | readonly ILogger logger; 9 | 10 | public NotifyCompensateActivity(ILoggerFactory loggerFactory) 11 | { 12 | this.logger = loggerFactory.CreateLogger<NotifyCompensateActivity>(); 13 | } 14 | 15 | public override Task<object?> RunAsync(WorkflowActivityContext context, Notification notification) 16 | { 17 | this.logger.LogInformation($"Compensation applied: {notification.Message}"); 18 | 19 | return Task.FromResult<object?>(null); 20 | } 21 | } 22 | } -------------------------------------------------------------------------------- /Workflow/Activities/RaiseEventActivity.cs: -------------------------------------------------------------------------------- 1 | using Dapr.Workflow; 2 | 3 | namespace WorkflowConsoleApp.Activities 4 | { 5 | public class RaiseProceedEventActivity : WorkflowActivity<Tuple<string, string>, bool> 6 | { 7 | readonly ILogger logger; 8 | readonly DaprWorkflowClient _daprWorkflowClient; 9 | 10 | public RaiseProceedEventActivity(ILoggerFactory loggerFactory, DaprWorkflowClient daprWorkflowClient) 11 | { 12 | this.logger = loggerFactory.CreateLogger<RaiseProceedEventActivity>(); 13 | this._daprWorkflowClient = daprWorkflowClient; 14 | } 15 | 16 | public override async Task<bool> RunAsync(WorkflowActivityContext context, Tuple<string, string> @event) 17 | { 18 | this.logger.LogInformation("raising event : " + @event.Item1 + " " + @event.Item2);
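// Raising the event via the injected DaprWorkflowClient, rather than context.SendEvent from the
// calling workflow, is the workaround used throughout this repo while
// https://github.com/dapr/dapr/issues/8243 is open; see the commented-out SendEvent calls in
// ConstrainedWorkflow and ThrottleWorkflow.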
19 | 20 | await _daprWorkflowClient.RaiseEventAsync(@event.Item1, @event.Item2, null); 21 | 22 | return true; 23 | } 24 | } 25 | 26 | public class RaiseWaitEventActivity : WorkflowActivity<Tuple<string, object>, bool> 27 | { 28 | readonly ILogger logger; 29 | readonly DaprWorkflowClient _daprWorkflowClient; 30 | 31 | public RaiseWaitEventActivity(ILoggerFactory loggerFactory, DaprWorkflowClient daprWorkflowClient) 32 | { 33 | this.logger = loggerFactory.CreateLogger<RaiseWaitEventActivity>(); 34 | this._daprWorkflowClient = daprWorkflowClient; 35 | } 36 | 37 | public override async Task<bool> RunAsync(WorkflowActivityContext context, Tuple<string, object> @event) 38 | { 39 | this.logger.LogInformation("raising wait event : " + @event.Item1); 40 | 41 | await _daprWorkflowClient.RaiseEventAsync(@event.Item1, "wait", @event.Item2); 42 | 43 | return true; 44 | } 45 | } 46 | 47 | public class RaiseSignalEventActivity : WorkflowActivity<Tuple<string, object>, bool> 48 | { 49 | readonly ILogger logger; 50 | readonly DaprWorkflowClient _daprWorkflowClient; 51 | 52 | public RaiseSignalEventActivity(ILoggerFactory loggerFactory, DaprWorkflowClient daprWorkflowClient) 53 | { 54 | this.logger = loggerFactory.CreateLogger<RaiseSignalEventActivity>(); 55 | this._daprWorkflowClient = daprWorkflowClient; 56 | } 57 | 58 | public override async Task<bool> RunAsync(WorkflowActivityContext context, Tuple<string, object> @event) 59 | { 60 | this.logger.LogInformation("raising signal event : " + @event.Item1); 61 | 62 | await _daprWorkflowClient.RaiseEventAsync(@event.Item1, "signal", @event.Item2); 63 | 64 | return true; 65 | } 66 | } 67 | } -------------------------------------------------------------------------------- /Workflow/Activities/SlowActivity.cs: -------------------------------------------------------------------------------- 1 | using Dapr.Workflow; 2 | 3 | namespace WorkflowConsoleApp.Activities 4 | { 5 | public class SlowActivity : WorkflowActivity<Notification, bool> 6 | { 7 | readonly ILogger logger; 8 | 9 | public SlowActivity(ILoggerFactory loggerFactory) 10 | { 11 | this.logger = loggerFactory.CreateLogger<SlowActivity>(); 12 | } 13 | 14 | public override async Task<bool> RunAsync(WorkflowActivityContext context, Notification notification) 15 | { 16 | var message = notification.Message + $" activated={DateTime.UtcNow.ToString("HH:mm:ss")}"; 17 | 18 | await Task.Delay(3000); 19 | 20 | this.logger.LogInformation(message); 21 | 22 | return true; 23 | } 24 | } 25 | } -------------------------------------------------------------------------------- /Workflow/Activities/VerySlowActivity.cs: -------------------------------------------------------------------------------- 1 | using Dapr.Workflow; 2 | 3 | namespace WorkflowConsoleApp.Activities 4 | { 5 | public class VerySlowActivity : WorkflowActivity<Notification, bool> 6 | { 7 | readonly ILogger logger; 8 | 9 | public VerySlowActivity(ILoggerFactory loggerFactory) 10 | { 11 | this.logger = loggerFactory.CreateLogger<VerySlowActivity>(); 12 | } 13 | 14 | public override async Task<bool> RunAsync(WorkflowActivityContext context, Notification notification) 15 | { 16 | var message = notification.Message + $" activated={DateTime.UtcNow.ToString("HH:mm:ss")}"; 17 | 18 | await Task.Delay(10000); 19 | 20 | this.logger.LogInformation(message); 21 | 22 | return true; 23 | } 24 | } 25 | } -------------------------------------------------------------------------------- /Workflow/CloudEvent.cs: -------------------------------------------------------------------------------- 1 | using System.Text.Json.Serialization; 2 | using System.Net.Mime; 3 | 4 | namespace workflow; 5 | 6 | public class CustomCloudEvent<TData> : Dapr.CloudEvent<TData> 7 | { 8 | public CustomCloudEvent(TData data) :
base(data) 9 | { 10 | 11 | } 12 | 13 | [JsonPropertyName("id")] 14 | public string Id { get; init; } 15 | 16 | [JsonPropertyName("specversion")] 17 | public string Specversion { get; init; } 18 | 19 | [JsonPropertyName("my-custom-property")] 20 | public string MyCustomProperty { get; init; } 21 | } -------------------------------------------------------------------------------- /Workflow/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM mcr.microsoft.com/dotnet/aspnet:6.0 AS base 2 | WORKDIR /app 3 | EXPOSE 5111 4 | 5 | ENV ASPNETCORE_URLS=http://+:5111 6 | 7 | # Creates a non-root user with an explicit UID and adds permission to access the /app folder 8 | # For more info, please refer to https://aka.ms/vscode-docker-dotnet-configure-containers 9 | RUN adduser -u 5678 --disabled-password --gecos "" appuser && chown -R appuser /app 10 | USER appuser 11 | 12 | FROM mcr.microsoft.com/dotnet/sdk:6.0 AS build 13 | WORKDIR /src 14 | COPY ["Workflow/Workflow.csproj", "Workflow/"] 15 | RUN dotnet restore "Workflow/Workflow.csproj" 16 | COPY . . 17 | WORKDIR "/src/Workflow" 18 | RUN dotnet build "Workflow.csproj" -c Release -o /app/build 19 | 20 | FROM build AS publish 21 | RUN dotnet publish "Workflow.csproj" -c Release -o /app/publish /p:UseAppHost=false 22 | 23 | FROM base AS final 24 | WORKDIR /app 25 | COPY --from=publish /app/publish . 26 | ENTRYPOINT ["dotnet", "Workflow.dll"] 27 | -------------------------------------------------------------------------------- /Workflow/Program.cs: -------------------------------------------------------------------------------- 1 | using Dapr.Workflow; 2 | using Dapr; 3 | using Dapr.Client; 4 | using WorkflowConsoleApp.Activities; 5 | using WorkflowConsoleApp.Workflows; 6 | using workflow; 7 | using System.Text.Json; 8 | using WorkflowConsoleApp; 9 | using System.Text; 10 | 11 | 12 | var builder = WebApplication.CreateBuilder(args); 13 | bool registerWorkflows = Convert.ToBoolean(Environment.GetEnvironmentVariable("REGISTER_WORKFLOWS")); 14 | bool registerActivities = Convert.ToBoolean(Environment.GetEnvironmentVariable("REGISTER_ACTIVITIES")); 15 | 16 | builder.Services.AddHttpClient(); 17 | builder.Services.AddDaprClient(); 18 | builder.Services.AddDaprWorkflow(options => 19 | { 20 | if (registerWorkflows) 21 | { 22 | options.RegisterWorkflow<MonitorWorkflow>(); 23 | options.RegisterWorkflow<FanOutWorkflow>(); 24 | options.RegisterWorkflow<ExternalSystemWorkflow>(); 25 | options.RegisterWorkflow<ConstrainedWorkflow>(); 26 | options.RegisterWorkflow<ThrottleWorkflow>(); 27 | } 28 | 29 | if (registerActivities) 30 | { 31 | options.RegisterActivity(); 32 | options.RegisterActivity(); 33 | options.RegisterActivity(); 34 | options.RegisterActivity(); 35 | options.RegisterActivity(); 36 | options.RegisterActivity(); 37 | options.RegisterActivity(); 38 | options.RegisterActivity(); 39 | options.RegisterActivity(); 40 | } 41 | }); 42 | 43 | // Add services to the container.
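// REGISTER_WORKFLOWS / REGISTER_ACTIVITIES come from the environment (the compose files in this repo
// set both to true), which allows hosts to be started that register only workflows or only activities
// when experimenting with split worker topologies.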
44 | builder.Services.AddControllers(); 45 | builder.Services.AddEndpointsApiExplorer(); 46 | builder.Services.AddSwaggerGen(); 47 | 48 | var app = builder.Build(); 49 | 50 | //app.UseCloudEvents(); 51 | app.MapSubscribeHandler(); 52 | 53 | app.Logger.LogInformation("REGISTER_WORKFLOWS: " + registerWorkflows); 54 | app.Logger.LogInformation("REGISTER_ACTIVITIES: " + registerActivities); 55 | app.Logger.LogInformation("DAPR_HTTP_PORT: " + Environment.GetEnvironmentVariable("DAPR_HTTP_PORT")); 56 | app.Logger.LogInformation("DAPR_GRPC_PORT: " + Environment.GetEnvironmentVariable("DAPR_GRPC_PORT")); 57 | // Configure the HTTP request pipeline. 58 | if (app.Environment.IsDevelopment()) 59 | { 60 | app.UseSwagger(); 61 | app.UseSwaggerUI(); 62 | } 63 | 64 | 65 | app.MapPost("/monitor-workflow", [Topic("kafka-pubsub", "monitor-workflow")] async (DaprClient daprClient, DaprWorkflowClient workflowClient, CustomCloudEvent<StartWorkflowRequest>? ce) => 66 | { 67 | while (!await daprClient.CheckHealthAsync()) 68 | { 69 | Thread.Sleep(TimeSpan.FromSeconds(5)); 70 | app.Logger.LogInformation("waiting..."); 71 | } 72 | 73 | if (ce.Data.Sleep == 666) 74 | { 75 | throw new Exception("666"); 76 | } 77 | 78 | if (ce.Data.Sleep > 0) 79 | { 80 | app.Logger.LogInformation("sleeping for {0} ...", ce.Data.Sleep); 81 | await Task.Delay(TimeSpan.FromSeconds(ce.Data.Sleep)); 82 | app.Logger.LogInformation("Awake!"); 83 | } 84 | 85 | if (!string.IsNullOrEmpty(ce.Data.AbortHint)) 86 | { 87 | return new StartWorkflowResponse() 88 | { 89 | status = ce.Data.AbortHint 90 | }; 91 | } 92 | 93 | string randomData = Guid.NewGuid().ToString(); 94 | string workflowId = ce.Data?.Id ?? $"{Guid.NewGuid().ToString()[..8]}"; 95 | var orderInfo = new WorkflowPayload(randomData.ToLowerInvariant(), 10, Enumerable.Range(0, 1).Select(_ => Guid.NewGuid()).ToArray()); 96 | 97 | string result = string.Empty; 98 | 99 | try 100 | { 101 | result = await workflowClient.ScheduleNewWorkflowAsync( 102 | name: nameof(MonitorWorkflow), 103 | instanceId: workflowId, 104 | input: orderInfo); 105 | } 106 | catch (Grpc.Core.RpcException ex) when (ex.StatusCode == Grpc.Core.StatusCode.Unknown && ex.Status.Detail.StartsWith("an active workflow with ID")) 107 | { 108 | app.Logger.LogError(ex, "Workflow already running : {workflowId}", workflowId); 109 | return new StartWorkflowResponse() 110 | { 111 | Id = workflowId + " error" 112 | }; 113 | } 114 | 115 | return new StartWorkflowResponse() 116 | { 117 | Id = result 118 | }; 119 | }).Produces<StartWorkflowResponse>(); 120 | 121 | app.MapPost("/start-raise-event-workflow", [Topic("kafka-pubsub", "start-raise-event-workflow")] async (DaprClient daprClient, DaprWorkflowClient workflowClient, CustomCloudEvent<StartWorkflowRequest>? ce) => 122 | { 123 | while (!await daprClient.CheckHealthAsync()) 124 | { 125 | Thread.Sleep(TimeSpan.FromSeconds(5)); 126 | app.Logger.LogInformation("waiting..."); 127 | } 128 | 129 | if (ce.Data.Sleep == 666) 130 | { 131 | throw new Exception("666"); 132 | } 133 | 134 | if (ce.Data.Sleep > 0) 135 | { 136 | app.Logger.LogInformation("sleeping for {0} ...", ce.Data.Sleep); 137 | await Task.Delay(TimeSpan.FromSeconds(ce.Data.Sleep)); 138 | app.Logger.LogInformation("Awake!"); 139 | } 140 | 141 | if (!string.IsNullOrEmpty(ce.Data.AbortHint)) 142 | { 143 | return new StartWorkflowResponse() 144 | { 145 | status = ce.Data.AbortHint 146 | }; 147 | } 148 | 149 | string randomData = Guid.NewGuid().ToString(); 150 | string workflowId = ce.Data?.Id ??
$"{Guid.NewGuid().ToString()[..8]}"; 151 | var orderInfo = new ExternalSystemWorkflowPayload(ce.Data?.FailOnTimeout ?? false); 152 | 153 | try 154 | { 155 | await workflowClient.ScheduleNewWorkflowAsync(nameof(ExternalSystemWorkflow), workflowId, orderInfo); 156 | 157 | var cts = new CancellationTokenSource(); 158 | var options = new ParallelOptions() { MaxDegreeOfParallelism = 50, CancellationToken = cts.Token }; 159 | await Parallel.ForEachAsync(Enumerable.Range(0, 1000), options, async (index, token) => 160 | { 161 | await workflowClient.RaiseEventAsync(workflowId, "event-name", $"{index}-{Guid.NewGuid()}"); 162 | }); 163 | } 164 | catch (Grpc.Core.RpcException ex) when (ex.StatusCode == Grpc.Core.StatusCode.Unknown && ex.Status.Detail.StartsWith("an active workflow with ID")) 165 | { 166 | app.Logger.LogError(ex, "Workflow already running : {workflowId}", workflowId); 167 | return new StartWorkflowResponse() 168 | { 169 | Id = workflowId + " error" 170 | }; 171 | } 172 | 173 | return new StartWorkflowResponse() 174 | { 175 | Id = workflowId 176 | }; 177 | }).Produces(); 178 | 179 | 180 | app.MapGet("/status-batch", async (DaprClient daprClient, DaprWorkflowClient workflowClient, string runId, int? count, bool? show_running) => 181 | { 182 | while (!await daprClient.CheckHealthAsync()) 183 | { 184 | Thread.Sleep(TimeSpan.FromSeconds(5)); 185 | app.Logger.LogInformation("waiting..."); 186 | } 187 | 188 | var failed = 0; 189 | var complete = 0; 190 | var running = 0; 191 | var pending = 0; 192 | var terminated = 0; 193 | var suspended = 0; 194 | var unknown = 0; 195 | Dictionary Running = new Dictionary(); 196 | 197 | foreach (var i in Enumerable.Range(0, count.Value)) 198 | { 199 | var instanceId = $"{i}-{runId}"; 200 | var state = await workflowClient.GetWorkflowStateAsync(instanceId); 201 | 202 | if (state.RuntimeStatus == WorkflowRuntimeStatus.Completed) 203 | complete += 1; 204 | else if (state.RuntimeStatus == WorkflowRuntimeStatus.Running) 205 | running += 1; 206 | else if (state.RuntimeStatus == WorkflowRuntimeStatus.Failed) 207 | failed += 1; 208 | else if (state.RuntimeStatus == WorkflowRuntimeStatus.Pending) 209 | pending += 1; 210 | else if (state.RuntimeStatus == WorkflowRuntimeStatus.Terminated) 211 | terminated += 1; 212 | else if (state.RuntimeStatus == WorkflowRuntimeStatus.Suspended) 213 | suspended += 1; 214 | else if (state.RuntimeStatus == WorkflowRuntimeStatus.Unknown) 215 | unknown += 1; 216 | 217 | if (show_running == true) 218 | if (state.RuntimeStatus == WorkflowRuntimeStatus.Running) 219 | Running.Add(instanceId, state); 220 | } 221 | 222 | var responseSb = new StringBuilder(); 223 | responseSb.AppendLine($"Completed : {complete}, Failed : {failed}, Running : {running}, Pending : {pending}, Terminated : {terminated}, Suspended : {suspended}, Unknown : {unknown} "); 224 | 225 | if (show_running == true) 226 | { 227 | foreach (var instance in Running) 228 | { 229 | responseSb.AppendLine(instance.Key); 230 | responseSb.AppendLine(JsonSerializer.Serialize(instance.Value)); 231 | } 232 | } 233 | 234 | return responseSb.ToString(); 235 | 236 | }).Produces(); 237 | 238 | 239 | app.MapPost("/fanout-workflow", [Topic("kafka-pubsub", "fanout-workflow")] async (DaprClient daprClient, DaprWorkflowClient workflowClient, CustomCloudEvent? 
ce) => 240 | { 241 | while (!await daprClient.CheckHealthAsync()) 242 | { 243 | Thread.Sleep(TimeSpan.FromSeconds(5)); 244 | app.Logger.LogInformation("waiting..."); 245 | } 246 | 247 | if (ce.Data.Sleep == 666) 248 | { 249 | throw new Exception("666"); 250 | } 251 | 252 | if (ce.Data.Sleep > 0) 253 | { 254 | app.Logger.LogInformation("sleeping for {0} ...", ce.Data.Sleep); 255 | await Task.Delay(TimeSpan.FromSeconds(ce.Data.Sleep)); 256 | app.Logger.LogInformation("Awake!"); 257 | } 258 | 259 | if (!string.IsNullOrEmpty(ce.Data.AbortHint)) 260 | { 261 | return new StartWorkflowResponse() 262 | { 263 | status = ce.Data.AbortHint 264 | }; 265 | } 266 | 267 | string randomData = Guid.NewGuid().ToString(); 268 | string workflowId = ce.Data?.Id ?? $"{Guid.NewGuid().ToString()[..8]}"; 269 | var orderInfo = new WorkflowPayload(randomData.ToLowerInvariant(), 10); 270 | 271 | string result = string.Empty; 272 | try 273 | { 274 | result = await workflowClient.ScheduleNewWorkflowAsync( 275 | name: nameof(FanOutWorkflow), 276 | instanceId: workflowId, 277 | input: orderInfo); 278 | } 279 | catch (Grpc.Core.RpcException ex) when (ex.StatusCode == Grpc.Core.StatusCode.Unknown && ex.Status.Detail.StartsWith("an active workflow with ID")) 280 | { 281 | app.Logger.LogError(ex, "Workflow already running : {workflowId}", workflowId); 282 | return new StartWorkflowResponse() 283 | { 284 | Id = workflowId + " error" 285 | }; 286 | } 287 | 288 | return new StartWorkflowResponse() 289 | { 290 | Id = result 291 | }; 292 | }).Produces<StartWorkflowResponse>(); 293 | 294 | 295 | app.Run(); 296 | 297 | public record WorkflowPayload(string RandomData, int Iterations = 1, Guid[]? Data = default); 298 | 299 | public record ExternalSystemWorkflowPayload(bool failOnTimeout = false); 300 | 301 | public class HelloWorld 302 | { 303 | public DateTime scheduled { get; set; } 304 | } 305 | 306 | public class StartWorkflowRequest 307 | { 308 | public string Id { get; set; } 309 | public bool FailOnTimeout { get; set; } 310 | public int Sleep { get; set; } 311 | public string AbortHint { get; set; } 312 | } 313 | 314 | public class StartWorkflowResponse 315 | { 316 | public string Id { get; set; } 317 | public string status { get; set; } 318 | } 319 | 320 | public class RaiseEvent<T> 321 | { 322 | public string InstanceId { get; set; } 323 | public string EventName { get; set; } 324 | public T EventData { get; set; } 325 | } -------------------------------------------------------------------------------- /Workflow/Properties/launchSettings.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://json.schemastore.org/launchsettings.json", 3 | "iisSettings": { 4 | "windowsAuthentication": false, 5 | "anonymousAuthentication": true, 6 | "iisExpress": { 7 | "applicationUrl": "http://localhost:50807", 8 | "sslPort": 44394 9 | } 10 | }, 11 | "profiles": { 12 | "WorkflowApi": { 13 | "commandName": "Project", 14 | "dotnetRunMessages": true, 15 | "launchBrowser": true, 16 | "launchUrl": "swagger", 17 | "applicationUrl": "https://localhost:7223;http://localhost:5111", 18 | "environmentVariables": { 19 | "ASPNETCORE_ENVIRONMENT": "Development" 20 | } 21 | }, 22 | "IIS Express": { 23 | "commandName": "IISExpress", 24 | "launchBrowser": true, 25 | "launchUrl": "swagger", 26 | "environmentVariables": { 27 | "ASPNETCORE_ENVIRONMENT": "Development" 28 | } 29 | } 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /Workflow/Workflow.csproj:
-------------------------------------------------------------------------------- 1 | <Project Sdk="Microsoft.NET.Sdk.Web"> 2 | 3 | <PropertyGroup> 4 | <TargetFramework>net6.0</TargetFramework> 5 | <Nullable>enable</Nullable> 6 | <ImplicitUsings>enable</ImplicitUsings> 7 | </PropertyGroup> 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | </Project> -------------------------------------------------------------------------------- /Workflow/Workflows/ConstrainedWorkflow.cs: -------------------------------------------------------------------------------- 1 | using Dapr.Workflow; 2 | using WorkflowConsoleApp.Activities; 3 | 4 | namespace WorkflowConsoleApp.Workflows 5 | { 6 | public class ConstrainedWorkflow : Workflow<bool, string> 7 | { 8 | public override async Task<string> RunAsync(WorkflowContext context, bool state) 9 | { 10 | context.SetCustomStatus("STARTED"); 11 | 12 | // 1. let's tell the throttler that we want to be told when it's our turn to proceed 13 | var waitEvent = new WaitEvent() { InstanceId = context.InstanceId, ProceedEventName = "proceed" }; 14 | // https://github.com/dapr/dapr/issues/8243 15 | // context.SendEvent("throttle", "wait", waitEvent ); 16 | await context.CallActivityAsync(nameof(RaiseWaitEventActivity), new Tuple<string, object>("throttle", waitEvent)); 17 | 18 | // 2. now we wait... 19 | var startTime = context.CurrentUtcDateTime.ToUniversalTime(); 20 | context.SetCustomStatus("WAITING"); 21 | await context.WaitForExternalEventAsync<object>("proceed"); 22 | var endTime = context.CurrentUtcDateTime.ToUniversalTime(); 23 | 24 | // 3. Ok, we can proceed with the constrained / slow activity 25 | context.SetCustomStatus("PROCEED"); 26 | await context.CallActivityAsync( 27 | nameof(VerySlowActivity), 28 | new Notification($"{context.InstanceId} - {nameof(VerySlowActivity)} - scheduled={DateTime.UtcNow:HH:mm:ss}") 29 | ); 30 | context.SetCustomStatus("DONE"); 31 | 32 | // 4. Tell the throttler that we are done (allowing the throttler to allow other work to proceed) 33 | var signalEvent = new SignalEvent() { InstanceId = context.InstanceId }; 34 | // https://github.com/dapr/dapr/issues/8243 35 | // context.SendEvent("throttle", "signal", signalEvent); 36 | await context.CallActivityAsync(nameof(RaiseSignalEventActivity), new Tuple<string, object>("throttle", signalEvent)); 37 | context.SetCustomStatus("SIGNALLED"); 38 | 39 | // 5. Echo back how long this workflow waited due to throttling
40 | return $"workflow throttled for {(endTime - startTime).TotalMilliseconds}ms"; 41 | } 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /Workflow/Workflows/ExternalSystemWorkflow.cs: -------------------------------------------------------------------------------- 1 | using System.Text; 2 | using Dapr.Workflow; 3 | using Newtonsoft.Json; 4 | using WorkflowConsoleApp.Activities; 5 | 6 | namespace WorkflowConsoleApp.Workflows 7 | { 8 | public class ExternalSystemWorkflow : Workflow<ExternalSystemWorkflowPayload, Dictionary<string, int>> 9 | { 10 | public override async Task<Dictionary<string, int>> RunAsync(WorkflowContext context, ExternalSystemWorkflowPayload payload) 11 | { 12 | var cts = new CancellationTokenSource(); 13 | 14 | Task timeout = context.CreateTimer(TimeSpan.FromSeconds(30), cts.Token); 15 | 16 | List<Task<string>> results = new List<Task<string>>(); 17 | 18 | for(int i = 0; i < 1000; i++) 19 | { 20 | results.Add(context.WaitForExternalEventAsync<string>("wait-event")); 21 | } 22 | 23 | // WhenAll == AND(x, y, z, a, b) 24 | var externalEvents = Task.WhenAll(results); 25 | 26 | // WhenAny == XOR(x, y) 27 | var winner = await Task.WhenAny(externalEvents, timeout); 28 | 29 | Dictionary<string, int> receivedEvents = new Dictionary<string, int>(); 30 | 31 | if (winner == externalEvents) 32 | { 33 | cts.Cancel(); 34 | foreach (var result in results) 35 | { 36 | if (!receivedEvents.TryAdd(result.Result, 1)) 37 | { 38 | var count = receivedEvents[result.Result]; 39 | receivedEvents[result.Result] = count + 1; 40 | } 41 | } 42 | 43 | return receivedEvents; 44 | } 45 | else if (winner == timeout) 46 | { 47 | var sb = new StringBuilder(); 48 | sb.Append("Events status : "); 49 | foreach (var result in results) 50 | { 51 | sb.AppendLine(result.Status.ToString()); 52 | } 53 | 54 | if (payload.failOnTimeout) 55 | throw new Exception($"Workflow Timed out after 30 seconds : {receivedEvents}"); 56 | else 57 | { 58 | receivedEvents.Add("FAILED ON TIMEOUT", 0); 59 | return receivedEvents; 60 | } 61 | } 62 | 63 | return null; 64 | } 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /Workflow/Workflows/FanOutWorkflow.cs: -------------------------------------------------------------------------------- 1 | using Dapr.Workflow; 2 | using WorkflowConsoleApp.Activities; 3 | 4 | namespace WorkflowConsoleApp.Workflows 5 | { 6 | public class FanOutWorkflow : Workflow<WorkflowPayload, bool> 7 | { 8 | public override async Task<bool> RunAsync(WorkflowContext context, WorkflowPayload payload) 9 | { 10 | string workflowId = context.InstanceId; 11 | 12 | var fanOut = new List<Task<bool>>(); 13 | 14 | for(int i = 0; i < payload.Iterations; i++) 15 | { 16 | fanOut.Add(context.CallActivityAsync<bool>(nameof(SlowActivity), new Notification($"{workflowId} - {nameof(SlowActivity)} #{i} - scheduled={DateTime.UtcNow.ToString("HH:mm:ss")}"))); 17 | }; 18 | 19 | await Task.WhenAll(fanOut); 20 | 21 | return true; 22 | } 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /Workflow/Workflows/MonitorWorkflow.cs: -------------------------------------------------------------------------------- 1 | using Dapr.Workflow; 2 | using WorkflowConsoleApp.Activities; 3 | 4 | namespace WorkflowConsoleApp.Workflows 5 | { 6 | public class MonitorWorkflow : Workflow<WorkflowPayload, bool> 7 | { 8 | public override async Task<bool> RunAsync(WorkflowContext context, WorkflowPayload payload) 9 | { 10 | string workflowId = context.InstanceId; 11 | 12 | var result = await context.CallActivityAsync<bool>( 13 | nameof(SlowActivity),
14 | new Notification($"{workflowId} - {nameof(SlowActivity)} #{payload.Iterations} - scheduled={DateTime.UtcNow.ToString("HH:mm:ss")}", payload.Data )); 15 | 16 | await context.CreateTimer(TimeSpan.FromSeconds(3)); 17 | 18 | var newWorkflowPayload = new WorkflowPayload( 19 | payload.RandomData, 20 | payload.Iterations - 1, 21 | null 22 | ); 23 | 24 | if (newWorkflowPayload.Iterations == 0) 25 | return true; 26 | 27 | context.ContinueAsNew(newWorkflowPayload); 28 | return false; 29 | } 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /Workflow/Workflows/ThrottleWorkflow.cs: -------------------------------------------------------------------------------- 1 | using Dapr.Workflow; 2 | using WorkflowConsoleApp.Activities; 3 | 4 | namespace WorkflowConsoleApp.Workflows 5 | { 6 | public class ThrottleWorkflow : Workflow<ThrottleState, bool> 7 | { 8 | public override async Task<bool> RunAsync(WorkflowContext context, ThrottleState state) 9 | { 10 | #region Convenience functions 11 | Action<LogLevel, string> log = (LogLevel, message) => 12 | { 13 | if (LogLevel >= state.RuntimeConfig.logLevel) 14 | state.PersistentLog.Add(message); 15 | }; 16 | #endregion 17 | 18 | #region Optimisations & Deadlock handling 19 | 20 | // 1. Sometimes a downstream workflow will not send its signal; the consequence of this happening is that 21 | // eventually, with enough failures, the semaphore will become blocked and no new downstream workflows will 22 | // be able to progress. 23 | 24 | // So, every 15s we can scan the 'activeWaits' and if one has exceeded its ttl (default 60s) then a virtual signal 25 | // is injected to unblock the semaphore. 26 | 27 | var expiryWatermark = context.CurrentUtcDateTime; 28 | var expiryPurgeCount = 0; 29 | foreach (var activeWait in state.ActiveWaits 30 | .Where(x => x.Value.Expiry.HasValue) 31 | .Where(x => expiryWatermark > x.Value.Expiry)) 32 | { 33 | expiryPurgeCount += 1; 34 | log(LogLevel.Debug, $"active wait for {activeWait.Key} has expired [ts-now: {expiryWatermark}, ts-expiry: {activeWait.Value.Expiry.Value}, delta: {activeWait.Value.Expiry.Value.Subtract(expiryWatermark).TotalSeconds} seconds]"); 35 | state.PendingSignals.Enqueue(new SignalEvent() { InstanceId = activeWait.Key }); 36 | } 37 | if (expiryPurgeCount > 0) 38 | log(LogLevel.Info, $"purged {expiryPurgeCount} expired active wait(s)"); 39 | 40 | // 2. Handle any signals first... (which will free up capacity in the semaphore for step 3) 41 | while (state.PendingSignals.Any()) 42 | { 43 | var signal1 = state.PendingSignals.Dequeue(); 44 | state.ActiveWaits.Remove(signal1.InstanceId); 45 | 46 | if (!state.PendingSignals.Any()) 47 | { 48 | // Don't technically have to do this, but it allows us to do a 49 | // GET on the workflow and see the state accurately 50 | context.ContinueAsNew(state, true); 51 | return true; 52 | } 53 | } 54 | 55 | #endregion 56 | 57 | // 3. Ensure that enough work is active (up to the Max Concurrency limit) 58 | while (state.PendingWaits.Any() && 59 | (state.ActiveWaits.Count() < state.RuntimeConfig.MaxConcurrency)) 60 | { 61 | WaitEvent waitEvent = state.PendingWaits.Dequeue(); 62 | state.ActiveWaits.TryAdd(waitEvent.InstanceId, waitEvent); 63 | 64 | // https://github.com/dapr/dapr/issues/8243 65 | // context.SendEvent(waitEvent.InstanceId, waitEvent.ProceedEventName, null); 66 | await context.CallActivityAsync(nameof(RaiseProceedEventActivity), new Tuple<string, string>(waitEvent.InstanceId, waitEvent.ProceedEventName)); 67 | } 68 | 69 |
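// Illustrative only, not invoked anywhere in this repo: the "adjust" event consumed in step 4 below
// carries a RuntimeConfig payload, so the throttle could be retuned at runtime from any code holding
// a DaprWorkflowClient, along the lines of:
// await workflowClient.RaiseEventAsync("throttle", "adjust", new RuntimeConfig { MaxConcurrency = 20 });
// (where "throttle" is the throttler instance id that ConstrainedWorkflow targets).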
-------------------------------------------------------------------------------- /Workflow/Workflows/ThrottleWorkflow.cs: --------------------------------------------------------------------------------
1 | using Dapr.Workflow;
2 | using WorkflowConsoleApp.Activities;
3 | 
4 | namespace WorkflowConsoleApp.Workflows
5 | {
6 | public class ThrottleWorkflow : Workflow<ThrottleState, bool>
7 | {
8 | public override async Task<bool> RunAsync(WorkflowContext context, ThrottleState state)
9 | {
10 | #region Convenience functions
11 | Action<LogLevel, string> log = (level, message) =>
12 | {
13 | if (level >= state.RuntimeConfig.logLevel)
14 | state.PersistentLog.Add(message);
15 | };
16 | #endregion
17 | 
18 | #region Optimisations & Deadlock handling
19 | 
20 | // 1. Sometimes a downstream workflow will not send its signal. With enough such failures,
21 | // the semaphore eventually becomes fully blocked and no new downstream workflows can
22 | // make progress.
23 | 
24 | // So, every 15s we scan the active waits; for any entry that has exceeded its TTL (default 60s),
25 | // a virtual signal is injected to unblock the semaphore.
26 | 
27 | var expiryWatermark = context.CurrentUtcDateTime;
28 | var expiryPurgeCount = 0;
29 | foreach (var activeWait in state.ActiveWaits
30 | .Where(x => x.Value.Expiry.HasValue)
31 | .Where(x => expiryWatermark > x.Value.Expiry))
32 | {
33 | expiryPurgeCount += 1;
34 | log(LogLevel.Debug, $"active wait for {activeWait.Key} has expired [ts-now: {expiryWatermark}, ts-expiry: {activeWait.Value.Expiry.Value}, delta: {activeWait.Value.Expiry.Value.Subtract(expiryWatermark).TotalSeconds} seconds]");
35 | state.PendingSignals.Enqueue(new SignalEvent() { InstanceId = activeWait.Key });
36 | }
37 | if (expiryPurgeCount > 0)
38 | log(LogLevel.Info, $"purged {expiryPurgeCount} expired active wait(s)");
39 | 
40 | // 2. Handle any signals first... (which will free up capacity in the semaphore for step 3)
41 | while (state.PendingSignals.Any())
42 | {
43 | var signal1 = state.PendingSignals.Dequeue();
44 | state.ActiveWaits.Remove(signal1.InstanceId);
45 | 
46 | if (!state.PendingSignals.Any())
47 | {
48 | // Not strictly necessary, but it lets a GET on the workflow
49 | // see the state accurately
50 | context.ContinueAsNew(state, true);
51 | return true;
52 | }
53 | }
54 | 
55 | #endregion
56 | 
57 | // 3. Ensure that enough work is active (up to the Max Concurrency limit)
58 | while (state.PendingWaits.Any() &&
59 | (state.ActiveWaits.Count() < state.RuntimeConfig.MaxConcurrency))
60 | {
61 | WaitEvent waitEvent = state.PendingWaits.Dequeue();
62 | state.ActiveWaits.TryAdd(waitEvent.InstanceId, waitEvent);
63 | 
64 | // https://github.com/dapr/dapr/issues/8243
65 | // context.SendEvent(waitEvent.InstanceId, waitEvent.ProceedEventName, null);
66 | await context.CallActivityAsync(nameof(RaiseProceedEventActivity), new Tuple<string, string>(waitEvent.InstanceId, waitEvent.ProceedEventName));
67 | }
68 | 
69 | 
70 | // 4. Wait for a `wait` or `signal` from a Workflow (or `adjust` or `expiryScan` tick)
71 | var wait = context.WaitForExternalEventAsync<WaitEvent>("wait");
72 | var signal = context.WaitForExternalEventAsync<SignalEvent>("signal");
73 | var adjust = context.WaitForExternalEventAsync<RuntimeConfig>("adjust");
74 | var cts = new CancellationTokenSource();
75 | var expiryScan = context.CreateTimer(TimeSpan.FromSeconds(15), cts.Token);
76 | 
77 | context.SetCustomStatus(new ThrottleSummary { Status = "WAITING", MaxWaits = state.RuntimeConfig.MaxConcurrency, ActiveWaits = state.ActiveWaits.Count(), PendingWaits = state.PendingWaits.Count() });
78 | var winner = await Task.WhenAny(wait, signal, adjust, expiryScan);
79 | if (winner == wait)
80 | {
81 | cts.Cancel();
82 | #region Expiry handling
83 | if (state.RuntimeConfig.DefaultTTLInSeconds > 0 && !wait.Result.Expiry.HasValue)
84 | wait.Result.Expiry = context.CurrentUtcDateTime.AddSeconds(state.RuntimeConfig.DefaultTTLInSeconds);
85 | #endregion
86 | state.PendingWaits.Enqueue(wait.Result);
87 | }
88 | else if (winner == signal)
89 | {
90 | cts.Cancel();
91 | state.PendingSignals.Enqueue(signal.Result);
92 | }
93 | else if (winner == adjust)
94 | {
95 | cts.Cancel();
96 | state.RuntimeConfig = adjust.Result;
97 | }
98 | else if (winner == expiryScan)
99 | { // no-op
100 | }
101 | else
102 | throw new Exception("unknown event");
103 | 
104 | context.ContinueAsNew(state, true);
105 | return true;
106 | }
107 | }
108 | 
109 | public class WaitEvent
110 | {
111 | public string InstanceId { get; set; }
112 | 
113 | public string ProceedEventName { get; set; }
114 | 
115 | public DateTime? Expiry { get; set; }
116 | }
117 | 
118 | public class SignalEvent
119 | {
120 | public string InstanceId { get; set; }
121 | }
122 | 
123 | public class ThrottleState
124 | {
125 | public RuntimeConfig RuntimeConfig = new RuntimeConfig();
126 | 
127 | public Queue<WaitEvent> PendingWaits = new Queue<WaitEvent>();
128 | 
129 | public Dictionary<string, WaitEvent> ActiveWaits = new Dictionary<string, WaitEvent>();
130 | 
131 | public Queue<SignalEvent> PendingSignals = new Queue<SignalEvent>();
132 | 
133 | public List<string> PersistentLog = new List<string>();
134 | }
135 | 
136 | public class RuntimeConfig
137 | {
138 | public int MaxConcurrency { get; set; } = 10;
139 | 
140 | public int DefaultTTLInSeconds { get; set; } = 60;
141 | 
142 | public LogLevel logLevel { get; set; } = LogLevel.Info;
143 | }
144 | 
145 | public enum LogLevel
146 | {
147 | Debug = 0,
148 | Info = 1,
149 | }
150 | 
151 | public class ThrottleSummary
152 | {
153 | public string Status { get; set; }
154 | public int MaxWaits { get; set; }
155 | public int ActiveWaits { get; set; }
156 | public int PendingWaits { get; set; }
157 | }
158 | }
159 | 
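160 | // Illustration: how the three external events drive this semaphore. A minimal sketch,
161 | // assuming a DaprWorkflowClient instance named `workflowClient`, a throttle instance id
162 | // of "throttle", and a downstream workflow that blocks on a "proceed" event (all of
163 | // these names are assumptions for illustration):
164 | //
165 | //   // a downstream workflow asks for a slot...
166 | //   await workflowClient.RaiseEventAsync("throttle", "wait",
167 | //       new WaitEvent { InstanceId = downstreamId, ProceedEventName = "proceed" });
168 | //   // ...runs once its proceed event is raised, then releases the slot:
169 | //   await workflowClient.RaiseEventAsync("throttle", "signal",
170 | //       new SignalEvent { InstanceId = downstreamId });
171 | //   // concurrency can also be retuned at runtime:
172 | //   await workflowClient.RaiseEventAsync("throttle", "adjust",
173 | //       new RuntimeConfig { MaxConcurrency = 25 });
174 | 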
-------------------------------------------------------------------------------- /Workflow/appsettings.Development.json: --------------------------------------------------------------------------------
1 | {
2 | "Logging": {
3 | "LogLevel": {
4 | "Default": "Information",
5 | "Microsoft.AspNetCore": "Warning"
6 | }
7 | }
8 | }
9 | 
-------------------------------------------------------------------------------- /Workflow/appsettings.json: --------------------------------------------------------------------------------
1 | {
2 | "Logging": {
3 | "LogLevel": {
4 | "Default": "Information",
5 | "Microsoft.AspNetCore": "Warning"
6 | }
7 | },
8 | "AllowedHosts": "*"
9 | }
10 | 
-------------------------------------------------------------------------------- /components/pubsub-kafka.yaml: --------------------------------------------------------------------------------
1 | apiVersion: dapr.io/v1alpha1
2 | kind: Component
3 | metadata:
4 | name: kafka-pubsub
5 | spec:
6 | type: pubsub.kafka
7 | version: v1
8 | metadata:
9 | - name: brokers # Required. Kafka broker connection setting
10 | value: "kafka:29092"
11 | - name: consumerGroup
12 | value: "{namespace}"
13 | - name: clientID
14 | value: "workflow-client"
15 | - name: authType # Required.
16 | value: "none"
17 | - name: consumeRetryEnabled
18 | value: "true"
19 | 
-------------------------------------------------------------------------------- /components/statestore-pg-v2.yaml: --------------------------------------------------------------------------------
1 | apiVersion: dapr.io/v1alpha1
2 | kind: Component
3 | metadata:
4 | name: statestore-pg-v2
5 | spec:
6 | type: state.postgresql
7 | version: v2
8 | metadata:
9 | # Connection string
10 | - name: connectionString
11 | value: "host=postgres-db user=postgres password=wO2VGDsMKR port=5432 database=postgres"
12 | - name: actorStateStore
13 | value: "true"
14 | - name: tablePrefix
15 | value: v2_
-------------------------------------------------------------------------------- /compose-1-1.yml: --------------------------------------------------------------------------------
1 | version: "3.4"
2 | services:
3 | postgres-db:
4 | image: postgres:16.2-alpine
5 | restart: always
6 | environment:
7 | - POSTGRES_USER=postgres
8 | - POSTGRES_PASSWORD=wO2VGDsMKR
9 | - max_wal_size=2GB
10 | ports:
11 | - "5432:5432"
12 | networks:
13 | - network
14 | volumes:
15 | - postgres-db-16-2:/var/lib/postgresql/data-16-2
16 | ############################
17 | # Workflow App + Dapr sidecar
18 | ############################
19 | workflow-app-a:
20 | environment:
21 | - DAPR_HTTP_PORT=3500
22 | - REGISTER_WORKFLOWS=true
23 | - REGISTER_ACTIVITIES=true
24 | build:
25 | context: .
26 | dockerfile: Workflow/Dockerfile
27 | ports:
28 | - "3500:3500" # only important so we can reach the Dapr HTTP sidecar from the host for testing purposes
29 | - "5113:5111"
30 | - "7777:7776"
31 | depends_on:
32 | - placement
33 | networks:
34 | - network
35 | workflow-dapr-a:
36 | image: "daprio/daprd:${DAPR_RUNTIME_VERSION}"
37 | command:
38 | [
39 | "./daprd",
40 | "-app-id",
41 | "workflow-a",
42 | "-app-port",
43 | "5111",
44 | "-placement-host-address",
45 | "placement:50005",
46 | "-scheduler-host-address",
47 | "scheduler-0:50006",
48 | "-resources-path",
49 | "/components",
50 | "-config",
51 | "/dapr-config/config.yml",
52 | "-log-level",
53 | "info",
54 | ]
55 | volumes:
56 | - "./components/:/components"
57 | - "./dapr-config/:/dapr-config"
58 | depends_on:
59 | postgres-db:
60 | condition: service_started
61 | kafka:
62 | condition: service_healthy
63 | network_mode: "service:workflow-app-a"
64 | 
65 | ###########################
66 | # Client App + Dapr sidecar
67 | ###########################
68 | client-app:
69 | build:
70 | context: .
71 | dockerfile: Client/Dockerfile 72 | ports: 73 | - "5112:5111" 74 | - "3503:3500" 75 | networks: 76 | - network 77 | client-dapr: 78 | image: "daprio/daprd:${DAPR_RUNTIME_VERSION}" 79 | command: 80 | [ 81 | "./daprd", 82 | "-app-id", 83 | "client", 84 | "-app-port", 85 | "5111", 86 | "-dapr-http-port", 87 | "3500", 88 | "-resources-path", 89 | "/components", 90 | "-log-level", 91 | "warn", 92 | ] 93 | volumes: 94 | - "./components/:/components" 95 | depends_on: 96 | kafka: 97 | condition: service_healthy 98 | network_mode: "service:client-app" 99 | ############################ 100 | # Dapr placement service 101 | ############################ 102 | placement: 103 | image: "daprio/dapr:${DAPR_PLACEMENT_VERSION}" 104 | command: ["./placement", "-port", "50005", "-log-level", "warn"] 105 | networks: 106 | - network 107 | 108 | # ############################ 109 | # # Dapr Scheduler service 110 | # ############################ 111 | scheduler-0: 112 | image: "daprio/dapr:${DAPR_SCHEDULER_VERSION}" 113 | command: ["./scheduler", "--etcd-data-dir", "/var/run/dapr/scheduler"] 114 | volumes: 115 | - ./dapr_scheduler/0:/var/run/dapr/scheduler 116 | networks: 117 | - network 118 | 119 | ############################ 120 | # Zookeeper 121 | ############################ 122 | zookeeper: 123 | image: confluentinc/cp-zookeeper:latest 124 | networks: 125 | - network 126 | restart: unless-stopped 127 | environment: 128 | ZOOKEEPER_CLIENT_PORT: 2181 129 | ZOOKEEPER_TICK_TIME: 2000 130 | ALLOW_ANONYMOUS_LOGIN: "true" 131 | ports: 132 | - 2181:2181 133 | volumes: 134 | - zookeeper_logs:/var/lib/zookeeper/log 135 | - zookeeper_data:/var/lib/zookeeper/data 136 | ############################ 137 | # Kafka 138 | ############################ 139 | kafka: 140 | image: confluentinc/cp-kafka:latest 141 | networks: 142 | - network 143 | restart: unless-stopped 144 | depends_on: 145 | - zookeeper 146 | healthcheck: 147 | test: nc -z localhost 9092 || exit -1 148 | interval: 10s 149 | retries: 10 150 | timeout: 30s 151 | start_period: 10s 152 | ports: 153 | - 9092:9092 154 | - 29092:29092 155 | environment: 156 | KAFKA_BROKER_ID: 1 157 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 158 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092 159 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT 160 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 161 | KAFKA_LOG4J_ROOT_LOGLEVEL: ERROR 162 | KAFKA_LOG4J_LOGGERS: org.apache.zookeeper=ERROR,org.apache.kafka=ERROR,kafka=ERROR,kafka.cluster=ERROR,kafka.controller=ERROR,kafka.coordinator=ERROR,kafka.log=ERROR,kafka.server=ERROR,kafka.zookeeper=ERROR,state.change.logger=ERROR 163 | KAFKA_JMX_PORT: 9997 164 | KAFKA_NUM_PARTITIONS: 10 165 | ############################ 166 | # Kafka UI 167 | ############################ 168 | kafka-ui: 169 | container_name: kafka-ui 170 | image: provectuslabs/kafka-ui:latest 171 | ports: 172 | - 8080:8080 173 | depends_on: 174 | - kafka 175 | environment: 176 | KAFKA_CLUSTERS_0_NAME: local 177 | KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:29092 178 | KAFKA_CLUSTERS_0_METRICS_PORT: 9997 179 | KAFKA_CLUSTERS_0_AUDIT_TOPICAUDITENABLED: "true" 180 | KAFKA_CLUSTERS_0_AUDIT_CONSOLEAUDITENABLED: "true" 181 | LOGGING_LEVEL_ROOT: "error" 182 | LOGGING_LEVEL_COM_PROVECTUS: "error" 183 | networks: 184 | - network 185 | networks: 186 | network: 187 | volumes: 188 | db-data: 189 | driver: local 190 | zookeeper_logs: 191 | driver: local 192 | zookeeper_data: 193 | driver: local 194 | postgres-db-16-2: 195 | 
driver: local 196 | -------------------------------------------------------------------------------- /compose-10-3.yml: -------------------------------------------------------------------------------- 1 | version: "3.4" 2 | services: 3 | postgres-db: 4 | image: postgres:16.2-alpine 5 | restart: always 6 | environment: 7 | - POSTGRES_USER=postgres 8 | - POSTGRES_PASSWORD=wO2VGDsMKR 9 | - max_wal_size=2GB 10 | ports: 11 | - "5432:5432" 12 | networks: 13 | - network 14 | volumes: 15 | - postgres-db-16-2:/var/lib/postgresql/data-16-2 16 | ############################ 17 | # Workflow App + Dapr sidecar 18 | ############################ 19 | workflow-app-a: 20 | environment: 21 | - REGISTER_WORKFLOWS=true 22 | - REGISTER_ACTIVITIES=true 23 | build: 24 | context: . 25 | dockerfile: Workflow/Dockerfile 26 | ports: 27 | - "3500:3500" # only important so we can reach the Dapr HTTP sidecar from the host for testing purposes 28 | - "5113:5111" 29 | - "7777:7776" 30 | depends_on: 31 | - placement 32 | networks: 33 | - network 34 | workflow-dapr-a: 35 | image: "daprio/daprd:${DAPR_RUNTIME_VERSION}" 36 | command: 37 | [ 38 | "./daprd", 39 | "-app-id", 40 | "workflow-a", 41 | "-app-port", 42 | "5111", 43 | "-placement-host-address", 44 | "placement:50005", 45 | "-scheduler-host-address", 46 | "scheduler-0:50006,scheduler-1:50006,scheduler-2:50006", 47 | "-resources-path", 48 | "/components", 49 | "-config", 50 | "/dapr-config/config.yml", 51 | "-log-level", 52 | "info", 53 | ] 54 | volumes: 55 | - "./components/:/components" 56 | - "./dapr-config/:/dapr-config" 57 | depends_on: 58 | postgres-db: 59 | condition: service_started 60 | kafka: 61 | condition: service_healthy 62 | network_mode: "service:workflow-app-a" 63 | ############################ 64 | # Workflow App + Dapr sidecar 65 | ############################ 66 | workflow-app-b: 67 | environment: 68 | - DAPR_GRPC_PORT=50002 69 | - REGISTER_WORKFLOWS=true 70 | - REGISTER_ACTIVITIES=true 71 | build: 72 | context: . 73 | dockerfile: Workflow/Dockerfile 74 | ports: 75 | - "3501:3500" # only important so we can reach the Dapr HTTP sidecar from the host for testing purposes 76 | depends_on: 77 | - placement 78 | networks: 79 | - network 80 | workflow-dapr-b: 81 | image: "daprio/daprd:${DAPR_RUNTIME_VERSION}" 82 | command: 83 | [ 84 | "./daprd", 85 | "-app-id", 86 | "workflow-a", 87 | "-app-port", 88 | "5111", 89 | "-dapr-grpc-port", 90 | "50002", 91 | "-placement-host-address", 92 | "placement:50005", 93 | "-scheduler-host-address", 94 | "scheduler-0:50006,scheduler-1:50006,scheduler-2:50006", 95 | "-resources-path", 96 | "/components", 97 | "-config", 98 | "/dapr-config/config.yml", 99 | "-log-level", 100 | "info", 101 | ] 102 | volumes: 103 | - "./components/:/components" 104 | - "./dapr-config/:/dapr-config" 105 | depends_on: 106 | postgres-db: 107 | condition: service_started 108 | kafka: 109 | condition: service_healthy 110 | network_mode: "service:workflow-app-b" 111 | ############################ 112 | # Workflow App + Dapr sidecar 113 | ############################ 114 | workflow-app-c: 115 | environment: 116 | - DAPR_GRPC_PORT=50003 117 | - REGISTER_WORKFLOWS=true 118 | - REGISTER_ACTIVITIES=true 119 | build: 120 | context: . 
121 | dockerfile: Workflow/Dockerfile 122 | ports: 123 | - "3502:3500" # only important so we can reach the Dapr HTTP sidecar from the host for testing purposes 124 | depends_on: 125 | - placement 126 | networks: 127 | - network 128 | workflow-dapr-c: 129 | image: "daprio/daprd:${DAPR_RUNTIME_VERSION}" 130 | command: 131 | [ 132 | "./daprd", 133 | "-app-id", 134 | "workflow-a", 135 | "-app-port", 136 | "5111", 137 | "-dapr-grpc-port", 138 | "50003", 139 | "-placement-host-address", 140 | "placement:50005", 141 | "-scheduler-host-address", 142 | "scheduler-0:50006,scheduler-1:50006,scheduler-2:50006", 143 | "-resources-path", 144 | "/components", 145 | "-config", 146 | "/dapr-config/config.yml", 147 | "-log-level", 148 | "info", 149 | ] 150 | volumes: 151 | - "./components/:/components" 152 | - "./dapr-config/:/dapr-config" 153 | depends_on: 154 | postgres-db: 155 | condition: service_started 156 | kafka: 157 | condition: service_healthy 158 | network_mode: "service:workflow-app-c" 159 | ############################ 160 | # Workflow App + Dapr sidecar 161 | ############################ 162 | workflow-app-d: 163 | environment: 164 | - DAPR_GRPC_PORT=50004 165 | - REGISTER_WORKFLOWS=true 166 | - REGISTER_ACTIVITIES=true 167 | build: 168 | context: . 169 | dockerfile: Workflow/Dockerfile 170 | ports: 171 | - "3503:3500" # only important so we can reach the Dapr HTTP sidecar from the host for testing purposes 172 | depends_on: 173 | - placement 174 | networks: 175 | - network 176 | workflow-dapr-d: 177 | image: "daprio/daprd:${DAPR_RUNTIME_VERSION}" 178 | command: 179 | [ 180 | "./daprd", 181 | "-app-id", 182 | "workflow-a", 183 | "-app-port", 184 | "5111", 185 | "-dapr-grpc-port", 186 | "50004", 187 | "-placement-host-address", 188 | "placement:50005", 189 | "-scheduler-host-address", 190 | "scheduler-0:50006,scheduler-1:50006,scheduler-2:50006", 191 | "-resources-path", 192 | "/components", 193 | "-config", 194 | "/dapr-config/config.yml", 195 | "-log-level", 196 | "info", 197 | ] 198 | volumes: 199 | - "./components/:/components" 200 | - "./dapr-config/:/dapr-config" 201 | depends_on: 202 | postgres-db: 203 | condition: service_started 204 | kafka: 205 | condition: service_healthy 206 | network_mode: "service:workflow-app-d" 207 | ############################ 208 | # Workflow App + Dapr sidecar 209 | ############################ 210 | workflow-app-e: 211 | environment: 212 | - DAPR_GRPC_PORT=50005 213 | - REGISTER_WORKFLOWS=true 214 | - REGISTER_ACTIVITIES=true 215 | build: 216 | context: . 
217 | dockerfile: Workflow/Dockerfile 218 | ports: 219 | - "3504:3500" # only important so we can reach the Dapr HTTP sidecar from the host for testing purposes 220 | depends_on: 221 | - placement 222 | networks: 223 | - network 224 | workflow-dapr-e: 225 | image: "daprio/daprd:${DAPR_RUNTIME_VERSION}" 226 | command: 227 | [ 228 | "./daprd", 229 | "-app-id", 230 | "workflow-a", 231 | "-app-port", 232 | "5111", 233 | "-dapr-grpc-port", 234 | "50005", 235 | "-placement-host-address", 236 | "placement:50005", 237 | "-scheduler-host-address", 238 | "scheduler-0:50006,scheduler-1:50006,scheduler-2:50006", 239 | "-resources-path", 240 | "/components", 241 | "-config", 242 | "/dapr-config/config.yml", 243 | "-log-level", 244 | "info", 245 | ] 246 | volumes: 247 | - "./components/:/components" 248 | - "./dapr-config/:/dapr-config" 249 | depends_on: 250 | postgres-db: 251 | condition: service_started 252 | kafka: 253 | condition: service_healthy 254 | network_mode: "service:workflow-app-e" 255 | ############################ 256 | # Workflow App + Dapr sidecar 257 | ############################ 258 | workflow-app-f: 259 | environment: 260 | - DAPR_GRPC_PORT=50006 261 | - REGISTER_WORKFLOWS=true 262 | - REGISTER_ACTIVITIES=true 263 | build: 264 | context: . 265 | dockerfile: Workflow/Dockerfile 266 | ports: 267 | - "3505:3500" # only important so we can reach the Dapr HTTP sidecar from the host for testing purposes 268 | depends_on: 269 | - placement 270 | networks: 271 | - network 272 | workflow-dapr-f: 273 | image: "daprio/daprd:${DAPR_RUNTIME_VERSION}" 274 | command: 275 | [ 276 | "./daprd", 277 | "-app-id", 278 | "workflow-a", 279 | "-app-port", 280 | "5111", 281 | "-dapr-grpc-port", 282 | "50006", 283 | "-placement-host-address", 284 | "placement:50005", 285 | "-scheduler-host-address", 286 | "scheduler-0:50006,scheduler-1:50006,scheduler-2:50006", 287 | "-resources-path", 288 | "/components", 289 | "-config", 290 | "/dapr-config/config.yml", 291 | "-log-level", 292 | "info", 293 | ] 294 | volumes: 295 | - "./components/:/components" 296 | - "./dapr-config/:/dapr-config" 297 | depends_on: 298 | postgres-db: 299 | condition: service_started 300 | kafka: 301 | condition: service_healthy 302 | network_mode: "service:workflow-app-f" 303 | ############################ 304 | # Workflow App + Dapr sidecar 305 | ############################ 306 | workflow-app-g: 307 | environment: 308 | - DAPR_GRPC_PORT=50007 309 | - REGISTER_WORKFLOWS=true 310 | - REGISTER_ACTIVITIES=true 311 | build: 312 | context: . 
313 | dockerfile: Workflow/Dockerfile 314 | ports: 315 | - "3506:3500" # only important so we can reach the Dapr HTTP sidecar from the host for testing purposes 316 | depends_on: 317 | - placement 318 | networks: 319 | - network 320 | workflow-dapr-g: 321 | image: "daprio/daprd:${DAPR_RUNTIME_VERSION}" 322 | command: 323 | [ 324 | "./daprd", 325 | "-app-id", 326 | "workflow-a", 327 | "-app-port", 328 | "5111", 329 | "-dapr-grpc-port", 330 | "50007", 331 | "-placement-host-address", 332 | "placement:50005", 333 | "-scheduler-host-address", 334 | "scheduler-0:50006,scheduler-1:50006,scheduler-2:50006", 335 | "-resources-path", 336 | "/components", 337 | "-config", 338 | "/dapr-config/config.yml", 339 | "-log-level", 340 | "info", 341 | ] 342 | volumes: 343 | - "./components/:/components" 344 | - "./dapr-config/:/dapr-config" 345 | depends_on: 346 | postgres-db: 347 | condition: service_started 348 | kafka: 349 | condition: service_healthy 350 | network_mode: "service:workflow-app-g" 351 | ############################ 352 | # Workflow App + Dapr sidecar 353 | ############################ 354 | workflow-app-h: 355 | environment: 356 | - DAPR_GRPC_PORT=50008 357 | - REGISTER_WORKFLOWS=true 358 | - REGISTER_ACTIVITIES=true 359 | build: 360 | context: . 361 | dockerfile: Workflow/Dockerfile 362 | ports: 363 | - "3507:3500" # only important so we can reach the Dapr HTTP sidecar from the host for testing purposes 364 | depends_on: 365 | - placement 366 | networks: 367 | - network 368 | workflow-dapr-h: 369 | image: "daprio/daprd:${DAPR_RUNTIME_VERSION}" 370 | command: 371 | [ 372 | "./daprd", 373 | "-app-id", 374 | "workflow-a", 375 | "-app-port", 376 | "5111", 377 | "-dapr-grpc-port", 378 | "50008", 379 | "-placement-host-address", 380 | "placement:50005", 381 | "-scheduler-host-address", 382 | "scheduler-0:50006,scheduler-1:50006,scheduler-2:50006", 383 | "-resources-path", 384 | "/components", 385 | "-config", 386 | "/dapr-config/config.yml", 387 | "-log-level", 388 | "info", 389 | ] 390 | volumes: 391 | - "./components/:/components" 392 | - "./dapr-config/:/dapr-config" 393 | depends_on: 394 | postgres-db: 395 | condition: service_started 396 | kafka: 397 | condition: service_healthy 398 | network_mode: "service:workflow-app-h" 399 | ############################ 400 | # Workflow App + Dapr sidecar 401 | ############################ 402 | workflow-app-i: 403 | environment: 404 | - DAPR_GRPC_PORT=50009 405 | - REGISTER_WORKFLOWS=true 406 | - REGISTER_ACTIVITIES=true 407 | build: 408 | context: . 
409 | dockerfile: Workflow/Dockerfile 410 | ports: 411 | - "3508:3500" # only important so we can reach the Dapr HTTP sidecar from the host for testing purposes 412 | depends_on: 413 | - placement 414 | networks: 415 | - network 416 | workflow-dapr-i: 417 | image: "daprio/daprd:${DAPR_RUNTIME_VERSION}" 418 | command: 419 | [ 420 | "./daprd", 421 | "-app-id", 422 | "workflow-a", 423 | "-app-port", 424 | "5111", 425 | "-dapr-grpc-port", 426 | "50009", 427 | "-placement-host-address", 428 | "placement:50005", 429 | "-scheduler-host-address", 430 | "scheduler-0:50006,scheduler-1:50006,scheduler-2:50006", 431 | "-resources-path", 432 | "/components", 433 | "-config", 434 | "/dapr-config/config.yml", 435 | "-log-level", 436 | "info", 437 | ] 438 | volumes: 439 | - "./components/:/components" 440 | - "./dapr-config/:/dapr-config" 441 | depends_on: 442 | postgres-db: 443 | condition: service_started 444 | kafka: 445 | condition: service_healthy 446 | network_mode: "service:workflow-app-i" 447 | ############################ 448 | # Workflow App + Dapr sidecar 449 | ############################ 450 | workflow-app-j: 451 | environment: 452 | - DAPR_GRPC_PORT=50010 453 | - REGISTER_WORKFLOWS=true 454 | - REGISTER_ACTIVITIES=true 455 | build: 456 | context: . 457 | dockerfile: Workflow/Dockerfile 458 | ports: 459 | - "3509:3500" # only important so we can reach the Dapr HTTP sidecar from the host for testing purposes 460 | depends_on: 461 | - placement 462 | networks: 463 | - network 464 | workflow-dapr-j: 465 | image: "daprio/daprd:${DAPR_RUNTIME_VERSION}" 466 | command: 467 | [ 468 | "./daprd", 469 | "-app-id", 470 | "workflow-a", 471 | "-app-port", 472 | "5111", 473 | "-dapr-grpc-port", 474 | "50010", 475 | "-placement-host-address", 476 | "placement:50005", 477 | "-scheduler-host-address", 478 | "scheduler-0:50006,scheduler-1:50006,scheduler-2:50006", 479 | "-resources-path", 480 | "/components", 481 | "-config", 482 | "/dapr-config/config.yml", 483 | "-log-level", 484 | "info", 485 | ] 486 | volumes: 487 | - "./components/:/components" 488 | - "./dapr-config/:/dapr-config" 489 | depends_on: 490 | postgres-db: 491 | condition: service_started 492 | kafka: 493 | condition: service_healthy 494 | network_mode: "service:workflow-app-j" 495 | ########################### 496 | #Client App + Dapr sidecar 497 | ########################### 498 | client-app: 499 | build: 500 | context: . 
501 | dockerfile: Client/Dockerfile 502 | ports: 503 | - "5112:5111" 504 | networks: 505 | - network 506 | client-dapr: 507 | image: "daprio/daprd:${DAPR_RUNTIME_VERSION}" 508 | command: 509 | [ 510 | "./daprd", 511 | "-app-id", 512 | "client", 513 | "-app-port", 514 | "5111", 515 | "-dapr-http-port", 516 | "3500", 517 | "-resources-path", 518 | "/components", 519 | "-log-level", 520 | "warn", 521 | ] 522 | volumes: 523 | - "./components/:/components" 524 | depends_on: 525 | kafka: 526 | condition: service_healthy 527 | network_mode: "service:client-app" 528 | ############################ 529 | # Dapr placement service 530 | ############################ 531 | placement: 532 | image: "daprio/dapr:${DAPR_PLACEMENT_VERSION}" 533 | command: ["./placement", "-port", "50005", "-log-level", "warn"] 534 | networks: 535 | - network 536 | 537 | # ############################ 538 | # # Dapr Scheduler service 539 | # ############################ 540 | scheduler-0: 541 | image: "daprio/dapr:${DAPR_SCHEDULER_VERSION}" 542 | command: ["./scheduler", "--etcd-data-dir", "/var/run/dapr/scheduler"] 543 | volumes: 544 | - ./dapr_scheduler/0:/var/run/dapr/scheduler 545 | networks: 546 | - network 547 | scheduler-1: 548 | image: "daprio/dapr:${DAPR_SCHEDULER_VERSION}" 549 | command: ["./scheduler", "--etcd-data-dir", "/var/run/dapr/scheduler"] 550 | volumes: 551 | - ./dapr_scheduler/1:/var/run/dapr/scheduler 552 | networks: 553 | - network 554 | scheduler-2: 555 | image: "daprio/dapr:${DAPR_SCHEDULER_VERSION}" 556 | command: ["./scheduler", "--etcd-data-dir", "/var/run/dapr/scheduler"] 557 | volumes: 558 | - ./dapr_scheduler/2:/var/run/dapr/scheduler 559 | networks: 560 | - network 561 | 562 | ############################ 563 | # Zookeeper 564 | ############################ 565 | zookeeper: 566 | image: confluentinc/cp-zookeeper:latest 567 | networks: 568 | - network 569 | restart: unless-stopped 570 | environment: 571 | ZOOKEEPER_CLIENT_PORT: 2181 572 | ZOOKEEPER_TICK_TIME: 2000 573 | ALLOW_ANONYMOUS_LOGIN: "true" 574 | ports: 575 | - 2181:2181 576 | volumes: 577 | - zookeeper_logs:/var/lib/zookeeper/log 578 | - zookeeper_data:/var/lib/zookeeper/data 579 | ############################ 580 | # Kafka 581 | ############################ 582 | kafka: 583 | image: confluentinc/cp-kafka:latest 584 | networks: 585 | - network 586 | restart: unless-stopped 587 | depends_on: 588 | - zookeeper 589 | healthcheck: 590 | test: nc -z localhost 9092 || exit -1 591 | interval: 10s 592 | retries: 10 593 | timeout: 30s 594 | start_period: 10s 595 | ports: 596 | - 9092:9092 597 | - 29092:29092 598 | environment: 599 | KAFKA_BROKER_ID: 1 600 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 601 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092 602 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT 603 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 604 | KAFKA_LOG4J_ROOT_LOGLEVEL: ERROR 605 | KAFKA_LOG4J_LOGGERS: org.apache.zookeeper=ERROR,org.apache.kafka=ERROR,kafka=ERROR,kafka.cluster=ERROR,kafka.controller=ERROR,kafka.coordinator=ERROR,kafka.log=ERROR,kafka.server=ERROR,kafka.zookeeper=ERROR,state.change.logger=ERROR 606 | KAFKA_JMX_PORT: 9997 607 | KAFKA_NUM_PARTITIONS: 10 608 | ############################ 609 | # Kafka UI 610 | ############################ 611 | kafka-ui: 612 | container_name: kafka-ui 613 | image: provectuslabs/kafka-ui:latest 614 | ports: 615 | - 8080:8080 616 | depends_on: 617 | - kafka 618 | environment: 619 | KAFKA_CLUSTERS_0_NAME: local 620 | 
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:29092 621 | KAFKA_CLUSTERS_0_METRICS_PORT: 9997 622 | KAFKA_CLUSTERS_0_AUDIT_TOPICAUDITENABLED: "true" 623 | KAFKA_CLUSTERS_0_AUDIT_CONSOLEAUDITENABLED: "true" 624 | LOGGING_LEVEL_ROOT: "error" 625 | LOGGING_LEVEL_COM_PROVECTUS: "error" 626 | networks: 627 | - network 628 | networks: 629 | network: 630 | volumes: 631 | db-data: 632 | driver: local 633 | zookeeper_logs: 634 | driver: local 635 | zookeeper_data: 636 | driver: local 637 | postgres-db-16-2: 638 | driver: local 639 | -------------------------------------------------------------------------------- /compose-5-3.yml: -------------------------------------------------------------------------------- 1 | version: "3.4" 2 | services: 3 | postgres-db: 4 | image: postgres:16.2-alpine 5 | restart: always 6 | environment: 7 | - POSTGRES_USER=postgres 8 | - POSTGRES_PASSWORD=wO2VGDsMKR 9 | - max_wal_size=2GB 10 | ports: 11 | - "5432:5432" 12 | networks: 13 | - network 14 | volumes: 15 | - postgres-db-16-2:/var/lib/postgresql/data-16-2 16 | ############################ 17 | # Workflow App + Dapr sidecar 18 | ############################ 19 | workflow-app-a: 20 | environment: 21 | - REGISTER_WORKFLOWS=true 22 | - REGISTER_ACTIVITIES=true 23 | build: 24 | context: . 25 | dockerfile: Workflow/Dockerfile 26 | ports: 27 | - "3500:3500" # only important so we can reach the Dapr HTTP sidecar from the host for testing purposes 28 | - "5113:5111" 29 | - "7777:7776" 30 | depends_on: 31 | - placement 32 | networks: 33 | - network 34 | workflow-dapr-a: 35 | image: "daprio/daprd:${DAPR_RUNTIME_VERSION}" 36 | command: 37 | [ 38 | "./daprd", 39 | "-app-id", 40 | "workflow-a", 41 | "-app-port", 42 | "5111", 43 | "-placement-host-address", 44 | "placement:50005", 45 | "-scheduler-host-address", 46 | "scheduler-0:50006,scheduler-1:50006,scheduler-2:50006", 47 | "-resources-path", 48 | "/components", 49 | "-config", 50 | "/dapr-config/config.yml", 51 | "-log-level", 52 | "info", 53 | ] 54 | volumes: 55 | - "./components/:/components" 56 | - "./dapr-config/:/dapr-config" 57 | depends_on: 58 | postgres-db: 59 | condition: service_started 60 | kafka: 61 | condition: service_healthy 62 | network_mode: "service:workflow-app-a" 63 | ############################ 64 | # Workflow App + Dapr sidecar 65 | ############################ 66 | workflow-app-b: 67 | environment: 68 | - DAPR_GRPC_PORT=50002 69 | - REGISTER_WORKFLOWS=true 70 | - REGISTER_ACTIVITIES=true 71 | build: 72 | context: . 
73 | dockerfile: Workflow/Dockerfile 74 | ports: 75 | - "3501:3500" # only important so we can reach the Dapr HTTP sidecar from the host for testing purposes 76 | depends_on: 77 | - placement 78 | networks: 79 | - network 80 | workflow-dapr-b: 81 | image: "daprio/daprd:${DAPR_RUNTIME_VERSION}" 82 | command: 83 | [ 84 | "./daprd", 85 | "-app-id", 86 | "workflow-a", 87 | "-app-port", 88 | "5111", 89 | "-dapr-grpc-port", 90 | "50002", 91 | "-placement-host-address", 92 | "placement:50005", 93 | "-scheduler-host-address", 94 | "scheduler-0:50006,scheduler-1:50006,scheduler-2:50006", 95 | "-resources-path", 96 | "/components", 97 | "-config", 98 | "/dapr-config/config.yml", 99 | "-log-level", 100 | "info", 101 | ] 102 | volumes: 103 | - "./components/:/components" 104 | - "./dapr-config/:/dapr-config" 105 | depends_on: 106 | postgres-db: 107 | condition: service_started 108 | kafka: 109 | condition: service_healthy 110 | network_mode: "service:workflow-app-b" 111 | ############################ 112 | # Workflow App + Dapr sidecar 113 | ############################ 114 | workflow-app-c: 115 | environment: 116 | - DAPR_GRPC_PORT=50003 117 | - REGISTER_WORKFLOWS=true 118 | - REGISTER_ACTIVITIES=true 119 | build: 120 | context: . 121 | dockerfile: Workflow/Dockerfile 122 | ports: 123 | - "3502:3500" # only important so we can reach the Dapr HTTP sidecar from the host for testing purposes 124 | depends_on: 125 | - placement 126 | networks: 127 | - network 128 | workflow-dapr-c: 129 | image: "daprio/daprd:${DAPR_RUNTIME_VERSION}" 130 | command: 131 | [ 132 | "./daprd", 133 | "-app-id", 134 | "workflow-a", 135 | "-app-port", 136 | "5111", 137 | "-dapr-grpc-port", 138 | "50003", 139 | "-placement-host-address", 140 | "placement:50005", 141 | "-scheduler-host-address", 142 | "scheduler-0:50006,scheduler-1:50006,scheduler-2:50006", 143 | "-resources-path", 144 | "/components", 145 | "-config", 146 | "/dapr-config/config.yml", 147 | "-log-level", 148 | "info", 149 | ] 150 | volumes: 151 | - "./components/:/components" 152 | - "./dapr-config/:/dapr-config" 153 | depends_on: 154 | postgres-db: 155 | condition: service_started 156 | kafka: 157 | condition: service_healthy 158 | network_mode: "service:workflow-app-c" 159 | ############################ 160 | # Workflow App + Dapr sidecar 161 | ############################ 162 | workflow-app-d: 163 | environment: 164 | - DAPR_GRPC_PORT=50004 165 | - REGISTER_WORKFLOWS=true 166 | - REGISTER_ACTIVITIES=true 167 | build: 168 | context: . 
169 | dockerfile: Workflow/Dockerfile 170 | ports: 171 | - "3503:3500" # only important so we can reach the Dapr HTTP sidecar from the host for testing purposes 172 | depends_on: 173 | - placement 174 | networks: 175 | - network 176 | workflow-dapr-d: 177 | image: "daprio/daprd:${DAPR_RUNTIME_VERSION}" 178 | command: 179 | [ 180 | "./daprd", 181 | "-app-id", 182 | "workflow-a", 183 | "-app-port", 184 | "5111", 185 | "-dapr-grpc-port", 186 | "50004", 187 | "-placement-host-address", 188 | "placement:50005", 189 | "-scheduler-host-address", 190 | "scheduler-0:50006,scheduler-1:50006,scheduler-2:50006", 191 | "-resources-path", 192 | "/components", 193 | "-config", 194 | "/dapr-config/config.yml", 195 | "-log-level", 196 | "info", 197 | ] 198 | volumes: 199 | - "./components/:/components" 200 | - "./dapr-config/:/dapr-config" 201 | depends_on: 202 | postgres-db: 203 | condition: service_started 204 | kafka: 205 | condition: service_healthy 206 | network_mode: "service:workflow-app-d" 207 | ############################ 208 | # Workflow App + Dapr sidecar 209 | ############################ 210 | workflow-app-e: 211 | environment: 212 | - DAPR_GRPC_PORT=50005 213 | - REGISTER_WORKFLOWS=true 214 | - REGISTER_ACTIVITIES=true 215 | build: 216 | context: . 217 | dockerfile: Workflow/Dockerfile 218 | ports: 219 | - "3504:3500" # only important so we can reach the Dapr HTTP sidecar from the host for testing purposes 220 | depends_on: 221 | - placement 222 | networks: 223 | - network 224 | workflow-dapr-e: 225 | image: "daprio/daprd:${DAPR_RUNTIME_VERSION}" 226 | command: 227 | [ 228 | "./daprd", 229 | "-app-id", 230 | "workflow-a", 231 | "-app-port", 232 | "5111", 233 | "-dapr-grpc-port", 234 | "50005", 235 | "-placement-host-address", 236 | "placement:50005", 237 | "-scheduler-host-address", 238 | "scheduler-0:50006,scheduler-1:50006,scheduler-2:50006", 239 | "-resources-path", 240 | "/components", 241 | "-config", 242 | "/dapr-config/config.yml", 243 | "-log-level", 244 | "info", 245 | ] 246 | volumes: 247 | - "./components/:/components" 248 | - "./dapr-config/:/dapr-config" 249 | depends_on: 250 | postgres-db: 251 | condition: service_started 252 | kafka: 253 | condition: service_healthy 254 | network_mode: "service:workflow-app-e" 255 | ########################### 256 | #Client App + Dapr sidecar 257 | ########################### 258 | client-app: 259 | build: 260 | context: . 
261 | dockerfile: Client/Dockerfile 262 | ports: 263 | - "5112:5111" 264 | networks: 265 | - network 266 | client-dapr: 267 | image: "daprio/daprd:${DAPR_RUNTIME_VERSION}" 268 | command: 269 | [ 270 | "./daprd", 271 | "-app-id", 272 | "client", 273 | "-app-port", 274 | "5111", 275 | "-dapr-http-port", 276 | "3500", 277 | "-resources-path", 278 | "/components", 279 | "-log-level", 280 | "warn", 281 | ] 282 | volumes: 283 | - "./components/:/components" 284 | depends_on: 285 | kafka: 286 | condition: service_healthy 287 | network_mode: "service:client-app" 288 | ############################ 289 | # Dapr placement service 290 | ############################ 291 | placement: 292 | image: "daprio/dapr:${DAPR_PLACEMENT_VERSION}" 293 | command: ["./placement", "-port", "50005", "-log-level", "warn"] 294 | networks: 295 | - network 296 | 297 | # ############################ 298 | # # Dapr Scheduler service 299 | # ############################ 300 | scheduler-0: 301 | image: "daprio/dapr:${DAPR_SCHEDULER_VERSION}" 302 | command: ["./scheduler", "--etcd-data-dir", "/var/run/dapr/scheduler"] 303 | volumes: 304 | - ./dapr_scheduler/0:/var/run/dapr/scheduler 305 | networks: 306 | - network 307 | scheduler-1: 308 | image: "daprio/dapr:${DAPR_SCHEDULER_VERSION}" 309 | command: ["./scheduler", "--etcd-data-dir", "/var/run/dapr/scheduler"] 310 | volumes: 311 | - ./dapr_scheduler/1:/var/run/dapr/scheduler 312 | networks: 313 | - network 314 | scheduler-2: 315 | image: "daprio/dapr:${DAPR_SCHEDULER_VERSION}" 316 | command: ["./scheduler", "--etcd-data-dir", "/var/run/dapr/scheduler"] 317 | volumes: 318 | - ./dapr_scheduler/2:/var/run/dapr/scheduler 319 | networks: 320 | - network 321 | 322 | ############################ 323 | # Zookeeper 324 | ############################ 325 | zookeeper: 326 | image: confluentinc/cp-zookeeper:latest 327 | networks: 328 | - network 329 | restart: unless-stopped 330 | environment: 331 | ZOOKEEPER_CLIENT_PORT: 2181 332 | ZOOKEEPER_TICK_TIME: 2000 333 | ALLOW_ANONYMOUS_LOGIN: "true" 334 | ports: 335 | - 2181:2181 336 | volumes: 337 | - zookeeper_logs:/var/lib/zookeeper/log 338 | - zookeeper_data:/var/lib/zookeeper/data 339 | ############################ 340 | # Kafka 341 | ############################ 342 | kafka: 343 | image: confluentinc/cp-kafka:latest 344 | networks: 345 | - network 346 | restart: unless-stopped 347 | depends_on: 348 | - zookeeper 349 | healthcheck: 350 | test: nc -z localhost 9092 || exit -1 351 | interval: 10s 352 | retries: 10 353 | timeout: 30s 354 | start_period: 10s 355 | ports: 356 | - 9092:9092 357 | - 29092:29092 358 | environment: 359 | KAFKA_BROKER_ID: 1 360 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 361 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092 362 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT 363 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 364 | KAFKA_LOG4J_ROOT_LOGLEVEL: ERROR 365 | KAFKA_LOG4J_LOGGERS: org.apache.zookeeper=ERROR,org.apache.kafka=ERROR,kafka=ERROR,kafka.cluster=ERROR,kafka.controller=ERROR,kafka.coordinator=ERROR,kafka.log=ERROR,kafka.server=ERROR,kafka.zookeeper=ERROR,state.change.logger=ERROR 366 | KAFKA_JMX_PORT: 9997 367 | KAFKA_NUM_PARTITIONS: 10 368 | ############################ 369 | # Kafka UI 370 | ############################ 371 | kafka-ui: 372 | container_name: kafka-ui 373 | image: provectuslabs/kafka-ui:latest 374 | ports: 375 | - 8080:8080 376 | depends_on: 377 | - kafka 378 | environment: 379 | KAFKA_CLUSTERS_0_NAME: local 380 | 
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:29092
381 | KAFKA_CLUSTERS_0_METRICS_PORT: 9997
382 | KAFKA_CLUSTERS_0_AUDIT_TOPICAUDITENABLED: "true"
383 | KAFKA_CLUSTERS_0_AUDIT_CONSOLEAUDITENABLED: "true"
384 | LOGGING_LEVEL_ROOT: "error"
385 | LOGGING_LEVEL_COM_PROVECTUS: "error"
386 | networks:
387 | - network
388 | networks:
389 | network:
390 | volumes:
391 | db-data:
392 | driver: local
393 | zookeeper_logs:
394 | driver: local
395 | zookeeper_data:
396 | driver: local
397 | postgres-db-16-2:
398 | driver: local
399 | 
-------------------------------------------------------------------------------- /compose-only-dependencies.yml: --------------------------------------------------------------------------------
1 | services:
2 | postgres-db:
3 | image: postgres:16.2-alpine
4 | restart: always
5 | environment:
6 | - POSTGRES_USER=postgres
7 | - POSTGRES_PASSWORD=wO2VGDsMKR
8 | - max_wal_size=2GB
9 | ports:
10 | - "5432:5432"
11 | networks:
12 | - network
13 | volumes:
14 | - postgres-db-16-2:/var/lib/postgresql/data-16-2
15 | ############################
16 | # Zookeeper
17 | ############################
18 | zookeeper:
19 | image: confluentinc/cp-zookeeper:latest
20 | networks:
21 | - network
22 | restart: unless-stopped
23 | environment:
24 | ZOOKEEPER_CLIENT_PORT: 2181
25 | ZOOKEEPER_TICK_TIME: 2000
26 | ALLOW_ANONYMOUS_LOGIN: "true"
27 | ports:
28 | - 2181:2181
29 | volumes:
30 | - zookeeper_logs:/var/lib/zookeeper/log
31 | - zookeeper_data:/var/lib/zookeeper/data
32 | ############################
33 | # Kafka
34 | ############################
35 | kafka:
36 | image: confluentinc/cp-kafka:latest
37 | networks:
38 | - network
39 | restart: unless-stopped
40 | depends_on:
41 | - zookeeper
42 | healthcheck:
43 | test: nc -z localhost 9092 || exit -1
44 | interval: 10s
45 | retries: 10
46 | timeout: 30s
47 | start_period: 10s
48 | ports:
49 | - 9092:9092
50 | - 29092:29092
51 | environment:
52 | KAFKA_BROKER_ID: 1
53 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
54 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:29092,PLAINTEXT_HOST://localhost:9092
55 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
56 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
57 | KAFKA_LOG4J_ROOT_LOGLEVEL: ERROR
58 | KAFKA_LOG4J_LOGGERS: org.apache.zookeeper=ERROR,org.apache.kafka=ERROR,kafka=ERROR,kafka.cluster=ERROR,kafka.controller=ERROR,kafka.coordinator=ERROR,kafka.log=ERROR,kafka.server=ERROR,kafka.zookeeper=ERROR,state.change.logger=ERROR
59 | KAFKA_JMX_PORT: 9997
60 | KAFKA_NUM_PARTITIONS: 10
61 | ############################
62 | # Kafka UI
63 | ############################
64 | kafka-ui:
65 | container_name: kafka-ui
66 | image: provectuslabs/kafka-ui:latest
67 | ports:
68 | - 8080:8080
69 | depends_on:
70 | - kafka
71 | environment:
72 | KAFKA_CLUSTERS_0_NAME: local
73 | KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:29092
74 | KAFKA_CLUSTERS_0_METRICS_PORT: 9997
75 | KAFKA_CLUSTERS_0_AUDIT_TOPICAUDITENABLED: "true"
76 | KAFKA_CLUSTERS_0_AUDIT_CONSOLEAUDITENABLED: "true"
77 | LOGGING_LEVEL_ROOT: "error"
78 | LOGGING_LEVEL_COM_PROVECTUS: "error"
79 | networks:
80 | - network
81 | networks:
82 | network:
83 | volumes:
84 | db-data:
85 | driver: local
86 | zookeeper_logs:
87 | driver: local
88 | zookeeper_data:
89 | driver: local
90 | postgres-db-16-2:
91 | driver: local
92 | 
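93 | # Illustration (assumed invocation): this file starts only the backing services, for when
94 | # the .NET apps run directly on the host (for example via the multi-app run template):
95 | #
96 | #   docker compose -f compose-only-dependencies.yml up -d
97 | 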
-------------------------------------------------------------------------------- /compose.debug-workflow-app.yml: --------------------------------------------------------------------------------
1 | version: '3.4'
2 | services:
3 | workflow-app-a: # isolate the workflow-app by removing its ports and taking it off the network
4 | ports: !reset []
5 | networks: !reset
6 | - ""
7 | workflow-dapr-a:
8 | command: ["./daprd",
9 | "-app-id", "workflow-a",
10 | "-app-port", "5111",
11 | "-placement-host-address", "placement:50005",
12 | "-resources-path", "/components",
13 | "-app-channel-address", "host.docker.internal"] # make the sidecar look on the host for the App Channel
14 | network_mode: !reset "" # reset the network_mode...
15 | networks: # ... so that the sidecar can go into the normal network
16 | - network
17 | ports:
18 | - "3500:3500" # Expose the HTTP port to the host
19 | - "50001:50001" # Expose the gRPC port to the host (Dapr Workflows depends upon the gRPC channel)
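20 | # Illustration (assumed invocation): this file is an override to be layered on top of a
21 | # base compose file, so the sidecar talks to a workflow app started from the IDE on the
22 | # host; the `!reset` tags need a recent Docker Compose v2, e.g.:
23 | #
24 | #   docker compose -f compose-1-1.yml -f compose.debug-workflow-app.yml up
25 | 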
-------------------------------------------------------------------------------- /dapr-config/config.yml: --------------------------------------------------------------------------------
1 | apiVersion: dapr.io/v1alpha1
2 | kind: Configuration
3 | metadata:
4 | name: daprConfig
5 | spec:
6 | tracing:
7 | samplingRate: "1"
8 | zipkin:
9 | endpointAddress: "http://host.docker.internal:9411/api/v2/spans"
10 | 
-------------------------------------------------------------------------------- /dapr-workflow-examples.sln: --------------------------------------------------------------------------------
1 | 
2 | Microsoft Visual Studio Solution File, Format Version 12.00
3 | # Visual Studio Version 17
4 | VisualStudioVersion = 17.5.002.0
5 | MinimumVisualStudioVersion = 10.0.40219.1
6 | Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Workflow", "Workflow\Workflow.csproj", "{FC58B591-9EB5-4AF4-83B7-E47506AB98E6}"
7 | EndProject
8 | Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Client", "Client\Client.csproj", "{A79130D7-7599-496C-B2B8-A3B87DE3C054}"
9 | EndProject
10 | Global
11 | GlobalSection(SolutionConfigurationPlatforms) = preSolution
12 | Debug|Any CPU = Debug|Any CPU
13 | Release|Any CPU = Release|Any CPU
14 | EndGlobalSection
15 | GlobalSection(ProjectConfigurationPlatforms) = postSolution
16 | {FC58B591-9EB5-4AF4-83B7-E47506AB98E6}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
17 | {FC58B591-9EB5-4AF4-83B7-E47506AB98E6}.Debug|Any CPU.Build.0 = Debug|Any CPU
18 | {FC58B591-9EB5-4AF4-83B7-E47506AB98E6}.Release|Any CPU.ActiveCfg = Release|Any CPU
19 | {FC58B591-9EB5-4AF4-83B7-E47506AB98E6}.Release|Any CPU.Build.0 = Release|Any CPU
20 | {A79130D7-7599-496C-B2B8-A3B87DE3C054}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
21 | {A79130D7-7599-496C-B2B8-A3B87DE3C054}.Debug|Any CPU.Build.0 = Debug|Any CPU
22 | {A79130D7-7599-496C-B2B8-A3B87DE3C054}.Release|Any CPU.ActiveCfg = Release|Any CPU
23 | {A79130D7-7599-496C-B2B8-A3B87DE3C054}.Release|Any CPU.Build.0 = Release|Any CPU
24 | EndGlobalSection
25 | GlobalSection(SolutionProperties) = preSolution
26 | HideSolutionNode = FALSE
27 | EndGlobalSection
28 | GlobalSection(ExtensibilityGlobals) = postSolution
29 | SolutionGuid = {881B430D-D264-4AD3-BA7A-5475B4A6089B}
30 | EndGlobalSection
31 | EndGlobal
32 | 
-------------------------------------------------------------------------------- /deploy.yaml: --------------------------------------------------------------------------------
1 | kind: Deployment
2 | apiVersion: apps/v1
3 | metadata:
4 | name: workflow
5 | labels:
6 | app: workflow
7 | spec:
8 | replicas: 1
9 | selector:
10 | matchLabels:
11 | app: workflow
12 | template:
13 | metadata:
14 | labels:
15 | app: workflow
16 | annotations:
17 | dapr.io/enabled: "true"
18 | dapr.io/app-id: "workflow-a"
19 | dapr.io/app-port: "5111"
20 | dapr.io/config: "mydaprconfig"
21 | spec:
22 | containers:
23 | - name: workflow
24 | image: workflow:1.0.1
25 | imagePullPolicy: Never
26 | ports:
27 | - containerPort: 5111
28 | ---
29 | kind: Deployment
30 | apiVersion: apps/v1
31 | metadata:
32 | name: client
33 | labels:
34 | app: client
35 | spec:
36 | replicas: 1
37 | selector:
38 | matchLabels:
39 | app: client
40 | template:
41 | metadata:
42 | labels:
43 | app: client
44 | annotations:
45 | dapr.io/enabled: "true"
46 | dapr.io/app-id: "client"
47 | dapr.io/app-port: "5111"
48 | dapr.io/config: "mydaprconfig"
49 | spec:
50 | containers:
51 | - name: client
52 | image: client:1.0.1
53 | imagePullPolicy: Never
54 | ports:
55 | - containerPort: 5111
56 | 
-------------------------------------------------------------------------------- /multirun.yaml: --------------------------------------------------------------------------------
1 | version: 1
2 | common: # optional section for variables shared across apps
3 | resourcesPath: /multirun-components # any dapr resources to be shared across apps
4 | apps:
5 | - appID: workflow # optional
6 | appDirPath: /Workflow # REQUIRED
7 | configFilePath: config/config.yaml # optional; resolved by convention if omitted, and ignored if the file is not found
8 | appProtocol: http
9 | appPort: 8080
10 | daprHttpPort: 3500
11 | daprGRPCPort: 50001
12 | daprInternalGRPCPort: 48727
13 | metricsPort: 9091
14 | command: ["dotnet", "run"]
15 | env:
16 | ASPNETCORE_URLS: http://localhost:5113/
17 | DAPR_HTTP_PORT: 3500
18 | DAPR_GRPC_PORT: 50001
19 | appLogDestination: console # optional; file, console, or fileAndConsole (default: fileAndConsole)
20 | daprdLogDestination: console
21 | 
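22 | # Illustration (assumed invocation): a multi-app run template like this is executed with
23 | # the Dapr CLI's multi-app run, typically after starting just the backing services, e.g.:
24 | #
25 | #   docker compose -f compose-only-dependencies.yml up -d
26 | #   dapr run -f multirun.yaml
27 | 
--------------------------------------------------------------------------------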