├── doc
├── readme-image.png
├── todo.txt
└── kafka-prompt.md
├── .idea
├── vcs.xml
├── .gitignore
├── jsLibraryMappings.xml
├── misc.xml
└── modules.xml
├── src
├── app.js
├── sketchMetrics.js
├── sharedState.js
├── utils.js
├── canvas
│ ├── metricsPanel.js
│ ├── consumers.js
│ ├── producers.js
│ └── partitions.js
├── index.html
├── config.js
└── sketchSimulation.js
├── .gitignore
├── package.json
├── LICENSE.md
└── README.md
/doc/readme-image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pedromazala/kafka-traffic-visualizer/HEAD/doc/readme-image.png
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/src/app.js:
--------------------------------------------------------------------------------
1 | import p5 from 'p5';
2 | import sketchSimulation from './sketchSimulation.js';
3 | import sketchMetrics from './sketchMetrics.js';
4 |
5 | new p5(sketchSimulation);
6 | new p5(sketchMetrics);
7 |
--------------------------------------------------------------------------------
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 | # Editor-based HTTP Client requests
5 | /httpRequests/
6 | # Datasource local storage ignored files
7 | /dataSources/
8 | /dataSources.local.xml
9 |
--------------------------------------------------------------------------------
/.idea/jsLibraryMappings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # IntelliJ IDEA files
2 | .idea/
3 | *.iml
4 | *.iws
5 | *.ipr
6 | .idea_modules/
7 | out/
8 |
9 | # Node.js
10 | node_modules/
11 | npm-debug.log
12 | yarn-debug.log
13 | yarn-error.log
14 | package-lock.json
15 | yarn.lock
16 | .parcel-cache/
17 |
18 | # Build output
19 | dist/
20 | build/
21 | lib/
22 | coverage/
23 |
24 | # macOS
25 | .DS_Store
26 | .AppleDouble
27 | .LSOverride
28 | ._*
29 |
30 | # Environment files
31 | .env
32 | .env.local
33 | .env.development.local
34 | .env.test.local
35 | .env.production.local
36 |
37 | # Log files
38 | logs/
39 | *.log
40 |
41 | # Editor config
42 | .vscode/
43 | *.sublime-project
44 | *.sublime-workspace
45 |
--------------------------------------------------------------------------------
/src/sketchMetrics.js:
--------------------------------------------------------------------------------
1 | import sharedState from './sharedState.js';
2 | import {createMetricsPanelRenderer} from './canvas/metricsPanel.js';
3 |
4 | const metricsSketch = (p) => {
5 | const CANVAS_WIDTH = 1100;
6 | const CANVAS_HEIGHT = 150;
7 |
8 | let metricsPanelRenderer;
9 |
10 | p.setup = () => {
11 | let metricsCanvas = p.createCanvas(CANVAS_WIDTH, CANVAS_HEIGHT);
12 | metricsCanvas.parent('canvas-metrics');
13 |
14 | metricsPanelRenderer = createMetricsPanelRenderer(p);
15 | };
16 |
17 | p.draw = () => {
18 | p.background(200);
19 |
20 | metricsPanelRenderer.drawMetricsPanel(
21 | sharedState.getMetrics(),
22 | sharedState.consumers
23 | );
24 | };
25 | };
26 |
27 | export default metricsSketch;
28 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "kafka-traffic-visualizer",
3 | "version": "0.1.0",
4 | "scripts": {
5 | "start": "parcel src/index.html",
6 | "build": "parcel build src/index.html",
7 | "test": "echo \"Error: no test specified\" && exit 1"
8 | },
9 | "targets": {
10 | "default": {
11 | "publicUrl": "/kafka-traffic-visualizer/"
12 | }
13 | },
14 | "repository": {
15 | "type": "git",
16 | "url": "git+https://github.com/evouraorg/kafka-traffic-visualizer.git"
17 | },
18 | "author": "Renato Mefi",
19 | "license": "MIT",
20 | "bugs": {
21 | "url": "https://github.com/evouraorg/kafka-traffic-visualizer/issues"
22 | },
23 | "homepage": "https://github.com/evouraorg/kafka-traffic-visualizer#readme",
24 | "description": "",
25 | "dependencies": {
26 | "p5": "^1.11.3"
27 | },
28 | "devDependencies": {
29 | "@types/p5": "^1.7.6",
30 | "parcel": "^2.13.3"
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/src/sharedState.js:
--------------------------------------------------------------------------------
1 | const sharedState = {
2 | metrics: {
3 | startTime: 0,
4 | lastUpdateTime: 0,
5 | producers: {}, // Map of producer ID -> metrics
6 | consumers: {}, // Map of consumer ID -> metrics
7 | global: {
8 | totalRecordsProduced: 0,
9 | totalRecordsConsumed: 0,
10 | totalBytesProduced: 0,
11 | totalBytesConsumed: 0,
12 | avgProcessingTimeMs: 0,
13 | processingTimeSamples: 0
14 | }
15 | },
16 |
17 | // Method to update metrics from simulation sketch
18 | updateMetrics(newMetrics) {
19 | this.metrics = newMetrics;
20 | },
21 |
22 | // Method to get current metrics for metrics panel sketch
23 | getMetrics() {
24 | return this.metrics;
25 | },
26 |
27 | // Reference to consumers array for the metrics panel to use
28 | consumers: [],
29 |
30 | // Method to update consumers reference
31 | updateConsumers(newConsumers) {
32 | this.consumers = newConsumers;
33 | }
34 | };
35 |
36 | export default sharedState;
37 |
--------------------------------------------------------------------------------
/src/utils.js:
--------------------------------------------------------------------------------
1 |
2 | export function formatBytes(bytes) {
3 | if (bytes < 1000) {
4 | return Math.round(bytes) + ' B';
5 | } else if (bytes < 1000 * 1000) {
6 | return (bytes / 1000).toFixed(2) + ' KB';
7 | } else {
8 | return (bytes / (1000 * 1000)).toFixed(2) + ' MB';
9 | }
10 | }
11 |
12 | /**
13 | * Generates a consistent color based on an index using the golden ratio conjugate
14 | *
15 | * @param {Object} p - p5.js instance
16 | * @param {number} index - The index to generate color for
17 | * @param {number} [offset=0] - Offset in the color wheel (0.5 = opposite side)
18 | * @param {number} [saturation=70] - Color saturation (0-100)
19 | * @param {number} [brightness=80] - Color brightness (0-100)
20 | * @returns {Object} p5.js color object
21 | */
22 | export function generateConsistentColor(p, index, offset = 0, saturation = 70, brightness = 80) {
23 | const goldenRatioConjugate = 0.618033988749895;
24 | const hue = (((index * goldenRatioConjugate) + offset) % 1) * 360;
25 |
26 | p.colorMode(p.HSB, 360, 100, 100);
27 | const color = p.color(hue, saturation, brightness);
28 | p.colorMode(p.RGB, 255, 255, 255);
29 |
30 | return color;
31 | }
32 |
--------------------------------------------------------------------------------
/doc/todo.txt:
--------------------------------------------------------------------------------
1 | 1. Modify the producer time to use real time with millis:
2 | Reliable Timing Strategies
3 | 1. Millisecond Timing (millis())
4 |
5 | javascript
6 | let lastEvent = 0;
7 |
8 | function draw() {
9 | if (millis() - lastEvent > 1000) { // Trigger every 1000ms
10 | doSomething();
11 | lastEvent = millis();
12 | }
13 | }
14 | Uses actual system clock time rather than frames
15 |
16 | 2. Delta Time Compensation
17 |
18 | javascript
19 | let posX = 0;
20 | let prevTime = 0;
21 |
22 | function draw() {
23 | const deltaTime = millis() - prevTime;
24 | posX += (100 * deltaTime)/1000; // 100px/second
25 | prevTime = millis();
26 | }
27 | Makes movement speed consistent regardless of FPS
28 |
29 | 2. Records shouldn't move faster if they're small. Replace this speed with "network" speed
30 | // Smaller records move slightly faster for visual variety
31 | const adjustedSpeed = baseSpeed * (1 - (radius - MIN_RECORD_RADIUS) / (MAX_RECORD_RADIUS - MIN_RECORD_RADIUS) * 0.3);
32 |
33 |
34 | 3. Decouple record x line 833 from the object itself, it's an animation concern
35 |
36 | 4. could we emit an event on record pushed to partition? line 847
37 | // Add the record to the partition
38 | partitions[partitionId].records.push(record);
39 |
40 | // Emit record produced event
41 | eventEmitter.emit(EVENTS.RECORD_PRODUCED, record);
42 |
43 | 5. Make the emit record look more like a kafka producer
44 |
45 | 6. consumerThroughputMaxInBytes and processingCApacitySlider should be the same
46 |
47 | 7. partition algorithms configurable, instead of only modulo
48 |
49 |
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | # Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0)
2 |
3 | This work is licensed under a [Creative Commons Attribution-NonCommercial 4.0 International License](https://creativecommons.org/licenses/by-nc/4.0/).
4 |
5 | ## You are free to:
6 |
7 | - **Share** — copy and redistribute the material in any medium or format
8 | - **Adapt** — remix, transform, and build upon the material
9 |
10 | The licensor cannot revoke these freedoms as long as you follow the license terms.
11 |
12 | ## Under the following terms:
13 |
14 | - **Attribution** — You must give appropriate credit, provide a link to the license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use.
15 | - **NonCommercial** — You may not use the material for commercial purposes.
16 | - **No additional restrictions** — You may not apply legal terms or technological measures that legally restrict others from doing anything the license permits.
17 |
18 | ## Notices:
19 |
20 | You do not have to comply with the license for elements of the material in the public domain or where your use is permitted by an applicable exception or limitation.
21 |
22 | No warranties are given. The license may not give you all of the permissions necessary for your intended use. For example, other rights such as publicity, privacy, or moral rights may limit how you use the material.
23 |
24 | ## Full License Text
25 |
26 | The full text of the license is available at: [https://creativecommons.org/licenses/by-nc/4.0/legalcode](https://creativecommons.org/licenses/by-nc/4.0/legalcode)
27 |
28 | ---
29 |
30 | © 2025 [Evoura](https://evoura.com) / [Renato Mefi](https://www.linkedin.com/in/renatomefi/)
31 |
--------------------------------------------------------------------------------
/doc/kafka-prompt.md:
--------------------------------------------------------------------------------
1 | # Kafka Traffic Visualizer
2 |
3 | This project uses P5.js instance mode to simulate Kafka topic traffic, with Producers generating Records with Key/Value and Consumers consuming those Records from the partitions.
4 | Consider it's a single Topic.
5 |
6 | Topics:
7 | - Have Partitions
8 |
9 | Records:
10 | - Records have a Key; keys determine the Partition deterministically with a mod algorithm
11 | - Records have a Value; Values vary in byte size and determine how long they take to Produce to the Partition
12 |
13 | Producers:
14 | - They generate Records
15 |
16 | Configurations and controls:
17 | - Topic Partition count: 1 to 256, start with 8
18 | - Producers count: 0 to 16, start with 2
19 | - Consumers count: 0 to 256, start with 2
20 |
21 | Consumer:
22 | - A Consumer can consume 1 or more Partitions
23 | - Consumes Records in the order of the Partition, it cannot consume out of order
24 |
25 | Display and visuals:
26 | - Produce pure P5.js script, no HTMLs or other frameworks allowed
27 | - Partitions should be wide rectangles
28 | - Producers should be on the left and distant from partitions
29 | - a Record is produced to the far left of the partition, presenting an input
30 | - a Record travels within the partition rectangle to the right, where consumers would be
31 | - Records are ellipses; the ellipse size is proportional to the Value size in bytes
32 | - Records have colors which match the Producer color
33 |
34 | Metrics:
35 | - Below the Producer, show how many Records were produced, a cumulative sum of Bytes produced and a produce rate in Kilobytes per Second
36 | - Below the Consumer, show how many Records were consumed, a cumulative sum of Bytes consumed and a consume rate in Kilobytes per Second
37 |
38 | Physics:
39 | - A Partition behaves like a FIFO queue
40 | - inside a Partition, Records can not move past each other, they have to respect the queue and can only be processed after the previous one is already processed
41 | - Records should accumulate inside a Partition when they are not consumed, they should remain stacked in the order they were produced, like a FIFO queue
42 |
43 | Code style:
44 | - Use early returns
45 | - Avoid nested loops, extract functions accordingly with single responsibility principle
46 | - Name variables and functions with Kafka internal terminology and distributed systems paradigms
47 | - Consider performing data structures
48 | - UI/Canvas/Drawing/Controls code should be separated from the Traffic simulation code
49 | - do not pass objects by reference to update them within a function, always return a new object with the updated values
50 |
--------------------------------------------------------------------------------
/src/canvas/metricsPanel.js:
--------------------------------------------------------------------------------
1 | import {formatBytes} from '../utils.js';
2 |
3 | export function createMetricsPanelRenderer(p, panelX = 20, panelY = 20) {
4 | function formatElapsedTime(ms) {
5 | if (ms < 1000) {
6 | return `${ms.toFixed(0)}ms`;
7 | } else if (ms < 60000) {
8 | const seconds = Math.floor(ms / 1000);
9 | const remainingMs = ms % 1000;
10 | return `${seconds}s ${remainingMs.toFixed(0)}ms`;
11 | } else {
12 | const minutes = Math.floor(ms / 60000);
13 | const seconds = Math.floor((ms % 60000) / 1000);
14 | const remainingMs = ms % 1000;
15 | return `${minutes}m ${seconds}s ${remainingMs.toFixed(0)}ms`;
16 | }
17 | }
18 |
19 | function drawMetricsPanel(metrics, consumers = []) {
20 | const panelWidth = 200;
21 | const panelHeight = 110; // Increased to fit new metrics
22 |
23 | // Calculate active consumer count (consumers with assigned partitions)
24 | let consumerThroughput = 0;
25 |
26 | consumers.filter(consumer =>
27 | consumer.assignedPartitions && consumer.assignedPartitions.length > 0
28 | ).forEach((consumer) => {
29 | consumerThroughput += consumer.throughputMax
30 | });
31 |
32 | p.push();
33 | // Draw panel background
34 | p.fill(240);
35 | p.stroke(100);
36 | p.strokeWeight(1);
37 | p.rect(panelX, panelY, panelWidth, panelHeight);
38 |
39 | // Draw metrics text
40 | p.fill(0);
41 | p.noStroke();
42 | p.textAlign(p.LEFT, p.TOP);
43 | p.textSize(12);
44 | p.text("Global Metrics:", panelX + 5, panelY + 5);
45 |
46 | const elapsedMs = p.millis();
47 |
48 | p.textSize(10);
49 | p.text(`Records: P ${metrics.global.totalRecordsProduced} → C ${metrics.global.totalRecordsConsumed} → Lag: ${metrics.global.totalRecordsProduced - metrics.global.totalRecordsConsumed}`,
50 | panelX + 5, panelY + 25);
51 | p.text(`Bytes: ${formatBytes(metrics.global.totalBytesProduced)} → ${formatBytes(metrics.global.totalBytesConsumed)}`,
52 | panelX + 5, panelY + 40);
53 | p.text(`Avg Processing: ${Math.round(metrics.global.avgProcessingTimeMs)}ms`,
54 | panelX + 5, panelY + 55);
55 | p.text(`Consumers max throughput: ${formatBytes(consumerThroughput)}`,
56 | panelX + 5, panelY + 70);
57 | p.text(`Elapsed ms: ${elapsedMs.toFixed(0)}`,
58 | panelX + 5, panelY + 85);
59 | p.text(`Elapsed time: ${formatElapsedTime(elapsedMs)}`,
60 | panelX + 5, panelY + 100);
61 | p.pop();
62 | }
63 |
64 | return {
65 | drawMetricsPanel
66 | };
67 | }
68 |
--------------------------------------------------------------------------------
/src/canvas/consumers.js:
--------------------------------------------------------------------------------
1 | import { formatBytes } from '../utils.js';
2 |
3 | export function createConsumerRenderer(p, positionX) {
4 | // Function to draw a single consumer
5 | function drawConsumerComponent(consumer, index, metrics) {
6 | p.push(); // Start a new drawing context
7 | p.translate(positionX, consumer.y); // Set the origin to the consumer position
8 |
9 | // Get metrics for this consumer
10 | const consumerMetrics = metrics?.consumers?.[consumer.id] || {
11 | recordsConsumed: 0,
12 | bytesConsumed: 0,
13 | consumeRate: 0,
14 | recordsRate: 0
15 | };
16 |
17 | // Consumer metrics data
18 | const metricsData = [
19 | `Records: ${consumerMetrics.recordsConsumed}`,
20 | `Sum B: ${formatBytes(consumerMetrics.bytesConsumed)}`,
21 | `${Math.round(consumerMetrics.consumeRate)} B/s`,
22 | `${Math.round(consumerMetrics.recordsRate * 100) / 100} rec/s`
23 | ];
24 |
25 | // Calculate metrics box dimensions
26 | p.textSize(10);
27 | const textHeight = 15; // Height per line of text
28 | const textPadding = 2; // Padding between text and border
29 | const metricsWidth = p.max(
30 | ...metricsData.map(text => p.textWidth(text))
31 | ) + textPadding * 2;
32 | const metricsHeight = textHeight * metricsData.length + textPadding * 2;
33 |
34 | // Use gray color for unassigned consumers
35 | const borderColor = consumer.assignedPartitions.length === 0 ? p.color(200) : consumer.color;
36 |
37 | // Draw metrics box - vertically centered with the consumer square
38 | p.noFill();
39 | p.stroke(borderColor);
40 | p.strokeWeight(1);
41 | p.rect(30, -metricsHeight / 2, metricsWidth, metricsHeight);
42 |
43 | // Draw metrics text
44 | p.fill(0);
45 | p.noStroke();
46 | p.textAlign(p.LEFT, p.TOP);
47 | for (let i = 0; i < metricsData.length; i++) {
48 | p.text(metricsData[i], 30 + textPadding, -metricsHeight / 2 + textPadding + i * textHeight);
49 | }
50 |
51 | // Draw consumer rectangle - always use regular color regardless of busy state
52 | p.fill(consumer.color);
53 | p.stroke(0);
54 | p.strokeWeight(1);
55 | p.rect(0, -15, 30, 30);
56 |
57 | // Draw consumer ID inside rectangle
58 | p.fill(255);
59 | p.noStroke();
60 | p.textAlign(p.CENTER, p.CENTER);
61 | p.textSize(10);
62 | p.textStyle(p.BOLD);
63 | p.text(index, 15, 0);
64 | p.textStyle(p.NORMAL);
65 |
66 | p.pop(); // Restore the drawing context
67 | }
68 |
69 | // Function to draw connections between consumers and their partitions
70 | function drawConsumerPartitionConnections(consumer, partitions, partitionStartX, partitionWidth, partitionHeight) {
71 | p.stroke(consumer.color);
72 | p.strokeWeight(1.8);
73 | p.drawingContext.setLineDash([5, 5]);
74 |
75 | for (const partitionId of consumer.assignedPartitions) {
76 | const partitionY = partitions[partitionId].y + partitionHeight / 2;
77 | p.line(partitionStartX + partitionWidth, partitionY, positionX, consumer.y);
78 | }
79 |
80 | p.drawingContext.setLineDash([]);
81 | }
82 |
83 | return {
84 | drawConsumer: drawConsumerComponent,
85 | drawConsumerPartitionConnections: drawConsumerPartitionConnections,
86 | drawConsumers(consumers, metrics) {
87 | for (let i = 0; i < consumers.length; i++) {
88 | drawConsumerComponent(consumers[i], i, metrics);
89 | }
90 | },
91 | drawConsumersWithConnections(consumers, partitions, metrics, partitionStartX, partitionWidth, partitionHeight) {
92 | for (let i = 0; i < consumers.length; i++) {
93 | drawConsumerComponent(consumers[i], i, metrics);
94 | drawConsumerPartitionConnections(
95 | consumers[i],
96 | partitions,
97 | partitionStartX,
98 | partitionWidth,
99 | partitionHeight
100 | );
101 | }
102 | }
103 | };
104 | }
--------------------------------------------------------------------------------
/src/canvas/producers.js:
--------------------------------------------------------------------------------
1 | import { formatBytes } from '../utils.js';
2 |
3 | export default function createProducerEffectsManager(p) {
4 | const effects = [];
5 |
6 | function addEffectToManager(x1, y1, x2, y2, color, duration) {
7 | const effect = {
8 | startTime: p.millis(),
9 | endTime: p.millis() + duration,
10 | x1, y1, x2, y2, color
11 | };
12 | effects.push(effect);
13 | }
14 |
15 | function updateEffects() {
16 | // Remove expired effects
17 | for (let i = effects.length - 1; i >= 0; i--) {
18 | if (p.millis() >= effects[i].endTime) {
19 | effects.splice(i, 1);
20 | }
21 | }
22 | }
23 |
24 | function drawEffects() {
25 | for (const effect of effects) {
26 | // Draw the full line immediately (no animation)
27 | p.stroke(effect.color);
28 | p.strokeWeight(2);
29 | p.line(effect.x1, effect.y1, effect.x2, effect.y2);
30 | }
31 | }
32 |
33 | function getEffectsCount() {
34 | return effects.length;
35 | }
36 |
37 | return {
38 | addEffect: addEffectToManager,
39 | update: updateEffects,
40 | draw: drawEffects,
41 | getCount: getEffectsCount
42 | };
43 | }
44 |
45 | // src/canvas/producers.js
46 | export function createProducerRenderer(p, positionX) {
47 | // Original function to draw a single producer
48 | function drawProducerComponent(producer, index, metrics) {
49 | p.push(); // Start a new drawing context
50 | p.translate(positionX, producer.y); // Set the origin to the producer position
51 |
52 | // Get metrics for this producer
53 | const producerMetrics = metrics?.producers?.[producer.id] || {
54 | recordsProduced: 0,
55 | bytesProduced: 0,
56 | produceRate: 0,
57 | recordsRate: 0
58 | };
59 |
60 | // Producer metrics data
61 | const metricsData = [
62 | `Records: ${producerMetrics.recordsProduced}`,
63 | `Sum B: ${formatBytes(producerMetrics.bytesProduced)}`,
64 | `${Math.round(producerMetrics.produceRate)} B/s`,
65 | `${Math.round(producerMetrics.recordsRate * 100) / 100} rec/s`
66 | ];
67 |
68 | // Calculate metrics box dimensions
69 | p.textSize(10);
70 | const textHeight = 15; // Height per line of text
71 | const textPadding = 2; // Padding between text and border
72 | const metricsWidth = p.max(
73 | p.textWidth(metricsData[0]),
74 | p.textWidth(metricsData[1]),
75 | p.textWidth(metricsData[2]),
76 | p.textWidth(metricsData[3])
77 | ) + textPadding * 2;
78 | const metricsHeight = textHeight * metricsData.length + textPadding * 2;
79 |
80 | // Draw metrics box - positioned to touch the producer triangle
81 | p.noFill();
82 | p.stroke(producer.color);
83 | p.strokeWeight(1);
84 | p.rect(-metricsWidth - 15, -metricsHeight / 2, metricsWidth, metricsHeight);
85 |
86 | // Draw metrics text
87 | p.fill(0);
88 | p.noStroke();
89 | p.textAlign(p.LEFT, p.TOP);
90 | for (let i = 0; i < metricsData.length; i++) {
91 | p.text(
92 | metricsData[i],
93 | -metricsWidth - 15 + textPadding,
94 | -metricsHeight / 2 + i * textHeight + textPadding
95 | );
96 | }
97 |
98 | // Draw producer symbol (triangle)
99 | p.fill(producer.color);
100 | p.stroke(0);
101 | p.strokeWeight(1);
102 | p.triangle(-15, -15, 15, 0, -15, 15);
103 |
104 | // Draw producer ID inside the triangle
105 | p.fill(255);
106 | p.noStroke();
107 | p.textAlign(p.CENTER, p.CENTER);
108 | p.textSize(10);
109 | p.textStyle(p.BOLD);
110 | p.text(index, -10, 0);
111 | p.textStyle(p.NORMAL);
112 |
113 | p.pop(); // Restore the drawing context
114 | }
115 |
116 | // Return an object with both individual and batch rendering functions
117 | return {
118 | drawProducer: drawProducerComponent,
119 | drawProducers(producers, metrics) {
120 | for (let i = 0; i < producers.length; i++) {
121 | drawProducerComponent(producers[i], i, metrics);
122 | }
123 | }
124 | };
125 | }
126 |
--------------------------------------------------------------------------------
/src/canvas/partitions.js:
--------------------------------------------------------------------------------
1 | import { generateConsistentColor } from '../utils.js';
2 |
3 | export function createPartitionRenderer(p, startX, startY, partitionWidth, partitionHeight, partitionSpacing) {
4 | // Draw a single partition with its records
5 | function drawPartition(partition, index) {
6 | // Set consistent styling for all partitions
7 | p.push();
8 | p.fill(255);
9 | p.stroke(0); // Changed from 100 to 0 to match original styling
10 | p.strokeWeight(1);
11 |
12 | // Draw partition rectangle
13 | p.rect(startX, partition.y, partitionWidth, partitionHeight);
14 | p.pop();
15 |
16 | // Draw partition label with current offset (matching original format)
17 | p.fill(0);
18 | p.noStroke();
19 | p.textAlign(p.RIGHT, p.CENTER);
20 | p.textSize(12);
21 | p.text(`P${index} (${partition.currentOffset})`, startX - 10, partition.y + partitionHeight / 2);
22 | }
23 |
24 | // Draw all records within a partition
25 | function drawPartitionRecords(partition) {
26 | for (const record of partition.records) {
27 | // Save drawing state
28 | p.push();
29 |
30 | // Generate fill color based on record key
31 | const fillColor = generateConsistentColor(p, record.key % 128, 0.75, 70, 90);
32 | const centerY = partition.y + partitionHeight / 2;
33 |
34 | // Draw Record circle with producer color fill
35 | p.fill(record.color);
36 | p.stroke(record.color);
37 | p.strokeWeight(2);
38 | p.ellipse(record.x, centerY, record.radius * 2, record.radius * 2);
39 |
40 | // Draw rectangle with key color
41 | const rectWidth = record.radius * 1.4;
42 | const rectHeight = 10;
43 | p.fill(fillColor);
44 | p.noStroke();
45 | p.rectMode(p.CENTER);
46 | p.rect(record.x, centerY, rectWidth, rectHeight);
47 |
48 | // Draw key text
49 | p.fill(0);
50 | p.noStroke();
51 | p.textAlign(p.CENTER, p.CENTER);
52 | p.textSize(10);
53 | p.text(record.key, record.x, centerY);
54 |
55 | // Processing Records circular progress bar
56 | if (record.isBeingProcessed && record.processingProgress !== undefined) {
57 | p.noFill();
58 | p.stroke(0, 100, 0);
59 | p.strokeWeight(3);
60 | p.arc(
61 | record.x,
62 | centerY,
63 | (record.radius + 1) * 2,
64 | (record.radius + 1) * 2,
65 | -p.HALF_PI,
66 | -p.HALF_PI + p.TWO_PI * record.processingProgress
67 | );
68 | }
69 |
70 | // Restore drawing state
71 | p.pop();
72 | }
73 | }
74 |
75 | function drawPartitionRecordsMovement(partitions, eventEmitter) {
76 | // Process each partition
77 | for (const partition of partitions) {
78 | // Skip empty partitions
79 | if (partition.records.length === 0) continue;
80 |
81 | // First pass: Move records from oldest to newest (FIFO order)
82 | for (let i = 0; i < partition.records.length; i++) {
83 | const record = partition.records[i];
84 |
85 | // Skip records being processed or waiting
86 | if (record.isBeingProcessed || record.isWaiting) continue;
87 |
88 | // Define the maximum position inside the partition
89 | const maxX = startX + partitionWidth - record.radius - 5;
90 |
91 | // Convert milliseconds to pixels per frame
92 | const framesForTransfer = record.speed / (1000 / 60); // at 60fps
93 | const pixelsPerFrame = framesForTransfer > 0 ? partitionWidth / framesForTransfer : 0;
94 |
95 | // First record can move freely
96 | if (i === 0) {
97 | const newX = Math.min(record.x + pixelsPerFrame, maxX);
98 |
99 | // Emit event when record reaches the end of partition
100 | if (record.x < maxX && newX >= maxX) {
101 | eventEmitter.emit('RECORD_REACHED_PARTITION_END', {
102 | recordId: record.id,
103 | partitionId: partition.id
104 | });
105 | }
106 |
107 | record.x = newX;
108 | continue;
109 | }
110 |
111 | // Other records: check collision with the record ahead
112 | const recordAhead = partition.records[i - 1];
113 | const minDistance = recordAhead.radius + record.radius;
114 | const maxPossibleX = recordAhead.x - minDistance;
115 |
116 | // Move without collision
117 | if (record.x < maxPossibleX) {
118 | record.x = Math.min(record.x + pixelsPerFrame, maxPossibleX);
119 | }
120 | }
121 |
122 | // Sort records by x position for proper drawing order
123 | partition.records.sort((a, b) => b.x - a.x);
124 |
125 | // Handle records being processed
126 | const processingRecords = partition.records.filter(r => r.isBeingProcessed);
127 | if (processingRecords.length === 0) continue;
128 |
129 | // Position processing records at the end of the partition
130 | const maxX = startX + partitionWidth - 5;
131 | for (const record of processingRecords) {
132 | record.x = maxX - record.radius;
133 | }
134 |
135 | // Ensure non-processing records don't overlap with processing ones
136 | const minNonProcessingX = maxX - (processingRecords[0].radius * 2) - 5;
137 | for (const record of partition.records) {
138 | if (!record.isBeingProcessed && record.x > minNonProcessingX) {
139 | record.x = minNonProcessingX;
140 | }
141 | }
142 | }
143 | }
144 |
145 | // Return public API
146 | return {
147 | drawPartition,
148 | drawPartitionRecords,
149 | drawPartitions(partitions) {
150 | for (let i = 0; i < partitions.length; i++) {
151 | drawPartition(partitions[i], i);
152 | drawPartitionRecords(partitions[i]);
153 | }
154 | },
155 | drawPartitionRecordsMovement
156 | };
157 | }
158 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 | # Kafka-like Stream Processing Simulation
3 |
4 | This project simulates a Kafka-like distributed streaming system with Producers, Partitions, and Consumers. It visualizes how data flows through the system and provides real-time metrics on performance.
5 | It's useful to understand how records flow within the system and distribute across Consumers.
6 |
7 | [🔥 Go play now on our online version 🔥](https://evoura.com/kafka-traffic-visualizer/)
8 |
9 | 
10 |
11 | ## License and author
12 | ❤️ Created by [Renato Mefi](https://www.linkedin.com/in/renatomefi/)
13 |
14 | Sponsored by [Evoura](https://evoura.com/) - Data Streaming and EDA experts consultancy
15 |
16 | This project is licensed under [CC BY-NC 4.0](LICENSE.md) - feel free to use and modify it for non-commercial purposes, but credit must be given to the original author.
17 |
18 | This project was created with Claude 3.7 as a way of learning it. It took hundreds of messages to get to this state, and the code does not represent the author's normal style of code, as it is 99% AI generated with guidance on code style.
19 |
20 | # Run
21 |
22 | ```
23 | $ npm start
24 | ```
25 |
26 | Visit [http://localhost:1234](http://localhost:1234)
27 | ‼️ Don't forget to open your browser console to see the logs
28 |
29 | # Key features
30 |
31 | ## Disclaimer
32 | It's not an exact Kafka engine copy, for many reasons.
33 | The code is meant to provide the minimum set of Kafka-like functionalities in order to provide a meaningful visualization while having key properties like ordering, partitioning, consumer assignment, etc.
34 | It's not representative of real-world scenarios; however, it can provide enough visibility into possible issues that are applicable to real scenarios.
35 |
36 | ## Ordering issues Detection
37 |
38 | The simulation tracks and detects ordering issues:
39 | - Records with the same key should be processed in order of their event time
40 | - When records with the same key are processed out of order, a warning is logged
41 | - This helps visualize the ordering guarantees (or lack thereof) in distributed stream processing
42 |
43 | ## Configuration Options
44 |
45 | Key configuration parameters:
46 | - `partitionsAmount`: Number of partitions in the system
47 | - `producersAmount`: Number of producers generating records
48 | - `consumersAmount`: Number of consumers processing records
49 | - `producerRate`: Records per second each producer generates
50 | - `producerDelayRandomFactor`: Random factor for producer delays (0-1)
51 | - `recordValueSizeMin`: Minimum record size in bytes
52 | - `recordValueSizeMax`: Maximum record size in bytes
53 | - `recordKeyRange`: Range of possible key values
54 | - `partitionBandwidth`: Network speed in bytes per second
55 | - `consumerThroughputMaxInBytes`: Maximum consumer processing capacity
56 | - `consumerAssignmentStrategy`: How partitions are assigned to consumers
57 |
58 | ## Metrics
59 |
60 | ### Global Metrics
61 | - `totalRecordsProduced`: Total number of records created by all producers
62 | - `totalRecordsConsumed`: Total number of records processed by all consumers
63 | - `totalBytesProduced`: Total bytes of data produced
64 | - `totalBytesConsumed`: Total bytes of data consumed
65 | - `avgProcessingTimeMs`: Average processing time across all records
66 | - `processingTimeSamples`: Number of samples used for average calculation
67 |
68 | ### Producer Metrics
69 | - `recordsProduced`: Total records produced by this producer
70 | - `bytesProduced`: Total bytes produced by this producer
71 | - `produceRate`: Current produce rate in bytes per second
72 | - `recordsRate`: Current produce rate in records per second
73 | - `lastUpdateTime`: Last time metrics were updated
74 |
75 | ### Consumer Metrics
76 | - `recordsConsumed`: Total records consumed by this consumer
77 | - `bytesConsumed`: Total bytes consumed by this consumer
78 | - `consumeRate`: Current consumption rate in bytes per second
79 | - `recordsRate`: Current consumption rate in records per second
80 | - `lastUpdateTime`: Last time metrics were updated
81 | - `processingTimes`: Recent processing times (last 10 records)
82 |
83 | # Core Components
84 |
85 | ## Producers
86 |
87 | ### Behavior
88 | - Produce records at a rate defined by `Config.producerRate` (records per second)
89 | - Random delay can be applied using `Config.producerDelayRandomFactor` (0-1s range)
90 | - Production scheduling is calculated using milliseconds-based timing
91 | - Records are assigned to partitions based on their key (using modulo partitioning)
92 |
93 | ### Schema
94 | ```javascript
95 | {
96 | id: Number, // Unique identifier
97 | lastProduceTime: Number // Timestamp of last record produced
98 | }
99 | ```
100 |
101 | ## Partitions
102 |
103 | ### Behavior
104 | - Receive records from producers
105 | - Move records along at network speed defined by `Config.partitionBandwidth` (bytes/second)
106 | - When a record reaches the end of the partition, it notifies the assigned consumer
107 | - Each partition has an offset counter that increments for each record
108 | - Records remain in the partition during processing and are removed when processing completes
109 |
110 | ### Schema
111 | ```javascript
112 | {
113 | id: Number, // Unique identifier
114 | records: Array, // Array of record objects in this partition
115 | currentOffset: Number // Current offset counter
116 | }
117 | ```
118 |
119 | ## Consumers
120 |
121 | ### Behavior
122 | - Process records that have reached the end of their assigned partitions
123 | - Assigned to partitions using strategies defined by `Config.consumerAssignmentStrategy`:
124 | - `round-robin`: Distributes partitions evenly across consumers
125 | - `range`: Divides partitions into continuous ranges per consumer
126 | - `sticky`: Attempts to maintain previous assignments when possible
127 | - `cooperative-sticky`: Uses round-robin but creates locality clustering
128 | - Have a throughput limit defined by `Config.consumerThroughputMaxInBytes`
129 | - Process records concurrently across all assigned partitions
130 | - Distribute processing capacity evenly across active records
131 | - Track processing state and progress for each record
132 | - Queue records that arrive while at maximum capacity
133 |
134 | ### Schema
135 | ```javascript
136 | {
137 | id: Number, // Unique identifier
138 | assignedPartitions: Array, // Array of partition IDs assigned to this consumer
139 | activePartitions: Object, // Map of partitionId -> record being processed
140 | processingTimes: Object, // Map of recordId -> {startTime, endTime}
141 | throughputMax: Number, // Maximum bytes per second this consumer can process
142 | processingQueues: Object, // Map of partitionId -> queue of records waiting
143 | transitRecords: Array, // Records visually moving from partition to consumer
144 | recordProcessingState: Object // Tracks bytes processed per record
145 | }
146 | ```
147 |
148 | ## Records
149 |
150 | ### Behavior
151 | - Created by producers with randomized characteristics
152 | - Flow through partitions at speed determined by record size and partition bandwidth
153 | - Have a unique ID, key, and value (size in bytes)
154 | - Size visually represented by radius (larger value = larger radius)
155 | - When they reach the end of a partition, they wait for consumer processing
156 | - Processing time depends on record size and consumer throughput
157 | - Tracked for ordering issues by key (out-of-order processing detection)
158 |
159 | ### Schema
160 | ```javascript
161 | {
162 | id: Number, // Unique record identifier
163 | key: Number, // Record key (determines partition)
164 | value: Number, // Size in bytes
165 | producerId: Number, // Producer that created this record
166 | partitionId: Number, // Partition this record belongs to
167 | speed: Number, // Speed in ms based on size and bandwidth
168 | offset: Number, // Position in the partition sequence
169 | eventTime: Number, // Timestamp when record was created
170 | isBeingProcessed: Boolean, // Whether record is currently being processed
171 | isWaiting: Boolean, // Whether record is waiting to be processed
172 | isProcessed: Boolean, // Whether record has been processed
173 | processingProgress: Number, // Processing progress (0-1)
174 | processingTimeMs: Number // Estimated processing time in milliseconds
175 | }
176 | ```
177 |
--------------------------------------------------------------------------------
/src/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
12 |
13 |
14 | Evoura:Kafka Topic Simulation
15 |
16 |
119 |
120 |
121 |
122 |
Kafka Traffic visual "Simulation"
123 |
124 |
135 |
136 |
137 |
138 |
139 |
146 |
147 |
154 |
155 |
162 |
163 |
164 |
Consumer Assignment:
165 |
166 |
172 |
173 |
174 |
175 |
182 |
183 |
190 |
191 |
192 |
193 |
200 |
201 |
208 |
209 |
216 |
217 |
224 |
225 |
232 |
233 |
234 |
235 |
236 |
237 |
238 |
239 |
240 |
241 |
242 |
243 |
--------------------------------------------------------------------------------
/src/config.js:
--------------------------------------------------------------------------------
// Live configuration object, mutated in place by the ConfigManager below.
// Other modules import `Config` and read the current values directly, so the
// object identity must never change — only its properties do.
export const Config = {};

// Declarative metadata for every configurable property: numeric bounds,
// default value, optional step size, and an optional display formatter.
// The UI controls (sliders, number inputs, selects) are driven entirely
// from this table; element ids follow the `<key>Slider` / `<key>Input` /
// `<key>Select` convention used by the ConfigManager.
export const ConfigMetadata = {
  // Number of partitions in the simulated topic.
  partitionsAmount: {
    min: 1,
    max: 32,
    default: 1,
    type: 'number'
  },

  // Number of producers generating records (0 pauses production).
  producersAmount: {
    min: 0,
    max: 32,
    default: 1,
    type: 'number'
  },

  // Number of consumers in the consumer group (0 means nothing is consumed).
  consumersAmount: {
    min: 0,
    max: 32,
    default: 1,
    type: 'number'
  },

  // Records per second each producer generates.
  producerRate: {
    min: 1,
    max: 16,
    default: 1,
    type: 'number'
  },

  // Random delay factor for producers (0-1); fractional, hence the formatter.
  producerDelayRandomFactor: {
    min: 0,
    max: 1,
    default: 0,
    step: 0.1,
    type: 'number',
    format: (value) => value.toFixed(1)
  },

  // Partition network speed in bytes per second.
  partitionBandwidth: {
    min: 1000,
    max: 32000,
    step: 1000,
    default: 1000,
    type: 'number'
  },

  // Partition-to-consumer assignment strategy (see README for semantics).
  consumerAssignmentStrategy: {
    choices: ['round-robin', 'range', 'sticky', 'cooperative-sticky'],
    default: 'sticky',
    type: 'string'
  },

  // Maximum bytes per second a single consumer can process.
  consumerThroughputMaxInBytes: {
    min: 1000,
    max: 2000000,
    step: 1000,
    default: 1000,
    type: 'number'
  },

  // Lower bound of the random record value size in bytes.
  // Kept <= recordValueSizeMax by the dependency wiring in the manager.
  recordValueSizeMin: {
    min: 1,
    max: 10000,
    default: 800,
    type: 'number'
  },

  // Upper bound of the random record value size in bytes.
  recordValueSizeMax: {
    min: 1,
    max: 10000,
    default: 1200,
    type: 'number'
  },

  // Range of possible record keys (keys determine the target partition).
  recordKeyRange: {
    min: 1,
    max: 128,
    default: 4,
    type: 'number'
  }
};
84 |
/**
 * Creates the configuration manager: a two-way binding layer between the
 * global `Config` object, the `ConfigMetadata` table, and the p5-managed DOM
 * controls. Supports change observers (`onChange`), programmatic updates
 * (`setValue`), and cross-property dependencies (min <= max coupling).
 *
 * Note: returns a plain object; `init(p)` must be called (with the p5
 * instance) before anything else.
 */
export default function createConfigManager() {
  return {
    _p: null,              // p5 instance, needed for DOM selection
    _uiElements: {},       // Map of config key -> {select|slider|input} p5 elements
    _observers: {},        // Map of config key (or '*') -> array of callbacks
    _initialized: false,

    // Initialize the configuration system.
    // Order matters: defaults -> find UI -> push defaults into UI ->
    // register dependencies -> attach listeners (so initial UI sync does
    // not fire observers).
    init(p) {
      this._p = p;

      // First initialize all values from defaults
      this._initFromDefaults();

      // Find and store UI elements
      this._findUIElements();

      // UPDATE UI ELEMENTS FIRST to set them to default values
      this._updateAllUI();

      // Set up dependencies between properties
      this._setupDependencies();

      // Attach event listeners to UI elements
      this._attachEventListeners();

      this._initialized = true;
      console.debug(JSON.stringify({type: "config_initialized", config: Config}));
      return this;
    },

    // Initialize configuration from defaults in ConfigMetadata.
    _initFromDefaults() {
      for (const [key, metadata] of Object.entries(ConfigMetadata)) {
        Config[key] = metadata.default;
      }
    },

    // Find and store references to UI elements.
    // Element ids follow the `<key>Select` / `<key>Slider` / `<key>Input`
    // convention; keys with no matching DOM elements are simply skipped,
    // so the page does not need a control for every property.
    _findUIElements() {
      this._uiElements = {};

      for (const key in ConfigMetadata) {
        const elements = {};

        // Try to find UI elements for this config property
        if (key === 'consumerAssignmentStrategy') {
          const select = this._p.select(`#${key}Select`);
          if (select && select.elt) {
            elements.select = select;
          }
        } else {
          const slider = this._p.select(`#${key}Slider`);
          const input = this._p.select(`#${key}Input`);

          if (slider && slider.elt) {
            elements.slider = slider;
          }

          if (input && input.elt) {
            elements.input = input;
          }
        }

        // Only store if we found any elements
        if (Object.keys(elements).length > 0) {
          this._uiElements[key] = elements;
        }
      }
    },

    // Attach event listeners to UI elements.
    _attachEventListeners() {
      // For each UI element set, attach appropriate event handlers
      for (const key in this._uiElements) {
        const elements = this._uiElements[key];

        if (elements.select) {
          elements.select.changed(() => {
            this._handleUIChange(key, 'select');
          });
        }

        if (elements.slider) {
          // Use both input and changed events for sliders to catch all changes
          // (duplicate events are harmless: _handleUIChange bails out when the
          // value has not actually changed)
          elements.slider.input(() => {
            this._handleUIChange(key, 'slider');
          });
          elements.slider.changed(() => {
            this._handleUIChange(key, 'slider');
          });
        }

        if (elements.input) {
          // Use both input and changed events for inputs to catch all changes
          elements.input.input(() => {
            this._handleUIChange(key, 'input');
          });
          elements.input.changed(() => {
            this._handleUIChange(key, 'input');
          });
        }
      }
    },

    // Update all UI elements with current configuration values.
    _updateAllUI() {
      for (const key in this._uiElements) {
        this._updateUI(key);
      }
    },

    // Handle a change coming from a UI element: read and validate the new
    // value, store it in Config, sync the sibling controls (excluding the
    // source element, to avoid clobbering in-progress typing), and notify
    // observers.
    _handleUIChange(key, sourceType) {
      const elements = this._uiElements[key];
      if (!elements) return;

      let value = undefined;
      let sourceElement = null;

      // Get value from the appropriate UI element
      if (sourceType === 'select' && elements.select) {
        value = elements.select.value();
        sourceElement = elements.select;
      } else if (sourceType === 'slider' && elements.slider) {
        value = this._parseValue(elements.slider.value(), key);
        sourceElement = elements.slider;
      } else if (sourceType === 'input' && elements.input) {
        value = this._parseValue(elements.input.value(), key);
        sourceElement = elements.input;
      }

      // Only proceed if we got a valid value
      if (value === undefined || value === null) return;

      // Update the configuration value (this will also update other UI elements)
      const oldValue = Config[key];

      // Update the value (validation may clamp it or fall back to oldValue)
      value = this._validateValue(key, value);
      Config[key] = value;

      // If value hasn't changed, don't proceed
      if (oldValue === value) return;

      if (console.debug) console.debug(JSON.stringify({
        type: "config_changed",
        key,
        newValue: value,
        oldValue,
        source: sourceType
      }));

      // Update UI elements (except source)
      this._updateUI(key, sourceElement);

      // Notify observers
      this._notifyObservers(key, value, oldValue);
    },

    // Set up dependencies between configuration properties.
    // Keeps recordValueSizeMin <= recordValueSizeMax in both directions by
    // dragging the other bound along when one crosses it.
    _setupDependencies() {
      // Min/Max value pairs
      this.onChange('recordValueSizeMin', (newValue) => {
        if (Config.recordValueSizeMax < newValue) {
          this.setValue('recordValueSizeMax', newValue);
        }
      });

      this.onChange('recordValueSizeMax', (newValue) => {
        if (Config.recordValueSizeMin > newValue) {
          this.setValue('recordValueSizeMin', newValue);
        }
      });
    },

    // Update UI elements for a specific configuration property.
    // `excludeElement` is the control the user is interacting with; it is
    // skipped so we do not fight the user's input.
    _updateUI(key, excludeElement = null) {
      const elements = this._uiElements[key];
      if (!elements) return;

      const value = Config[key];
      const metadata = ConfigMetadata[key];

      // Format display value if needed (text inputs get the formatted string;
      // sliders/selects always get the raw value)
      let displayValue = value;
      if (metadata.format) {
        displayValue = metadata.format(value);
      } else if (typeof value === 'number' && !Number.isInteger(value)) {
        displayValue = value.toFixed(1);
      }

      // Update select if it exists and is not the source element
      if (elements.select && elements.select !== excludeElement) {
        elements.select.elt.value = value;
      }

      // Update slider if it exists and is not the source element
      if (elements.slider && elements.slider !== excludeElement) {
        elements.slider.elt.value = value;
      }

      // Update input if it exists and is not the source element
      if (elements.input && elements.input !== excludeElement) {
        elements.input.elt.value = displayValue;
      }
    },

    // Parse a raw UI string according to the property's expected type.
    // Returns undefined for unparseable numbers so callers can bail out.
    _parseValue(value, key) {
      const metadata = ConfigMetadata[key];

      if (metadata.type === 'number') {
        const numValue = Number(value);
        if (isNaN(numValue)) return undefined;
        return numValue;
      }

      return value;
    },

    // Validate and normalize a value according to its metadata:
    // coerce type, clamp to [min, max], reject values outside `choices`
    // (falling back to the current Config value), and snap to `step`.
    _validateValue(key, value) {
      const metadata = ConfigMetadata[key];
      if (!metadata) return value;

      // Validate type
      if (metadata.type === 'number' && typeof value !== 'number') {
        value = Number(value);
        if (isNaN(value)) return Config[key]; // Return current value if invalid
      }

      // Validate range
      if (metadata.min !== undefined && value < metadata.min) {
        value = metadata.min;
      }
      if (metadata.max !== undefined && value > metadata.max) {
        value = metadata.max;
      }

      // Validate choices
      if (metadata.choices && !metadata.choices.includes(value)) {
        return Config[key]; // Return current value if invalid
      }

      // Handle step (round to the nearest multiple of `step`)
      if (metadata.type === 'number' && metadata.step) {
        value = Math.round(value / metadata.step) * metadata.step;
      }

      return value;
    },

    // PUBLIC API METHODS

    // Register a change observer for a configuration property.
    // Use the key '*' to observe every property. Returns `this` for chaining.
    onChange(key, callback) {
      if (!this._observers[key]) {
        this._observers[key] = [];
      }

      this._observers[key].push(callback);
      return this;
    },

    // Set a configuration value programmatically.
    // Returns false for unknown keys, true otherwise (including no-op sets).
    // `options.silent` suppresses both the UI sync and observer notification.
    setValue(key, value, options = {}) {
      // Make sure the key exists in our configuration
      if (!(key in ConfigMetadata)) return false;

      // Get current value for comparison
      const oldValue = Config[key];

      // Validate and normalize the value
      value = this._validateValue(key, value);

      // Don't proceed if value hasn't changed
      if (oldValue === value) return true;

      // Update the configuration
      Config[key] = value;

      // Update UI elements
      if (!options.silent) {
        this._updateUI(key);
      }

      // Notify observers
      if (!options.silent) {
        this._notifyObservers(key, value, oldValue);
      }

      return true;
    },

    // Invoke key-specific observers first, then global ('*') observers.
    _notifyObservers(key, newValue, oldValue) {
      if (this._observers[key]) {
        this._observers[key].forEach(callback => {
          callback(newValue, oldValue, key);
        });
      }

      // Global observers
      if (this._observers['*']) {
        this._observers['*'].forEach(callback => {
          callback(newValue, oldValue, key);
        });
      }
    },

    // Reset a configuration property to its default value.
    resetToDefault(key) {
      const metadata = ConfigMetadata[key];
      if (metadata && metadata.default !== undefined) {
        this.setValue(key, metadata.default);
      }
    },

    // Reset all configuration properties to their default values.
    resetAllToDefaults() {
      for (const key in ConfigMetadata) {
        this.resetToDefault(key);
      }
    }
  };
}
411 |
--------------------------------------------------------------------------------
/src/sketchSimulation.js:
--------------------------------------------------------------------------------
1 | import {createConsumerRenderer} from './canvas/consumers.js';
2 | import createProducerEffectsManager, {createProducerRenderer} from "./canvas/producers";
3 | import {createPartitionRenderer} from './canvas/partitions.js';
4 | import createConfigManager, {Config} from './config.js';
5 | import sharedState from './sharedState.js';
6 | import {generateConsistentColor} from './utils.js';
7 |
8 | const sketchSimulation = (p) => {
  // ------ Canvas, UI and Animations ------
  // Layout constants; all positions/sizes are in pixels.
  const CANVAS_WIDTH = 1100;
  const CANVAS_HEIGHT = 700;
  const CANVAS_PARTITION_WIDTH = 400;
  const CANVAS_PARTITION_HEIGHT = 30;
  const CANVAS_PARTITION_HEIGHT_SPACING = 20;
  const CANVAS_PARTITION_START_X = 200;
  const CANVAS_PARTITION_START_Y = 30;
  const CANVAS_PRODUCER_POSITION_X = 120;
  // Consumers are drawn to the right of the partition lane.
  const CANVAS_CONSUMER_POSITION_X = CANVAS_PARTITION_START_X + CANVAS_PARTITION_WIDTH + 50;
  // Record circles are scaled between these radii by record value size.
  const CANVAS_RECORD_RADIUS_MAX = 15;
  const CANVAS_RECORD_RADIUS_MIN = 6;
  const ANIMATION_PRODUCER_LINE_DURATION = 100;

  const ConfigManager = createConfigManager();

  // Dynamic canvas height based on content (grows with partition count)
  let canvasHeightDynamic = CANVAS_HEIGHT;

  // Record ID counter (monotonically increasing, unique across all producers)
  let recordIDIncrementCounter = 0;

  // Runtime Data structures (rebuilt by the initialize*/update* functions)
  let partitions = [];
  let producers = [];
  let consumers = [];

  // Canvas Components (renderers created in p.setup)
  let producerEffectsManager;
  let producerRenderer;
  let consumerRenderer;
  let partitionRenderer;

  // Metrics tracking with last-updated timestamps; timestamps come from
  // p.millis(). Mirrored into sharedState each frame for the metrics panel.
  let metrics = {
    startTime: 0,
    lastUpdateTime: 0,
    producers: {}, // Map of producer ID -> metrics
    consumers: {}, // Map of consumer ID -> metrics
    global: {
      totalRecordsProduced: 0,
      totalRecordsConsumed: 0,
      totalBytesProduced: 0,
      totalBytesConsumed: 0,
      avgProcessingTimeMs: 0,
      processingTimeSamples: 0
    }
  };

  // ------ EVENT SYSTEM ------
  // Event types for the reactive system; emitted by the simulation steps and
  // consumed by the metrics handlers in setupEventHandlers().
  const EVENTS = {
    RECORD_PRODUCED: 'record_produced',
    RECORD_REACHED_PARTITION_END: 'record_reached_partition_end',
    RECORD_PROCESSING_STARTED: 'record_processing_started',
    RECORD_PROCESSING_COMPLETED: 'record_processing_completed',
  };
66 |
67 | // Simple event emitter
68 | class EventEmitter {
69 | constructor() {
70 | this.events = {};
71 | }
72 |
73 | on(event, callback) {
74 | if (!this.events[event]) {
75 | this.events[event] = [];
76 | }
77 | this.events[event].push(callback);
78 | return this; // For chaining
79 | }
80 |
81 | emit(event, data) {
82 | if (!this.events[event]) return;
83 | this.events[event].forEach(callback => callback(data));
84 | }
85 | }
86 |
87 | // Global event emitter
88 | const eventEmitter = new EventEmitter();
89 |
  // One-time boot: builds the canvas, renderers, config bindings, event
  // handlers, and the initial partitions/producers/consumers state.
  p.setup = () => { // called once during boot
    // Create canvas and add it to the container div
    let canvas = p.createCanvas(CANVAS_WIDTH, canvasHeightDynamic);
    canvas.parent('canvas-simulation');

    // Initialize Canvas Components
    producerEffectsManager = createProducerEffectsManager(p);
    producerRenderer = createProducerRenderer(p, CANVAS_PRODUCER_POSITION_X);
    consumerRenderer = createConsumerRenderer(p, CANVAS_CONSUMER_POSITION_X);
    partitionRenderer = createPartitionRenderer(
      p,
      CANVAS_PARTITION_START_X,
      CANVAS_PARTITION_START_Y,
      CANVAS_PARTITION_WIDTH,
      CANVAS_PARTITION_HEIGHT,
      CANVAS_PARTITION_HEIGHT_SPACING
    );

    // NOTE(review): these two assignments are superseded by the full metrics
    // reset below — presumably redundant; confirm before removing.
    metrics.startTime = p.millis();
    metrics.lastUpdateTime = metrics.startTime;

    ConfigManager.init(p);

    // Set up config observers
    setupConfigObservers();

    // Set up event handlers
    setupEventHandlers();

    // Reset counters and state
    recordIDIncrementCounter = 0;

    // Reset metrics
    metrics = {
      startTime: p.millis(),
      lastUpdateTime: p.millis(),
      producers: {},
      consumers: {},
      global: {
        totalRecordsProduced: 0,
        totalRecordsConsumed: 0,
        totalBytesProduced: 0,
        totalBytesConsumed: 0,
        avgProcessingTimeMs: 0,
        processingTimeSamples: 0
      }
    };

    // Initialize data structures AFTER config is initialized
    initializePartitions();
    initializeProducers();
    initializeConsumers();

    setupOrderingIssuesDetection();

    // Update canvas height to accommodate partitions
    updateCanvasHeight();
  };
148 |
  // Per-frame loop: advance the simulation (produce, move, consume), then
  // render every layer, then publish the latest metrics to the shared state
  // consumed by the metrics sketch.
  p.draw = () => { // called 60 times/second
    p.background(240);

    // No need for handleControlChanges anymore - changes happen through observers
    producerEffectsManager.update();
    produceRecords();
    // Moves records along partitions; emits partition-end events on arrival
    partitionRenderer.drawPartitionRecordsMovement(partitions, eventEmitter);
    consumeRecords();

    // Draw simulation components
    partitionRenderer.drawPartitions(partitions);
    producerRenderer.drawProducers(producers, metrics);
    consumerRenderer.drawConsumersWithConnections(
      consumers,
      partitions,
      metrics,
      CANVAS_PARTITION_START_X,
      CANVAS_PARTITION_WIDTH,
      CANVAS_PARTITION_HEIGHT
    );
    producerEffectsManager.draw();

    // Update shared state with current metrics and consumers for the metrics panel
    sharedState.updateMetrics(metrics);
    sharedState.updateConsumers(consumers);
  };
175 |
  // Wires ConfigManager observers so structural config changes rebuild the
  // affected runtime data structures immediately.
  function setupConfigObservers() {
    // Register observers for configuration changes that require data structure updates
    ConfigManager.onChange('partitionsAmount', () => updatePartitions());
    ConfigManager.onChange('producersAmount', () => updateProducers());
    ConfigManager.onChange('consumersAmount', () => updateConsumers());

    // A strategy change triggers a rebalance, but only when there are
    // consumers to rebalance.
    ConfigManager.onChange('consumerAssignmentStrategy', () => {
      if (Config.consumersAmount > 0) {
        updateConsumers();
      }
    });

    // Bandwidth affects how fast in-flight records travel.
    ConfigManager.onChange('partitionBandwidth', () => updateAllRecordSpeeds());

    ConfigManager.onChange('consumerThroughputMaxInBytes', (value) => {
      // Update throughput for all existing consumers
      for (const consumer of consumers) {
        consumer.throughputMax = value;
      }
    });
  }
197 |
  // Subscribes metrics bookkeeping to simulation events. Per-producer and
  // per-consumer entries are created lazily so actors added after startup
  // (via config changes) are still counted.
  function setupEventHandlers() {
    // Set up reactive event handlers for metrics
    eventEmitter.on(EVENTS.RECORD_PRODUCED, (data) => {
      // Update producer metrics reactively
      if (!metrics.producers[data.producerId]) {
        metrics.producers[data.producerId] = {
          recordsProduced: 0,
          bytesProduced: 0,
          produceRate: 0,
          recordsRate: 0,
          lastUpdateTime: p.millis()
        };
      }

      metrics.producers[data.producerId].recordsProduced++;
      metrics.producers[data.producerId].bytesProduced += data.value;
      metrics.global.totalRecordsProduced++;
      metrics.global.totalBytesProduced += data.value;

      // Calculate rate based on time since last update
      const now = p.millis();
      const elapsed = (now - metrics.producers[data.producerId].lastUpdateTime) / 1000;
      if (elapsed > 0.1) { // Only update rate if enough time has passed
        // NOTE(review): only the latest record's bytes are divided by the
        // elapsed window; records arriving within the 100ms window are not
        // reflected, so the displayed rates are approximate — confirm this
        // is acceptable for the visualization.
        metrics.producers[data.producerId].produceRate = data.value / elapsed;
        metrics.producers[data.producerId].recordsRate = 1 / elapsed;
        metrics.producers[data.producerId].lastUpdateTime = now;
      }
    });

    eventEmitter.on(EVENTS.RECORD_PROCESSING_COMPLETED, (data) => {
      // Update consumer metrics reactively
      if (!metrics.consumers[data.consumerId]) {
        metrics.consumers[data.consumerId] = {
          recordsConsumed: 0,
          bytesConsumed: 0,
          consumeRate: 0,
          recordsRate: 0,
          lastUpdateTime: p.millis(),
          processingTimes: []
        };
      }

      metrics.consumers[data.consumerId].recordsConsumed++;
      metrics.consumers[data.consumerId].bytesConsumed += data.value;
      metrics.global.totalRecordsConsumed++;
      metrics.global.totalBytesConsumed += data.value;

      // Track processing time for this record
      metrics.consumers[data.consumerId].processingTimes.push(data.processingTimeMs);
      // Keep only the last 10 processing times
      if (metrics.consumers[data.consumerId].processingTimes.length > 10) {
        metrics.consumers[data.consumerId].processingTimes.shift();
      }

      // Update global average processing time (incremental running mean)
      metrics.global.avgProcessingTimeMs =
        (metrics.global.avgProcessingTimeMs * metrics.global.processingTimeSamples + data.processingTimeMs) /
        (metrics.global.processingTimeSamples + 1);
      metrics.global.processingTimeSamples++;

      // Calculate rate based on time since last update
      const now = p.millis();
      const elapsed = (now - metrics.consumers[data.consumerId].lastUpdateTime) / 1000;
      if (elapsed > 0.1) { // Only update rate if enough time has passed
        metrics.consumers[data.consumerId].consumeRate = data.value / elapsed;
        metrics.consumers[data.consumerId].recordsRate = 1 / elapsed;
        metrics.consumers[data.consumerId].lastUpdateTime = now;
      }
    });
  }
268 |
269 | function initializePartitions() {
270 | partitions = [];
271 |
272 | for (let i = 0; i < Config.partitionsAmount; i++) {
273 | partitions.push({
274 | id: i,
275 | y: CANVAS_PARTITION_START_Y + i * (CANVAS_PARTITION_HEIGHT + CANVAS_PARTITION_HEIGHT_SPACING),
276 | records: [],
277 | currentOffset: 0 // Initialize offset counter for each partition
278 | });
279 | }
280 | }
281 |
  // Rebuilds the producers array, spreading producers vertically across the
  // partition lane and seeding their metrics entries.
  function initializeProducers() {
    producers = [];

    // Calculate the top and bottom Y coordinates of partitions for centering
    const topPartitionY = CANVAS_PARTITION_START_Y;
    const bottomPartitionY = CANVAS_PARTITION_START_Y + (Config.partitionsAmount - 1) * (CANVAS_PARTITION_HEIGHT + CANVAS_PARTITION_HEIGHT_SPACING);

    for (let i = 0; i < Config.producersAmount; i++) {

      // Deterministic per-producer color so colors are stable across rebuilds
      const color = generateConsistentColor(p, i);

      // Initially position producers evenly across the partition range
      // (Math.max(1, ...) avoids mapping over a zero-length range when
      // there is a single producer)
      const y = p.map(i, 0, Math.max(1, Config.producersAmount - 1),
        topPartitionY + CANVAS_PARTITION_HEIGHT / 2,
        bottomPartitionY + CANVAS_PARTITION_HEIGHT / 2);

      producers.push({
        id: i,
        y: y,
        color: color
      });

      // Initialize producer metrics
      metrics.producers[i] = {
        recordsProduced: 0,
        bytesProduced: 0,
        produceRate: 0,
        recordsRate: 0,
        lastUpdateTime: p.millis()
      };
    }

    // Adjust producer positions to prevent overlap
    adjustProducerPositions();
  }
317 |
  // Rebuilds the consumer group: computes partition assignments via the
  // configured strategy, positions each consumer next to its partitions,
  // and seeds per-consumer processing state and metrics.
  function initializeConsumers() {
    // Create an empty array for consumers
    consumers = [];

    // If no consumers requested, just return
    if (Config.consumersAmount <= 0) return;

    // Get partition assignments using the rebalance algorithm
    // (partitionAssignments[j] is the consumer id owning partition j)
    let partitionAssignments = rebalanceConsumerGroup(
      Config.partitionsAmount,
      Config.consumersAmount,
      Config.consumerAssignmentStrategy
    );

    for (let i = 0; i < Config.consumersAmount; i++) {
      // Find partitions assigned to this consumer
      const assignedPartitions = [];
      for (let j = 0; j < Config.partitionsAmount; j++) {
        if (partitionAssignments[j] === i) {
          assignedPartitions.push(j);
        }
      }

      // Calculate average y position based on assigned partitions so the
      // consumer is drawn centered on the partitions it owns
      let avgY = 0;
      if (assignedPartitions.length > 0) {
        for (const partitionId of assignedPartitions) {
          avgY += partitions[partitionId].y + CANVAS_PARTITION_HEIGHT / 2;
        }
        avgY = avgY / assignedPartitions.length;
      } else {
        // Default position for unassigned consumers: stacked below the lane
        avgY = CANVAS_PARTITION_START_Y + Config.partitionsAmount * (CANVAS_PARTITION_HEIGHT + CANVAS_PARTITION_HEIGHT_SPACING) + 50 + i * 70;
      }

      const color = generateConsistentColor(p, i, 0.5, 70, 90);

      consumers.push({
        id: i,
        y: avgY,
        color: color,
        assignedPartitions: assignedPartitions,
        // Structure for concurrent processing
        activePartitions: {}, // Map of partitionId -> record being processed
        processingTimes: {}, // Map of recordId -> {startTime, endTime}
        throughputMax: Config.consumerThroughputMaxInBytes, // Bytes per second this consumer can process
        processingQueues: {}, // Map of partitionId -> queue of records waiting
        transitRecords: []
      });

      // Initialize consumer metrics
      metrics.consumers[i] = {
        recordsConsumed: 0,
        bytesConsumed: 0,
        consumeRate: 0,
        recordsRate: 0,
        lastUpdateTime: p.millis(),
        processingTimes: []
      };

      // Initialize processing queues for each assigned partition
      for (const partitionId of assignedPartitions) {
        consumers[i].processingQueues[partitionId] = [];
      }
    }

    // Only adjust positions if we have consumers
    if (Config.consumersAmount > 0) {
      adjustConsumerPositions();
    }
  }
389 |
// Lays out consumers vertically so none overlap, while keeping each consumer
// as close as possible to its ideal (partition-aligned) position.
function adjustConsumerPositions() {
    // If no consumers, just return
    if (consumers.length === 0) return;

    // Remember each consumer's ideal position.
    // BUGFIX: keyed by consumer object rather than array index — the sorts
    // below reorder `consumers`, so the previous index-based lookup
    // (originalPositions[i]) paired a consumer with ANOTHER consumer's ideal
    // position after sorting.
    const idealPositions = new Map(consumers.map(c => [c, c.y]));

    // Define minimum spacing between consumer centers
    const MIN_CONSUMER_SPACING = 70;

    // Sort consumers by their assigned position
    consumers.sort((a, b) => a.y - b.y);

    // For unassigned consumers (those with no partitions), distribute them evenly
    // below the partition area
    const unassignedConsumers = consumers.filter(c => c.assignedPartitions.length === 0);
    if (unassignedConsumers.length > 0) {
        const bottomY = CANVAS_PARTITION_START_Y + Config.partitionsAmount * (CANVAS_PARTITION_HEIGHT + CANVAS_PARTITION_HEIGHT_SPACING) + 50;

        for (let i = 0; i < unassignedConsumers.length; i++) {
            unassignedConsumers[i].y = bottomY + i * MIN_CONSUMER_SPACING;
        }

        // Resort consumers by position after adjusting unassigned consumers
        consumers.sort((a, b) => a.y - b.y);
    }

    // Fix overlaps top-to-bottom: push any consumer that is too close to the
    // one above it down to the minimum spacing.
    for (let i = 1; i < consumers.length; i++) {
        const prevConsumer = consumers[i - 1];
        const currentConsumer = consumers[i];

        if (currentConsumer.y - prevConsumer.y < MIN_CONSUMER_SPACING) {
            currentConsumer.y = prevConsumer.y + MIN_CONSUMER_SPACING;
        }
    }

    // Iteratively nudge assigned consumers back toward their ideal positions
    // without violating the spacing constraints against their neighbours.
    const maxIterations = 3; // Limit optimization attempts
    for (let iter = 0; iter < maxIterations; iter++) {
        let improved = false;

        for (let i = 0; i < consumers.length; i++) {
            const consumer = consumers[i];

            // Only try to adjust consumers with assigned partitions
            if (consumer.assignedPartitions.length === 0) continue;

            const originalY = idealPositions.get(consumer);

            if (i > 0 && i < consumers.length - 1) {
                // Consumer is between others: constrained from both sides.
                const minY = consumers[i - 1].y + MIN_CONSUMER_SPACING; // Can't go above this
                const maxY = consumers[i + 1].y - MIN_CONSUMER_SPACING; // Can't go below this

                if (originalY >= minY && originalY <= maxY) {
                    // Can move directly to the ideal position
                    if (consumer.y !== originalY) {
                        consumer.y = originalY;
                        improved = true;
                    }
                } else if (originalY < minY && consumer.y > minY) {
                    // Move up as far as the constraint allows
                    consumer.y = minY;
                    improved = true;
                } else if (originalY > maxY && consumer.y < maxY) {
                    // Move down as far as the constraint allows
                    consumer.y = maxY;
                    improved = true;
                }
            } else if (i === 0) {
                // First consumer: only constrained from below
                const maxY = consumers.length > 1 ? consumers[i + 1].y - MIN_CONSUMER_SPACING : Infinity;
                if (originalY <= maxY && consumer.y !== originalY) {
                    consumer.y = originalY;
                    improved = true;
                }
            } else if (i === consumers.length - 1) {
                // Last consumer: only constrained from above
                const minY = consumers[i - 1].y + MIN_CONSUMER_SPACING;
                if (originalY >= minY && consumer.y !== originalY) {
                    consumer.y = originalY;
                    improved = true;
                }
            }
        }

        // Converged: no consumer moved in this pass
        if (!improved) break;
    }
}
485 |
// Keeps producers from overlapping vertically on the canvas.
function adjustProducerPositions() {
    // Minimum vertical distance between producer centers.
    const MIN_PRODUCER_SPACING = 70;

    // Order producers top-to-bottom first.
    producers.sort((a, b) => a.y - b.y);

    // Walk down the list and push any producer that sits too close to the
    // one above it down to exactly the minimum spacing.
    for (let i = 1; i < producers.length; i++) {
        const above = producers[i - 1];
        const current = producers[i];

        if (current.y - above.y < MIN_PRODUCER_SPACING) {
            current.y = above.y + MIN_PRODUCER_SPACING;
        }
    }
}
506 |
// Grows (or shrinks back to the minimum) the canvas so that every partition,
// consumer, and producer fits with a comfortable margin below it.
function updateCanvasHeight() {
    // Smallest allowed canvas height.
    const minHeight = 700;

    // Bottom edge of the partition area.
    let lowestY = CANVAS_PARTITION_START_Y + Config.partitionsAmount * (CANVAS_PARTITION_HEIGHT + CANVAS_PARTITION_HEIGHT_SPACING);

    // Extend past any consumer or producer drawn lower than the partitions,
    // leaving a 50px margin beneath each element.
    for (const element of [...consumers, ...producers]) {
        lowestY = Math.max(lowestY, element.y + 50);
    }

    // Extra breathing room at the very bottom of the canvas.
    const requiredHeight = lowestY + 100;

    canvasHeightDynamic = Math.max(minHeight, requiredHeight);
    p.resizeCanvas(CANVAS_WIDTH, canvasHeightDynamic);
}
537 |
// Installs an out-of-order-processing detector on the event bus.
// For every completed record it appends to a per-key history of
// (eventTime, processingTime) pairs and warns when a record with an EARLIER
// event time finishes processing AFTER a same-key record with a LATER event
// time — i.e. the key's records were committed out of event order.
// Returns {keyProcessingHistory} so the state can be inspected externally.
function setupOrderingIssuesDetection() {
    // Per-key processing history:
    // key -> [{recordId, eventTime, processingTime, offset, partition, consumerId}]
    const keyProcessingHistory = {};

    // Run the check every time a consumer finishes processing a record
    eventEmitter.on(EVENTS.RECORD_PROCESSING_COMPLETED, (data) => {
        const key = data.key;

        // Initialize history for this key if needed
        if (!keyProcessingHistory[key]) {
            keyProcessingHistory[key] = [];
        }

        // Add this record to the processing history
        keyProcessingHistory[key].push({
            recordId: data.id,
            eventTime: data.eventTime,    // when the record was produced
            processingTime: p.millis(),   // when processing finished (now)
            offset: data.offset,
            partition: data.partitionId,
            consumerId: data.consumerId
        });

        // Get the history for this key
        const history = keyProcessingHistory[key];

        // Only check for out-of-order if we have more than one record
        if (history.length > 1) {
            // Sort a copy by processing time to see the order records were actually processed
            const processedOrder = [...history].sort((a, b) => a.processingTime - b.processingTime);

            // Find the most recently processed record (last in the sorted array)
            const newestProcessed = processedOrder[processedOrder.length - 1];

            // Look through previously processed records to find any out-of-order conditions
            for (let i = processedOrder.length - 2; i >= 0; i--) {
                // If this record has a later event time but was processed earlier
                if (processedOrder[i].eventTime > newestProcessed.eventTime) {
                    // "older"/"newer" refer to EVENT time, not processing order
                    const olderRecord = newestProcessed;
                    const newerRecord = processedOrder[i];
                    const timeDifference = newerRecord.eventTime - olderRecord.eventTime;

                    console.warn("Out of order processing detected: {" +
                        "\"key\": " + key + ", " +
                        "\"olderRecord\": {" +
                        "\"id\": " + olderRecord.recordId + ", " +
                        "\"eventTime\": " + olderRecord.eventTime.toFixed(1) + ", " +
                        "\"partition\": " + olderRecord.partition + ", " +
                        "\"consumerId\": " + olderRecord.consumerId +
                        "}, " +
                        "\"newerRecord\": {" +
                        "\"id\": " + newerRecord.recordId + ", " +
                        "\"eventTime\": " + newerRecord.eventTime.toFixed(1) + ", " +
                        "\"partition\": " + newerRecord.partition + ", " +
                        "\"consumerId\": " + newerRecord.consumerId +
                        "}, " +
                        "\"timeDifference\": " + timeDifference.toFixed(1) + " ms" +
                        "}");

                    // Log only once per event
                    break;
                }
            }
        }

        // Limit history size to prevent memory issues (keep newest 100 entries)
        if (history.length > 100) {
            keyProcessingHistory[key] = history.slice(-100);
        }
    });

    return {keyProcessingHistory};
}
611 |
// Rebuilds the partition list after a config change, carrying over the
// records and offsets of partitions that still exist afterwards.
function updatePartitions() {
    // Snapshot records/offsets of partitions that survive the resize.
    const savedRecords = {};
    const savedOffsets = {};
    partitions.forEach((partition, index) => {
        if (index < Config.partitionsAmount) {
            savedRecords[index] = partition.records;
            savedOffsets[index] = partition.currentOffset || 0;
        }
    });

    // Recreate partitions from the current config.
    initializePartitions();

    // Put the saved state back into the freshly created partitions.
    for (let index = 0; index < Config.partitionsAmount; index++) {
        if (savedRecords[index]) {
            partitions[index].records = savedRecords[index];
        }
        if (savedOffsets[index] !== undefined) {
            partitions[index].currentOffset = savedOffsets[index];
        }
    }

    // The partition count changed, so consumers need fresh assignments.
    initializeConsumers();

    // The layout may have grown or shrunk vertically.
    updateCanvasHeight();
}
642 |
// Rebuilds the producer list after a config change, preserving metrics for
// producer ids that still exist afterwards.
function updateProducers() {
    // Snapshot producer metrics before the rebuild.
    const previousMetrics = {...metrics.producers};

    // Recreate producers from the current config.
    initializeProducers();

    // Carry metrics over for producer ids present both before and after.
    const carryCount = Math.min(Config.producersAmount, Object.keys(previousMetrics).length);
    for (let i = 0; i < carryCount; i++) {
        if (previousMetrics[i]) {
            metrics.producers[i] = previousMetrics[i];
        }
    }

    // Producers may now extend beyond the current canvas.
    updateCanvasHeight();
}
660 |
// Rebuilds the consumer group after a config change, then restores as much
// per-consumer state (metrics, transit records, active records, processing
// queues) as still applies under the new partition assignments.
function updateConsumers() {
    // Save old metrics and processing state
    const oldMetrics = {...metrics.consumers};
    const oldConsumers = [...consumers];

    // Initialize new consumers
    initializeConsumers();

    // Restore metrics where applicable
    for (let i = 0; i < Config.consumersAmount && i < Object.keys(oldMetrics).length; i++) {
        if (oldMetrics[i]) {
            metrics.consumers[i] = oldMetrics[i];
        }
    }

    // Restore processing state for assigned partitions
    for (let i = 0; i < Config.consumersAmount && i < oldConsumers.length; i++) {
        // Keep only transit records whose partition is still assigned to this
        // consumer after the rebalance (map keys are strings, hence parseInt)
        consumers[i].transitRecords = oldConsumers[i].transitRecords?.filter(record => {
            return consumers[i].assignedPartitions.includes(parseInt(record.partitionId));
        }) || [];

        // Copy active partitions and processing queues
        if (oldConsumers[i].activePartitions) {
            for (const partitionId in oldConsumers[i].activePartitions) {
                if (consumers[i].assignedPartitions.includes(parseInt(partitionId))) {
                    // Copy the in-flight record for this partition, if any
                    const record = oldConsumers[i].activePartitions[partitionId];
                    if (record) {
                        consumers[i].activePartitions[partitionId] = record;

                        // Copy its processing-time entry so UI timing stays continuous
                        if (oldConsumers[i].processingTimes && oldConsumers[i].processingTimes[record.id]) {
                            consumers[i].processingTimes[record.id] = oldConsumers[i].processingTimes[record.id];
                        }
                    }
                }
            }
        }

        // Copy processing queues for partitions still assigned to this consumer
        if (oldConsumers[i].processingQueues) {
            for (const partitionId in oldConsumers[i].processingQueues) {
                if (consumers[i].assignedPartitions.includes(parseInt(partitionId))) {
                    consumers[i].processingQueues[partitionId] = oldConsumers[i].processingQueues[partitionId] || [];
                }
            }
        }
    }

    // Update canvas height
    updateCanvasHeight();
}
714 |
// Asks every producer whether enough time has passed since its last record;
// if so, creates and emits a new record for it.
function produceRecords() {
    const now = p.millis();

    for (const producer of producers) {
        // Base gap between records in ms, optionally stretched by a random
        // factor between 1.0 and 1.0 + producerDelayRandomFactor.
        let delayMs = 1000 / Config.producerRate;
        if (Config.producerDelayRandomFactor > 0) {
            delayMs *= 1.0 + p.random(0, Config.producerDelayRandomFactor);
        }

        // Produce when this is the producer's first record, or its delay
        // since the previous record has elapsed.
        const dueForRecord = !producer.lastProduceTime || now - producer.lastProduceTime >= delayMs;
        if (dueForRecord) {
            createAndEmitRecord(producer);
            producer.lastProduceTime = now;
        }
    }
}
739 |
// Creates one record for `producer`: picks a random size and key, routes it
// to its key-derived partition (claiming the next offset), appends it to that
// partition, emits RECORD_PRODUCED, and triggers the producer->partition
// visual effect.
function createAndEmitRecord(producer) {
    // Generate record characteristics
    const recordSize = p.random(Config.recordValueSizeMin, Config.recordValueSizeMax); // value size in bytes (float)
    const recordRadius = calculateRecordRadius(recordSize);
    const recordKey = p.int(p.random(1, Config.recordKeyRange + 1)); // integer key in [1, recordKeyRange]
    const partitionId = recordKey % Config.partitionsAmount; // same key always lands on the same partition
    const recordSpeed = calculateRecordSpeedMS(recordSize);
    const eventTime = p.millis(); // Add creation timestamp

    // Get the current offset and increment it for this partition
    const offset = partitions[partitionId].currentOffset++;

    // Create the record object
    const record = {
        id: recordIDIncrementCounter++,
        key: recordKey,
        value: recordSize,
        radius: recordRadius,
        producerId: producer.id,
        partitionId: partitionId,
        speed: recordSpeed,
        offset: offset,
        eventTime: eventTime, // Store creation time with the record

        // UI: record enters at the left edge of the partition lane
        x: CANVAS_PARTITION_START_X + recordRadius,
        color: producer.color,

        // State flags for UI
        isBeingProcessed: false,
        isWaiting: false,
        isProcessed: false
    };

    // Add the record to the partition
    partitions[partitionId].records.push(record);

    // Emit record produced event
    eventEmitter.emit(EVENTS.RECORD_PRODUCED, record);

    // Add visual effect (animated line) from the producer to the partition
    producerEffectsManager.addEffect(
        CANVAS_PRODUCER_POSITION_X + 15,
        producer.y,
        CANVAS_PARTITION_START_X,
        partitions[partitionId].y + CANVAS_PARTITION_HEIGHT / 2,
        producer.color,
        ANIMATION_PRODUCER_LINE_DURATION
    );

    // Structured (JSON-like) production log for debugging/analysis
    console.log("Record produced: {" +
        "\"id\": " + record.id + ", " +
        "\"key\": " + record.key + ", " +
        "\"valueBytes\": " + Math.round(recordSize) + ", " +
        "\"partition\": " + partitionId + ", " +
        "\"offset\": " + offset + ", " +
        "\"producer\": " + producer.id + ", " +
        "\"producedAt\": " + p.millis().toFixed(1) + ", " +
        "\"eventTime\": " + eventTime.toFixed(1) + "}"
    );
}
801 |
// Maps a record's byte size onto a display radius within the configured
// min/max radius range.
function calculateRecordRadius(size) {
    const minSize = Config.recordValueSizeMin;
    const maxSize = Config.recordValueSizeMax;

    // Degenerate range (min === max): every record gets the maximum radius.
    if (minSize === maxSize) {
        return CANVAS_RECORD_RADIUS_MAX;
    }

    // Invalid range (min > max): fall back to the midpoint radius.
    if (minSize > maxSize) {
        return (CANVAS_RECORD_RADIUS_MIN + CANVAS_RECORD_RADIUS_MAX) / 2;
    }

    // Linear interpolation from the size range onto the radius range.
    return p.map(size, minSize, maxSize, CANVAS_RECORD_RADIUS_MIN, CANVAS_RECORD_RADIUS_MAX);
}
822 |
// Calculate record speed based on partition bandwidth
function calculateRecordSpeedMS(recordSize) {
    // Transfer time: bytes divided by bandwidth (bytes/s), scaled to ms.
    const transferSeconds = recordSize / Config.partitionBandwidth;
    return transferSeconds * 1000;
}
829 |
// Recomputes the speed of every live record after a bandwidth config change.
function updateAllRecordSpeeds() {
    // Records still travelling inside partitions.
    for (const partition of partitions) {
        partition.records.forEach((record) => {
            record.speed = calculateRecordSpeedMS(record.value);
        });
    }

    // Records in transit to consumers (transitRecords may be absent).
    for (const consumer of consumers) {
        const transitRecords = consumer.transitRecords || [];
        transitRecords.forEach((record) => {
            record.speed = calculateRecordSpeedMS(record.value);
        });
    }
}
848 |
// Per-frame consumer loop. For each consumer:
//   1. Splits its byte throughput evenly across all records it is actively
//      processing and advances each record's bytesProcessed by the elapsed
//      time slice; records that reach bytesTotal are completed (event emitted,
//      removed from partition, next queued record started).
//   2. Starts processing the first record of any assigned partition whose
//      lead record has reached the right edge of the partition lane.
function consumeRecords() {
    const currentTime = p.millis();

    for (const consumer of consumers) {
        // Ensure we have the necessary data structures (lazily initialized)
        if (!consumer.activePartitions) consumer.activePartitions = {};
        if (!consumer.recordProcessingState) consumer.recordProcessingState = {}; // recordId -> byte-progress state
        if (!consumer.processingQueues) consumer.processingQueues = {};
        if (!consumer.lastUpdateTime) consumer.lastUpdateTime = currentTime;

        // Calculate time elapsed since last update
        const elapsedTimeMs = currentTime - consumer.lastUpdateTime;
        consumer.lastUpdateTime = currentTime;

        // Skip processing if no time has passed (prevents division by zero)
        if (elapsedTimeMs <= 0) continue;

        // Collect active records (one per partition at most)
        const activeRecords = [];
        for (const partitionId in consumer.activePartitions) {
            const record = consumer.activePartitions[partitionId];
            if (record) {
                activeRecords.push({
                    id: record.id,
                    partitionId: partitionId, // NOTE: string key from for...in
                    record: record
                });
            }
        }

        const activeRecordCount = activeRecords.length;

        if (activeRecordCount > 0) {
            // Distribute throughput evenly across active records
            const throughputPerRecord = consumer.throughputMax / activeRecordCount;

            // Bytes each active record advances during this time slice
            const bytesProcessedPerRecord = (throughputPerRecord * elapsedTimeMs) / 1000;

            // Process each active record
            for (const activeRecord of activeRecords) {
                const record = activeRecord.record;
                const partitionId = activeRecord.partitionId;

                // Initialize processing state if needed (e.g. restored records)
                if (!consumer.recordProcessingState[record.id]) {
                    consumer.recordProcessingState[record.id] = {
                        startTime: currentTime,
                        bytesProcessed: 0,
                        bytesTotal: record.value,
                        partitionId: partitionId,
                        lastProgressUpdate: currentTime
                    };
                }

                const state = consumer.recordProcessingState[record.id];

                // Update bytes processed
                state.bytesProcessed += bytesProcessedPerRecord;
                state.lastProgressUpdate = currentTime;

                // Update visual progress indicator (capped below 1.0 until done)
                record.processingProgress = Math.min(state.bytesProcessed / state.bytesTotal, 0.99);

                // Check if record is complete
                if (state.bytesProcessed >= state.bytesTotal) {
                    const finishedTime = p.millis();
                    // Record is finished; work on a copy and drop it from active partitions
                    const finishedRecord = {...record};
                    delete consumer.activePartitions[partitionId];

                    // Calculate actual processing time and any overshoot bytes
                    const actualTime = currentTime - state.startTime;
                    const lostBytes = Math.max(0, state.bytesProcessed - state.bytesTotal);

                    // Clean up state
                    delete consumer.recordProcessingState[record.id];

                    // Mark record as processed
                    finishedRecord.isBeingProcessed = false;
                    finishedRecord.isProcessed = true;

                    // Emit completion event with processing metrics
                    eventEmitter.emit(EVENTS.RECORD_PROCESSING_COMPLETED, {
                        ...finishedRecord,
                        consumerId: consumer.id,
                        processingTimeMs: actualTime,
                        lostBytes: lostBytes
                    });

                    // Now remove the record from the partition since processing is complete
                    const partition = partitions[partitionId];
                    if (partition) {
                        // Find and remove this record from the partition
                        const recordIndex = partition.records.findIndex(r => r.id === record.id);
                        if (recordIndex >= 0) {
                            partition.records.splice(recordIndex, 1);
                        }
                    }

                    // Calculate end-to-end latency in milliseconds
                    const e2eLatencyMs = finishedTime - record.eventTime;

                    // Structured (JSON-like) completion log for debugging/analysis
                    console.log("Record processing completed: {" +
                        "\"id\": " + record.id + ", " +
                        "\"key\": " + record.key + ", " +
                        "\"valueBytes\": " + Math.round(record.value) + ", " +
                        "\"partition\": " + partitionId + ", " +
                        "\"offset\": " + record.offset + ", " +
                        "\"consumer\": " + consumer.id + ", " +
                        "\"actualTimeMs\": " + Math.round(actualTime) + ", " +
                        "\"lostBytes\": " + Math.round(lostBytes) + ", " +
                        "\"committedAt\": " + finishedTime.toFixed(1) + ", " +
                        "\"e2eLatencyMs\": " + Math.round(e2eLatencyMs) + "}"
                    );

                    // If there are more records queued for this partition, start the next one
                    if (consumer.processingQueues[partitionId] && consumer.processingQueues[partitionId].length > 0) {
                        const nextRecord = consumer.processingQueues[partitionId].shift();
                        startProcessingRecord(consumer, nextRecord, partitionId, currentTime);
                    }
                }
            }

            // After processing current active records, update expected completion times for UI
            recalculateProcessingTimes(consumer, currentTime);
        }

        // Check all assigned partitions for new records that have reached the end
        for (const partitionId of consumer.assignedPartitions) {
            // Skip this partition if it's already processing a record
            if (consumer.activePartitions[partitionId]) continue;

            // Check if there's a record at the end of this partition
            const partition = partitions[partitionId];
            if (partition && partition.records.length > 0) {
                const firstRecord = partition.records[0];

                // Check if record has reached the end (right edge) of the partition
                if (firstRecord.x >= CANVAS_PARTITION_START_X + CANVAS_PARTITION_WIDTH - firstRecord.radius - 5) {
                    // Don't remove the record from the partition - keep it there during processing
                    // Just reference it in the consumer's active records

                    // Start processing this record
                    startProcessingRecord(consumer, firstRecord, partitionId, currentTime);

                    // Mark record as being processed by this consumer (but it stays in the partition)
                    transferRecordToConsumer(consumer, firstRecord, partitionId);
                }
            }
        }
    }
}
1002 |
// Begins processing `record` on `consumer` for `partitionId`: registers it as
// the partition's active record, initializes byte-progress tracking, updates
// the record's UI flags, syncs any matching transit-record animation, emits
// RECORD_PROCESSING_STARTED, and recalculates all expected completion times.
function startProcessingRecord(consumer, record, partitionId, currentTime) {
    // Ensure necessary data structures exist (lazily initialized)
    if (!consumer.activePartitions) consumer.activePartitions = {};
    if (!consumer.recordProcessingState) consumer.recordProcessingState = {};
    if (!consumer.lastUpdateTime) consumer.lastUpdateTime = currentTime;

    // Add record to active partitions (one active record per partition)
    consumer.activePartitions[partitionId] = record;

    // Initialize processing state with byte tracking
    consumer.recordProcessingState[record.id] = {
        startTime: currentTime,
        bytesProcessed: 0,
        bytesTotal: record.value,
        partitionId: partitionId,
        lastProgressUpdate: currentTime
    };

    // Estimate processing time for UI/metrics, assuming throughput is split
    // evenly across the (now updated) number of active records
    const activeRecordCount = Object.keys(consumer.activePartitions).length;
    const throughputPerRecord = consumer.throughputMax / activeRecordCount;
    const estimatedProcessingTimeMs = (record.value / throughputPerRecord) * 1000;

    // Update record state flags for rendering
    record.isBeingProcessed = true;
    record.isWaiting = false;
    record.processingTimeMs = estimatedProcessingTimeMs;
    record.processingProgress = 0;

    // If there's a transit record for this record, update its properties for synchronized animation
    if (consumer.transitRecords) {
        const transitRecord = consumer.transitRecords.find(tr => tr.id === record.id);
        if (transitRecord) {
            transitRecord.processingStartTime = currentTime;
            transitRecord.estimatedProcessingTimeMs = estimatedProcessingTimeMs;
        }
    }

    // Emit event for processing start
    eventEmitter.emit(EVENTS.RECORD_PROCESSING_STARTED, {
        ...record,
        consumerId: consumer.id,
        estimatedTimeMs: estimatedProcessingTimeMs
    });

    // Structured (JSON-like) start log for debugging/analysis
    console.log("Record processing started: {" +
        "\"id\": " + record.id + ", " +
        "\"key\": " + record.key + ", " +
        "\"valueBytes\": " + Math.round(record.value) + ", " +
        "\"partition\": " + partitionId + ", " +
        "\"offset\": " + record.offset + ", " +
        "\"consumer\": " + consumer.id + ", " +
        "\"estimatedTimeMs\": " + Math.round(estimatedProcessingTimeMs) + "}"
    );

    // Adding a record changes every active record's throughput share, so
    // recalculate all expected completion times
    recalculateProcessingTimes(consumer, currentTime);
}
1061 |
// Recomputes each active record's expected completion time, given that the
// consumer's byte throughput is shared equally among all active records.
function recalculateProcessingTimes(consumer, currentTime) {
    // Collect the records currently being processed (at most one per partition).
    const activeRecords = Object.entries(consumer.activePartitions)
        .filter(([, record]) => Boolean(record))
        .map(([partitionId, record]) => ({ id: record.id, partitionId, record }));

    if (activeRecords.length === 0) return;

    // Equal share of the consumer's capacity per active record.
    const throughputPerRecord = consumer.throughputMax / activeRecords.length;

    for (const { record, partitionId } of activeRecords) {
        const state = consumer.recordProcessingState[record.id];
        if (!state) continue;

        // Time needed to finish the remaining bytes at the current share.
        const bytesRemaining = Math.max(0, state.bytesTotal - state.bytesProcessed);
        const timeRemainingMs = (bytesRemaining / throughputPerRecord) * 1000;

        // Store the expected completion window for visualization.
        if (!consumer.processingTimes) consumer.processingTimes = {};
        consumer.processingTimes[record.id] = {
            startTime: state.startTime,
            endTime: currentTime + timeRemainingMs,
            partitionId: partitionId
        };

        // Progress bar value, capped just below 1 until completion fires.
        record.processingProgress = Math.min(state.bytesProcessed / state.bytesTotal, 0.99);
    }
}
1113 |
// Mark a record as being processed by a consumer (no longer creates a transit path)
function transferRecordToConsumer(consumer, record, partitionId, isWaiting = false) {
    // The record stays inside its partition; only its state flags change so
    // the UI renders it as processing (or queued) for this consumer.
    record.isWaiting = isWaiting;
    record.isBeingProcessed = !isWaiting;
    record.processingConsumerId = consumer.id; // which consumer owns this record
}
1122 |
// Consumer partition assignment
/**
 * Computes which consumer owns each partition under a given strategy.
 *
 * @param {number} partitions - total number of partitions
 * @param {number} consumerCount - number of consumers in the group
 * @param {string} [strategy='round-robin'] - one of 'range', 'sticky',
 *   'cooperative-sticky', 'round-robin' (unknown values fall back to round-robin)
 * @returns {number[]} array of length `partitions`; element i is the owning
 *   consumer id, or -1 for every partition when there are no consumers
 */
function rebalanceConsumerGroup(partitions, consumerCount, strategy = 'round-robin') {
    // Array to store partition assignments (which consumer owns which partition)
    const assignments = new Array(partitions).fill(-1);

    // No consumers: every partition stays unassigned
    if (consumerCount <= 0) return assignments;

    // FIX: each case body gets its own braces — lexical (const/let)
    // declarations in unbraced switch cases share a single block scope,
    // which is error-prone and flagged by ESLint's no-case-declarations.
    switch (strategy) {
        case 'range': {
            // Range strategy: contiguous ranges of partitions per consumer;
            // the first `remainder` consumers take one extra partition.
            const partitionsPerConsumer = Math.floor(partitions / consumerCount);
            const remainder = partitions % consumerCount;

            let startIndex = 0;
            for (let i = 0; i < consumerCount; i++) {
                // Calculate how many partitions this consumer gets
                const numPartitions = partitionsPerConsumer + (i < remainder ? 1 : 0);

                // Assign this range of partitions to the consumer
                for (let j = 0; j < numPartitions; j++) {
                    if (startIndex + j < partitions) {
                        assignments[startIndex + j] = i;
                    }
                }

                startIndex += numPartitions;
            }
            break;
        }

        case 'sticky': {
            // Sticky strategy: contiguous chunks of ceil(partitions / consumerCount);
            // any overflow goes to the last consumer.
            const partitionsPerConsumer = Math.ceil(partitions / consumerCount);

            for (let i = 0; i < partitions; i++) {
                const consumerId = Math.floor(i / partitionsPerConsumer);
                assignments[i] = consumerId < consumerCount ? consumerId : consumerCount - 1;
            }
            break;
        }

        case 'cooperative-sticky': {
            // First, do round-robin assignment
            for (let i = 0; i < partitions; i++) {
                assignments[i] = i % consumerCount;
            }

            // Then, adjust to create some locality clustering
            if (partitions >= consumerCount * 2) {
                for (let c = 0; c < consumerCount; c++) {
                    // Try to give each consumer a small cluster of partitions
                    const clusterSize = Math.floor(partitions / consumerCount / 2);
                    const startPos = c * clusterSize;

                    for (let i = 0; i < clusterSize && startPos + i < partitions; i++) {
                        const currentOwner = assignments[startPos + i];
                        if (currentOwner !== c) {
                            // Count partitions currently owned by each consumer
                            const consumerPartitionCounts = new Array(consumerCount).fill(0);
                            for (let j = 0; j < partitions; j++) {
                                consumerPartitionCounts[assignments[j]]++;
                            }

                            // Only reassign if it doesn't create too much imbalance
                            if (consumerPartitionCounts[currentOwner] > consumerPartitionCounts[c]) {
                                assignments[startPos + i] = c;
                            }
                        }
                    }
                }
            }
            break;
        }

        case 'round-robin':
        default: {
            // Round-robin strategy: distribute partitions evenly across consumers
            for (let i = 0; i < partitions; i++) {
                assignments[i] = i % consumerCount;
            }
            break;
        }
    }

    return assignments;
}
1206 |
// ------ UTILITIES ------
// Builds a p5 color from HSB components while leaving the sketch in RGB mode.
function colorFromHSB(h, s, b) {
    p.colorMode(p.HSB, 360, 100, 100); // temporarily interpret args as HSB
    const result = p.color(h, s, b);
    p.colorMode(p.RGB, 255, 255, 255); // restore the default RGB mode
    return result;
}
1214 | };
1215 |
1216 | export default sketchSimulation;
1217 |
--------------------------------------------------------------------------------