├── config ├── grafana │ ├── conf │ │ └── custom.ini │ └── provisioning │ │ ├── dashboards │ │ ├── dashboard.yml │ │ └── ingestion-agent.json │ │ └── datasources │ │ └── prometheus.yml ├── prometheus.yml └── valkey.conf ├── docker-compose.yml ├── workbook.sql ├── demo_database.sql ├── CHANGELOG.md ├── README.md └── audio-arrow-streamer.html /config/grafana/conf/custom.ini: -------------------------------------------------------------------------------- 1 | [dashboards] 2 | min_refresh_interval = 1s 3 | 4 | [auth.anonymous] 5 | enabled = true 6 | org_role = Admin 7 | 8 | [security] 9 | allow_embedding = true 10 | 11 | [panels] 12 | disable_sanitize_html = true 13 | 14 | [server] 15 | enforce_domain = false -------------------------------------------------------------------------------- /config/prometheus.yml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 1s 3 | evaluation_interval: 1s 4 | 5 | scrape_configs: 6 | - job_name: "prometheus" 7 | static_configs: 8 | - targets: ["localhost:9090"] 9 | 10 | - job_name: "boilstream" 11 | static_configs: 12 | - targets: ["host.docker.internal:8081"] 13 | metrics_path: /metrics 14 | scrape_interval: 1s 15 | -------------------------------------------------------------------------------- /config/grafana/provisioning/dashboards/dashboard.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | providers: 4 | - name: 'BoilStream Dashboards' 5 | orgId: 1 6 | folder: 'BoilStream' 7 | type: file 8 | disableDeletion: false 9 | updateIntervalSeconds: 10 10 | allowUiUpdates: true 11 | options: 12 | path: /etc/grafana/provisioning/dashboards 13 | foldersFromFilesStructure: true -------------------------------------------------------------------------------- /config/grafana/provisioning/datasources/prometheus.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 
| datasources: 4 | - name: Prometheus 5 | type: prometheus 6 | access: proxy 7 | url: http://prometheus:9090 8 | isDefault: true 9 | editable: false 10 | jsonData: 11 | timeInterval: "1s" 12 | queryTimeout: "60s" 13 | httpMethod: "POST" 14 | exemplarTraceIdDestinations: 15 | - name: trace_id 16 | datasourceUid: prometheus -------------------------------------------------------------------------------- /config/valkey.conf: -------------------------------------------------------------------------------- 1 | # Valkey configuration for Boil Stream Ingestion Agent 2 | # This is a simplified configuration for development 3 | 4 | # Network 5 | bind 0.0.0.0 6 | port 6379 7 | protected-mode no 8 | 9 | # Persistence 10 | dir /data 11 | appendonly yes 12 | appendfsync everysec 13 | 14 | # Memory management 15 | maxmemory 256mb 16 | maxmemory-policy allkeys-lru 17 | 18 | # Logging 19 | loglevel notice 20 | logfile "" 21 | 22 | # CPU Affinity 23 | # Set if environment requires CPU pinning 24 | # cpu-affinity 1 25 | 26 | # Connection limits 27 | timeout 0 28 | tcp-keepalive 300 29 | maxclients 10000 30 | 31 | # Other performance settings 32 | io-threads 2 -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | # MinIO for local testing 3 | minio: 4 | image: quay.io/minio/minio:latest 5 | container_name: minio 6 | restart: unless-stopped 7 | ports: 8 | - "9000:9000" # API port 9 | - "9001:9001" # Web console port 10 | environment: 11 | MINIO_ROOT_USER: minioadmin 12 | MINIO_ROOT_PASSWORD: minioadmin 13 | MINIO_PROMETHEUS_AUTH_TYPE: "public" 14 | volumes: 15 | - minio_data:/data 16 | command: server /data --console-address ":9001" 17 | healthcheck: 18 | test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"] 19 | interval: 5s 20 | timeout: 5s 21 | retries: 3 22 | networks: 23 | - boilstream-network 24 | 25 | # Create 
default buckets on startup 26 | minio-setup: 27 | image: quay.io/minio/mc:latest 28 | container_name: minio-setup 29 | depends_on: 30 | - minio 31 | entrypoint: > 32 | /bin/sh -c " 33 | sleep 5; 34 | /usr/bin/mc config host add minio http://minio:9000 minioadmin minioadmin; 35 | /usr/bin/mc mb --ignore-existing minio/test-bucket; 36 | /usr/bin/mc mb --ignore-existing minio/ingestion-data; 37 | /usr/bin/mc anonymous set download minio/test-bucket; 38 | exit 0; 39 | " 40 | networks: 41 | - boilstream-network 42 | 43 | # Prometheus for metrics collection 44 | prometheus: 45 | image: prom/prometheus:latest 46 | container_name: prometheus 47 | restart: unless-stopped 48 | ports: 49 | - "9090:9090" 50 | volumes: 51 | - ./config/prometheus.yml:/etc/prometheus/prometheus.yml:ro 52 | - prometheus_data:/prometheus 53 | command: 54 | - "--config.file=/etc/prometheus/prometheus.yml" 55 | - "--storage.tsdb.path=/prometheus" 56 | - "--web.console.libraries=/etc/prometheus/console_libraries" 57 | - "--web.console.templates=/etc/prometheus/consoles" 58 | - "--web.enable-lifecycle" 59 | - "--storage.tsdb.min-block-duration=2h" 60 | - "--storage.tsdb.max-block-duration=2h" 61 | - "--query.lookback-delta=1s" 62 | - "--storage.tsdb.retention.time=7d" # 7 days retention for free tier 63 | healthcheck: 64 | test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:9090/-/healthy"] 65 | interval: 10s 66 | timeout: 5s 67 | retries: 3 68 | networks: 69 | - boilstream-network 70 | # Add extra hosts to resolve host.docker.internal on all platforms 71 | extra_hosts: 72 | - "host.docker.internal:host-gateway" 73 | 74 | # Grafana for visualization and monitoring dashboards 75 | grafana: 76 | image: grafana/grafana:latest 77 | container_name: grafana 78 | restart: unless-stopped 79 | ports: 80 | - "3000:3000" 81 | volumes: 82 | - grafana_data:/var/lib/grafana 83 | - ./config/grafana/provisioning:/etc/grafana/provisioning:ro 84 | environment: 85 | - 
GF_SECURITY_ADMIN_USER=admin 86 | - GF_SECURITY_ADMIN_PASSWORD=admin 87 | - GF_USERS_ALLOW_SIGN_UP=false 88 | - GF_DASHBOARDS_MIN_REFRESH_INTERVAL=1s 89 | - GF_AUTH_ANONYMOUS_ENABLED=true 90 | - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin 91 | - GF_INSTALL_PLUGINS= 92 | depends_on: 93 | - prometheus 94 | healthcheck: 95 | test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:3000/api/health"] 96 | interval: 10s 97 | timeout: 5s 98 | retries: 3 99 | networks: 100 | - boilstream-network 101 | # Add extra hosts to resolve host.docker.internal on all platforms 102 | extra_hosts: 103 | - "host.docker.internal:host-gateway" 104 | 105 | superset: 106 | image: apache/superset:latest 107 | container_name: superset 108 | restart: unless-stopped 109 | ports: 110 | - "8088:8088" 111 | environment: 112 | - SUPERSET_SECRET_KEY=your_secret_key_here_change_this_to_something_secure 113 | extra_hosts: 114 | - "host.docker.internal:host-gateway" 115 | volumes: 116 | - superset_home:/app/superset_home 117 | command: > 118 | bash -c " 119 | pip install psycopg2-binary && 120 | superset fab create-admin 121 | --username admin 122 | --firstname Superset 123 | --lastname Admin 124 | --email admin@superset.com 125 | --password admin && 126 | superset db upgrade && 127 | superset init && 128 | superset run -p 8088 --with-threads --reload --debugger --host=0.0.0.0 129 | " 130 | 131 | volumes: 132 | superset_home: 133 | driver: local 134 | minio_data: 135 | driver: local 136 | prometheus_data: 137 | driver: local 138 | grafana_data: 139 | driver: local 140 | 141 | networks: 142 | boilstream-network: 143 | driver: bridge 144 | -------------------------------------------------------------------------------- /workbook.sql: -------------------------------------------------------------------------------- 1 | -- ------------------------------------- 2 | -- Load Airport extension 3 | INSTALL airport; 4 | LOAD airport; 5 | 6 | SELECT function_name FROM duckdb_functions() WHERE 
function_name LIKE 'airport%'; 7 | SELECT airport_version(), airport_user_agent(); 8 | 9 | -- NYC Yellow Taxi rides 10 | CREATE TABLE boilstream.s3.nyc( 11 | VendorID INTEGER, 12 | tpep_pickup_datetime TIMESTAMP, 13 | tpep_dropoff_datetime TIMESTAMP, 14 | passenger_count BIGINT, 15 | trip_distance DOUBLE, 16 | RatecodeID BIGINT, 17 | store_and_fwd_flag VARCHAR, 18 | PULocationID INTEGER, 19 | DOLocationID INTEGER, 20 | payment_type BIGINT, 21 | fare_amount DOUBLE, 22 | extra DOUBLE, 23 | mta_tax DOUBLE, 24 | tip_amount DOUBLE, 25 | tolls_amount DOUBLE, 26 | improvement_surcharge DOUBLE, 27 | total_amount DOUBLE, 28 | congestion_surcharge DOUBLE, 29 | Airport_fee DOUBLE, 30 | cbd_congestion_fee double 31 | ); 32 | 33 | -- Attach boilstream ingestion port into itself, so you can write to it with SQL 34 | ATTACH 'boilstream' as data (TYPE AIRPORT, location 'grpc://localhost:50051/'); 35 | -- Download NYC Yellow Taxi tripdata parquet files from Internet and ingest them through BoilStream 36 | insert into data.s3.nyc select * from parquet_scan('yellow_tripdata_2025-*.parquet'); 37 | -- BoilStream will create a VIEW over the nyc table / topic, once it gets data and you have DuckDB 38 | -- local persistence enabled (i.e. on-disk caching into DuckDB databases) 39 | -- The Postgres interface connects directly to BoilStream in-memory DuckDB database that has 40 | -- real-time view over the ingested data, while also enabling concurrent writers and readers. 41 | select COUNT(*) from nyc; 42 | select * from nyc order by passenger_count asc limit 10; 43 | 44 | 45 | -- Streaming Topic metadata (catalog) 46 | -- The "topics" table is visible for DuckDB Airport clients along 47 | -- ..with the Arrow schema on "topic_schemas" 48 | -- Derived views are materialised views. 
49 | select * from boilstream.topics; 50 | select * from boilstream.topic_schemas; 51 | select * from boilstream.derived_views; 52 | select COUNT(*) from memory.boilstream.derived_views; 53 | 54 | -- Streaming Topic management 55 | CREATE TABLE boilstream.s3.people (name VARCHAR, age INT, tags VARCHAR[]); 56 | CREATE TABLE boilstream.s3.teens AS SELECT name FROM boilstream.s3.people WHERE age > 12 AND age < 20; 57 | CREATE TABLE boilstream.s3.adults AS SELECT name FROM boilstream.s3.people WHERE age = 50; 58 | CREATE TABLE boilstream.s3.oldies AS SELECT name FROM boilstream.s3.people WHERE age = 80; 59 | DROP TABLE boilstream.s3.people; 60 | DROP TABLE boilstream.s3.teens; 61 | DROP TABLE boilstream.s3.adults; 62 | DROP TABLE boilstream.s3.oldies; 63 | 64 | -- With the Airport extesion, attach ourselves for writing to the topic 65 | -- ..Just like from any DuckDB client with Airport extension 66 | detach data; 67 | ATTACH 'boilstream' as data (TYPE AIRPORT, location 'grpc://localhost:50051/'); 68 | show databases; 69 | show all tables; 70 | INSERT INTO data.s3.people 71 | SELECT 72 | 'boilstream_' || i::VARCHAR as name, 73 | (i % 100) + 1 as age, 74 | ['airport', 'datasketches'] as tags 75 | FROM generate_series(1, 6000000) as t(i); 76 | -- After couple of seconds, BoilStream creates VIEW into memory.main named "people" (topic name) 77 | -- memory.main has views over the DuckDB persisted database tables (topics) 78 | select * from duckdb_views() where database_name='memory' and schema_name='main'; 79 | 80 | select COUNT(*) from people; 81 | select COUNT(*) from teens; 82 | select COUNT(*) from adults; 83 | select COUNT(*) from oldies; 84 | select COUNT(*) from people where age < 50; 85 | select COUNT(*), age from people group by age limit 100; 86 | 87 | -- ------------------------------------- 88 | -- Postgres Interface Types testing 89 | -- For complete and extensive type testing, see demo_database.sql 90 | -- It fully works with Power BI 91 | CREATE TABLE IF NOT 
EXISTS psql_decimal128_test AS 92 | SELECT i AS id, CAST(i * 3.14 AS DECIMAL(22,2)) AS value FROM range(5) t(i); 93 | 94 | describe (select id, value::VARCHAR from psql_decimal128_test); 95 | select id, value::VARCHAR from psql_decimal128_test; 96 | 97 | describe (select id, value::DECIMAL(22,2) from psql_decimal128_test); 98 | select id, value::DECIMAL(22,2) from psql_decimal128_test; 99 | 100 | SELECT uuid() as uuid_v4, 101 | gen_random_uuid() as gen_uuid_v4, 102 | uuidv7() as uuid_v7; 103 | 104 | SELECT 12345.67::DECIMAL(22,2) as price, 105 | 0.00::DECIMAL(22,2) as zero, 106 | -999.99::DECIMAL(22,2) as negative; 107 | 108 | describe (SELECT ['1aa', 'b2', 'c3', 'd4', 'z5'] as string_array); 109 | SELECT ['1aa', 'b2', 'c3', 'd4', 'z5'] as string_array; 110 | 111 | SELECT COUNT(*) as total_count, COUNT(oid) as non_null_oids FROM pg_catalog.pg_type; 112 | 113 | DESCRIBE (SELECT [1, 2, 3, 4, 5] as int_array); 114 | SELECT [1, 2, 3, 4, 5] as int_array; 115 | 116 | SELECT '2023-12-25 14:30:45'::TIMESTAMP as basic_timestamp, 117 | '2023-12-25 14:30:45.123456'::TIMESTAMP as timestamp_with_microseconds, 118 | '2023-12-25 14:30:45+02:00'::TIMESTAMPTZ as timestamp_with_tz, 119 | NOW() as current_timestamp, 120 | '2023-12-25'::DATE as date_only, 121 | '14:30:45'::TIME as time_only, 122 | INTERVAL '1 day 2 hours 30 minutes' as interval_example, 123 | to_timestamp(1703512245) as from_epoch, 124 | date_trunc('hour', NOW()) as truncated_hour, 125 | date_part('year', NOW()) as year_part, 126 | strptime('25/12/2023 14:30:45', '%d/%m/%Y %H:%M:%S') as parsed_timestamp; 127 | 128 | describe (select INTERVAL '1 day 2 hours 30 minutes' as interval_example); 129 | select INTERVAL '1 day 2 hours 30 minutes' as interval_example 130 | 131 | SELECT json_object('name', 'John', 'age', 30, 'city', 'New York') as json_obj; 132 | 133 | SELECT '\x48656c6c6f'::BLOB as hello_blob, '\x576f726c64'::BLOB as world_blob; 134 | 135 | CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy'); 136 | SELECT 
enum_code('happy'::mood) as happy_code, 137 | enum_first(NULL::mood) as first_value, 138 | enum_last(NULL::mood) as last_value; 139 | 140 | CREATE VIEW test_view AS SELECT 1 as test_col; 141 | SELECT relname, relkind, relnamespace 142 | FROM pg_catalog.pg_class WHERE relname = 'test_view'; -------------------------------------------------------------------------------- /demo_database.sql: -------------------------------------------------------------------------------- 1 | -- === Schema (keeps things organized) === 2 | CREATE SCHEMA IF NOT EXISTS demo; 3 | 4 | -- === Boolean === 5 | CREATE OR REPLACE TABLE demo.bool_samples(val BOOLEAN); 6 | INSERT INTO demo.bool_samples 7 | VALUES (TRUE),(FALSE),(TRUE),(FALSE),(TRUE),(FALSE),(TRUE),(FALSE),(TRUE),(FALSE); 8 | 9 | -- === Signed integers === 10 | CREATE OR REPLACE TABLE demo.tinyint_samples(val TINYINT); 11 | INSERT INTO demo.tinyint_samples VALUES (-128),(-1),(0),(1),(12),(34),(56),(78),(100),(127); 12 | 13 | CREATE OR REPLACE TABLE demo.smallint_samples(val SMALLINT); 14 | INSERT INTO demo.smallint_samples VALUES (-32768),(-12345),(-1),(0),(1),(99),(500),(12345),(20000),(32767); 15 | 16 | CREATE OR REPLACE TABLE demo.int_samples(val INTEGER); 17 | INSERT INTO demo.int_samples VALUES (-2147483648),(-999999999),(-1),(0),(1),(42),(123456),(2000000000),(2147480000),(2147483647); 18 | 19 | CREATE OR REPLACE TABLE demo.bigint_samples(val BIGINT); 20 | INSERT INTO demo.bigint_samples VALUES 21 | (-9223372036854775808),(-9000000000000000000),(-1),(0),(1),(42),(1234567890123),(9000000000000000000),(9223372036854770000),(9223372036854775807); 22 | 23 | -- === Unsigned integers === 24 | CREATE OR REPLACE TABLE demo.utinyint_samples(val UTINYINT); 25 | INSERT INTO demo.utinyint_samples VALUES (0),(1),(2),(3),(10),(100),(200),(240),(254),(255); 26 | 27 | CREATE OR REPLACE TABLE demo.usmallint_samples(val USMALLINT); 28 | INSERT INTO demo.usmallint_samples VALUES (0),(1),(2),(3),(1000),(10000),(40000),(50000),(65000),(65535); 29 
| 30 | CREATE OR REPLACE TABLE demo.uinteger_samples(val UINTEGER); 31 | INSERT INTO demo.uinteger_samples VALUES (0),(1),(2),(3),(1000),(1000000),(4000000000),(4200000000),(4294967000),(4294967295); 32 | 33 | CREATE OR REPLACE TABLE demo.ubigint_samples(val UBIGINT); 34 | INSERT INTO demo.ubigint_samples VALUES 35 | (0),(1),(2),(3),(1000),(1000000),(1000000000000),(18446744073709500000),(18446744073709551614),(18446744073709551615); 36 | 37 | -- === Huge integers (128-bit) === 38 | CREATE OR REPLACE TABLE demo.hugeint_samples(val HUGEINT); 39 | INSERT INTO demo.hugeint_samples VALUES 40 | (-170141183460469231731687303715884105728), -- min 41 | (-1),(0),(1), 42 | (170141183460469231731687303715884105727), -- max 43 | (123456789012345678901234567890), 44 | (-123456789012345678901234567890), 45 | (99999999999999999999999999999), 46 | (-99999999999999999999999999999), 47 | (42); 48 | 49 | -- === Floating point & Decimal === 50 | CREATE OR REPLACE TABLE demo.real_samples(val REAL); 51 | INSERT INTO demo.real_samples VALUES (-1e10),(-3.14),(-0.0),(0.0),(1.0),(3.14159),(1e-5),(1e5),(12345.678),(42.0); 52 | 53 | CREATE OR REPLACE TABLE demo.double_samples(val DOUBLE); 54 | INSERT INTO demo.double_samples VALUES (-1e308),(-2.5),(0.0),(1.0),(2.5),(3.141592653589793),(1e-10),(1e10),(9.99e99),(42.42); 55 | 56 | CREATE OR REPLACE TABLE demo.decimal_samples(val DECIMAL(38,10)); 57 | INSERT INTO demo.decimal_samples VALUES 58 | (-9999999999.1234567890),(-1.0000000000),(0.0000000000),(1.0000000000), 59 | (3.1415926535),(2.7182818281),(1234567890.1234567890), 60 | (0.0000000001),(9999999999.9999999999),(42.4200000000); 61 | 62 | -- === Character / String & BLOB === 63 | CREATE OR REPLACE TABLE demo.varchar_samples(val VARCHAR); 64 | INSERT INTO demo.varchar_samples VALUES 65 | (''),('a'),('hello'),('DuckDB'),('😀 emoji'), 66 | ('multi word string'),('UPPER lower'),('12345'),('json-ish {"a":1}'),('end'); 67 | 68 | CREATE OR REPLACE TABLE demo.blob_samples(val BLOB); 69 | -- Hex 
literal X'..' 70 | INSERT INTO demo.blob_samples VALUES 71 | (X''), -- empty 72 | (X'00'), 73 | (X'FF'), 74 | (X'DEADBEEF'), 75 | (X'CAFEBABE'), 76 | (X'01020304'), 77 | (X'FFFFFFFF'), 78 | (X'A1B2C3D4'), 79 | (X'1122334455'), 80 | (X'ABCD'); 81 | 82 | -- === UUID === 83 | CREATE OR REPLACE TABLE demo.uuid_samples(val UUID); 84 | INSERT INTO demo.uuid_samples VALUES 85 | ('00000000-0000-0000-0000-000000000000'), 86 | ('11111111-1111-1111-1111-111111111111'), 87 | ('123e4567-e89b-12d3-a456-426614174000'), 88 | ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'), 89 | ('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'), 90 | ('cccccccc-cccc-cccc-cccc-cccccccccccc'), 91 | ('dddddddd-dddd-dddd-dddd-dddddddddddd'), 92 | ('eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'), 93 | ('ffffffff-ffff-ffff-ffff-ffffffffffff'), 94 | ('00000000-0000-0000-0000-000000000001'); 95 | 96 | -- === Dates & times === 97 | CREATE OR REPLACE TABLE demo.date_samples(val DATE); 98 | INSERT INTO demo.date_samples VALUES 99 | ('0001-01-01'),('1970-01-01'),('1999-12-31'), 100 | ('2000-01-01'),('2020-02-29'),('2024-02-29'), 101 | ('2025-01-01'),('2025-08-19'),('9999-12-31'),('2010-07-15'); 102 | 103 | CREATE OR REPLACE TABLE demo.time_samples(val TIME); 104 | INSERT INTO demo.time_samples VALUES 105 | ('00:00:00'),('00:00:00.123'),('06:30:00'), 106 | ('12:00:00'),('12:00:00.999999'),('18:45:10'), 107 | ('23:59:59.999999'),('01:02:03'),('13:37:59'),('21:21:21'); 108 | 109 | CREATE OR REPLACE TABLE demo.timestamp_samples(val TIMESTAMP); 110 | INSERT INTO demo.timestamp_samples VALUES 111 | ('1970-01-01 00:00:00'), 112 | ('1999-12-31 23:59:59.999'), 113 | ('2000-01-01 00:00:00'), 114 | ('2020-02-29 12:34:56'), 115 | ('2024-02-29 23:59:59.999999'), 116 | ('2025-01-01 00:00:00'), 117 | ('2025-08-19 15:00:00'), 118 | ('2010-07-15 08:09:10'), 119 | ('1995-05-23 03:04:05'), 120 | ('2030-12-31 23:59:59'); 121 | 122 | -- TIMESTAMP WITH TIME ZONE (aka TIMESTAMPTZ) 123 | CREATE OR REPLACE TABLE demo.timestamptz_samples(val TIMESTAMPTZ); 124 
| INSERT INTO demo.timestamptz_samples VALUES 125 | ('1970-01-01 00:00:00+00'), 126 | ('1999-12-31 23:59:59+00'), 127 | ('2000-01-01 00:00:00-05'), 128 | ('2020-02-29 12:34:56+03'), 129 | ('2024-02-29 23:59:59.999999-03'), 130 | ('2025-01-01 00:00:00+00'), 131 | ('2025-08-19 15:00:00-03'), 132 | ('2010-07-15 08:09:10+09'), 133 | ('1995-05-23 03:04:05-08'), 134 | ('2030-12-31 23:59:59+14'); 135 | 136 | -- === INTERVAL === 137 | CREATE OR REPLACE TABLE demo.interval_samples(val INTERVAL); 138 | INSERT INTO demo.interval_samples VALUES 139 | (INTERVAL '1' SECOND), 140 | (INTERVAL '1' MINUTE), 141 | (INTERVAL '1' HOUR), 142 | (INTERVAL '1' DAY), 143 | (INTERVAL '1' MONTH), 144 | (INTERVAL '1' YEAR), 145 | (INTERVAL '2 days 03:04:05'), 146 | (INTERVAL '6 months 15 days'), 147 | (INTERVAL '3 years 2 months 1 day'), 148 | (INTERVAL '90 minutes'); 149 | 150 | -- === JSON === 151 | CREATE OR REPLACE TABLE demo.json_samples(val JSON); 152 | INSERT INTO demo.json_samples VALUES 153 | ('null'::JSON), 154 | ('true'::JSON), 155 | ('123'::JSON), 156 | ('"text"'::JSON), 157 | ('{"a":1}'::JSON), 158 | ('{"a":1,"b":[1,2,3]}'::JSON), 159 | ('[1,2,3]'::JSON), 160 | ('[{"k":"v"},{"k":"w"}]'::JSON), 161 | ('{"nested":{"x":10,"y":[false, true]}}'::JSON), 162 | ('{"empty":{}}'::JSON); 163 | 164 | -- === Nested types: LIST, STRUCT, MAP === 165 | CREATE OR REPLACE TABLE demo.list_int_samples(val INT[]); 166 | INSERT INTO demo.list_int_samples VALUES 167 | ([ ]),([1]),([1,2]),([10,20,30]), 168 | ([NULL]),([5,NULL,7]),([100,200]),([3,3,3]),([42]),([1,2,3,4,5]); 169 | 170 | CREATE OR REPLACE TABLE demo.struct_samples(val STRUCT(a INT, b VARCHAR, c BOOLEAN)); 171 | INSERT INTO demo.struct_samples VALUES 172 | ({a: NULL, b: NULL, c: NULL}), 173 | ({a: 1, b: 'x', c: TRUE}), 174 | ({a: 2, b: 'y', c: FALSE}), 175 | ({a: 3, b: 'z', c: TRUE}), 176 | ({a: 10, b: 'abc', c: FALSE}), 177 | ({a: -1, b: '', c: TRUE}), 178 | ({a: 0, b: 'zero', c: FALSE}), 179 | ({a: 999, b: 'end', c: TRUE}), 180 | ({a: 42, 
b: 'meaning', c: TRUE}), 181 | ({a: 7, b: 'seven', c: FALSE}); 182 | 183 | -- MAP (DuckDB map type). Use the map(keys, values) constructor. 184 | CREATE OR REPLACE TABLE demo.map_samples(val MAP(VARCHAR, INT)); 185 | INSERT INTO demo.map_samples VALUES 186 | (map([], [])), 187 | (map(['a'], [1])), 188 | (map(['k','v'], [10,20])), 189 | (map(['x','y','z'], [1,2,3])), 190 | (map(['only'], [NULL])), 191 | (map(['neg','pos'], [-1,1])), 192 | (map(['wide','tall'], [100,200])), 193 | (map(['forty-two'], [42])), 194 | (map(['n','m','p','q'], [5,6,7,8])); 195 | 196 | -- === ENUM === 197 | CREATE TYPE mood AS ENUM ('happy','sad','neutral','excited','tired'); 198 | CREATE OR REPLACE TABLE demo.enum_samples(val mood); 199 | INSERT INTO demo.enum_samples VALUES 200 | ('happy'),('sad'),('neutral'),('excited'),('tired'), 201 | ('happy'),('neutral'),('excited'),('sad'),('tired'); 202 | 203 | -- === Sanity checks === 204 | -- Count rows for all tables (should be 10 each) 205 | SELECT table_name, row_count 206 | FROM ( 207 | SELECT 'bool_samples' AS table_name, (SELECT COUNT(*) FROM demo.bool_samples) AS row_count UNION ALL 208 | SELECT 'tinyint_samples',(SELECT COUNT(*) FROM demo.tinyint_samples) UNION ALL 209 | SELECT 'smallint_samples',(SELECT COUNT(*) FROM demo.smallint_samples) UNION ALL 210 | SELECT 'int_samples',(SELECT COUNT(*) FROM demo.int_samples) UNION ALL 211 | SELECT 'bigint_samples',(SELECT COUNT(*) FROM demo.bigint_samples) UNION ALL 212 | SELECT 'utinyint_samples',(SELECT COUNT(*) FROM demo.utinyint_samples) UNION ALL 213 | SELECT 'usmallint_samples',(SELECT COUNT(*) FROM demo.usmallint_samples) UNION ALL 214 | SELECT 'uinteger_samples',(SELECT COUNT(*) FROM demo.uinteger_samples) UNION ALL 215 | SELECT 'ubigint_samples',(SELECT COUNT(*) FROM demo.ubigint_samples) UNION ALL 216 | SELECT 'hugeint_samples',(SELECT COUNT(*) FROM demo.hugeint_samples) UNION ALL 217 | SELECT 'real_samples',(SELECT COUNT(*) FROM demo.real_samples) UNION ALL 218 | SELECT 
'double_samples',(SELECT COUNT(*) FROM demo.double_samples) UNION ALL 219 | SELECT 'decimal_samples',(SELECT COUNT(*) FROM demo.decimal_samples) UNION ALL 220 | SELECT 'varchar_samples',(SELECT COUNT(*) FROM demo.varchar_samples) UNION ALL 221 | SELECT 'blob_samples',(SELECT COUNT(*) FROM demo.blob_samples) UNION ALL 222 | SELECT 'uuid_samples',(SELECT COUNT(*) FROM demo.uuid_samples) UNION ALL 223 | SELECT 'date_samples',(SELECT COUNT(*) FROM demo.date_samples) UNION ALL 224 | SELECT 'time_samples',(SELECT COUNT(*) FROM demo.time_samples) UNION ALL 225 | SELECT 'timestamp_samples',(SELECT COUNT(*) FROM demo.timestamp_samples) UNION ALL 226 | SELECT 'timestamptz_samples',(SELECT COUNT(*) FROM demo.timestamptz_samples) UNION ALL 227 | SELECT 'interval_samples',(SELECT COUNT(*) FROM demo.interval_samples) UNION ALL 228 | SELECT 'json_samples',(SELECT COUNT(*) FROM demo.json_samples) UNION ALL 229 | SELECT 'list_int_samples',(SELECT COUNT(*) FROM demo.list_int_samples) UNION ALL 230 | SELECT 'struct_samples',(SELECT COUNT(*) FROM demo.struct_samples) UNION ALL 231 | SELECT 'map_samples',(SELECT COUNT(*) FROM demo.map_samples) UNION ALL 232 | SELECT 'enum_samples',(SELECT COUNT(*) FROM demo.enum_samples) 233 | ); 234 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to BoilStream will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
7 | 8 | ## [0.7.19] - 2025-10-29 9 | 10 | ### Features 11 | 12 | - Audit logging to separate logs folder on disk with partitioning 13 | 14 | ### Fixes 15 | 16 | - Fixed CORS for auth server to work with boilstream duckdb wasm extension from browser 17 | - Fixed session timestamp for opaque pake login response 18 | - Less bloated info logs 19 | - Server does not try to encrypt empty response body, but sends HTTP 204 instead 20 | 21 | ## [0.7.18] - 2025-10-15 22 | 23 | ### Features 24 | 25 | - Session resumption support for Remote Secrets Store API, matches DuckDB boilstream extension v0.3.1 26 | 27 | ### Fixes 28 | 29 | - Complete separation of Web Auth GUI sessions from OPAQUE login sessions 30 | 31 | ## [0.7.17] - 2025-10-14 32 | 33 | ### Features 34 | 35 | - Re-designed the DuckDB Secure Remote Secrets Store protocol to be based on industry standard approaches (Facebook OPAQUE PAKE, OAuth2, HKDF, SHA256, etc.). See the DuckDB client extension and its [SECURITY_SPECIFICATION.md](https://github.com/dforsber/boilstream-extension/blob/main/SECURITY_SPECIFICATION.md) that also includes full conformance test suite with test vectors. We have independently developed both the server (Rust) and the DuckDB extension using the specification and its conformance test suites to make them fully interoperable. The Facebook's OPAQUE PAKE was audited by NCC back in 2021. 36 | - Secrets Storage comms are integrity protected inside the TLS channel and secrets are encrypted inside the TLS channel with AEAD (i.e. application level e2e protection). Mounting the Remote Secrets Storage happens with anonymised one-time bootstrap token (privacy). 37 | 38 | ### Fixes 39 | 40 | - Shutdown is more swift now (e.g. 
for rolling restarts/updates) 41 | - Browser caching disabled with the Web Auth GUI 42 | 43 | ## [0.7.16] - 2025-10-09 44 | 45 | ### Features 46 | 47 | - Security improvement: secrets token vending starts with bootstrap token that is exchanged to session token with PKCE token exchange (anti-theft) 48 | - Web GUI shows token status 49 | - Matching DuckDB boilstream community extension version: v0.2.0 50 | 51 | ## [0.7.15] - 2025-10-08 52 | 53 | ### Features 54 | 55 | - DuckDB Secure Remote Secrets Storage REST API along with DuckDB Community Extension (https://github.com/dforsber/boilstream-extension) 56 | - GDPR compliant user management with nonrepudiation/nondisputability with PGP encrypted user email address (identity) when user is deleted. Only if public PGP key is configured. 57 | - Web tokens can be revoked like sessions. E.g. a revoked secrets scoped token used in the BoilStream DuckDB Extension does not have access to remote secrets storage anymore after revocation. 58 | 59 | ### Fixes 60 | 61 | - Added verify password field to user manual sign up 62 | - Clearing Web Auth portal password fields on timeout and tab change 63 | - Added verify encryption key on initial boilstream ceremony 64 | - The superadmin ("boilstream") password now has similar strength requirements as the encryption key 65 | - If max sessions were reached, user was blocked. Now, the oldest session is revoked to allow user log in via API / WebAuth console (authentication must succeed). 66 | - TOTP code cannot be reused 67 | - Improved auth API input validations 68 | - Web tokens are generated per purpose/scope (e.g. "secrets", "ingest") to adhere with least privilege security principle 69 | 70 | ## [0.7.14] - 2025-09-22 71 | 72 | ### Features 73 | 74 | - NEW: Web Portal GUI. Start boilstream and go to https://host:443/ for vending Postgres interface and http ingestion token credentials with social logins (GitHub, Google) and SAML based SSO supported (e.g. 
AWS SSO SP) through https auth server interface. Includes CloudFlare turnstile captcha. 75 | - MFA with TOTP and PassKey are supported. You can manage these on the auth portal and also revoke sessions, which also close the established postgres sessions if any with the respective credentials. 76 | - BoilStream maintains encrypted users DuckDB database encrypted with key passed during server start (or from file if configured). Key is mem locked and zeroised immediately after use (dbs have been opened). The encrypted dbs are locked into the auth server only. The db encryption is DuckDB v1.4 new feature. By configuring the encryption key path, the key is stored on disk and reused from there, otherwise asked from the user every time the server starts. 77 | - Proper implementation of Postgres `SCRAM-SHA-256` based logins with short time credentials vended with OAuth2/creds via login page served through server's auth https server. Postgres md5 passwords not supported anymore. Server never stores user's salted passwords. 78 | - The users encrypted database is backed up on selected backend. The system validates the backend exists at startup, recovers the users database from backup if missing locally, and automatically backs up after user creation with configurable interval throttling. 79 | - Superadmin account ("boilstream") password is bootstrapped when the server starts the first time and there is no encrypted superadmin.duckdb database yet. Using the "boilstream" as username and the associated password, the postgres connection is established to a separate in-memory DuckDB instance that has the users database attached. 
80 | - The users.duckdb database is backed up on the primary backend storage 81 | 82 | ### Examples 83 | 84 | - Vend http ingestion token through BoilStream auth portal and use it with [audio-arrow-streamer.html](audio-arrow-streamer.html) to stream audio into BoilStream DuckDB and Data Lake 85 | 86 | ### Fixes 87 | 88 | - Derived views (materialised topics) were still using old DuckDB instance per view approach. Now derived view processor uses single duckdb instance for much improved scalability. 89 | 90 | ## [0.7.13] - 2025-09-16 91 | 92 | ### Features 93 | 94 | - DuckDB 1.4.0, extensions work again 95 | - Arbitrary number of parameters supported (hard coded max is 10k to avoid OOM) 96 | - Parametrized INSERT/DELETE queries 97 | 98 | ### Fixes 99 | 100 | - DuckDB Arrow lossless Boolean extension type was misinterpreted when returning multiple boolean values 101 | - JSON Array parameters, they were quoted but must not be 102 | 103 | ## [0.7.12] - 2025-09-12 104 | 105 | ### Features 106 | 107 | - NEW INTERFACE: HTTPS ingestion with Arrow payloads, e.g. from Browsers with Flechette JS. >2GB/s and tens of thousands of concurrent connections. 108 | - Configurable query/connection timeouts. Default from 5min to 30min. (pgwire.connection_timeout_seconds) 109 | 110 | ## [0.7.11] - 2025-09-04 111 | 112 | ### Features 113 | 114 | - True streaming through postgres interface with lazy fetching from DuckDB to minimise memory consumption. Allows e.g. streaming tens of millions of rows concurrently to multiple clients without consuming much memory. 115 | - Allow streaming all rows, not just first 1M. Allows e.g. Power BI to download all data. 
116 | 117 | ### Fixes 118 | 119 | - Fix "time with time zone", "timestamp with time zone", "uuid array", "boolean array" binary parameters handling for prepared queries 120 | 121 | ## [0.7.10] - 2025-09-04 122 | 123 | ### Fixes 124 | 125 | - PG type name mapping vs native type naming fixed for allowing Power BI to detect all types properly 126 | 127 | ## [0.7.9] - 2025-09-03 128 | 129 | ### Features 130 | 131 | - 1st class support for prepared statements including binary parameter types support (also arrays) 132 | - Higher resiliency against attacks and hundreds of concurrent clients, including malicious 133 | - Improved type compliancy HTML report: https://boilstream.com/test_report.html 134 | 135 | ### Fixes 136 | 137 | - Many PG catalog fixes to make type system more complete 138 | 139 | ## [0.7.8] - 2025-08-30 140 | 141 | ### Fixes 142 | 143 | - Postgres interface hardening in face of attacks and misbehaving clients 144 | 145 | ## [0.7.7] - 2025-08-27 146 | 147 | ### Features 148 | 149 | - Improved Postgres interface robustness and resource management (query timeouts, idle connection mgmt, etc.) 150 | - Postgres interface result row record improvements and type modifiers for allowing Power BI to use proper query folding (query pushdown) 151 | - Type compliance report: https://www.boilstream.com/type_coverage_report.md 152 | - Grafana Dashboard updated with more metrics 153 | - NEW: Preliminary Kafka interface with Avro and schema validation. The boilstream.topic_schemas now also include avro_schema column that is the schema for Kafka clients. 154 | 155 | ### Fixes 156 | 157 | - Storage backend now supports multiple Object Storage backends, not just e.g. S3 + filesystem 158 | - By default having DuckDB arrow_lossless_conversion = true (preserves e.g. time zone information with "time with time zone" type). Both settings work. 159 | 160 | ## [0.7.6] - 2025-08-21 161 | 162 | ### Features 163 | 164 | - Full support of Tableau in place. 
Tableau does not complain about any types it seems, so we only need a minor change to make Tableau work. 165 | 166 | ## [0.7.5] - 2025-08-21 167 | 168 | ### Fixed 169 | 170 | - Extensive tests for various data types and special handling for Power BI as its npgsql version is outdated and can't handle TIME, TIMESTAMP, TIMESTAMPTZ and ARRAYs with NULLs. Thus, we convert them to TEXT (temporal) and JSON (ARRAY), but only for Power BI clients. Other clients get these types without conversion. See the [demo_database.sql](demo_database.sql) that we used for testing with Power BI Desktop client. 171 | 172 | ## [0.7.4] - 2025-08-19 173 | 174 | ### Fixed 175 | 176 | - Fixed more Power BI connection failures due to type mismatch. 177 | 178 | ## [0.7.3] - 2025-08-18 179 | 180 | ### Features 181 | 182 | - Using object_store crate for generalised Object Store and Filesystem support. E.g. AWS, GCP, and Azure object stores, and Minio. 183 | 184 | ### Fixed 185 | 186 | - Performance: Fixed serialised metadata envelope recycling causing some operations to be serialised 187 | - Defect: Power BI connection failure due to type mismatch 188 | 189 | ## [0.7.2] - 2025-08-12 190 | 191 | ### Features 192 | 193 | - Flight SQL interface (e.g. 
with ADBC drivers) 194 | - Self and cross-BoilStream writes with Airport extension (pre-compiled downloadable) 195 | 196 | ### Fixed 197 | 198 | - Graceful shutdown sequence fixed to avoid data loss with derived view processor 199 | - Derived topic id assignment and topic cache miss handling 200 | 201 | ## [0.7.1] - 2025-08-04 202 | 203 | ### Improvements / Features 204 | 205 | - Improved BI Tool support: Power BI compatibility 206 | 207 | ## [0.7.0] - 2025-08-03 208 | 209 | ### Improvements / Features 210 | 211 | - 1st tier derived topics (aka materialised views) support 212 | - Support for recursive derived topics 213 | - Data persistence layer tiered sticky load balancing for improved parquet locality 214 | - The metadata.duckdb database catalog schema changed (keying by u64 not varchar) 215 | - improved memory management with vector recycling, also switched from jemalloc to mimalloc 216 | - Embedded DuckDB now has more inbuilt core extensions 217 | - Linux and OSX x64 builds 218 | 219 | ### Fixed 220 | 221 | - improved FlightRPC client communications with retries 222 | 223 | ## [0.6.2] - 2025-07-27 224 | 225 | ### Fixed 226 | 227 | - **Derived view refresh**: Materialized views now automatically refresh within 1 second when created or dropped via SQL, eliminating the need to restart the agent 228 | - View changes made through the `boilstream.s3` schema are now immediately picked up by the streaming processor 229 | 230 | ### Technical Details 231 | 232 | - Added periodic cache invalidation (1s interval) to the derived view processor 233 | - Improved cache consistency between SQL operations and stream processing 234 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # BoilStream - Stream to Gold Easily 🏆 2 | 3 | [BoilStream](https://wwww.boilstream.com/) is a small binary DuckDB server with steroids written in Rust (and a bit of C++). 
4 | 5 | Download, start, and connect with any BI Tool with Postgres interface for real-time analytics - ingest with Kafka clients and connect from [DuckDB clients with Airport extension](https://duckdb.org/community_extensions/extensions/airport.html) (and generally with FlightSQL) for high-throughput and scalable real-time data ingestion. It streams Parquet to storage backends like S3 with DuckLake in realtime as compact, hive partitioned Parquet files. 6 | 7 | BoilStream supports: 8 | 9 | 1. 🚀 **High-performance zero-copy\* data ingestion**: Kafka (avro with schema), HTTPS Arrow (e.g. with Flechette JS from tens of thousands of Browsers concurrently) FlightRPC ([DuckDB Airport community extension](https://duckdb.org/community_extensions/extensions/airport.html) from DuckDB clients), FlightSQL (e.g. ADBC driver, or ADBC FlightSQL JDBC bridge driver) 10 | 2. 🚀 **1st Class Postgres compatible BI interface for real-time (streaming) Analytics** directly 1:1 mapped into DuckDB memory connections. Direct queries work with Power BI! See also [type compliance report](https://boilstream.com/test_report.html) 11 | 3. 🚀 **Local on-disk DuckDB database layer** with high ingestion throughput 12 | 4. 🚀 **Multiple "diskless" Parquet storage backends** like S3, GCP, Azure, and Filesystem - when DuckDB client FlightRPC `INSERT` returns, **data is guaranteed to be on primary storage** (e.g. Minio or AWS S3). The data pipeline to S3 is completely diskless, so if you don't enable DuckDB local persistence layer, the disk is not used at all. 13 | 5. 🚀 **Creating ingestion topics and materialised realtime views** (derived topics) with special `boilstream.s3` schema - use `CREATE TABLE` and `CREATE TABLE derived_view AS SELECT col1 FROM boilstream.s3.my_topic` for managing topics/views 14 | 6. 🚀 **DuckLake integration:** S3 uploaded files are automatically added to DuckLake 15 | 7. 
🚀 **Our novel never-ending DuckDB SQL real-time streaming queries** for processing materialised views very efficiently (see CTAS over `boilstream.s3` schema below) 16 | 8. 🚀 **Monitoring through prometheus compatible interface** along with an example Grafana Dashboard (see [`docker-compose.yml`](docker-compose.yml)) 17 | 9. 🚀 **Enterprise SSO with RBAC/ATAC as well as TLS and improved Postgres authentication** with [paid pro version](https://www.boilstream.com/) 18 | 19 | This repository contains free download links and docker compose file for running the optional auxiliary services, like Grafana monitoring and Minio S3 for testing. 20 | 21 | > \*) There is one data copy from kernel to userspace, which happens always unless you bypass kernel or use e.g. Linux XDP sockets to read raw data from the link directly. But then you also need to parse Ethernet and implement IP, TCP, TLS, gRPC, and Flight protocol stacks. Single port/core FlightRPC is already very efficient and reported to support +20GB/s data transfer speeds with single core. In BoilStream, data copying also happens when you convert the incoming Arrow format to Parquet files or to DuckDB on-disk database file - but that's all. The concurrent S3 Uploader and pre-allocated buffer pools ensure that the network copy reads from the Parquet writer output buffers directly. We even rotate the envelope vectors that carry the metadata along with the data references. 22 | 23 | ## No Backups Needed 24 | 25 | No backups needed as it streams your data to S3 with automatic compacted and optimised Parquet conversion. Ingested data schema is validated so you don't get bad data in. The data on S3 is ready for analytics and is Hive Partitioned (DuckLake integration also available). 26 | 27 | Based on topic configuration, data is also persisted onto local disk as DuckDB database files. The realtime Analytics postgres connection reads directly from these DuckDB database files while the data is ingested into them. 
28 | 29 | BoilStream supports thousands of concurrent writers and GBs per second data ingestion rates (with single ingestion port), while using efficient stream aggregation from all the writers into per topic Parquet files. High throughput streaming creates S3 multipart upload files where Parquet row groups are uploaded concurrently (S3 multipart parts). The Parquet file is finalised when size/time threshold is reached. 30 | 31 | > If you configure S3 as the primary storage and S3 is down, data ingestion stalls and the `INSERT` statements will hang until S3 becomes available again. Data integrity (security) is number one priority. FlightRPC data is also schema validated both on control plane (matching schema) as well as on data plane (actual data matches and validates with the schema). 32 | 33 | > Secondary storage failures do not affect or stall data ingestion, like if you configure Filesystem as your primary and S3 as secondary. 34 | 35 | > Local DuckDB on-disk database persistence layer can be turned on/off and is independent of configured storage layers. You can also configure no Parquet storage layers and just ingest data onto DuckDB on-disk databases (or e.g. EBS volumes on cloud) 36 | 37 | > 2025-07-26: Currently, there is no DuckDB on-disk database file rotation or old data cleanup, but we will address this in the future release to avoid the common disk full scenario. For now, you can periodically delete old data. 38 | 39 | ## Postgres interface 40 | 41 | **You can run any BI Tool over the postgres interface on the standard port 5432** (configurable). We have tested with Power BI, DBeaver, Metabase, Superset, Grafana, psql, pgbench. 42 | 43 | You can see our type compliance report online at [https://boilstream.com/test_report.html](https://boilstream.com/test_report.html) 44 | 45 | > DuckDB itself does not have "server mode" and does not implement client-server paradigm. With BoilStream you can run DuckDB efficiently as a server too. 
46 | 47 | BoilStream supports: 48 | 49 | 1. 🚀 Both text and binary encoded fields with extensive type support (time/date formats, JSON, uuid, List/Map/Struct, etc.) 50 | 2. 🚀 Cursor and transaction management with DuckDB's native streaming queries 51 | 3. 🚀 Comprehensive pg catalog for metadata discovery from BI Tools with postgres SQL syntax 52 | 53 | ## Kafka interface 54 | 55 | BoilStream has Confluent Schema with Avro data format Kafka interface support. Stream in data with Kafka by using the same topics/tables as with FlightRPC/FlightSQL. The same schema table has both arrow and avro schemas. 56 | 57 | ## Real-time SQL Streaming - never-ending SQL queries! 58 | 59 | We use our _innovative never ending continuous stream processing with DuckDB_ 🚀 . This avoids SQL parsing, Arrow Table registration, cleanup and other hassle present in (micro) batch processing approaches. 60 | 61 | As the data flows in as Arrow data it goes through DuckDB stream processors that produce data for the derived views. These derived topic processors are initialised once with the specified SQL, but run as long as the data flows (unless you create SQL that finishes on purpose like with LIMIT). These streaming processors only work with DuckDB's physical streaming constructs (e.g. LAG etc.). 62 | 63 | > **For all proper window queries, we are adding support through the on-disk cached DuckDB databases. This way we can provide even hourly "batching" automatically.** But for now, you can already run queries over the Postgres interface if you like. 
64 | 65 | ## Start 66 | 67 | ```bash 68 | # Download and start boilstream - if no configuration file is provided, it will generate an example one 69 | # https://www.boilstream.com/binaries/linux-aarch64/boilstream-0.7.18 70 | # https://www.boilstream.com/binaries/linux-x64/boilstream-0.7.18 71 | # https://www.boilstream.com/binaries/darwin-x64/boilstream-0.7.18 72 | curl -L -o boilstream https://www.boilstream.com/binaries/darwin-aarch64/boilstream-0.7.18 73 | chmod +x boilstream 74 | 75 | # SERVER_IP_ADDRESS is used on the Flight interface, use reachable IP address 76 | SERVER_IP_ADDRESS=1.2.3.4 ./boilstream 77 | 78 | # You can also use Docker images: 79 | # boilinginsights/boilstream:x64-linux-0.7.18 or boilinginsights/boilstream:aarch64-linux-0.7.18 80 | docker run -v ./config.yaml:/app/config.yaml \ 81 | -p 5432:5432 \ 82 | -p 50250:50250 \ 83 | -p 50051:50051 \ 84 | -e SERVER_IP_ADDRESS=1.2.3.4 boilinginsights/boilstream:aarch64-linux-0.7.18 85 | ``` 86 | 87 | > _You can use the accompanying docker-compose.yml file to start auxiliary containers for Grafana Dashboard and S3 Minio_ 88 | 89 | Connect through the postgres interface with your tool of choice, like [DBeaver](https://dbeaver.io/download/) that we have been extensively using. 90 | 91 | - See the [workbook.sql](workbook.sql) for full example 92 | - See the [demo_database.sql](demo_database.sql) for extensive type testing example, which we used with Power BI testing 93 | 94 | **The `boilstream.s3.` schema is specific for real-time streaming. Tables created to it become available on the FlightRPC side for ingestion. 
CTAS tables become materialised views (not writable from FlightRPC ingestion side)** 95 | 96 | ```sql 97 | -- Create topic 98 | CREATE TABLE boilstream.s3.people (name VARCHAR, age INT, tags VARCHAR[]); 99 | -- Derived topics aka **materialised real-time views** 100 | -- With their own S3 Parquet data as well as on-disk DuckDB database views like the main topic 101 | CREATE TABLE boilstream.s3.filtered_adults AS SELECT * FROM boilstream.s3.people WHERE age > 50; 102 | CREATE TABLE boilstream.s3.filtered_b AS SELECT * FROM boilstream.s3.people WHERE name LIKE 'b%'; 103 | CREATE TABLE boilstream.s3.filtered_a AS SELECT * FROM boilstream.s3.people WHERE name LIKE 'a%'; 104 | ``` 105 | 106 | Check existing topics and their metadata: 107 | 108 | ```sql 109 | -- topic metadata 110 | select * from boilstream.topics; 111 | select * from boilstream.topic_schemas; 112 | select * from boilstream.derived_views; 113 | ``` 114 | 115 | `ATTACH` BoilStream topics write end point to itself. 116 | 117 | > BoilStream can attach itself (has actually in-built Airport extension), but also other BoilStream servers or other Airport servers (FlightRPC `do_exchange` method) 118 | 119 | ```sql 120 | LOAD '/tmp/airport.duckdb_extension'; 121 | SELECT extension_name, loaded from duckdb_extensions() where loaded=true; 122 | ATTACH 'boilstream' AS data (TYPE AIRPORT, location 'grpc://localhost:50051/'); 123 | SHOW ALL TABLES; 124 | -- Write into "people" topic 125 | INSERT INTO data.s3.people 126 | SELECT 127 | 'boilstream_' || i::VARCHAR AS name, 128 | (i % 100) + 1 AS age, 129 | ['duckdb', 'ducklake'] AS tags 130 | FROM generate_series(1, 20000) as t(i); 131 | 132 | ``` 133 | 134 | ### With remote DuckDB clients 135 | 136 | Start ingesting data with DuckDB clients. 
**When DuckDB statement returns, data is guaranteed to be on S3!** 137 | 138 | ```sql 139 | INSTALL airport FROM community; 140 | LOAD airport; 141 | ATTACH 'boilstream' (TYPE AIRPORT, location 'grpc://localhost:50051/'); 142 | -- With pro-version, use TLS: 'grpc+tls://localhost:50051/' 143 | 144 | SHOW ALL TABLES; 145 | 146 | INSERT INTO boilstream.s3.people 147 | SELECT 148 | 'boilstream_' || i::VARCHAR AS name, 149 | (i % 100) + 1 AS age, 150 | ['duckdb', 'ducklake'] AS tags 151 | FROM generate_series(1, 20000) as t(i); 152 | ``` 153 | 154 | > The BoilStream configuration file `storage.backends.flush_interval_ms` (with backend type "s3") configuration option defines the S3 synchronization interval, which also completes the DuckDB INSERT transactions you run with the Airport extension from all the clients. The smaller the flush interval, the faster response times you get, but smaller fragmented Parquet files. You can send millions of rows or just one row and the query completes in these intervals as the storage backend signals all verifiably (S3) stored sequence numbers onto Parquet back to the data ingestion frontend which ensures that all data is successfully stored on S3 before returning success back to Airport clients. 155 | 156 | **Monitor your data with Grafana**: http://localhost:3000 (admin/admin) 157 | 158 | ## 📋 Requirements 159 | 160 | - Docker and Docker Compose 161 | - 8GB+ RAM recommended 162 | - OSX or Linux (Ubuntu 24+) 163 | - arm64 (we can build for OS/Arch on request) 164 | 165 | ## 🎯 Free Tier Limits 166 | 167 | > NOTE: BoilStream can ingest GBs per second, so you may hit the free tier limit quickly. Thus, use the rate limiter configuration on the configuration file. 
168 | 169 | - **Data ingestion**: 40 GB per hour (you need to restart if you hit the limit) 170 | - **Max concurrent sessions**: Limited to 10 171 | - **No authentication**: No authentication or access control 172 | - **No TLS**: Runs on plain FlightRPC connection without TLS 173 | 174 | ## 📋 Changelog 175 | 176 | See [CHANGELOG.md](CHANGELOG.md) for version history and release notes. 177 | 178 | ## 🏗️ Architecture 179 | 180 | For complete documentation, visit: **[www.boilstream.com](https://www.boilstream.com)** and **[docs.boilstream.com](https://docs.boilstream.com)** 181 | 182 | BoilStream processes your data through: 183 | 184 | 1. **Flight RPC** - High-performance data ingestion with Apache Arrow and zero-copy implementation 185 | 2. **S3** - Automated Parquet storage with Hive partitioning 186 | 3. **Rate limiting** - Rate limiting support 187 | 4. **BI Tool Integration** - Postgres compatible interface for BI Tool and other integrations 188 | 5. **DuckLake** - Integration with [DuckLake](https://duckdb.org/2025/05/27/ducklake.html) 189 | 190 | Auxiliary services: 191 | 192 | 6. **Prometheus** - Metrics collection 193 | 7. **Grafana** - Real-time monitoring dashboards 194 | 195 | ## 🆙 Upgrading to Paid version 196 | 197 | - **Security**: FlightRPC with TLS, authentication and access control 198 | - **Uncapped**: No throughput limits, max concurrent sessions with single node 10k (configurable) 199 | 200 | ## 🆙 Upgrading to Enterprise version 201 | 202 | - **Multi-node**: Horizontal scaling by just adding more nodes 203 | - **Federated Authentication**: Integration with authentication providers 204 | 205 | Need higher limits or advanced features? 
Contact us at **[boilstream.com](https://www.boilstream.com)** 206 | -------------------------------------------------------------------------------- /audio-arrow-streamer.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Audio Arrow Streamer 7 | 151 | 410 | 411 | 412 |
413 |

🎤 Audio Arrow Streamer

414 | 415 |
416 |
417 | 418 | 419 |
420 |
421 | 422 | 423 |
424 |
425 | 426 | 427 |
428 |
429 | 430 | 431 |
432 |
433 | 434 |
435 | 436 | 437 |
438 | 439 |
Ready to start
440 | 441 |
442 |
443 |
0
444 |
Batches Sent
445 |
446 |
447 |
0 KB/s
448 |
Data Rate
449 |
450 |
451 |
0s
452 |
Duration
453 |
454 |
455 | 456 |
457 |
458 | 459 | -------------------------------------------------------------------------------- /config/grafana/provisioning/dashboards/ingestion-agent.json: -------------------------------------------------------------------------------- 1 | { 2 | "annotations": { 3 | "list": [ 4 | { 5 | "builtIn": 1, 6 | "datasource": { 7 | "type": "grafana", 8 | "uid": "-- Grafana --" 9 | }, 10 | "enable": true, 11 | "hide": true, 12 | "iconColor": "rgba(0, 211, 255, 1)", 13 | "name": "Annotations & Alerts", 14 | "target": { 15 | "limit": 100, 16 | "matchAny": false, 17 | "tags": [], 18 | "type": "dashboard" 19 | }, 20 | "type": "dashboard" 21 | } 22 | ] 23 | }, 24 | "editable": true, 25 | "fiscalYearStartMonth": 0, 26 | "graphTooltip": 0, 27 | "id": 1, 28 | "links": [], 29 | "liveNow": false, 30 | "panels": [ 31 | { 32 | "datasource": { 33 | "type": "prometheus", 34 | "uid": "PBFA97CFB590B2093" 35 | }, 36 | "fieldConfig": { 37 | "defaults": { 38 | "color": { 39 | "mode": "palette-classic" 40 | }, 41 | "custom": { 42 | "axisCenteredZero": false, 43 | "axisColorMode": "text", 44 | "axisLabel": "", 45 | "axisPlacement": "auto", 46 | "barAlignment": 0, 47 | "drawStyle": "line", 48 | "fillOpacity": 10, 49 | "gradientMode": "none", 50 | "hideFrom": { 51 | "legend": false, 52 | "tooltip": false, 53 | "viz": false 54 | }, 55 | "lineInterpolation": "linear", 56 | "lineWidth": 2, 57 | "pointSize": 5, 58 | "scaleDistribution": { 59 | "type": "linear" 60 | }, 61 | "showPoints": "auto", 62 | "spanNulls": false, 63 | "stacking": { 64 | "group": "A", 65 | "mode": "none" 66 | }, 67 | "thresholdsStyle": { 68 | "mode": "off" 69 | } 70 | }, 71 | "mappings": [], 72 | "thresholds": { 73 | "mode": "absolute", 74 | "steps": [ 75 | { 76 | "color": "green", 77 | "value": null 78 | }, 79 | { 80 | "color": "red", 81 | "value": 80 82 | } 83 | ] 84 | }, 85 | "unit": "none", 86 | "min": 0 87 | }, 88 | "overrides": [] 89 | }, 90 | "gridPos": { 91 | "h": 8, 92 | "w": 8, 93 | "x": 0, 94 | "y": 0 95 | }, 96 | 
"id": 34, 97 | "options": { 98 | "legend": { 99 | "calcs": ["lastNotNull", "mean", "max"], 100 | "displayMode": "list", 101 | "placement": "bottom", 102 | "showLegend": true 103 | }, 104 | "tooltip": { 105 | "mode": "single", 106 | "sort": "none" 107 | } 108 | }, 109 | "pluginVersion": "10.0.0", 110 | "targets": [ 111 | { 112 | "datasource": { 113 | "type": "prometheus", 114 | "uid": "PBFA97CFB590B2093" 115 | }, 116 | "editorMode": "code", 117 | "expr": "pgwire_connections", 118 | "interval": "1s", 119 | "legendFormat": "Active Connections", 120 | "range": true, 121 | "refId": "A" 122 | } 123 | ], 124 | "title": "PostgreSQL Client Connections", 125 | "type": "timeseries" 126 | }, 127 | { 128 | "datasource": { 129 | "type": "prometheus", 130 | "uid": "PBFA97CFB590B2093" 131 | }, 132 | "fieldConfig": { 133 | "defaults": { 134 | "color": { 135 | "mode": "palette-classic" 136 | }, 137 | "custom": { 138 | "axisCenteredZero": false, 139 | "axisColorMode": "text", 140 | "axisLabel": "", 141 | "axisPlacement": "auto", 142 | "barAlignment": 0, 143 | "drawStyle": "line", 144 | "fillOpacity": 10, 145 | "gradientMode": "none", 146 | "hideFrom": { 147 | "legend": false, 148 | "tooltip": false, 149 | "viz": false 150 | }, 151 | "lineInterpolation": "linear", 152 | "lineWidth": 1, 153 | "pointSize": 5, 154 | "scaleDistribution": { 155 | "type": "linear" 156 | }, 157 | "showPoints": "auto", 158 | "spanNulls": false, 159 | "stacking": { 160 | "group": "A", 161 | "mode": "none" 162 | }, 163 | "thresholdsStyle": { 164 | "mode": "off" 165 | } 166 | }, 167 | "mappings": [], 168 | "thresholds": { 169 | "mode": "absolute", 170 | "steps": [ 171 | { 172 | "color": "green", 173 | "value": null 174 | }, 175 | { 176 | "color": "red", 177 | "value": 80 178 | } 179 | ] 180 | }, 181 | "unit": "s" 182 | }, 183 | "overrides": [] 184 | }, 185 | "gridPos": { 186 | "h": 8, 187 | "w": 8, 188 | "x": 8, 189 | "y": 0 190 | }, 191 | "id": 36, 192 | "options": { 193 | "legend": { 194 | "calcs": [ 195 | 
"mean", 196 | "max" 197 | ], 198 | "displayMode": "table", 199 | "placement": "right", 200 | "showLegend": true 201 | }, 202 | "tooltip": { 203 | "mode": "single", 204 | "sort": "none" 205 | } 206 | }, 207 | "targets": [ 208 | { 209 | "datasource": { 210 | "type": "prometheus", 211 | "uid": "PBFA97CFB590B2093" 212 | }, 213 | "editorMode": "code", 214 | "expr": "(rate(pgwire_query_duration_seconds_sum[15s]) / rate(pgwire_query_duration_seconds_count[15s])) or vector(0)", 215 | "interval": "1s", 216 | "legendFormat": "Avg", 217 | "range": true, 218 | "refId": "A" 219 | }, 220 | { 221 | "datasource": { 222 | "type": "prometheus", 223 | "uid": "PBFA97CFB590B2093" 224 | }, 225 | "editorMode": "code", 226 | "expr": "histogram_quantile(0.50, sum(rate(pgwire_query_duration_seconds_bucket[15s])) by (le)) or vector(0)", 227 | "interval": "1s", 228 | "legendFormat": "Median", 229 | "range": true, 230 | "refId": "B" 231 | }, 232 | { 233 | "datasource": { 234 | "type": "prometheus", 235 | "uid": "PBFA97CFB590B2093" 236 | }, 237 | "editorMode": "code", 238 | "expr": "histogram_quantile(0.95, sum(rate(pgwire_query_duration_seconds_bucket[15s])) by (le)) or vector(0)", 239 | "interval": "1s", 240 | "legendFormat": "P95", 241 | "range": true, 242 | "refId": "C" 243 | }, 244 | { 245 | "datasource": { 246 | "type": "prometheus", 247 | "uid": "PBFA97CFB590B2093" 248 | }, 249 | "editorMode": "code", 250 | "expr": "histogram_quantile(0.99, sum(rate(pgwire_query_duration_seconds_bucket[15s])) by (le)) or vector(0)", 251 | "interval": "1s", 252 | "legendFormat": "P99", 253 | "range": true, 254 | "refId": "D" 255 | } 256 | ], 257 | "title": "PostgreSQL Query Runtime", 258 | "type": "timeseries" 259 | }, 260 | { 261 | "datasource": { 262 | "type": "prometheus", 263 | "uid": "PBFA97CFB590B2093" 264 | }, 265 | "fieldConfig": { 266 | "defaults": { 267 | "color": { 268 | "mode": "palette-classic" 269 | }, 270 | "custom": { 271 | "axisCenteredZero": false, 272 | "axisColorMode": "text", 273 | 
"axisLabel": "", 274 | "axisPlacement": "auto", 275 | "barAlignment": 0, 276 | "drawStyle": "line", 277 | "fillOpacity": 30, 278 | "gradientMode": "none", 279 | "hideFrom": { 280 | "legend": false, 281 | "tooltip": false, 282 | "viz": false 283 | }, 284 | "lineInterpolation": "linear", 285 | "lineWidth": 2, 286 | "pointSize": 5, 287 | "scaleDistribution": { 288 | "type": "linear" 289 | }, 290 | "showPoints": "auto", 291 | "spanNulls": false, 292 | "stacking": { 293 | "group": "A", 294 | "mode": "none" 295 | }, 296 | "thresholdsStyle": { 297 | "mode": "off" 298 | } 299 | }, 300 | "mappings": [], 301 | "thresholds": { 302 | "mode": "absolute", 303 | "steps": [ 304 | { 305 | "color": "green", 306 | "value": null 307 | }, 308 | { 309 | "color": "red", 310 | "value": 80 311 | } 312 | ] 313 | }, 314 | "unit": "none", 315 | "min": 0 316 | }, 317 | "overrides": [] 318 | }, 319 | "gridPos": { 320 | "h": 8, 321 | "w": 8, 322 | "x": 16, 323 | "y": 0 324 | }, 325 | "id": 38, 326 | "options": { 327 | "legend": { 328 | "calcs": [ 329 | "lastNotNull" 330 | ], 331 | "displayMode": "list", 332 | "placement": "bottom", 333 | "showLegend": true 334 | }, 335 | "tooltip": { 336 | "mode": "multi", 337 | "sort": "desc" 338 | } 339 | }, 340 | "targets": [ 341 | { 342 | "datasource": { 343 | "type": "prometheus", 344 | "uid": "PBFA97CFB590B2093" 345 | }, 346 | "editorMode": "code", 347 | "expr": "rate(pgwire_query_duration_seconds_count[10s]) * 10", 348 | "interval": "1s", 349 | "legendFormat": "Queries/10s", 350 | "range": true, 351 | "refId": "A" 352 | } 353 | ], 354 | "title": "PostgreSQL Active Queries", 355 | "type": "timeseries" 356 | }, 357 | { 358 | "datasource": { 359 | "type": "prometheus", 360 | "uid": "PBFA97CFB590B2093" 361 | }, 362 | "fieldConfig": { 363 | "defaults": { 364 | "color": { 365 | "mode": "palette-classic" 366 | }, 367 | "custom": { 368 | "axisCenteredZero": false, 369 | "axisColorMode": "text", 370 | "axisLabel": "", 371 | "axisPlacement": "auto", 372 | 
"barAlignment": 0, 373 | "drawStyle": "line", 374 | "fillOpacity": 10, 375 | "gradientMode": "none", 376 | "hideFrom": { 377 | "legend": false, 378 | "tooltip": false, 379 | "viz": false 380 | }, 381 | "lineInterpolation": "linear", 382 | "lineWidth": 2, 383 | "pointSize": 5, 384 | "scaleDistribution": { 385 | "type": "linear" 386 | }, 387 | "showPoints": "auto", 388 | "spanNulls": false, 389 | "stacking": { 390 | "group": "A", 391 | "mode": "none" 392 | }, 393 | "thresholdsStyle": { 394 | "mode": "off" 395 | } 396 | }, 397 | "mappings": [], 398 | "thresholds": { 399 | "mode": "absolute", 400 | "steps": [ 401 | { 402 | "color": "green", 403 | "value": null 404 | }, 405 | { 406 | "color": "red", 407 | "value": 80 408 | } 409 | ] 410 | }, 411 | "unit": "none", 412 | "min": 0 413 | }, 414 | "overrides": [] 415 | }, 416 | "gridPos": { 417 | "h": 8, 418 | "w": 8, 419 | "x": 0, 420 | "y": 8 421 | }, 422 | "id": 40, 423 | "options": { 424 | "legend": { 425 | "calcs": ["lastNotNull", "mean", "max"], 426 | "displayMode": "list", 427 | "placement": "bottom", 428 | "showLegend": true 429 | }, 430 | "tooltip": { 431 | "mode": "single", 432 | "sort": "none" 433 | } 434 | }, 435 | "pluginVersion": "10.0.0", 436 | "targets": [ 437 | { 438 | "datasource": { 439 | "type": "prometheus", 440 | "uid": "PBFA97CFB590B2093" 441 | }, 442 | "editorMode": "code", 443 | "expr": "kafka_connections", 444 | "interval": "1s", 445 | "legendFormat": "Active Connections", 446 | "range": true, 447 | "refId": "A" 448 | } 449 | ], 450 | "title": "Kafka Client Connections", 451 | "type": "timeseries" 452 | }, 453 | { 454 | "datasource": { 455 | "type": "prometheus", 456 | "uid": "PBFA97CFB590B2093" 457 | }, 458 | "fieldConfig": { 459 | "defaults": { 460 | "color": { 461 | "mode": "palette-classic" 462 | }, 463 | "custom": { 464 | "axisCenteredZero": false, 465 | "axisColorMode": "text", 466 | "axisLabel": "", 467 | "axisPlacement": "auto", 468 | "barAlignment": 0, 469 | "drawStyle": "line", 470 | 
"fillOpacity": 10, 471 | "gradientMode": "none", 472 | "hideFrom": { 473 | "legend": false, 474 | "tooltip": false, 475 | "viz": false 476 | }, 477 | "lineInterpolation": "linear", 478 | "lineWidth": 1, 479 | "pointSize": 5, 480 | "scaleDistribution": { 481 | "type": "linear" 482 | }, 483 | "showPoints": "auto", 484 | "spanNulls": false, 485 | "stacking": { 486 | "group": "A", 487 | "mode": "none" 488 | }, 489 | "thresholdsStyle": { 490 | "mode": "off" 491 | } 492 | }, 493 | "mappings": [], 494 | "thresholds": { 495 | "mode": "absolute", 496 | "steps": [ 497 | { 498 | "color": "green", 499 | "value": null 500 | }, 501 | { 502 | "color": "red", 503 | "value": 80 504 | } 505 | ] 506 | }, 507 | "unit": "reqps" 508 | }, 509 | "overrides": [] 510 | }, 511 | "gridPos": { 512 | "h": 8, 513 | "w": 8, 514 | "x": 8, 515 | "y": 8 516 | }, 517 | "id": 41, 518 | "options": { 519 | "legend": { 520 | "calcs": [ 521 | "mean", 522 | "max" 523 | ], 524 | "displayMode": "table", 525 | "placement": "right", 526 | "showLegend": true 527 | }, 528 | "tooltip": { 529 | "mode": "single", 530 | "sort": "none" 531 | } 532 | }, 533 | "targets": [ 534 | { 535 | "datasource": { 536 | "type": "prometheus", 537 | "uid": "PBFA97CFB590B2093" 538 | }, 539 | "editorMode": "code", 540 | "expr": "rate(kafka_requests_total[10s])", 541 | "interval": "1s", 542 | "legendFormat": "Requests/sec", 543 | "range": true, 544 | "refId": "A" 545 | } 546 | ], 547 | "title": "Kafka Request Rate", 548 | "type": "timeseries" 549 | }, 550 | { 551 | "datasource": { 552 | "type": "prometheus", 553 | "uid": "PBFA97CFB590B2093" 554 | }, 555 | "fieldConfig": { 556 | "defaults": { 557 | "color": { 558 | "mode": "palette-classic" 559 | }, 560 | "custom": { 561 | "axisCenteredZero": false, 562 | "axisColorMode": "text", 563 | "axisLabel": "", 564 | "axisPlacement": "auto", 565 | "barAlignment": 0, 566 | "drawStyle": "line", 567 | "fillOpacity": 30, 568 | "gradientMode": "none", 569 | "hideFrom": { 570 | "legend": false, 571 | 
"tooltip": false, 572 | "viz": false 573 | }, 574 | "lineInterpolation": "linear", 575 | "lineWidth": 2, 576 | "pointSize": 5, 577 | "scaleDistribution": { 578 | "type": "linear" 579 | }, 580 | "showPoints": "auto", 581 | "spanNulls": false, 582 | "stacking": { 583 | "group": "A", 584 | "mode": "none" 585 | }, 586 | "thresholdsStyle": { 587 | "mode": "off" 588 | } 589 | }, 590 | "mappings": [], 591 | "thresholds": { 592 | "mode": "absolute", 593 | "steps": [ 594 | { 595 | "color": "green", 596 | "value": null 597 | }, 598 | { 599 | "color": "red", 600 | "value": 80 601 | } 602 | ] 603 | }, 604 | "unit": "Bps" 605 | }, 606 | "overrides": [] 607 | }, 608 | "gridPos": { 609 | "h": 8, 610 | "w": 8, 611 | "x": 16, 612 | "y": 8 613 | }, 614 | "id": 42, 615 | "options": { 616 | "legend": { 617 | "calcs": [ 618 | "mean", 619 | "max" 620 | ], 621 | "displayMode": "list", 622 | "placement": "bottom", 623 | "showLegend": true 624 | }, 625 | "tooltip": { 626 | "mode": "single", 627 | "sort": "none" 628 | } 629 | }, 630 | "targets": [ 631 | { 632 | "datasource": { 633 | "type": "prometheus", 634 | "uid": "PBFA97CFB590B2093" 635 | }, 636 | "editorMode": "code", 637 | "expr": "rate(kafka_bytes_received_total[10s])", 638 | "interval": "1s", 639 | "legendFormat": "Bytes/sec", 640 | "range": true, 641 | "refId": "A" 642 | } 643 | ], 644 | "title": "Kafka Data Volume", 645 | "type": "timeseries" 646 | }, 647 | { 648 | "datasource": { 649 | "type": "prometheus", 650 | "uid": "PBFA97CFB590B2093" 651 | }, 652 | "fieldConfig": { 653 | "defaults": { 654 | "color": { 655 | "mode": "palette-classic" 656 | }, 657 | "custom": { 658 | "axisCenteredZero": false, 659 | "axisColorMode": "text", 660 | "axisLabel": "", 661 | "axisPlacement": "auto", 662 | "barAlignment": 0, 663 | "drawStyle": "line", 664 | "fillOpacity": 40, 665 | "gradientMode": "none", 666 | "hideFrom": { 667 | "legend": false, 668 | "tooltip": false, 669 | "viz": false 670 | }, 671 | "lineInterpolation": "linear", 672 | 
"lineWidth": 1, 673 | "pointSize": 5, 674 | "scaleDistribution": { 675 | "type": "linear" 676 | }, 677 | "showPoints": "auto", 678 | "spanNulls": false, 679 | "stacking": { 680 | "group": "A", 681 | "mode": "normal" 682 | }, 683 | "thresholdsStyle": { 684 | "mode": "off" 685 | } 686 | }, 687 | "mappings": [], 688 | "thresholds": { 689 | "mode": "absolute", 690 | "steps": [ 691 | { 692 | "color": "green", 693 | "value": null 694 | }, 695 | { 696 | "color": "red", 697 | "value": 80 698 | } 699 | ] 700 | } 701 | }, 702 | "overrides": [] 703 | }, 704 | "gridPos": { 705 | "h": 8, 706 | "w": 8, 707 | "x": 0, 708 | "y": 16 709 | }, 710 | "id": 2, 711 | "options": { 712 | "legend": { 713 | "calcs": [ 714 | "mean", 715 | "max" 716 | ], 717 | "displayMode": "table", 718 | "placement": "right", 719 | "showLegend": true 720 | }, 721 | "tooltip": { 722 | "mode": "single", 723 | "sort": "none" 724 | } 725 | }, 726 | "targets": [ 727 | { 728 | "datasource": { 729 | "type": "prometheus", 730 | "uid": "PBFA97CFB590B2093" 731 | }, 732 | "editorMode": "code", 733 | "expr": "rate(flight_requests_total[15s])", 734 | "interval": "1s", 735 | "legendFormat": "Requests/sec", 736 | "range": true, 737 | "refId": "A" 738 | } 739 | ], 740 | "title": "Flight Request Rate", 741 | "type": "timeseries" 742 | }, 743 | { 744 | "datasource": { 745 | "type": "prometheus", 746 | "uid": "PBFA97CFB590B2093" 747 | }, 748 | "fieldConfig": { 749 | "defaults": { 750 | "color": { 751 | "mode": "palette-classic" 752 | }, 753 | "custom": { 754 | "axisCenteredZero": false, 755 | "axisColorMode": "text", 756 | "axisLabel": "", 757 | "axisPlacement": "auto", 758 | "barAlignment": 0, 759 | "drawStyle": "line", 760 | "fillOpacity": 0, 761 | "gradientMode": "none", 762 | "hideFrom": { 763 | "legend": false, 764 | "tooltip": false, 765 | "viz": false 766 | }, 767 | "lineInterpolation": "linear", 768 | "lineWidth": 1, 769 | "pointSize": 5, 770 | "scaleDistribution": { 771 | "type": "linear" 772 | }, 773 | "showPoints": 
"auto", 774 | "spanNulls": false, 775 | "stacking": { 776 | "group": "A", 777 | "mode": "none" 778 | }, 779 | "thresholdsStyle": { 780 | "mode": "off" 781 | } 782 | }, 783 | "mappings": [], 784 | "thresholds": { 785 | "mode": "absolute", 786 | "steps": [ 787 | { 788 | "color": "green", 789 | "value": null 790 | }, 791 | { 792 | "color": "red", 793 | "value": 80 794 | } 795 | ] 796 | }, 797 | "unit": "s" 798 | }, 799 | "overrides": [] 800 | }, 801 | "gridPos": { 802 | "h": 8, 803 | "w": 8, 804 | "x": 8, 805 | "y": 16 806 | }, 807 | "id": 4, 808 | "options": { 809 | "legend": { 810 | "calcs": [], 811 | "displayMode": "list", 812 | "placement": "bottom", 813 | "showLegend": true 814 | }, 815 | "tooltip": { 816 | "mode": "single", 817 | "sort": "none" 818 | } 819 | }, 820 | "targets": [ 821 | { 822 | "datasource": { 823 | "type": "prometheus", 824 | "uid": "PBFA97CFB590B2093" 825 | }, 826 | "editorMode": "code", 827 | "expr": "(rate(flight_request_duration_seconds_sum[15s]) / rate(flight_request_duration_seconds_count[15s])) or vector(0)", 828 | "interval": "1s", 829 | "legendFormat": "Avg", 830 | "range": true, 831 | "refId": "A" 832 | }, 833 | { 834 | "datasource": { 835 | "type": "prometheus", 836 | "uid": "PBFA97CFB590B2093" 837 | }, 838 | "editorMode": "code", 839 | "expr": "histogram_quantile(0.95, sum(rate(flight_request_duration_seconds_bucket[15s])) by (le)) or vector(0)", 840 | "interval": "1s", 841 | "legendFormat": "P95", 842 | "range": true, 843 | "refId": "B" 844 | }, 845 | { 846 | "datasource": { 847 | "type": "prometheus", 848 | "uid": "PBFA97CFB590B2093" 849 | }, 850 | "editorMode": "code", 851 | "expr": "histogram_quantile(0.99, sum(rate(flight_request_duration_seconds_bucket[15s])) by (le)) or vector(0)", 852 | "interval": "1s", 853 | "legendFormat": "P99", 854 | "range": true, 855 | "refId": "C" 856 | }, 857 | { 858 | "datasource": { 859 | "type": "prometheus", 860 | "uid": "PBFA97CFB590B2093" 861 | }, 862 | "editorMode": "code", 863 | "expr": 
"histogram_quantile(0.50, sum(rate(flight_request_duration_seconds_bucket[15s])) by (le)) or vector(0)", 864 | "interval": "1s", 865 | "legendFormat": "Median", 866 | "range": true, 867 | "refId": "D" 868 | } 869 | ], 870 | "title": "Flight Request Duration", 871 | "type": "timeseries" 872 | }, 873 | { 874 | "datasource": { 875 | "type": "prometheus", 876 | "uid": "PBFA97CFB590B2093" 877 | }, 878 | "fieldConfig": { 879 | "defaults": { 880 | "color": { 881 | "mode": "palette-classic" 882 | }, 883 | "custom": { 884 | "axisCenteredZero": false, 885 | "axisColorMode": "text", 886 | "axisLabel": "", 887 | "axisPlacement": "auto", 888 | "barAlignment": 0, 889 | "drawStyle": "line", 890 | "fillOpacity": 0, 891 | "gradientMode": "none", 892 | "hideFrom": { 893 | "legend": false, 894 | "tooltip": false, 895 | "viz": false 896 | }, 897 | "lineInterpolation": "linear", 898 | "lineWidth": 1, 899 | "pointSize": 5, 900 | "scaleDistribution": { 901 | "type": "linear" 902 | }, 903 | "showPoints": "auto", 904 | "spanNulls": false, 905 | "stacking": { 906 | "group": "A", 907 | "mode": "none" 908 | }, 909 | "thresholdsStyle": { 910 | "mode": "off" 911 | } 912 | }, 913 | "mappings": [], 914 | "thresholds": { 915 | "mode": "absolute", 916 | "steps": [ 917 | { 918 | "color": "green", 919 | "value": null 920 | }, 921 | { 922 | "color": "red", 923 | "value": 80 924 | } 925 | ] 926 | }, 927 | "unit": "bytes" 928 | }, 929 | "overrides": [] 930 | }, 931 | "gridPos": { 932 | "h": 8, 933 | "w": 8, 934 | "x": 16, 935 | "y": 16 936 | }, 937 | "id": 10, 938 | "options": { 939 | "legend": { 940 | "calcs": [], 941 | "displayMode": "list", 942 | "placement": "bottom", 943 | "showLegend": true 944 | }, 945 | "tooltip": { 946 | "mode": "single", 947 | "sort": "none" 948 | } 949 | }, 950 | "targets": [ 951 | { 952 | "datasource": { 953 | "type": "prometheus", 954 | "uid": "PBFA97CFB590B2093" 955 | }, 956 | "editorMode": "code", 957 | "expr": "rate(pipeline_bytes_processed_total[10s])",
958 | "interval": "1s", 959 | "legendFormat": "Arrow IPC (Bytes/sec)", 960 | "range": true, 961 | "refId": "A" 962 | }, 963 | { 964 | "datasource": { 965 | "type": "prometheus", 966 | "uid": "PBFA97CFB590B2093" 967 | }, 968 | "editorMode": "code", 969 | "expr": "s3_throughput_mbs * 1024 * 1024", 970 | "interval": "1s", 971 | "legendFormat": "S3 Uploads (Bytes/sec)", 972 | "range": true, 973 | "refId": "B" 974 | } 975 | ], 976 | "title": "Pipeline Throughput (Arrow IPC, S3 Uploads)", 977 | "type": "timeseries" 978 | }, 979 | { 980 | "datasource": { 981 | "type": "prometheus", 982 | "uid": "PBFA97CFB590B2093" 983 | }, 984 | "fieldConfig": { 985 | "defaults": { 986 | "color": { 987 | "mode": "palette-classic" 988 | }, 989 | "custom": { 990 | "axisCenteredZero": false, 991 | "axisColorMode": "text", 992 | "axisLabel": "", 993 | "axisPlacement": "auto", 994 | "barAlignment": 0, 995 | "drawStyle": "line", 996 | "fillOpacity": 0, 997 | "gradientMode": "none", 998 | "hideFrom": { 999 | "legend": false, 1000 | "tooltip": false, 1001 | "viz": false 1002 | }, 1003 | "lineInterpolation": "linear", 1004 | "lineWidth": 1, 1005 | "pointSize": 5, 1006 | "scaleDistribution": { 1007 | "type": "linear" 1008 | }, 1009 | "showPoints": "auto", 1010 | "spanNulls": false, 1011 | "stacking": { 1012 | "group": "A", 1013 | "mode": "none" 1014 | }, 1015 | "thresholdsStyle": { 1016 | "mode": "off" 1017 | } 1018 | }, 1019 | "mappings": [], 1020 | "thresholds": { 1021 | "mode": "absolute", 1022 | "steps": [ 1023 | { 1024 | "color": "green", 1025 | "value": null 1026 | }, 1027 | { 1028 | "color": "red", 1029 | "value": 80 1030 | } 1031 | ] 1032 | }, 1033 | "unit": "percent" 1034 | }, 1035 | "overrides": [] 1036 | }, 1037 | "gridPos": { 1038 | "h": 8, 1039 | "w": 8, 1040 | "x": 8, 1041 | "y": 24 1042 | }, 1043 | "id": 6, 1044 | "options": { 1045 | "legend": { 1046 | "calcs": [], 1047 | "displayMode": "list", 1048 | "placement": "bottom", 1049 | 
"showLegend": true 1050 | }, 1051 | "tooltip": { 1052 | "mode": "single", 1053 | "sort": "none" 1054 | } 1055 | }, 1056 | "targets": [ 1057 | { 1058 | "datasource": { 1059 | "type": "prometheus", 1060 | "uid": "PBFA97CFB590B2093" 1061 | }, 1062 | "editorMode": "code", 1063 | "expr": "window_queue_utilization", 1064 | "interval": "1s", 1065 | "legendFormat": "Queue Utilization", 1066 | "range": true, 1067 | "refId": "A" 1068 | } 1069 | ], 1070 | "title": "Queue Utilization", 1071 | "type": "timeseries" 1072 | }, 1073 | { 1074 | "datasource": { 1075 | "type": "prometheus", 1076 | "uid": "PBFA97CFB590B2093" 1077 | }, 1078 | "fieldConfig": { 1079 | "defaults": { 1080 | "color": { 1081 | "mode": "palette-classic" 1082 | }, 1083 | "custom": { 1084 | "axisCenteredZero": false, 1085 | "axisColorMode": "text", 1086 | "axisLabel": "", 1087 | "axisPlacement": "auto", 1088 | "barAlignment": 0, 1089 | "drawStyle": "line", 1090 | "fillOpacity": 0, 1091 | "gradientMode": "none", 1092 | "hideFrom": { 1093 | "legend": false, 1094 | "tooltip": false, 1095 | "viz": false 1096 | }, 1097 | "lineInterpolation": "linear", 1098 | "lineWidth": 1, 1099 | "pointSize": 5, 1100 | "scaleDistribution": { 1101 | "type": "linear" 1102 | }, 1103 | "showPoints": "auto", 1104 | "spanNulls": false, 1105 | "stacking": { 1106 | "group": "A", 1107 | "mode": "none" 1108 | }, 1109 | "thresholdsStyle": { 1110 | "mode": "off" 1111 | } 1112 | }, 1113 | "mappings": [], 1114 | "thresholds": { 1115 | "mode": "absolute", 1116 | "steps": [ 1117 | { 1118 | "color": "green", 1119 | "value": null 1120 | }, 1121 | { 1122 | "color": "red", 1123 | "value": 80 1124 | } 1125 | ] 1126 | }, 1127 | "unit": "none" 1128 | }, 1129 | "overrides": [] 1130 | }, 1131 | "gridPos": { 1132 | "h": 8, 1133 | "w": 8, 1134 | "x": 16, 1135 | "y": 24 1136 | }, 1137 | "id": 8, 1138 | "options": { 1139 | "legend": { 1140 | "calcs": [], 1141 | "displayMode": "list", 1142 | "placement": "bottom", 1143 | "showLegend": true 1144 | }, 1145 | 
"tooltip": { 1146 | "mode": "single", 1147 | "sort": "none" 1148 | } 1149 | }, 1150 | "targets": [ 1151 | { 1152 | "datasource": { 1153 | "type": "prometheus", 1154 | "uid": "PBFA97CFB590B2093" 1155 | }, 1156 | "editorMode": "code", 1157 | "expr": "window_backpressure", 1158 | "interval": "1s", 1159 | "legendFormat": "Backpressure", 1160 | "range": true, 1161 | "refId": "A" 1162 | } 1163 | ], 1164 | "title": "Backpressure Levels", 1165 | "type": "timeseries" 1166 | }, 1167 | { 1168 | "datasource": { 1169 | "type": "prometheus", 1170 | "uid": "PBFA97CFB590B2093" 1171 | }, 1172 | "fieldConfig": { 1173 | "defaults": { 1174 | "color": { 1175 | "mode": "palette-classic" 1176 | }, 1177 | "custom": { 1178 | "axisCenteredZero": false, 1179 | "axisColorMode": "text", 1180 | "axisLabel": "", 1181 | "axisPlacement": "auto", 1182 | "barAlignment": 0, 1183 | "drawStyle": "line", 1184 | "fillOpacity": 0, 1185 | "gradientMode": "none", 1186 | "hideFrom": { 1187 | "legend": false, 1188 | "tooltip": false, 1189 | "viz": false 1190 | }, 1191 | "lineInterpolation": "linear", 1192 | "lineWidth": 1, 1193 | "pointSize": 5, 1194 | "scaleDistribution": { 1195 | "type": "linear" 1196 | }, 1197 | "showPoints": "auto", 1198 | "spanNulls": false, 1199 | "stacking": { 1200 | "group": "A", 1201 | "mode": "none" 1202 | }, 1203 | "thresholdsStyle": { 1204 | "mode": "off" 1205 | } 1206 | }, 1207 | "mappings": [], 1208 | "thresholds": { 1209 | "mode": "absolute", 1210 | "steps": [ 1211 | { 1212 | "color": "green", 1213 | "value": null 1214 | }, 1215 | { 1216 | "color": "red", 1217 | "value": 80 1218 | } 1219 | ] 1220 | }, 1221 | "unit": "bytes" 1222 | }, 1223 | "overrides": [] 1224 | }, 1225 | "gridPos": { 1226 | "h": 8, 1227 | "w": 8, 1228 | "x": 16, 1229 | "y": 32 1230 | }, 1231 | "id": 12, 1232 | "options": { 1233 | "legend": { 1234 | "calcs": [], 1235 | "displayMode": "list", 1236 | "placement": "bottom", 1237 | "showLegend": true 1238 | }, 1239 | "tooltip": { 1240 | "mode": "single", 1241 | 
"sort": "none" 1242 | } 1243 | }, 1244 | "targets": [ 1245 | { 1246 | "datasource": { 1247 | "type": "prometheus", 1248 | "uid": "PBFA97CFB590B2093" 1249 | }, 1250 | "editorMode": "code", 1251 | "expr": "memory_current_bytes", 1252 | "interval": "1s", 1253 | "legendFormat": "Current RSS", 1254 | "range": true, 1255 | "refId": "A" 1256 | }, 1257 | { 1258 | "datasource": { 1259 | "type": "prometheus", 1260 | "uid": "PBFA97CFB590B2093" 1261 | }, 1262 | "editorMode": "code", 1263 | "expr": "memory_heap_allocated_bytes", 1264 | "interval": "1s", 1265 | "legendFormat": "Heap Allocated", 1266 | "range": true, 1267 | "refId": "B" 1268 | }, 1269 | { 1270 | "datasource": { 1271 | "type": "prometheus", 1272 | "uid": "PBFA97CFB590B2093" 1273 | }, 1274 | "editorMode": "code", 1275 | "expr": "topic_queue_memory_bytes", 1276 | "interval": "1s", 1277 | "legendFormat": "Topic Queue Memory", 1278 | "range": true, 1279 | "refId": "C" 1280 | }, 1281 | { 1282 | "datasource": { 1283 | "type": "prometheus", 1284 | "uid": "PBFA97CFB590B2093" 1285 | }, 1286 | "editorMode": "code", 1287 | "expr": "memory_arrow_bytes", 1288 | "interval": "1s", 1289 | "legendFormat": "Arrow RecordBatches", 1290 | "range": true, 1291 | "refId": "D" 1292 | } 1293 | ], 1294 | "title": "Memory Usage", 1295 | "type": "timeseries" 1296 | }, 1297 | { 1298 | "datasource": { 1299 | "type": "prometheus", 1300 | "uid": "PBFA97CFB590B2093" 1301 | }, 1302 | "fieldConfig": { 1303 | "defaults": { 1304 | "color": { 1305 | "mode": "palette-classic" 1306 | }, 1307 | "custom": { 1308 | "axisCenteredZero": false, 1309 | "axisColorMode": "text", 1310 | "axisLabel": "", 1311 | "axisPlacement": "auto", 1312 | "barAlignment": 0, 1313 | "drawStyle": "line", 1314 | "fillOpacity": 0, 1315 | "gradientMode": "none", 1316 | "hideFrom": { 1317 | "legend": false, 1318 | "tooltip": false, 1319 | "viz": false 1320 | }, 1321 | "lineInterpolation": "linear", 1322 | "lineWidth": 1, 1323 | "pointSize": 5, 1324 | "scaleDistribution": { 1325 | 
"type": "linear" 1326 | }, 1327 | "showPoints": "auto", 1328 | "spanNulls": false, 1329 | "stacking": { 1330 | "group": "A", 1331 | "mode": "none" 1332 | }, 1333 | "thresholdsStyle": { 1334 | "mode": "off" 1335 | } 1336 | }, 1337 | "mappings": [], 1338 | "thresholds": { 1339 | "mode": "absolute", 1340 | "steps": [ 1341 | { 1342 | "color": "green", 1343 | "value": null 1344 | }, 1345 | { 1346 | "color": "red", 1347 | "value": 80 1348 | } 1349 | ] 1350 | } 1351 | }, 1352 | "overrides": [] 1353 | }, 1354 | "gridPos": { 1355 | "h": 8, 1356 | "w": 8, 1357 | "x": 0, 1358 | "y": 40 1359 | }, 1360 | "id": 16, 1361 | "options": { 1362 | "legend": { 1363 | "calcs": [], 1364 | "displayMode": "list", 1365 | "placement": "bottom", 1366 | "showLegend": true 1367 | }, 1368 | "tooltip": { 1369 | "mode": "single", 1370 | "sort": "none" 1371 | } 1372 | }, 1373 | "targets": [ 1374 | { 1375 | "datasource": { 1376 | "type": "prometheus", 1377 | "uid": "PBFA97CFB590B2093" 1378 | }, 1379 | "editorMode": "code", 1380 | "expr": "rate(rate_limit_exceeded_total[15s])", 1381 | "interval": "1s", 1382 | "legendFormat": "Rejected", 1383 | "range": true, 1384 | "refId": "A" 1385 | }, 1386 | { 1387 | "datasource": { 1388 | "type": "prometheus", 1389 | "uid": "PBFA97CFB590B2093" 1390 | }, 1391 | "editorMode": "code", 1392 | "expr": "rate(rate_limit_accepted_total[15s])", 1393 | "interval": "1s", 1394 | "legendFormat": "Accepted", 1395 | "range": true, 1396 | "refId": "B" 1397 | } 1398 | ], 1399 | "title": "Rate Limiter Metrics", 1400 | "type": "timeseries" 1401 | }, 1402 | { 1403 | "datasource": { 1404 | "type": "prometheus", 1405 | "uid": "PBFA97CFB590B2093" 1406 | }, 1407 | "fieldConfig": { 1408 | "defaults": { 1409 | "color": { 1410 | "mode": "palette-classic" 1411 | }, 1412 | "custom": { 1413 | "axisCenteredZero": false, 1414 | "axisColorMode": "text", 1415 | "axisLabel": "", 1416 | "axisPlacement": "auto", 1417 | "barAlignment": 0, 1418 | "drawStyle": "line", 1419 | "fillOpacity": 0, 1420 | 
"gradientMode": "none", 1421 | "hideFrom": { 1422 | "legend": false, 1423 | "tooltip": false, 1424 | "viz": false 1425 | }, 1426 | "lineInterpolation": "linear", 1427 | "lineWidth": 1, 1428 | "pointSize": 5, 1429 | "scaleDistribution": { 1430 | "type": "linear" 1431 | }, 1432 | "showPoints": "auto", 1433 | "spanNulls": false, 1434 | "stacking": { 1435 | "group": "A", 1436 | "mode": "none" 1437 | }, 1438 | "thresholdsStyle": { 1439 | "mode": "off" 1440 | } 1441 | }, 1442 | "mappings": [], 1443 | "thresholds": { 1444 | "mode": "absolute", 1445 | "steps": [ 1446 | { 1447 | "color": "green", 1448 | "value": null 1449 | }, 1450 | { 1451 | "color": "red", 1452 | "value": 80 1453 | } 1454 | ] 1455 | } 1456 | }, 1457 | "overrides": [] 1458 | }, 1459 | "gridPos": { 1460 | "h": 8, 1461 | "w": 8, 1462 | "x": 8, 1463 | "y": 40 1464 | }, 1465 | "id": 14, 1466 | "options": { 1467 | "legend": { 1468 | "calcs": [], 1469 | "displayMode": "list", 1470 | "placement": "bottom", 1471 | "showLegend": true 1472 | }, 1473 | "tooltip": { 1474 | "mode": "single", 1475 | "sort": "none" 1476 | } 1477 | }, 1478 | "targets": [ 1479 | { 1480 | "datasource": { 1481 | "type": "prometheus", 1482 | "uid": "PBFA97CFB590B2093" 1483 | }, 1484 | "editorMode": "code", 1485 | "expr": "rate(window_queue_full_total[15s])", 1486 | "interval": "1s", 1487 | "legendFormat": "Low Latency Full", 1488 | "range": true, 1489 | "refId": "A" 1490 | }, 1491 | { 1492 | "datasource": { 1493 | "type": "prometheus", 1494 | "uid": "PBFA97CFB590B2093" 1495 | }, 1496 | "editorMode": "code", 1497 | "expr": "rate(backpressure_throttles_total[15s])", 1498 | "interval": "1s", 1499 | "legendFormat": "Throttled Requests", 1500 | "range": true, 1501 | "refId": "C" 1502 | } 1503 | ], 1504 | "title": "Queue Rejections", 1505 | "type": "timeseries" 1506 | } 1507 | ], 1508 | "refresh": "1s", 1509 | "schemaVersion": 38, 1510 | "style": "dark", 1511 | "tags": [], 1512 | "templating": { 1513 | "list": [] 1514 | }, 1515 | "time": { 1516 | 
"from": "now-15m", 1517 | "to": "now" 1518 | }, 1519 | "timepicker": { 1520 | "refresh_intervals": [ 1521 | "1s", 1522 | "5s", 1523 | "10s", 1524 | "30s", 1525 | "1m", 1526 | "5m", 1527 | "15m", 1528 | "30m", 1529 | "1h", 1530 | "2h", 1531 | "1d" 1532 | ] 1533 | }, 1534 | "timezone": "", 1535 | "title": "BoilStream Dashboard", 1536 | "uid": "b34a9d46-asd2-41f9-9c82-98af4b502a9d", 1537 | "version": 1, 1538 | "weekStart": "" 1539 | } --------------------------------------------------------------------------------