├── .gitignore
├── LICENSE
├── README.md
├── antennas-kafka
├── .env.example
├── .gitignore
├── Architecture.png
├── README.md
├── backend
│ ├── .dockerignore
│ ├── .eslintrc.json
│ ├── Dockerfile
│ ├── package.json
│ ├── src
│ │ ├── MaterializeClient
│ │ │ ├── SubscribeStream
│ │ │ │ └── index.ts
│ │ │ ├── TransformStream
│ │ │ │ └── index.ts
│ │ │ ├── WriteStream
│ │ │ │ └── index.ts
│ │ │ └── index.ts
│ │ └── app.ts
│ └── tsconfig.json
├── compose.yaml
├── helper
│ ├── .dockerignore
│ ├── .eslintrc.json
│ ├── Dockerfile
│ ├── package-lock.json
│ ├── package.json
│ ├── src
│ │ ├── app.ts
│ │ └── data.ts
│ └── tsconfig.json
└── microservice
│ ├── .dockerignore
│ ├── .eslintrc.json
│ ├── Dockerfile
│ ├── package-lock.json
│ ├── package.json
│ ├── src
│ │ └── app.ts
│ └── tsconfig.json
├── antennas-postgres
├── .env.example
├── .gitignore
├── Architecture.png
├── README.md
├── backend
│ ├── .dockerignore
│ ├── .eslintrc.json
│ ├── Dockerfile
│ ├── package-lock.json
│ ├── package.json
│ ├── src
│ │ ├── MaterializeClient
│ │ │ ├── SubscribeStream
│ │ │ │ └── index.ts
│ │ │ ├── TransformStream
│ │ │ │ └── index.ts
│ │ │ ├── WriteStream
│ │ │ │ └── index.ts
│ │ │ └── index.ts
│ │ └── app.ts
│ └── tsconfig.json
├── compose.yaml
├── frontend
│ ├── .dockerignore
│ ├── .gitignore
│ ├── Dockerfile
│ ├── README.md
│ ├── craco.config.js
│ ├── package-lock.json
│ ├── package.json
│ ├── public
│ │ ├── favicon.ico
│ │ ├── index.html
│ │ ├── logo192.png
│ │ ├── logo512.png
│ │ ├── manifest.json
│ │ └── robots.txt
│ ├── src
│ │ ├── App.test.tsx
│ │ ├── App.tsx
│ │ ├── ColorModeSwitcher.tsx
│ │ ├── Logo.tsx
│ │ ├── components
│ │ │ └── AntennasMap
│ │ │ │ ├── ButtonSelection
│ │ │ │ │ └── index.js
│ │ │ │ ├── DotClone
│ │ │ │ │ └── index.js
│ │ │ │ └── index.tsx
│ │ ├── index.tsx
│ │ ├── link.ts
│ │ ├── logo.svg
│ │ ├── react-app-env.d.ts
│ │ ├── reportWebVitals.ts
│ │ ├── serviceWorker.ts
│ │ ├── setupTests.ts
│ │ ├── test-utils.tsx
│ │ └── theme.ts
│ ├── tsconfig.json
│ ├── webpack.config.js
│ └── yarn.lock
├── helper
│ ├── .dockerignore
│ ├── .eslintrc.json
│ ├── Dockerfile
│ ├── package-lock.json
│ ├── package.json
│ ├── src
│ │ └── app.ts
│ └── tsconfig.json
├── microservice
│ ├── .dockerignore
│ ├── .eslintrc.json
│ ├── Dockerfile
│ ├── package-lock.json
│ ├── package.json
│ ├── src
│ │ └── app.ts
│ └── tsconfig.json
└── postgres
│ ├── Dockerfile
│ ├── create.sh
│ ├── create.sql
│ ├── rollback.sql
│ ├── seed.sh
│ └── seed.sql
├── connection-examples
├── .gitignore
├── CONTRIBUTING.md
├── README.md
├── deno
│ ├── README.md
│ ├── connection.ts
│ ├── example.ts
│ ├── insert.ts
│ ├── query.ts
│ ├── source.ts
│ ├── state.ts
│ ├── subscribe.ts
│ └── view.ts
├── fastapi
│ └── README.md
├── go
│ ├── README.md
│ ├── connection.go
│ ├── go.mod
│ ├── go.sum
│ ├── insert.go
│ ├── query.go
│ ├── source.go
│ ├── state.go
│ ├── subscribe.go
│ └── view.go
├── java
│ ├── README.md
│ ├── connection.java
│ ├── insert.java
│ ├── query.java
│ ├── source.java
│ ├── subscribe.java
│ └── view.java
├── lua
│ ├── README.md
│ ├── connection.lua
│ ├── insert.lua
│ ├── query.lua
│ ├── source.lua
│ ├── state.lua
│ ├── subscribe.lua
│ ├── utils.lua
│ └── views.lua
├── nodejs
│ ├── README.md
│ ├── connection.js
│ ├── insert.js
│ ├── package.json
│ ├── query.js
│ ├── source.js
│ ├── state.js
│ ├── subscribe.js
│ └── view.js
├── php
│ ├── README.md
│ ├── connection.php
│ ├── insert.php
│ ├── query.php
│ ├── source.php
│ ├── state.php
│ ├── subscribe.php
│ └── views.php
├── python
│ ├── README.md
│ ├── connection.py
│ ├── insert.py
│ ├── pg8000
│ │ ├── README.md
│ │ ├── connection.py
│ │ ├── insert.py
│ │ ├── query.py
│ │ ├── source.py
│ │ ├── state.py
│ │ ├── subscribe.py
│ │ └── view.py
│ ├── query.py
│ ├── source.py
│ ├── state.py
│ ├── subscribe-psycopg3.py
│ ├── subscribe.py
│ └── view.py
├── ruby
│ ├── README.md
│ ├── connection.rb
│ ├── insert.rb
│ ├── query.rb
│ ├── source.rb
│ ├── state.rb
│ ├── subscribe.rb
│ └── view.rb
├── rust
│ ├── Cargo.toml
│ ├── README.md
│ └── src
│ │ ├── connection.rs
│ │ ├── insert.rs
│ │ ├── main.rs
│ │ ├── query.rs
│ │ ├── source.rs
│ │ ├── subscribe.rs
│ │ ├── table.rs
│ │ └── view.rs
├── scripts
│ └── init.sh
└── typescript
│ ├── README.md
│ ├── package.json
│ ├── src
│ │ ├── connection.ts
│ │ ├── insert.ts
│ │ ├── query.ts
│ │ ├── source.ts
│ │ ├── state.ts
│ │ ├── subscribe.ts
│ │ └── view.ts
│ └── tsconfig.json
├── dbt-get-started
├── .gitignore
├── .sqlfluff
├── .sqlfluffignore
├── README.md
├── dbt_project.yml
├── models
│ ├── .DS_Store
│ ├── ecommerce.yml
│ ├── marts
│ │ └── item_summary_5min.sql
│ ├── sources
│ │ ├── items.sql
│ │ └── purchases.sql
│ └── staging
│ │ ├── item_purchases.sql
│ │ └── item_summary.sql
└── profiles.yml
├── dbt-jaffle-shop
└── README.md
├── ecommerce-redpanda
├── .env.example
├── README.md
├── compose.yaml
├── connect
│ └── Dockerfile
├── demo.png
└── mysql
│ └── mysql_dbz.sh
├── ecommerce
├── .env.example
├── .gitignore
├── README.md
├── compose.yaml
├── demo.png
├── loadgen
│ ├── Dockerfile
│ ├── clear_purchases.py
│ ├── generate_load.py
│ └── requirements.txt
└── mysql
│ ├── mysql.cnf
│ ├── mysql_bootstrap.sql
│ └── mysql_dbz.sh
├── integrations
├── datadog
│ ├── README.md
│ ├── config.yaml
│ ├── dashboard.json
│ ├── datadog
│ │ ├── Dockerfile
│ │ └── conf.d
│ │ │ └── openmetrics.yaml
│ └── docker-compose.yaml
├── grafana
│ ├── cloud
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── agent.yaml
│ │ ├── config.yml.example
│ │ └── docker-compose.yaml
│ └── local
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── config.yml.example
│ │ ├── docker-compose.yaml
│ │ ├── misc
│ │ ├── dashboard.yaml
│ │ ├── dashboards
│ │ │ └── dashboard.json
│ │ └── datasources
│ │ │ └── prometheus.yml
│ │ └── prometheus.yml
└── terraform
│ ├── .gitignore
│ ├── README.md
│ ├── ec2-ssh-bastion
│ ├── .gitignore
│ ├── README.md
│ └── main.tf
│ ├── hashicorp-vault
│ ├── README.md
│ └── main.tf
│ ├── msk-privatelink
│ ├── README.md
│ └── main.tf
│ ├── secret-stores
│ └── README.md
│ └── sentinel-policies
│ ├── .gitignore
│ ├── README.md
│ ├── enforce-cluster-size.sentinel
│ ├── main.tf
│ └── restrict_materialize_cluster_size.rego
└── streamlit-subscribe
├── .env.example
├── README.md
├── requirements.txt
├── sensors.json
└── subscribe.py
/.gitignore:
--------------------------------------------------------------------------------
1 | .mypy_cache
2 | __pycache__
3 | .env
4 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Materialize Demos
2 |
3 | [](https://materialize.com/s/chat)
4 |
5 | Materialize is the real-time data integration platform that enables you to use SQL to transform, deliver, and act on fast-changing data.
6 |
7 | This repo is a collection of sample code that walks you through using Materialize for different use cases, and with different stacks. All demos assume that you have [signed up for a Materialize account](https://materialize.com/register/).
8 |
9 | ## Use Cases
10 |
11 |
12 |
13 |
14 | Demo |
15 | Stack |
16 | Description |
17 |
18 |
19 |
20 |
21 | antennas-kafka |
22 | Node.js, GraphQL, Kafka |
23 | Tracking key performance indicators for infrastructure monitoring |
24 |
25 |
26 | antennas-postgres |
27 | Node.js, GraphQL, Postgres |
28 |
29 |
30 | ecommerce |
31 | MySQL, Debezium, Kafka, Metabase |
32 | Building a streaming ETL pipeline for e-commerce analytics |
33 |
34 |
35 | ecommerce-redpanda |
36 | MySQL, Debezium, Redpanda, Metabase |
37 |
38 |
39 |
40 |
41 | ## Ecosystem
42 |
43 | ### dbt
44 |
45 |
66 |
67 | ## Integration Examples
68 |
69 | This is a collection of reference integrations for various operational tools.
70 |
71 | - [datadog](./integrations/datadog)
72 | - [grafana](./integrations/grafana)
73 | - [terraform](./integrations/terraform)
74 |
75 |
76 | ## Connection Examples
77 |
78 | This is a collection of reference examples for common language-specific PostgreSQL drivers and PostgreSQL-compatible ORMs that have been tested with Materialize.
79 |
80 | - [PHP](./connection-examples/php)
81 | - [NodeJS](./connection-examples/nodejs)
82 | - [TypeScript](./connection-examples/typescript)
83 | - [Deno](./connection-examples/deno)
84 | - [Java](./connection-examples/java)
85 | - [Python](./connection-examples/python)
86 | - [FastAPI](./connection-examples/fastapi)
87 | - [Ruby](./connection-examples/ruby)
88 | - [Go](./connection-examples/go)
89 | - [Lua](./connection-examples/lua)
90 | - [Rust](./connection-examples/rust)
91 |
92 | ## Getting support
93 |
94 | If you run into a snag or need support as you explore the demos in this repo, join the Materialize [Slack community](https://materialize.com/s/chat) or [open an issue](https://github.com/MaterializeInc/demos/issues/new)!
95 |
--------------------------------------------------------------------------------
/antennas-kafka/.env.example:
--------------------------------------------------------------------------------
1 | MZ_HOST=
2 | MZ_PORT=6875
3 | MZ_USER=
4 | MZ_PASSWORD=
5 | MZ_DATABASE=materialize
6 |
7 | # Upstash Details
8 | KAFKA_BROKER=
9 | KAFKA_SASL_MECHANISM=SCRAM-SHA-256
10 | KAFKA_USERNAME=
11 | KAFKA_PASSWORD=
12 |
--------------------------------------------------------------------------------
/antennas-kafka/.gitignore:
--------------------------------------------------------------------------------
1 | .env
--------------------------------------------------------------------------------
/antennas-kafka/Architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MaterializeInc/demos/a8ecb5a24b09c53153942612c80f713b8f909335/antennas-kafka/Architecture.png
--------------------------------------------------------------------------------
/antennas-kafka/backend/.dockerignore:
--------------------------------------------------------------------------------
1 | node_modules
2 | npm-debug.log
3 |
--------------------------------------------------------------------------------
/antennas-kafka/backend/.eslintrc.json:
--------------------------------------------------------------------------------
1 | {
2 | "env": {
3 | "browser": true,
4 | "es2021": true
5 | },
6 | "extends": ["eslint:recommended", "plugin:@typescript-eslint/recommended"],
7 | "parser": "@typescript-eslint/parser",
8 | "parserOptions": {
9 | "ecmaVersion": "latest",
10 | "sourceType": "module"
11 | },
12 | "plugins": ["@typescript-eslint"],
13 | "rules": {}
14 | }
15 |
--------------------------------------------------------------------------------
/antennas-kafka/backend/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM node:16
2 |
3 | # Create app directory
4 | WORKDIR /usr/src/app
5 |
6 | RUN apt-get update && apt-get install --no-install-recommends -y wget \
7 | && apt-get clean \
8 | && rm -rf /var/lib/apt/lists/*
9 |
10 | ENV DOCKERIZE_VERSION v0.6.1
11 | RUN wget --progress=dot:giga https://github.com/jwilder/dockerize/releases/download/$DOCKERIZE_VERSION/dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz \
12 | && tar -C /usr/local/bin -xzvf dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz \
13 | && rm dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz
14 |
15 | # Install app dependencies
16 | # A wildcard is used to ensure both package.json AND package-lock.json are copied
17 | # where available (npm@5+)
18 | COPY package*.json ./
19 |
20 | RUN npm install
21 |
22 | # Bundle app source
23 | COPY . .
24 |
25 | EXPOSE 4000
26 | CMD [ "npm", "start" ]
27 |
--------------------------------------------------------------------------------
/antennas-kafka/backend/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "backend",
3 | "version": "1.0.0",
4 | "description": "",
5 | "main": "dist/app.js",
6 | "scripts": {
7 | "start": "tsc && node dist/app.js",
8 | "lint": "eslint . --ext .ts",
9 | "test": "echo \"Error: no test specified\" && exit 1"
10 | },
11 | "keywords": [],
12 | "author": "",
13 | "license": "ISC",
14 | "devDependencies": {
15 | "@typescript-eslint/eslint-plugin": "^5.10.1",
16 | "@typescript-eslint/parser": "^5.10.1",
17 | "eslint": "^8.7.0",
18 | "typescript": "^4.5.5"
19 | },
20 | "dependencies": {
21 | "@types/pg": "^8.6.4",
22 | "apollo-server-express": "^3.6.2",
23 | "express": "^4.17.3",
24 | "graphql": "^16.3.0",
25 | "graphql-ws": "^5.5.5",
26 | "kafkajs": "^1.16.0",
27 | "pg": "^8.7.3",
28 | "ws": "^8.4.2"
29 | }
30 | }
31 |
--------------------------------------------------------------------------------
/antennas-kafka/backend/src/MaterializeClient/SubscribeStream/index.ts:
--------------------------------------------------------------------------------
1 | import {Readable} from 'stream';
2 | import {Client} from 'pg';
3 |
4 | /**
5 | * Thanks to Petros Angelatos
6 | * https://gist.github.com/petrosagg/804e5f009dee1cb8af688654ba396258
7 | * This class reads from a cursor in PostgreSQL
8 | */
9 | export default class SubscribeStream extends Readable {
10 | client: Client;
11 |
12 | cursorId: string;
13 |
14 | pendingReads: number;
15 |
16 | currentRows: Array;
17 |
18 | BreakException = {};
19 |
20 | intervalId: NodeJS.Timer;
21 |
22 | runningQuery: boolean;
23 |
24 | constructor(client: Client, cursorId: string) {
25 | super({
26 | highWaterMark: 1000,
27 | objectMode: true,
28 | });
29 | this.client = client;
30 | this.cursorId = cursorId;
31 | this.pendingReads = 0;
32 | this.runningQuery = false;
33 | }
34 |
35 | /**
36 | * Readable method to fetch subscribe data
37 | * @param n
38 | */
39 | _read(n: number): void {
40 | if (this.pendingReads <= 0) {
41 | this.client
42 | .query(`FETCH ${n} ${this.cursorId} WITH (TIMEOUT='1s');`)
43 | .then(({rows, rowCount}) => {
44 | if (rowCount === 0) {
45 | console.log('Empty results from subscribe. Staring interval read.');
46 | /**
47 | * Wait for data from the subscribe
48 | */
49 | this.intervalId = setInterval(() => this.intervalRead(n), 500);
50 | } else {
51 | /**
52 | * Process data
53 | */
54 | this.process(rows);
55 | }
56 | })
57 | .catch(this.catchClientErr);
58 | } else {
59 | /**
60 | * Process any additional rows
61 | */
62 | this.currentRows = this.currentRows.slice(
63 | this.currentRows.length - this.pendingReads,
64 | this.currentRows.length
65 | );
66 | try {
67 | this.currentRows.forEach((row) => {
68 | this.pendingReads -= 1;
69 | const backPressure = !this.push(row);
70 | if (backPressure) {
71 | throw this.BreakException;
72 | }
73 | });
74 | } catch (e) {
75 | if (e !== this.BreakException) throw e;
76 | }
77 | }
78 | }
79 |
80 | /**
81 | * Capture any error while fetching subscribe results
82 | * @param clientReasonErr
83 | */
84 | catchClientErr(clientReasonErr: any) {
85 | console.error('Error querying this cursor.');
86 | console.error(clientReasonErr);
87 |
88 | if (this.intervalId) {
89 | clearInterval(this.intervalId);
90 | }
91 |
92 | this.destroy(clientReasonErr);
93 | }
94 |
95 | /**
96 | * Process and push rows
97 | * @param rows
98 | */
99 | process(rows: Array): void {
100 | try {
101 | rows.forEach((row) => {
102 | this.pendingReads -= 1;
103 | const backPressure = !this.push(row);
104 | if (backPressure) {
105 | console.log('Oops. Backpressure.');
106 | throw this.BreakException;
107 | }
108 | });
109 | } catch (e) {
110 | if (e !== this.BreakException) throw e;
111 | }
112 | }
113 |
114 | /**
115 | * Interval fetching used when there are no results from the subscribe
116 | * Rather than pausing and waiting for results
117 | * Run a subscribe fetch every 500ms.
118 | * This is needed because if there is no update from the subscribe the pipe will close.
119 | * Another alternative is to send dummy data but this could end up filtering data all the time.
120 | * Another alternative is to push whenever is available rather than "poll" but how backpressure is handled?
121 | * @param n
122 | */
123 | intervalRead(n: number): void {
124 | if (this.runningQuery === false) {
125 | if (this.destroyed) {
126 | clearInterval(this.intervalId);
127 | return;
128 | }
129 |
130 | this.runningQuery = true;
131 | this.client
132 | .query(`FETCH ${n} ${this.cursorId} WITH (TIMEOUT='1s');`)
133 | .then(({rows, rowCount}) => {
134 | if (rowCount > 0) {
135 | this.process(rows);
136 | clearInterval(this.intervalId);
137 | console.log('New results from the subscribe. Finishing interval read.');
138 | } else {
139 | console.log('Nothing from interval read.');
140 | }
141 | })
142 | .catch(this.catchClientErr)
143 | .finally(() => {
144 | this.runningQuery = false;
145 | });
146 | }
147 | }
148 | }
149 |
--------------------------------------------------------------------------------
/antennas-kafka/backend/src/MaterializeClient/TransformStream/index.ts:
--------------------------------------------------------------------------------
1 | import {Transform} from 'stream';
2 |
3 | interface Antenna {
4 | antenna_id: string;
5 | geojson: string;
6 | performance: number;
7 | }
8 |
9 | /**
10 | * This class creates a batch of chunks. In this way every chunk is not a row but an array of rows.
11 | * This will improve the performance of the writing.
12 | * A timeout is needed in case the batch length is lower than the highwatermark for a long period of time.
13 | */
14 | export default class TransformStream extends Transform {
15 | batch = new Array();
16 |
17 | size: number;
18 |
19 | constructor() {
20 | super({
21 | highWaterMark: 100,
22 | objectMode: true,
23 | });
24 |
25 | this.cleanBatch();
26 | }
27 |
28 | cleanBatch() {
29 | this.batch = new Array();
30 | }
31 |
32 | _transform(row: any, encoding: string, callback: () => void) {
33 | const {mz_progressed: mzProgressed} = row;
34 |
35 | if (mzProgressed) {
36 | this.push(this.batch);
37 | this.cleanBatch();
38 | } else {
39 | this.batch.push(row);
40 | }
41 | callback();
42 | }
43 |
44 | _flush(callback: () => void) {
45 | if (this.batch.length) {
46 | this.push(this.batch);
47 | this.cleanBatch();
48 | }
49 | callback();
50 | }
51 | }
52 |
--------------------------------------------------------------------------------
/antennas-kafka/backend/src/MaterializeClient/WriteStream/index.ts:
--------------------------------------------------------------------------------
1 | import {Writable} from 'stream';
2 |
3 | /**
4 | * This class is in charge of writing every chunk (array of rows)
5 | * into a redis instance to send to all the users.
6 | */
7 | export default class WriteStream extends Writable {
8 | listener: (results: Array) => void;
9 |
10 | constructor(listener: (results: Array) => void) {
11 | super({
12 | highWaterMark: 1000,
13 | objectMode: true,
14 | });
15 |
16 | this.listener = listener;
17 | }
18 |
19 | _write(rows: Array, encoding: BufferEncoding, callback: (error?: Error) => void): void {
20 | if (rows && rows.length > 0) {
21 | this.listener(rows);
22 | }
23 |
24 | callback();
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/antennas-kafka/backend/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "module": "commonjs",
4 | "esModuleInterop": true,
5 | "target": "es6",
6 | "moduleResolution": "node",
7 | "sourceMap": true,
8 | "outDir": "dist",
9 | "lib": ["esnext.asynciterable"]
10 | },
11 | "lib": ["es2015"]
12 | }
13 |
--------------------------------------------------------------------------------
/antennas-kafka/compose.yaml:
--------------------------------------------------------------------------------
1 | services:
2 | helper:
3 | container_name: helper
4 | build:
5 | context: ./helper
6 | init: true
7 | environment:
8 | MZ_HOST: ${MZ_HOST:-materialized}
9 | MZ_PORT: ${MZ_PORT:-6875}
10 | MZ_USER: ${MZ_USER:-materialize}
11 | MZ_PASSWORD: ${MZ_PASSWORD:-materialize}
12 | MZ_DATABASE: ${MZ_DATABASE:-materialize}
13 | KAFKA_BROKER: ${KAFKA_BROKER:-broker:29092}
14 | KAFKA_SASL_MECHANISM: ${KAFKA_SASL_MECHANISM:-plain}
15 | KAFKA_USERNAME: ${KAFKA_USERNAME:-admin}
16 | KAFKA_PASSWORD: ${KAFKA_PASSWORD:-admin-secret}
17 |
18 | backend:
19 | container_name: backend
20 | build:
21 | context: ./backend
22 | init: true
23 | ports:
24 | - 4000:4000
25 | depends_on:
26 | - helper
27 | environment:
28 | MZ_HOST: ${MZ_HOST:-materialized}
29 | MZ_PORT: ${MZ_PORT:-6875}
30 | MZ_USER: ${MZ_USER:-materialize}
31 | MZ_PASSWORD: ${MZ_PASSWORD:-materialize}
32 | MZ_DATABASE: ${MZ_DATABASE:-materialize}
33 | KAFKA_BROKER: ${KAFKA_BROKER:-broker:29092}
34 | KAFKA_SASL_MECHANISM: ${KAFKA_SASL_MECHANISM:-plain}
35 | KAFKA_USERNAME: ${KAFKA_USERNAME:-admin}
36 | KAFKA_PASSWORD: ${KAFKA_PASSWORD:-admin-secret}
37 |
38 | frontend:
39 | container_name: frontend
40 | build:
41 | context: ../antennas-postgres/frontend
42 | init: true
43 | ports:
44 | - 3000:3000
45 | depends_on:
46 | - backend
47 |
48 | microservice:
49 | container_name: microservice
50 | build:
51 | context: ./microservice
52 | init: true
53 | depends_on:
54 | - helper
55 | - backend
56 | environment:
57 | MZ_HOST: ${MZ_HOST:-materialized}
58 | MZ_PORT: ${MZ_PORT:-6875}
59 | MZ_USER: ${MZ_USER:-materialize}
60 | MZ_PASSWORD: ${MZ_PASSWORD:-materialize}
61 | MZ_DATABASE: ${MZ_DATABASE:-materialize}
62 | KAFKA_BROKER: ${KAFKA_BROKER:-broker:29092}
63 | KAFKA_SASL_MECHANISM: ${KAFKA_SASL_MECHANISM:-plain}
64 | KAFKA_USERNAME: ${KAFKA_USERNAME:-admin}
65 | KAFKA_PASSWORD: ${KAFKA_PASSWORD:-admin-secret}
66 |
--------------------------------------------------------------------------------
/antennas-kafka/helper/.dockerignore:
--------------------------------------------------------------------------------
1 | node_modules
2 | npm-debug.log
3 |
--------------------------------------------------------------------------------
/antennas-kafka/helper/.eslintrc.json:
--------------------------------------------------------------------------------
1 | {
2 | "env": {
3 | "browser": true,
4 | "es2021": true
5 | },
6 | "extends": ["eslint:recommended", "plugin:@typescript-eslint/recommended"],
7 | "parser": "@typescript-eslint/parser",
8 | "parserOptions": {
9 | "ecmaVersion": "latest",
10 | "sourceType": "module"
11 | },
12 | "plugins": ["@typescript-eslint"],
13 | "rules": {}
14 | }
15 |
--------------------------------------------------------------------------------
/antennas-kafka/helper/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM node:16
2 |
3 | # Create app directory
4 | WORKDIR /usr/src/app
5 |
6 | RUN apt-get update && apt-get install --no-install-recommends -y wget \
7 | && apt-get clean \
8 | && rm -rf /var/lib/apt/lists/*
9 |
10 | ENV DOCKERIZE_VERSION v0.6.1
11 | RUN wget --progress=dot:giga https://github.com/jwilder/dockerize/releases/download/$DOCKERIZE_VERSION/dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz \
12 | && tar -C /usr/local/bin -xzvf dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz \
13 | && rm dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz
14 |
15 | # Install app dependencies
16 | # A wildcard is used to ensure both package.json AND package-lock.json are copied
17 | # where available (npm@5+)
18 | COPY package*.json ./
19 |
20 | RUN npm install
21 |
22 | # Bundle app source
23 | COPY . .
24 |
25 | EXPOSE 4000
26 | CMD [ "npm", "start" ]
27 |
--------------------------------------------------------------------------------
/antennas-kafka/helper/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "backend",
3 | "version": "1.0.0",
4 | "description": "",
5 | "main": "dist/app.js",
6 | "scripts": {
7 | "start": "tsc && node dist/app.js",
8 | "lint": "eslint . --ext .ts",
9 | "test": "echo \"Error: no test specified\" && exit 1"
10 | },
11 | "keywords": [],
12 | "author": "",
13 | "license": "ISC",
14 | "devDependencies": {
15 | "@typescript-eslint/eslint-plugin": "^5.10.1",
16 | "@typescript-eslint/parser": "^5.10.1",
17 | "eslint": "^8.7.0",
18 | "typescript": "^4.5.5"
19 | },
20 | "dependencies": {
21 | "@types/pg": "^8.6.4",
22 | "kafkajs": "^1.16.0",
23 | "pg": "^8.7.1"
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/antennas-kafka/helper/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "module": "commonjs",
4 | "esModuleInterop": true,
5 | "target": "es6",
6 | "moduleResolution": "node",
7 | "sourceMap": true,
8 | "outDir": "dist",
9 | "lib": ["esnext.asynciterable"]
10 | },
11 | "lib": ["es2015"]
12 | }
13 |
--------------------------------------------------------------------------------
/antennas-kafka/microservice/.dockerignore:
--------------------------------------------------------------------------------
1 | node_modules
2 | npm-debug.log
3 |
--------------------------------------------------------------------------------
/antennas-kafka/microservice/.eslintrc.json:
--------------------------------------------------------------------------------
1 | {
2 | "env": {
3 | "browser": true,
4 | "es2021": true
5 | },
6 | "extends": ["eslint:recommended", "plugin:@typescript-eslint/recommended"],
7 | "parser": "@typescript-eslint/parser",
8 | "parserOptions": {
9 | "ecmaVersion": "latest",
10 | "sourceType": "module"
11 | },
12 | "plugins": ["@typescript-eslint"],
13 | "rules": {}
14 | }
15 |
--------------------------------------------------------------------------------
/antennas-kafka/microservice/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM node:16
2 |
3 | # Create app directory
4 | WORKDIR /usr/src/app
5 |
6 | RUN apt-get update && apt-get install --no-install-recommends -y wget \
7 | && apt-get clean \
8 | && rm -rf /var/lib/apt/lists/*
9 |
10 | ENV DOCKERIZE_VERSION v0.6.1
11 | RUN wget --progress=dot:giga https://github.com/jwilder/dockerize/releases/download/$DOCKERIZE_VERSION/dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz \
12 | && tar -C /usr/local/bin -xzvf dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz \
13 | && rm dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz
14 |
15 | # Install app dependencies
16 | # A wildcard is used to ensure both package.json AND package-lock.json are copied
17 | # where available (npm@5+)
18 | COPY package*.json ./
19 |
20 | RUN npm install
21 |
22 | # Bundle app source
23 | COPY . .
24 |
25 | CMD [ "npm", "start" ]
26 |
--------------------------------------------------------------------------------
/antennas-kafka/microservice/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "microservice",
3 | "version": "1.0.0",
4 | "description": "",
5 | "main": "dist/app.js",
6 | "scripts": {
7 | "start": "tsc && node dist/app.js",
8 | "lint": "eslint . --ext .ts",
9 | "test": "echo \"Error: no test specified\" && exit 1"
10 | },
11 | "keywords": [],
12 | "author": "",
13 | "license": "ISC",
14 | "devDependencies": {
15 | "@typescript-eslint/eslint-plugin": "^5.10.1",
16 | "@typescript-eslint/parser": "^5.10.1",
17 | "eslint": "^8.7.0",
18 | "typescript": "^4.5.5"
19 | },
20 | "dependencies": {
21 | "@apollo/client": "^3.5.9",
22 | "@types/pg": "^8.6.4",
23 | "@types/uuid": "^8.3.4",
24 | "apollo-link-http": "^1.5.17",
25 | "dom": "^0.0.3",
26 | "graphql": "^15.8.0",
27 | "graphql-ws": "^5.5.5",
28 | "kafkajs": "^1.16.0",
29 | "node-fetch": "^2.6.7",
30 | "pg": "^8.7.1",
31 | "uuid": "^8.3.2",
32 | "ws": "^8.5.0"
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/antennas-kafka/microservice/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "module": "commonjs",
4 | "esModuleInterop": true,
5 | "target": "es6",
6 | "moduleResolution": "node",
7 | "sourceMap": true,
8 | "outDir": "dist",
9 | "lib": ["esnext.asynciterable", "dom"]
10 | },
11 | "lib": ["es2015"]
12 | }
13 |
--------------------------------------------------------------------------------
/antennas-postgres/.env.example:
--------------------------------------------------------------------------------
1 | # Materialize Cloud Details
2 | MZ_HOST=
3 | MZ_PORT=6875
4 | MZ_USER=
5 | MZ_PASSWORD=
6 | MZ_DATABASE=materialize
7 |
8 | # Postgres details
9 | POSTGRES_HOST=
10 | POSTGRES_PASSWORD=pg_password
--------------------------------------------------------------------------------
/antennas-postgres/.gitignore:
--------------------------------------------------------------------------------
1 | .env
--------------------------------------------------------------------------------
/antennas-postgres/Architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MaterializeInc/demos/a8ecb5a24b09c53153942612c80f713b8f909335/antennas-postgres/Architecture.png
--------------------------------------------------------------------------------
/antennas-postgres/backend/.dockerignore:
--------------------------------------------------------------------------------
1 | node_modules
2 | npm-debug.log
3 |
--------------------------------------------------------------------------------
/antennas-postgres/backend/.eslintrc.json:
--------------------------------------------------------------------------------
1 | {
2 | "env": {
3 | "browser": true,
4 | "es2021": true
5 | },
6 | "extends": ["eslint:recommended", "plugin:@typescript-eslint/recommended"],
7 | "parser": "@typescript-eslint/parser",
8 | "parserOptions": {
9 | "ecmaVersion": "latest",
10 | "sourceType": "module"
11 | },
12 | "plugins": ["@typescript-eslint"],
13 | "rules": {}
14 | }
15 |
--------------------------------------------------------------------------------
/antennas-postgres/backend/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM node:16
2 |
3 | # Create app directory
4 | WORKDIR /usr/src/app
5 |
6 | # Install app dependencies
7 | # A wildcard is used to ensure both package.json AND package-lock.json are copied
8 | # where available (npm@5+)
9 | COPY package*.json ./
10 |
11 | RUN npm install
12 |
13 | # Bundle app source
14 | COPY . .
15 |
16 | EXPOSE 4000
17 | CMD [ "npm", "start" ]
18 |
--------------------------------------------------------------------------------
/antennas-postgres/backend/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "backend",
3 | "version": "1.0.0",
4 | "description": "",
5 | "main": "dist/app.js",
6 | "scripts": {
7 | "start": "tsc && node dist/app.js",
8 | "lint": "eslint . --ext .ts",
9 | "test": "echo \"Error: no test specified\" && exit 1"
10 | },
11 | "keywords": [],
12 | "author": "",
13 | "license": "ISC",
14 | "devDependencies": {
15 | "@typescript-eslint/eslint-plugin": "^5.10.1",
16 | "@typescript-eslint/parser": "^5.10.1",
17 | "eslint": "^8.7.0",
18 | "typescript": "^4.5.5"
19 | },
20 | "dependencies": {
21 | "@types/pg": "^8.6.4",
22 | "apollo-server-express": "^3.6.2",
23 | "express": "^4.17.3",
24 | "graphql": "^16.3.0",
25 | "graphql-ws": "^5.5.5",
26 | "pg": "^8.7.3",
27 | "ws": "^8.4.2"
28 | }
29 | }
30 |
--------------------------------------------------------------------------------
/antennas-postgres/backend/src/MaterializeClient/SubscribeStream/index.ts:
--------------------------------------------------------------------------------
1 | import {Readable} from 'stream';
2 | import {Client} from 'pg';
3 |
4 | /**
5 | * Thanks to Petros Angelatos
6 | * https://gist.github.com/petrosagg/804e5f009dee1cb8af688654ba396258
7 | * This class reads from a cursor in PostgreSQL
8 | */
9 | export default class SubscribeStream extends Readable {
10 | client: Client;
11 |
12 | cursorId: string;
13 |
14 | pendingReads: number;
15 |
16 | currentRows: Array;
17 |
18 | BreakException = {};
19 |
20 | intervalId: NodeJS.Timer;
21 |
22 | runningQuery: boolean;
23 |
24 | constructor(client: Client, cursorId: string) {
25 | super({
26 | highWaterMark: 1000,
27 | objectMode: true,
28 | });
29 | this.client = client;
30 | this.cursorId = cursorId;
31 | this.pendingReads = 0;
32 | this.runningQuery = false;
33 | }
34 |
35 | /**
36 | * Readable method to fetch subscribe data
37 | * @param n
38 | */
39 | _read(n: number): void {
40 | if (this.pendingReads <= 0) {
41 | this.client
42 | .query(`FETCH ${n} ${this.cursorId} WITH (TIMEOUT='1s');`)
43 | .then(({rows, rowCount}) => {
44 | if (rowCount === 0) {
45 | console.log('Empty results from subscribe. Staring interval read.');
46 | /**
47 | * Wait for data from the subscribe
48 | */
49 | this.intervalId = setInterval(() => this.intervalRead(n), 500);
50 | } else {
51 | /**
52 | * Process data
53 | */
54 | this.process(rows);
55 | }
56 | })
57 | .catch(this.catchClientErr);
58 | } else {
59 | /**
60 | * Process any additional rows
61 | */
62 | this.currentRows = this.currentRows.slice(
63 | this.currentRows.length - this.pendingReads,
64 | this.currentRows.length
65 | );
66 | try {
67 | this.currentRows.forEach((row) => {
68 | this.pendingReads -= 1;
69 | const backPressure = !this.push(row);
70 | if (backPressure) {
71 | throw this.BreakException;
72 | }
73 | });
74 | } catch (e) {
75 | if (e !== this.BreakException) throw e;
76 | }
77 | }
78 | }
79 |
80 | /**
81 | * Capture any error while fetching subscribe results
82 | * @param clientReasonErr
83 | */
84 | catchClientErr(clientReasonErr: any) {
85 | console.error('Error querying this cursor.');
86 | console.error(clientReasonErr);
87 |
88 | if (this.intervalId) {
89 | clearInterval(this.intervalId);
90 | }
91 |
92 | this.destroy(clientReasonErr);
93 | }
94 |
95 | /**
96 | * Process and push rows
97 | * @param rows
98 | */
99 | process(rows: Array): void {
100 | try {
101 | rows.forEach((row) => {
102 | this.pendingReads -= 1;
103 | const backPressure = !this.push(row);
104 | if (backPressure) {
105 | console.log('Oops. Backpressure.');
106 | throw this.BreakException;
107 | }
108 | });
109 | } catch (e) {
110 | if (e !== this.BreakException) throw e;
111 | }
112 | }
113 |
114 | /**
115 | * Interval fetching used when there are no results from the subscribe
116 | * Rather than pausing and waiting for results
117 | * Run a subscribe fetch every 500ms.
118 | * This is needed because if there is no update from the subscribe the pipe will close.
119 | * Another alternative is to send dummy data but this could end up filtering data all the time.
120 | * Another alternative is to push whenever is available rather than "poll" but how backpressure is handled?
121 | * @param n
122 | */
123 | intervalRead(n: number): void {
124 | if (this.runningQuery === false) {
125 | if (this.destroyed) {
126 | clearInterval(this.intervalId);
127 | return;
128 | }
129 |
130 | this.runningQuery = true;
131 | this.client
132 | .query(`FETCH ${n} ${this.cursorId} WITH (TIMEOUT='1s');`)
133 | .then(({rows, rowCount}) => {
134 | if (rowCount > 0) {
135 | this.process(rows);
136 | clearInterval(this.intervalId);
137 | console.log('New results from the subscribe. Finishing interval read.');
138 | } else {
139 | console.log('Nothing from interval read.');
140 | }
141 | })
142 | .catch(this.catchClientErr)
143 | .finally(() => {
144 | this.runningQuery = false;
145 | });
146 | }
147 | }
148 | }
--------------------------------------------------------------------------------
/antennas-postgres/backend/src/MaterializeClient/TransformStream/index.ts:
--------------------------------------------------------------------------------
1 | import {Transform} from 'stream';
2 |
3 | interface Antenna {
4 | antenna_id: string;
5 | geojson: string;
6 | performance: number;
7 | }
8 |
9 | /**
10 | * This class creates a batch of chunks. In this way every chunk is not a row but an array of rows.
11 | * This will improve the performance of the writing.
12 | * A timeout is needed in case the batch length is lower than the highwatermark for a long period of time.
13 | */
14 | export default class TransformStream extends Transform {
15 | batch = new Array();
16 |
17 | size: number;
18 |
19 | constructor() {
20 | super({
21 | highWaterMark: 100,
22 | objectMode: true,
23 | });
24 |
25 | this.cleanBatch();
26 | }
27 |
28 | cleanBatch() {
29 | this.batch = new Array();
30 | }
31 |
32 | _transform(row: any, encoding: string, callback: () => void) {
33 | const {mz_progressed: mzProgressed} = row;
34 |
35 | if (mzProgressed) {
36 | this.push(this.batch);
37 | this.cleanBatch();
38 | } else {
39 | this.batch.push(row);
40 | }
41 | callback();
42 | }
43 |
44 | _flush(callback: () => void) {
45 | if (this.batch.length) {
46 | this.push(this.batch);
47 | this.cleanBatch();
48 | }
49 | callback();
50 | }
51 | }
52 |
--------------------------------------------------------------------------------
/antennas-postgres/backend/src/MaterializeClient/WriteStream/index.ts:
--------------------------------------------------------------------------------
1 | import {Writable} from 'stream';
2 |
3 | /**
4 | * This class is in charge of writing every chunk (array of rows)
5 | * into a redis instance to send to all the users.
6 | */
7 | export default class WriteStream extends Writable {
8 | listener: (results: Array) => void;
9 |
10 | constructor(listener: (results: Array) => void) {
11 | super({
12 | highWaterMark: 1000,
13 | objectMode: true,
14 | });
15 |
16 | this.listener = listener;
17 | }
18 |
19 | _write(rows: Array, encoding: BufferEncoding, callback: (error?: Error) => void): void {
20 | if (rows && rows.length > 0) {
21 | this.listener(rows);
22 | }
23 |
24 | callback();
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/antennas-postgres/backend/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "module": "commonjs",
4 | "esModuleInterop": true,
5 | "target": "es6",
6 | "moduleResolution": "node",
7 | "sourceMap": true,
8 | "outDir": "dist",
    "lib": ["esnext.asynciterable", "es2015"]
  }
12 | }
13 |
--------------------------------------------------------------------------------
/antennas-postgres/compose.yaml:
--------------------------------------------------------------------------------
1 | services:
2 | postgres:
3 | container_name: postgres
4 | build:
5 | context: ./postgres
6 | init: true
7 | ports:
8 | - 5432:5432
9 | restart: always
10 | environment:
11 | POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-pg_password}
12 | POSTGRES_HOST: ${POSTGRES_HOST:-postgres}
13 | command:
14 | - postgres
15 | - -c
16 | - wal_level=logical
17 | helper:
18 | container_name: helper
19 | build:
20 | context: ./helper
21 | environment:
22 | AUTOSETUP: ${AUTOSETUP}
23 | MZ_HOST: ${MZ_HOST:-materialized}
24 | MZ_PORT: ${MZ_PORT:-6875}
25 | MZ_USER: ${MZ_USER:-materialize}
26 | MZ_PASSWORD: ${MZ_PASSWORD:-materialize}
27 | MZ_DATABASE: ${MZ_DATABASE:-materialize}
28 | POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-pg_password}
29 | POSTGRES_HOST: ${POSTGRES_HOST:-postgres}
30 | init: true
31 | backend:
32 | container_name: backend
33 | build:
34 | context: ./backend
35 | init: true
36 | environment:
37 | MZ_HOST: ${MZ_HOST:-materialized}
38 | MZ_PORT: ${MZ_PORT:-6875}
39 | MZ_USER: ${MZ_USER:-materialize}
40 | MZ_PASSWORD: ${MZ_PASSWORD:-materialize}
41 | MZ_DATABASE: ${MZ_DATABASE:-materialize}
42 | POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-pg_password}
43 | POSTGRES_HOST: ${POSTGRES_HOST:-postgres}
44 | ports:
45 | - 4000:4000
46 | depends_on:
47 | - postgres
48 | - helper
49 | frontend:
50 | container_name: frontend
51 | build:
52 | context: ./frontend
53 | init: true
54 | ports:
55 | - 3000:3000
56 | depends_on:
57 | - backend
58 | microservice:
59 | container_name: microservice
60 | build:
61 | context: ./microservice
62 | environment:
63 | MZ_HOST: ${MZ_HOST:-materialized}
64 | MZ_PORT: ${MZ_PORT:-6875}
65 | MZ_USER: ${MZ_USER:-materialize}
66 | MZ_PASSWORD: ${MZ_PASSWORD:-materialize}
67 | MZ_DATABASE: ${MZ_DATABASE:-materialize}
68 | POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-pg_password}
69 | POSTGRES_HOST: ${POSTGRES_HOST:-postgres}
70 | init: true
71 | depends_on:
72 | - backend
73 |
--------------------------------------------------------------------------------
/antennas-postgres/frontend/.dockerignore:
--------------------------------------------------------------------------------
1 | node_modules
2 | npm-debug.log
3 |
--------------------------------------------------------------------------------
/antennas-postgres/frontend/.gitignore:
--------------------------------------------------------------------------------
1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
2 |
3 | # dependencies
4 | /node_modules
5 | /.pnp
6 | .pnp.js
7 |
8 | # testing
9 | /coverage
10 |
11 | # production
12 | /build
13 |
14 | # misc
15 | .DS_Store
16 | .env.local
17 | .env.development.local
18 | .env.test.local
19 | .env.production.local
20 |
21 | npm-debug.log*
22 | yarn-debug.log*
23 | yarn-error.log*
24 |
--------------------------------------------------------------------------------
/antennas-postgres/frontend/Dockerfile:
--------------------------------------------------------------------------------
1 | # Specify a base image
2 | FROM node:16
3 |
4 | # Create working directory and copy the app before running yarn install as the artifactory
5 | # credentials can be inside .npmrc
6 | WORKDIR /usr/src/app
7 | COPY . ./
8 |
9 | # Run yarn install - Clean cache - Build the project - Install serve command for yarn package manager
10 | RUN yarn install && yarn cache clean && yarn build && yarn global add serve
11 |
12 | # Start the application
13 | CMD serve -p 3000 ./build
14 |
--------------------------------------------------------------------------------
/antennas-postgres/frontend/README.md:
--------------------------------------------------------------------------------
1 | This project was bootstrapped with
2 | [Create React App](https://github.com/facebook/create-react-app).
3 |
4 | ## Available Scripts
5 |
6 | In the project directory, you can run:
7 |
8 | ### `yarn start`
9 |
10 | Runs the app in the development mode.
Open
11 | [http://localhost:3000](http://localhost:3000) to view it in the browser.
12 |
13 | The page will reload if you make edits.
You will also see any lint errors
14 | in the console.
15 |
16 | ### `yarn test`
17 |
18 | Launches the test runner in the interactive watch mode.
See the section
19 | about
20 | [running tests](https://facebook.github.io/create-react-app/docs/running-tests)
21 | for more information.
22 |
23 | ### `yarn build`
24 |
25 | Builds the app for production to the `build` folder.
It correctly bundles
26 | React in production mode and optimizes the build for the best performance.
27 |
28 | The build is minified and the filenames include the hashes.
Your app is
29 | ready to be deployed!
30 |
31 | See the section about
32 | [deployment](https://facebook.github.io/create-react-app/docs/deployment) for
33 | more information.
34 |
35 | ### `yarn eject`
36 |
37 | **Note: this is a one-way operation. Once you `eject`, you can’t go back!**
38 |
39 | If you aren’t satisfied with the build tool and configuration choices, you can
40 | `eject` at any time. This command will remove the single build dependency from
41 | your project.
42 |
43 | Instead, it will copy all the configuration files and the transitive
44 | dependencies (webpack, Babel, ESLint, etc) right into your project so you have
45 | full control over them. All of the commands except `eject` will still work, but
46 | they will point to the copied scripts so you can tweak them. At this point
47 | you’re on your own.
48 |
49 | You don’t have to ever use `eject`. The curated feature set is suitable for
50 | small and middle deployments, and you shouldn’t feel obligated to use this
51 | feature. However we understand that this tool wouldn’t be useful if you couldn’t
52 | customize it when you are ready for it.
53 |
54 | ## Learn More
55 |
56 | You can learn more in the
57 | [Create React App documentation](https://facebook.github.io/create-react-app/docs/getting-started).
58 |
59 | To learn React, check out the [React documentation](https://reactjs.org/).
60 |
--------------------------------------------------------------------------------
/antennas-postgres/frontend/craco.config.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | babel: {
3 | loaderOptions: {
4 | ignore: ['./node_modules/mapbox-gl/dist/mapbox-gl.js'],
5 | },
6 | },
7 | };
8 |
--------------------------------------------------------------------------------
/antennas-postgres/frontend/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "frontend",
3 | "version": "0.1.0",
4 | "private": true,
5 | "dependencies": {
6 | "@apollo/client": "^3.5.8",
7 | "@chakra-ui/react": "^1.7.4",
8 | "@craco/craco": "^6.4.3",
9 | "@emotion/react": "^11.0.0",
10 | "@emotion/styled": "^11.0.0",
11 | "@testing-library/jest-dom": "^5.9.0",
12 | "@testing-library/react": "^10.2.1",
13 | "@testing-library/user-event": "^12.0.2",
14 | "@types/jest": "^25.0.0",
15 | "@types/node": "^12.0.0",
16 | "@types/react": "^16.9.0",
17 | "@types/react-dom": "^16.9.0",
18 | "framer-motion": "^4.0.0",
19 | "graphql": "^16.2.0",
20 | "graphql-ws": "^5.5.5",
21 | "mapbox-gl": "^2.7.0",
22 | "react": "^17.0.2",
23 | "react-dom": "^17.0.2",
24 | "react-icons": "^3.0.0",
25 | "react-map-gl": "^6.1.19",
26 | "react-scripts": "5.0.0",
27 | "typescript": "^4.3.5",
28 | "web-vitals": "^0.2.2"
29 | },
30 | "scripts": {
31 | "start": "craco start",
32 | "build": "craco build",
33 | "test": "craco test"
34 | },
35 | "eslintConfig": {
36 | "extends": "react-app"
37 | },
38 | "browserslist": {
39 | "production": [
40 | ">0.2%",
41 | "not dead",
42 | "not op_mini all"
43 | ],
44 | "development": [
45 | "last 1 chrome version",
46 | "last 1 firefox version",
47 | "last 1 safari version"
48 | ]
49 | }
50 | }
51 |
--------------------------------------------------------------------------------
/antennas-postgres/frontend/public/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MaterializeInc/demos/a8ecb5a24b09c53153942612c80f713b8f909335/antennas-postgres/frontend/public/favicon.ico
--------------------------------------------------------------------------------
/antennas-postgres/frontend/public/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
15 |
16 |
20 |
21 |
30 | Antennas Performance
31 |
32 |
33 |
34 |
35 |
45 |
46 |
47 |
--------------------------------------------------------------------------------
/antennas-postgres/frontend/public/logo192.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MaterializeInc/demos/a8ecb5a24b09c53153942612c80f713b8f909335/antennas-postgres/frontend/public/logo192.png
--------------------------------------------------------------------------------
/antennas-postgres/frontend/public/logo512.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MaterializeInc/demos/a8ecb5a24b09c53153942612c80f713b8f909335/antennas-postgres/frontend/public/logo512.png
--------------------------------------------------------------------------------
/antennas-postgres/frontend/public/manifest.json:
--------------------------------------------------------------------------------
1 | {
2 | "short_name": "React App",
3 | "name": "Create React App Sample",
4 | "icons": [
5 | {
6 | "src": "favicon.ico",
7 | "sizes": "64x64 32x32 24x24 16x16",
8 | "type": "image/x-icon"
9 | },
10 | {
11 | "src": "logo192.png",
12 | "type": "image/png",
13 | "sizes": "192x192"
14 | },
15 | {
16 | "src": "logo512.png",
17 | "type": "image/png",
18 | "sizes": "512x512"
19 | }
20 | ],
21 | "start_url": ".",
22 | "display": "standalone",
23 | "theme_color": "#000000",
24 | "background_color": "#ffffff"
25 | }
26 |
--------------------------------------------------------------------------------
/antennas-postgres/frontend/public/robots.txt:
--------------------------------------------------------------------------------
1 | # https://www.robotstxt.org/robotstxt.html
2 | User-agent: *
3 | Disallow:
4 |
--------------------------------------------------------------------------------
/antennas-postgres/frontend/src/App.test.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import {screen} from '@testing-library/react';
3 | import {render} from './test-utils';
4 | import {App} from './App';
5 |
// Smoke test: render the full app and assert the expected text is present.
// (The `<App />` argument was missing, so the test rendered nothing while
// still importing App.)
test('renders learn react link', () => {
  render(<App />);
  const linkElement = screen.getByText(/learn chakra/i);
  expect(linkElement).toBeInTheDocument();
});
11 |
--------------------------------------------------------------------------------
/antennas-postgres/frontend/src/App.tsx:
--------------------------------------------------------------------------------
1 | import * as React from 'react';
2 | import {ChakraProvider, Box, Text} from '@chakra-ui/react';
3 |
4 | import {ApolloClient, InMemoryCache, ApolloProvider} from '@apollo/client';
5 | import AntennasMap from './components/AntennasMap';
6 | import link from './link';
7 | import theme from './theme';
8 |
9 | const client = new ApolloClient({
10 | uri: 'backend:4000/graphql',
11 | cache: new InMemoryCache(),
12 | link,
13 | });
14 |
15 | export const App = () => (
16 |
17 |
18 |
25 |
26 | 🗽 Manhattan 5G Antennas Performance
27 |
28 |
29 |
30 |
31 |
32 | );
33 |
--------------------------------------------------------------------------------
/antennas-postgres/frontend/src/ColorModeSwitcher.tsx:
--------------------------------------------------------------------------------
1 | import * as React from 'react';
2 | import {useColorMode, useColorModeValue, IconButton, IconButtonProps} from '@chakra-ui/react';
3 | import {FaMoon, FaSun} from 'react-icons/fa';
4 |
5 | type ColorModeSwitcherProps = Omit;
6 |
7 | export const ColorModeSwitcher: React.FC = (props) => {
8 | const {toggleColorMode} = useColorMode();
9 | const text = useColorModeValue('dark', 'light');
10 | const SwitchIcon = useColorModeValue(FaMoon, FaSun);
11 |
12 | return (
13 | }
21 | aria-label={`Switch to ${text} mode`}
22 | {...props}
23 | />
24 | );
25 | };
26 |
--------------------------------------------------------------------------------
/antennas-postgres/frontend/src/Logo.tsx:
--------------------------------------------------------------------------------
1 | import * as React from 'react';
2 | import {chakra, keyframes, ImageProps, forwardRef, usePrefersReducedMotion} from '@chakra-ui/react';
3 | import logo from './logo.svg';
4 |
5 | const spin = keyframes`
6 | from { transform: rotate(0deg); }
7 | to { transform: rotate(360deg); }
8 | `;
9 |
10 | export const Logo = forwardRef((props, ref) => {
11 | const prefersReducedMotion = usePrefersReducedMotion();
12 |
13 | const animation = prefersReducedMotion ? undefined : `${spin} infinite 20s linear`;
14 |
15 | return ;
16 | });
17 |
--------------------------------------------------------------------------------
/antennas-postgres/frontend/src/components/AntennasMap/ButtonSelection/index.js:
--------------------------------------------------------------------------------
/**
 * Attach a layer-toggle menu to the map: once the antenna layers exist,
 * create one anchor per layer inside #menu that shows/hides that layer.
 */
export const buildButtonSelection = (mapBox) => {
  mapBox.on('idle', () => {
    // If these two layers were not added to the map, abort
    const layersReady =
      mapBox.getLayer('healthy-antennas-layer') && mapBox.getLayer('unhealthy-antennas-layer');
    if (!layersReady) {
      return;
    }

    // Enumerate ids of the layers.
    ['healthy-antennas-layer', 'unhealthy-antennas-layer'].forEach((layerId) => {
      // Skip layers that already have a button set up.
      if (document.getElementById(layerId)) {
        return;
      }

      // Create a link.
      const link = document.createElement('a');
      link.id = layerId;
      link.href = '#';
      link.textContent = layerId;
      link.className = 'active';

      // Show or hide layer when the toggle is clicked.
      link.onclick = function (e) {
        e.preventDefault();
        e.stopPropagation();
        const clickedLayer = this.textContent;

        const visibility = mapBox.getLayoutProperty(clickedLayer, 'visibility');

        // Toggle layer visibility by changing the layout object's visibility property.
        if (visibility === 'visible') {
          mapBox.setLayoutProperty(clickedLayer, 'visibility', 'none');
          this.className = '';
        } else {
          this.className = 'active';
          mapBox.setLayoutProperty(clickedLayer, 'visibility', 'visible');
        }
      };

      document.getElementById('menu').appendChild(link);
    });
  });
};
52 |
--------------------------------------------------------------------------------
/antennas-postgres/frontend/src/components/AntennasMap/DotClone/index.js:
--------------------------------------------------------------------------------
/**
 * Build a Mapbox custom image that renders an animated pulsing dot.
 * @param map  map instance, repainted every frame while the dot animates
 * @param color  "r, g, b" string interpolated into rgba() fills
 * @param size  square icon size in pixels
 */
export const buildPulsingDot = (map, color, size) => ({
  width: size,
  height: size,
  data: new Uint8Array(size * size * 4),

  // get rendering context for the map canvas when layer is added to the map
  onAdd() {
    const canvas = document.createElement('canvas');
    canvas.width = this.width;
    canvas.height = this.height;
    this.context = canvas.getContext('2d');
  },

  // called once before every frame where the icon will be used
  render() {
    const duration = 1750;
    const t = (performance.now() % duration) / duration;

    const innerRadius = (size / 2) * 0.3;
    const outerRadius = (size / 2) * 0.7 * t + innerRadius;
    const ctx = this.context;

    // draw outer circle
    ctx.clearRect(0, 0, this.width, this.height);
    ctx.beginPath();
    ctx.arc(this.width / 2, this.height / 2, outerRadius, 0, Math.PI * 2);
    ctx.fillStyle = `rgba(${color}, ${1 - t})`;
    ctx.fill();

    // draw inner circle
    ctx.beginPath();
    ctx.arc(this.width / 2, this.height / 2, innerRadius / 3, 0, Math.PI * 2);
    ctx.fillStyle = `rgba(${color}, 1)`;
    ctx.fill();

    // update this image's data with data from the canvas
    this.data = ctx.getImageData(0, 0, this.width, this.height).data;

    // continuously repaint the map, resulting in the smooth animation of the dot
    map.triggerRepaint();

    // return `true` to let the map know that the image was updated
    return true;
  },
});
48 |
--------------------------------------------------------------------------------
/antennas-postgres/frontend/src/index.tsx:
--------------------------------------------------------------------------------
1 | import {ColorModeScript} from '@chakra-ui/react';
2 | import * as React from 'react';
3 | import ReactDOM from 'react-dom';
4 | import {App} from './App';
5 | import reportWebVitals from './reportWebVitals';
6 | import * as serviceWorker from './serviceWorker';
7 |
8 | ReactDOM.render(
9 |
10 |
11 |
12 | ,
13 | document.getElementById('root')
14 | );
15 |
16 | // If you want your app to work offline and load faster, you can change
17 | // unregister() to register() below. Note this comes with some pitfalls.
18 | // Learn more about service workers: https://cra.link/PWA
19 | serviceWorker.unregister();
20 |
21 | // If you want to start measuring performance in your app, pass a function
22 | // to log results (for example: reportWebVitals(console.log))
23 | // or send to an analytics endpoint. Learn more: https://bit.ly/CRA-vitals
24 | reportWebVitals();
25 |
--------------------------------------------------------------------------------
/antennas-postgres/frontend/src/link.ts:
--------------------------------------------------------------------------------
1 | // for Apollo Client v3:
2 | import {ApolloLink, Operation, FetchResult, Observable} from '@apollo/client/core';
3 |
4 | import {print} from 'graphql';
5 | import {createClient, ClientOptions, Client} from 'graphql-ws';
6 |
7 | class WebSocketLink extends ApolloLink {
8 | private client: Client;
9 |
10 | constructor(options: ClientOptions) {
11 | super();
12 | this.client = createClient(options);
13 | }
14 |
15 | public request(operation: Operation): Observable {
16 | return new Observable((sink) => {
17 | return this.client.subscribe(
18 | {...operation, query: print(operation.query)},
19 | {
20 | next: sink.next.bind(sink) as any,
21 | complete: sink.complete.bind(sink),
22 | error: sink.error.bind(sink),
23 | }
24 | );
25 | });
26 | }
27 | }
28 |
29 | export default new WebSocketLink({
30 | url: `ws://${window.location.hostname}:4000/graphql`,
31 | });
32 |
--------------------------------------------------------------------------------
/antennas-postgres/frontend/src/logo.svg:
--------------------------------------------------------------------------------
1 |
11 |
--------------------------------------------------------------------------------
/antennas-postgres/frontend/src/react-app-env.d.ts:
--------------------------------------------------------------------------------
1 | ///
2 |
--------------------------------------------------------------------------------
/antennas-postgres/frontend/src/reportWebVitals.ts:
--------------------------------------------------------------------------------
1 | import {ReportHandler} from 'web-vitals';
2 |
3 | const reportWebVitals = (onPerfEntry?: ReportHandler) => {
4 | if (onPerfEntry && onPerfEntry instanceof Function) {
5 | import('web-vitals').then(({getCLS, getFID, getFCP, getLCP, getTTFB}) => {
6 | getCLS(onPerfEntry);
7 | getFID(onPerfEntry);
8 | getFCP(onPerfEntry);
9 | getLCP(onPerfEntry);
10 | getTTFB(onPerfEntry);
11 | });
12 | }
13 | };
14 |
15 | export default reportWebVitals;
16 |
--------------------------------------------------------------------------------
/antennas-postgres/frontend/src/setupTests.ts:
--------------------------------------------------------------------------------
1 | // jest-dom adds custom jest matchers for asserting on DOM nodes.
2 | // allows you to do things like:
3 | // expect(element).toHaveTextContent(/react/i)
4 | // learn more: https://github.com/testing-library/jest-dom
5 | import '@testing-library/jest-dom';
6 |
--------------------------------------------------------------------------------
/antennas-postgres/frontend/src/test-utils.tsx:
--------------------------------------------------------------------------------
1 | import * as React from 'react';
2 | import {render, RenderOptions} from '@testing-library/react';
3 | import {ChakraProvider, theme} from '@chakra-ui/react';
4 |
5 | const AllProviders = ({children}: {children?: React.ReactNode}) => (
6 | {children}
7 | );
8 |
9 | const customRender = (ui: React.ReactElement, options?: RenderOptions) =>
10 | render(ui, {wrapper: AllProviders, ...options});
11 |
12 | export {customRender as render};
13 |
--------------------------------------------------------------------------------
/antennas-postgres/frontend/src/theme.ts:
--------------------------------------------------------------------------------
1 | // 1. import `extendTheme` function
2 | import {extendTheme, ThemeConfig} from '@chakra-ui/react';
3 |
4 | // 2. Add your color mode config
5 | const config: ThemeConfig = {
6 | initialColorMode: 'dark',
7 | useSystemColorMode: false,
8 | };
9 |
10 | // 3. extend the theme
11 | const theme = extendTheme({config});
12 |
13 | export default theme;
14 |
--------------------------------------------------------------------------------
/antennas-postgres/frontend/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "target": "es5",
4 | "lib": ["dom", "dom.iterable", "esnext"],
5 | "allowJs": true,
6 | "skipLibCheck": true,
7 | "esModuleInterop": true,
8 | "allowSyntheticDefaultImports": true,
9 | "strict": true,
10 | "forceConsistentCasingInFileNames": true,
11 | "noFallthroughCasesInSwitch": true,
12 | "module": "esnext",
13 | "moduleResolution": "node",
14 | "resolveJsonModule": true,
15 | "isolatedModules": true,
16 | "noEmit": true,
17 | "jsx": "react-jsx"
18 | },
19 | "include": ["src"]
20 | }
21 |
--------------------------------------------------------------------------------
/antennas-postgres/frontend/webpack.config.js:
--------------------------------------------------------------------------------
1 | // webpack.config.js
2 | module.export = {
3 | // ...
4 | resolve: {
5 | alias: {
6 | 'mapbox-gl': 'maplibre-gl',
7 | },
8 | },
9 | };
10 |
--------------------------------------------------------------------------------
/antennas-postgres/helper/.dockerignore:
--------------------------------------------------------------------------------
1 | node_modules
2 | npm-debug.log
3 |
--------------------------------------------------------------------------------
/antennas-postgres/helper/.eslintrc.json:
--------------------------------------------------------------------------------
1 | {
2 | "env": {
3 | "browser": true,
4 | "es2021": true
5 | },
6 | "extends": ["eslint:recommended", "plugin:@typescript-eslint/recommended"],
7 | "parser": "@typescript-eslint/parser",
8 | "parserOptions": {
9 | "ecmaVersion": "latest",
10 | "sourceType": "module"
11 | },
12 | "plugins": ["@typescript-eslint"],
13 | "rules": {}
14 | }
15 |
--------------------------------------------------------------------------------
/antennas-postgres/helper/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM node:16
2 |
3 | # Create app directory
4 | WORKDIR /usr/src/app
5 |
6 | # Install app dependencies
7 | # A wildcard is used to ensure both package.json AND package-lock.json are copied
8 | # where available (npm@5+)
9 | COPY package*.json ./
10 |
11 | RUN npm install
12 |
13 | # Bundle app source
14 | COPY . .
15 |
16 | EXPOSE 4000
17 | CMD [ "npm", "start" ]
18 |
--------------------------------------------------------------------------------
/antennas-postgres/helper/package.json:
--------------------------------------------------------------------------------
1 | {
  "name": "helper",
3 | "version": "1.0.0",
4 | "description": "",
5 | "main": "dist/app.js",
6 | "scripts": {
7 | "start": "tsc && node dist/app.js",
8 | "lint": "eslint . --ext .ts",
9 | "test": "echo \"Error: no test specified\" && exit 1"
10 | },
11 | "keywords": [],
12 | "author": "",
13 | "license": "ISC",
14 | "devDependencies": {
15 | "@typescript-eslint/eslint-plugin": "^5.10.1",
16 | "@typescript-eslint/parser": "^5.10.1",
17 | "eslint": "^8.7.0",
18 | "typescript": "^4.5.5"
19 | },
20 | "dependencies": {
21 | "@types/pg": "^8.6.4",
22 | "pg": "^8.7.1"
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/antennas-postgres/helper/src/app.ts:
--------------------------------------------------------------------------------
1 | import {Pool} from 'pg';
2 |
/**
 * Materialize Client
 * Connection settings read from the environment, with local-dev defaults
 * matching the compose.yaml service configuration.
 */
const mzHost = process.env.MZ_HOST || 'materialized';
const mzPort = Number(process.env.MZ_PORT) || 6875;
const mzUser = process.env.MZ_USER || 'materialize';
const mzPassword = process.env.MZ_PASSWORD || 'materialize';
const mzDatabase = process.env.MZ_DATABASE || 'materialize';
11 |
12 | /**
13 | * Create Materialize sources and materialized views
14 | * Before creating the views it will check if they aren't created already.
15 | */
16 | async function setUpMaterialize() {
17 | const pool = await new Pool({
18 | host: mzHost,
19 | port: mzPort,
20 | user: mzUser,
21 | password: mzPassword,
22 | database: mzDatabase,
23 | ssl: true,
24 | });
25 | const poolClient = await pool.connect();
26 |
27 | await poolClient.query(`
28 | CREATE SECRET IF NOT EXISTS postgres_password AS 'materialize';
29 | `);
30 |
31 | await poolClient.query(`
32 | CREATE CONNECTION pg_connection TO POSTGRES (
33 | HOST '${process.env.POSTGRES_HOST || 'postgres'}',
34 | PORT 5432,
35 | USER 'materialize',
36 | PASSWORD SECRET postgres_password,
37 | DATABASE 'postgres'
38 | );
39 | `);
40 |
41 | await poolClient.query(`
42 | CREATE SOURCE IF NOT EXISTS antennas_publication_source
43 | FROM POSTGRES CONNECTION pg_connection (PUBLICATION 'antennas_publication_source')
44 | FOR ALL TABLES
45 | WITH (SIZE = '3xsmall');
46 | `);
47 |
48 | const {rowCount} = await pool.query(
49 | "SELECT * FROM mz_views WHERE name='antennas' OR name='antennas_performance';"
50 | );
51 |
52 | if (!rowCount) {
53 |
54 | await poolClient.query(`
55 | CREATE MATERIALIZED VIEW IF NOT EXISTS last_half_minute_performance_per_antenna AS
56 | SELECT A.antenna_id, A.geojson, AVG(AP.performance) as performance
57 | FROM antennas A JOIN antennas_performance AP ON (A.antenna_id = AP.antenna_id)
58 | WHERE (cast("updated_at" as timestamp) + INTERVAL '1 HOUR' ) > mz_now()
59 | GROUP BY A.antenna_id, A.geojson;
60 | `);
61 | }
62 |
63 | poolClient.release();
64 | }
65 |
66 | /**
67 | * Build a custom Postgres insert with a random performance and clients connected
68 | * @param antennaId Antenna Identifier
69 | * @returns
70 | */
71 | function buildQuery(antennaId: number) {
72 | return `
73 | INSERT INTO antennas_performance (antenna_id, clients_connected, performance, updated_at) VALUES (
74 | ${antennaId},
75 | ${Math.ceil(Math.random() * 100)},
76 | ${Math.random() * 10},
77 | now()
78 | );
79 | `;
80 | }
81 |
/**
 * Generate data to Postgres indefinitely
 */
// Postgres superuser password; the compose environment is expected to set it.
const pgPass = process.env.POSTGRES_PASSWORD || 'pg_password';
/**
 * Insert a batch of random performance rows for antennas 1-7 into Postgres
 * once per second, forever (the interval is never cleared).
 */
async function dataGenerator() {
  // NOTE(review): `new Pool` is synchronous; the `await` here is a no-op.
  const pool = await new Pool({
    host: 'postgres',
    user: 'postgres',
    password: pgPass,
  });

  const poolClient = await pool.connect();
  setInterval(() => {
    // One INSERT per antenna, joined into a single multi-statement query.
    const query = [1, 2, 3, 4, 5, 6, 7].map((antennaId) => buildQuery(antennaId)).join('\n');

    // NOTE(review): the returned promise is not awaited — insert failures are silent.
    poolClient.query(query);
  }, 1000);
}
100 |
const {AUTOSETUP} = process.env;

/**
 * If AUTOSETUP = true then run automatically the source creation, etc..
 */
// NOTE(review): only truthiness is checked, so any non-empty string
// (including "false") enables auto-setup.
if (AUTOSETUP) {
  setUpMaterialize()
    .then(() => {
      console.log('Generating data.');
      dataGenerator();
    })
    .catch((err) => {
      console.error(err);
    });
} else {
  // Without auto-setup, assume Materialize objects already exist and just generate.
  console.log('Generating data.');
  dataGenerator();
}
119 |
--------------------------------------------------------------------------------
/antennas-postgres/helper/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "module": "commonjs",
4 | "esModuleInterop": true,
5 | "target": "es6",
6 | "moduleResolution": "node",
7 | "sourceMap": true,
8 | "outDir": "dist",
9 | "lib": ["esnext.asynciterable"]
10 | },
11 | "lib": ["es2015"]
12 | }
13 |
--------------------------------------------------------------------------------
/antennas-postgres/microservice/.dockerignore:
--------------------------------------------------------------------------------
1 | node_modules
2 | npm-debug.log
3 |
--------------------------------------------------------------------------------
/antennas-postgres/microservice/.eslintrc.json:
--------------------------------------------------------------------------------
1 | {
2 | "env": {
3 | "browser": true,
4 | "es2021": true
5 | },
6 | "extends": ["eslint:recommended", "plugin:@typescript-eslint/recommended"],
7 | "parser": "@typescript-eslint/parser",
8 | "parserOptions": {
9 | "ecmaVersion": "latest",
10 | "sourceType": "module"
11 | },
12 | "plugins": ["@typescript-eslint"],
13 | "rules": {}
14 | }
15 |
--------------------------------------------------------------------------------
/antennas-postgres/microservice/Dockerfile:
--------------------------------------------------------------------------------
FROM node:16

# Create app directory
WORKDIR /usr/src/app

# Install app dependencies
# A wildcard is used to ensure both package.json AND package-lock.json are copied
# where available (npm@5+)
COPY package*.json ./

# Installing before `COPY . .` keeps this layer cached until package*.json changes.
RUN npm install

# Bundle app source
COPY . .

# "npm start" compiles TypeScript and runs dist/app.js (see package.json).
CMD [ "npm", "start" ]
17 |
--------------------------------------------------------------------------------
/antennas-postgres/microservice/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "microservice",
3 | "version": "1.0.0",
4 | "description": "",
5 | "main": "dist/app.js",
6 | "scripts": {
7 | "start": "tsc && node dist/app.js",
8 | "lint": "eslint . --ext .ts",
9 | "test": "echo \"Error: no test specified\" && exit 1"
10 | },
11 | "keywords": [],
12 | "author": "",
13 | "license": "ISC",
14 | "devDependencies": {
15 | "@typescript-eslint/eslint-plugin": "^5.10.1",
16 | "@typescript-eslint/parser": "^5.10.1",
17 | "eslint": "^8.7.0",
18 | "typescript": "^4.5.5"
19 | },
20 | "dependencies": {
21 | "@apollo/client": "^3.5.9",
22 | "@types/pg": "^8.6.4",
23 | "@types/uuid": "^8.3.4",
24 | "apollo-link-http": "^1.5.17",
25 | "dom": "^0.0.3",
26 | "graphql": "^15.8.0",
27 | "graphql-ws": "^5.5.5",
28 | "node-fetch": "^2.6.7",
29 | "pg": "^8.7.1",
30 | "uuid": "^8.3.2",
31 | "ws": "^8.5.0"
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/antennas-postgres/microservice/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "module": "commonjs",
4 | "esModuleInterop": true,
5 | "target": "es6",
6 | "moduleResolution": "node",
7 | "sourceMap": true,
8 | "outDir": "dist",
9 | "lib": ["esnext.asynciterable", "dom"]
10 | },
11 | "lib": ["es2015"]
12 | }
13 |
--------------------------------------------------------------------------------
/antennas-postgres/postgres/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM postgres:14.2-alpine
2 |
3 | # Automatically run by Postgres
4 | COPY ./create.sh /docker-entrypoint-initdb.d/
5 | COPY ./seed.sh /docker-entrypoint-initdb.d/
6 |
7 | # Create Tables, Publications and Roles
8 | COPY ./create.sql /scripts/
9 | COPY ./seed.sql /scripts/
10 |
--------------------------------------------------------------------------------
/antennas-postgres/postgres/create.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Creating tables, publications and roles"
4 | psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" -f /scripts/create.sql
5 |
--------------------------------------------------------------------------------
/antennas-postgres/postgres/create.sql:
--------------------------------------------------------------------------------
-- Antennas table will contain the identifier and geojson for each antenna.
CREATE TABLE antennas (
  antenna_id INT GENERATED ALWAYS AS IDENTITY,
  geojson JSON NOT NULL
);

-- Antennas performance table will contain every performance update available
-- NOTE(review): the data generator inserts a fractional performance value
-- (Math.random() * 10 in helper/src/app.ts) into this INT column — confirm
-- whether performance should be a floating-point/numeric type instead.
CREATE TABLE antennas_performance (
  antenna_id INT,
  clients_connected INT NOT NULL,
  performance INT NOT NULL,
  updated_at timestamp NOT NULL
);

-- Enable REPLICA for both tables
-- REPLICA IDENTITY FULL makes logical replication emit complete old rows.
ALTER TABLE antennas REPLICA IDENTITY FULL;
ALTER TABLE antennas_performance REPLICA IDENTITY FULL;

-- Create publication on the created tables
CREATE PUBLICATION antennas_publication_source FOR TABLE antennas, antennas_performance;

-- Create user and role to be used by Materialize
CREATE ROLE materialize REPLICATION LOGIN PASSWORD 'materialize';
GRANT SELECT ON antennas, antennas_performance TO materialize;
25 |
--------------------------------------------------------------------------------
/antennas-postgres/postgres/rollback.sql:
--------------------------------------------------------------------------------
----- Rollback:
-- Drop objects in reverse dependency order: publication first, then tables, then the role.
DROP PUBLICATION antennas_publication_source;
DROP TABLE antennas_performance;
DROP TABLE antennas;
DROP ROLE materialize;
6 |
--------------------------------------------------------------------------------
/antennas-postgres/postgres/seed.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Runs automatically from /docker-entrypoint-initdb.d after create.sh.
# Fix: the echo previously said "Creating tables, publications and roles",
# copied from create.sh, although this script seeds data.
echo "Seeding data"
# -v ON_ERROR_STOP=1 aborts the script on the first SQL error.
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" -f /scripts/seed.sql
5 |
--------------------------------------------------------------------------------
/connection-examples/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules/
2 | dist/
3 | Gemfile.lock
4 | package-lock.json
5 | Cargo.lock
6 | target/
7 | bin/
8 | _build/
9 | *.swp
10 | .vscode
11 | .DS_Store
12 |
--------------------------------------------------------------------------------
/connection-examples/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to Materialize Connection Examples
2 |
3 | Thank you for your interest in Materialize connection examples!
4 | Contributions of many kinds are encouraged and most welcome.
5 |
6 | ## Pull requests
7 |
8 | We welcome pull requests from everyone.
9 |
10 | Fork, then clone the repo:
11 |
12 | ```
git clone https://github.com/<your-username>/demos.git
14 | ```
15 |
16 | Create a branch for your edits:
17 |
18 | ```
19 | git checkout -b my-branch
20 |
21 | cd connection-examples
22 | ```
23 |
24 | ### Contributing a new example
25 |
26 | Run the init script to create the default secrets file:
27 |
28 | ```bash
29 | bash scripts/init.sh
30 | ```
31 |
32 | The script will prompt you for the following information:
33 | - `Programming language`: The name of the programming language or framework
34 | - `File extension`: The file extension for the language or framework
35 |
36 | The script will create a new directory with the name of the language or framework
37 | and add template files where you can add your code.
38 |
39 | Once you have added your code, commit your changes and push to your fork:
40 |
41 | ```
42 | git add .
43 | git commit -m "Add my example"
44 | git push origin my-branch
45 | ```
46 |
47 | That's it — you're ready to submit a pull request!
48 |
49 | ## Getting support
50 |
51 | If you run into a snag or need support as you prepare your contribution, join the Materialize [Slack community](https://materialize.com/s/chat) or [open an issue](https://github.com/MaterializeInc/connection-examples/issues/new)!
52 |
--------------------------------------------------------------------------------
/connection-examples/README.md:
--------------------------------------------------------------------------------
1 | # Materialize connection examples
2 |
3 | [](https://materialize.com/s/chat)
4 |
5 | [Materialize](https://github.com/MaterializeInc/materialize) is a streaming database for real-time applications. It is wire-compatible with PostgreSQL, which means that you can connect to a Materialize instance using your favorite client libraries, ORM frameworks and other third-party tools that support PostgreSQL.
6 |
7 | This is a collection of reference examples for common language-specific PostgreSQL drivers and PostgreSQL-compatible ORMs that have been tested with Materialize.
8 |
9 | ## Client libraries and Frameworks
10 |
11 | - [PHP](./php)
12 | - [NodeJS](./nodejs)
13 | - [TypeScript](./typescript)
14 | - [Deno](./deno)
15 | - [Java](./java)
16 | - [Python](./python)
17 | - [FastAPI](./fastapi)
18 | - [Ruby](./ruby)
19 | - [Go](./go)
20 | - [Lua](./lua)
21 | - [Rust](./rust)
22 |
23 | ## Helpful resources:
24 |
25 | * [`CREATE SOURCE`](https://materialize.com/docs/sql/create-source/) - syntax for creating new connections to upstream data sources.
26 | * [`CREATE MATERIALIZED VIEW`](https://materialize.com/docs/sql/create-materialized-view/) - syntax for creating an incrementally updating materialized view.
27 | * [`SELECT`](https://materialize.com/docs/sql/select) - syntax for querying materialized views.
28 | * [`SUBSCRIBE`](https://materialize.com/docs/sql/subscribe/) - syntax for subscribing to changes in a materialized view or query via a long-lived PostgreSQL transaction.
29 | * [Materialize Demos](https://github.com/MaterializeInc/demos)
30 |
31 | ## Getting support
32 |
33 | If you run into a snag or need support as you explore the examples in this repo, join the [Materialize Slack community](https://materialize.com/s/chat) or [open an issue](https://github.com/MaterializeInc/connection-examples/issues)!
34 |
--------------------------------------------------------------------------------
/connection-examples/deno/README.md:
--------------------------------------------------------------------------------
1 | # Materialize + Deno example
2 |
3 | You connect to Materialize the same way you [connect to PostgreSQL with `deno-postgres`](https://deno.land/x/postgres).
4 |
5 | To run the example, run the following command:
6 |
7 | ```
8 | deno run --allow-net --allow-env --allow-read --allow-write --unstable connection.ts
9 | ```
10 |
11 | ### Examples:
12 |
13 | - [Connection](./connection.ts)
14 | - [Stream](./subscribe.ts)
15 | - [Query](./query.ts)
16 | - [Insert data into tables](./insert.ts)
17 | - [Manage sources](./source.ts)
18 | - [Manage Views](./view.ts)
--------------------------------------------------------------------------------
/connection-examples/deno/connection.ts:
--------------------------------------------------------------------------------
import { Client } from "https://deno.land/x/postgres/mod.ts";

// Materialize connection details — replace the placeholders before running.
const client = new Client({
  user: "MATERIALIZE_USERNAME",
  database: "materialize",
  password: "APP_SPECIFIC_PASSWORD",
  hostname: "MATERIALIZE_HOST",
  port: 6875,
  ssl: true,
})

/*
  Alternatively, you can use the following syntax to connect to Materialize:
  const client = new Client('postgres://MATERIALIZE_USERNAME@MATERIALIZE_HOST:6875/materialize')
*/
// Connect, do work, and report failures on the caller-supplied response object.
const main = async ({ response }: { response: any }) => {
  try {
    await client.connect()
    /* Work with Materialize */
  } catch (err) {
    console.error(err);
    // Mirror the error onto the response object for the caller.
    response.status = 500
    response.body = {
      success: false,
      msg: err.toString()
    }
  } finally {
    // Always close the connection, error or not.
    await client.end()
  }
}

export { main }

// Call the main function
main({ response: {} })
--------------------------------------------------------------------------------
/connection-examples/deno/example.ts:
--------------------------------------------------------------------------------
1 | import { Client } from "https://deno.land/x/postgres/mod.ts";
2 | const client = new Client({
3 | user: "MATERIALIZE_USERNAME",
4 | database: "materialize",
5 | password: "APP_SPECIFIC_PASSWORD",
6 | hostname: "MATERIALIZE_HOST",
7 | port: 6875,
8 | ssl: true,
9 | })
10 | const main = async ({ response }: { response: any }) => {
11 | try {
12 | await client.connect()
13 | /* Work with Materialize */
14 |
15 | } catch (err) {
16 | response.status = 500
17 | response.body = {
18 | success: false,
19 | msg: err.toString()
20 | }
21 | } finally {
22 | await client.end()
23 | }
24 | }
25 | export { main }
26 |
27 | // Call the main function
28 | main({ response: {} })
--------------------------------------------------------------------------------
/connection-examples/deno/insert.ts:
--------------------------------------------------------------------------------
import { Client } from "https://deno.land/x/postgres/mod.ts";

// Materialize connection details — replace the placeholders before running.
const client = new Client({
  user: "MATERIALIZE_USERNAME",
  database: "materialize",
  password: "APP_SPECIFIC_PASSWORD",
  hostname: "MATERIALIZE_HOST",
  port: 6875,
  ssl: true,
})

// Insert one row with a parameterized statement, then read the table back.
const main = async ({ response }: { response: any }) => {
  try {
    await client.connect()

    // Parameterized query: values travel separately from the SQL text.
    await client.queryObject(
      "INSERT INTO countries(code, name) VALUES($1, $2)",
      ['GH', 'GHANA'],
    );

    const result = await client.queryObject("SELECT * FROM countries")
    console.log(result.rows)
  } catch (err) {
    console.error(err.toString())
  } finally {
    // Always close the connection, error or not.
    await client.end()
  }
}
export { main }

// Call the main function
main({ response: {} })
--------------------------------------------------------------------------------
/connection-examples/deno/query.ts:
--------------------------------------------------------------------------------
1 | import { Client } from "https://deno.land/x/postgres/mod.ts";
2 |
3 | const client = new Client({
4 | user: "MATERIALIZE_USERNAME",
5 | database: "materialize",
6 | password: "APP_SPECIFIC_PASSWORD",
7 | hostname: "MATERIALIZE_HOST",
8 | port: 6875,
9 | ssl: true,
10 | })
11 |
12 | const main = async ({ response }: { response: any }) => {
13 | try {
14 | await client.connect()
15 | const result = await client.queryObject("SELECT * FROM counter_sum")
16 | console.log(result.rows)
17 | } catch (err) {
18 | console.error(err.toString())
19 | } finally {
20 | await client.end()
21 | }
22 | }
23 | export { main }
24 |
25 | // Call the main function
26 | main({ response: {} })
--------------------------------------------------------------------------------
/connection-examples/deno/source.ts:
--------------------------------------------------------------------------------
import { Client } from "https://deno.land/x/postgres/mod.ts";

// Materialize connection details — replace the placeholders before running.
const client = new Client({
  user: "MATERIALIZE_USERNAME",
  database: "materialize",
  password: "APP_SPECIFIC_PASSWORD",
  hostname: "MATERIALIZE_HOST",
  port: 6875,
  ssl: true,
})

// Create a load-generator source (idempotent via IF NOT EXISTS) and list sources.
const main = async ({ response }: { response: any }) => {
  try {
    await client.connect()

    await client.queryObject(
      `CREATE SOURCE IF NOT EXISTS counter
      FROM LOAD GENERATOR COUNTER
      (TICK INTERVAL '500ms')
      WITH (SIZE = '3xsmall');`
    );

    const result = await client.queryObject("SHOW SOURCES")
    console.log(result.rows)
  } catch (err) {
    console.error(err.toString())
  } finally {
    // Always close the connection, error or not.
    await client.end()
  }
}
export { main }

// Call the main function
main({ response: {} })
--------------------------------------------------------------------------------
/connection-examples/deno/state.ts:
--------------------------------------------------------------------------------
1 | export interface Update {
2 | value: T;
3 | diff: number;
4 | }
5 |
6 | export default class State {
7 | private state: Map;
8 | private timestamp: number;
9 | private valid: boolean;
10 | private history: Array> | undefined;
11 |
12 | constructor(collectHistory?: boolean) {
13 | this.state = new Map();
14 | this.timestamp = 0;
15 | this.valid = true;
16 | if (collectHistory) {
17 | this.history = [];
18 | }
19 | }
20 |
21 | getState(): Readonly> {
22 | const list: Array = new Array();
23 |
24 | Array.from(this.state.entries()).forEach(([key, value]) => {
25 | const clone = JSON.parse(key);
26 | let i = 0;
27 | while (i< value) {
28 | list.push(clone);
29 | i++;
30 | };
31 | });
32 |
33 | return list;
34 | }
35 |
36 | getHistory(): Array> | undefined {
37 | return this.history;
38 | }
39 |
40 | private validate(timestamp: number) {
41 | if (!this.valid) {
42 | throw new Error("Invalid state.");
43 | } else if (timestamp < this.timestamp) {
44 | console.error("Invalid timestamp.");
45 | this.valid = false;
46 | throw new Error(
47 | `Update with timestamp (${timestamp}) is lower than the last timestamp (${
48 | this.timestamp
49 | }). Invalid state.`
50 | );
51 | }
52 | }
53 |
54 | private process({ value: _value, diff }: Update) {
55 | // Count value starts as a NaN
56 | const value = JSON.stringify(_value);
57 | const count = this.state.has(value) ? (this.state.get(value) as number + diff) : diff;
58 |
59 | if (count <= 0) {
60 | this.state.delete(value);
61 | } else {
62 | this.state.set(value, count);
63 | }
64 |
65 |
66 | if (this.history) {
67 | this.history.push({ value: _value, diff });
68 | }
69 | }
70 |
71 | update(updates: Array>, timestamp: number) {
72 | if (updates.length > 0) {
73 | this.validate(timestamp);
74 | this.timestamp = timestamp;
75 | updates.forEach(this.process.bind(this));
76 | }
77 | }
78 | };
--------------------------------------------------------------------------------
/connection-examples/deno/subscribe.ts:
--------------------------------------------------------------------------------
1 | import { Client } from "https://deno.land/x/postgres/mod.ts";
2 | import { Update } from "./state";
3 | import State, { Update } from "./state.ts";
4 |
5 | const client = new Client({
6 | user: "MATERIALIZE_USERNAME",
7 | database: "materialize",
8 | password: "APP_SPECIFIC_PASSWORD",
9 | hostname: "MATERIALIZE_HOST",
10 | port: 6875,
11 | ssl: true,
12 | });
13 |
14 | interface CounterSum {
15 | sum: number;
16 | }
17 |
18 | const main = async ({ response }: { response: any }) => {
19 | try {
20 | await client.connect()
21 |
22 | await client.queryObject('BEGIN');
23 | await client.queryObject('DECLARE c CURSOR FOR SUBSCRIBE (SELECT sum FROM counter_sum) WITH (PROGRESS);');
24 |
25 | while (true) {
26 | let buffer: Array> = [];
27 | const state = new State();
28 |
29 | // Loop indefinitely
30 | while (true) {
31 | const { rows } = await client.queryObject('FETCH ALL c');
32 | rows.forEach(row => {
33 | // Map row fields
34 | const {
35 | mz_timestamp: ts,
36 | mz_progressed: progress,
37 | mz_diff: diff,
38 | sum,
39 | } = row;
40 |
41 | // When a progress is detected, get the last values
42 | if (progress) {
43 | if (buffer.length > 0) {
44 | try {
45 | state.update(buffer, ts);
46 | } catch (err) {
47 | console.error(err);
48 | } finally {
49 | console.log("State: ", state.getState());
50 | buffer.splice(0, buffer.length);
51 | }
52 | }
53 | } else {
54 | buffer.push({ value: { sum }, diff });
55 | }
56 | });
57 | }
58 | }
59 | } catch (err) {
60 | console.error(err.toString())
61 | } finally {
62 | await client.end()
63 | }
64 | }
65 |
66 | export { main }
67 |
68 | // Call the main function
69 | main({ response: {} })
--------------------------------------------------------------------------------
/connection-examples/deno/view.ts:
--------------------------------------------------------------------------------
import { Client } from "https://deno.land/x/postgres/mod.ts";

// Materialize connection details — replace the placeholders before running.
const client = new Client({
  user: "MATERIALIZE_USERNAME",
  database: "materialize",
  password: "APP_SPECIFIC_PASSWORD",
  hostname: "MATERIALIZE_HOST",
  port: 6875,
  ssl: true,
})

// Create the counter_sum materialized view (idempotent) and list views.
const main = async ({ response }: { response: any }) => {
  try {
    await client.connect()

    await client.queryObject(
      `CREATE MATERIALIZED VIEW IF NOT EXISTS counter_sum AS
      SELECT sum(counter)
      FROM counter;`
    );

    const result = await client.queryObject("SHOW VIEWS")
    console.log(result.rows)
  } catch (err) {
    console.error(err.toString())
  } finally {
    // Always close the connection, error or not.
    await client.end()
  }
}
export { main }

// Call the main function
main({ response: {} })
--------------------------------------------------------------------------------
/connection-examples/fastapi/README.md:
--------------------------------------------------------------------------------
1 | # Materialize + FastAPI Examples
2 |
3 | - [Basic FastAPI example](https://github.com/chuck-alt-delete/fastapi_psycopg3_example)
4 | - [A FastAPI example that uses Server Sent Events (SSE) powered by Materialize `SUBSCRIBE` queries](https://github.com/chuck-alt-delete/mz-auction-house)
5 |
--------------------------------------------------------------------------------
/connection-examples/go/README.md:
--------------------------------------------------------------------------------
1 | # Materialize + Go Example
2 |
3 | You connect to Materialize the same way you [connect to PostgreSQL with `pgx`](https://pkg.go.dev/github.com/jackc/pgx#ConnConfig).
4 |
5 |
6 | Add pgx to your Go modules:
7 |
8 | ```
9 | go get github.com/jackc/pgx/v4
10 | ```
11 |
12 | Run the example:
13 |
14 | ```
15 | go run connection.go
16 |
17 | # Output:
18 | # Connected to Materialize!
19 | ```
20 |
21 | ### Examples:
22 |
23 | - [Connection](./connection.go)
24 | - [Stream](./subscribe.go)
25 | - [Query](./query.go)
26 | - [Insert data into tables](./insert.go)
27 | - [Manage sources](./source.go)
28 | - [Manage Views](./view.go)
--------------------------------------------------------------------------------
/connection-examples/go/connection.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "fmt"
6 |
7 | "github.com/jackc/pgx/v4"
8 | )
9 |
10 | func main() {
11 |
12 | ctx := context.Background()
13 | connStr := "postgres://MATERIALIZE_USERNAME:APP_SPECIFIC_PASSWORD@MATERIALIZE_HOST:6875/materialize?ssl_mode=require"
14 |
15 | conn, err := pgx.Connect(ctx, connStr)
16 | if err != nil {
17 | fmt.Println(err)
18 | } else {
19 | fmt.Println("Connected to Materialize!")
20 | }
21 | defer conn.Close(context.Background())
22 | }
23 |
--------------------------------------------------------------------------------
/connection-examples/go/go.mod:
--------------------------------------------------------------------------------
1 | module example
2 |
3 | go 1.18
4 |
5 | require github.com/jackc/pgx/v4 v4.16.1
6 |
7 | require (
8 | github.com/jackc/chunkreader/v2 v2.0.1 // indirect
9 | github.com/jackc/pgconn v1.12.1 // indirect
10 | github.com/jackc/pgio v1.0.0 // indirect
11 | github.com/jackc/pgpassfile v1.0.0 // indirect
12 | github.com/jackc/pgproto3/v2 v2.3.0 // indirect
13 | github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect
14 | github.com/jackc/pgtype v1.11.0 // indirect
15 | golang.org/x/crypto v0.1.0 // indirect
16 | golang.org/x/text v0.4.0 // indirect
17 | )
18 |
--------------------------------------------------------------------------------
/connection-examples/go/insert.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "fmt"
6 |
7 | "github.com/jackc/pgx/v4"
8 | )
9 |
10 | func main() {
11 |
12 | ctx := context.Background()
13 | connStr := "postgres://MATERIALIZE_USERNAME:APP_SPECIFIC_PASSWORD@MATERIALIZE_HOST:6875/materialize?sslmode=require"
14 |
15 | conn, err := pgx.Connect(ctx, connStr)
16 | if err != nil {
17 | fmt.Println(err)
18 | } else {
19 | fmt.Println("Connected to Materialize!")
20 | }
21 |
22 | insertSQL := "INSERT INTO countries (code, name) VALUES ($1, $2)"
23 |
24 | _, err = conn.Exec(ctx, insertSQL, "GH", "GHANA")
25 | if err != nil {
26 | fmt.Println(err)
27 | }
28 |
29 | defer conn.Close(context.Background())
30 | }
31 |
--------------------------------------------------------------------------------
/connection-examples/go/query.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "fmt"
6 |
7 | "github.com/jackc/pgx/v4"
8 | )
9 |
10 | func main() {
11 |
12 | ctx := context.Background()
13 | connStr := "postgres://MATERIALIZE_USERNAME:APP_SPECIFIC_PASSWORD@MATERIALIZE_HOST:6875/materialize?sslmode=require"
14 |
15 | conn, err := pgx.Connect(ctx, connStr)
16 | if err != nil {
17 | fmt.Println(err)
18 | } else {
19 | fmt.Println("Connected to Materialize!")
20 | }
21 |
22 | rows, err := conn.Query(ctx, "SELECT * FROM countries")
23 | if err != nil {
24 | fmt.Println(err)
25 | }
26 |
27 | type result struct {
28 | Code string
29 | Name string
30 | }
31 |
32 | for rows.Next() {
33 | var r result
34 | err = rows.Scan(&r.Code, &r.Name)
35 | if err != nil {
36 | fmt.Println(err)
37 | }
38 | // operate on result
39 | fmt.Printf("%s %s\n", r.Code, r.Name)
40 | }
41 |
42 | defer conn.Close(context.Background())
43 | }
44 |
--------------------------------------------------------------------------------
/connection-examples/go/source.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "fmt"
6 |
7 | "github.com/jackc/pgx/v4"
8 | )
9 |
10 | func main() {
11 |
12 | ctx := context.Background()
13 | connStr := "postgres://MATERIALIZE_USERNAME:APP_SPECIFIC_PASSWORD@MATERIALIZE_HOST:6875/materialize?sslmode=require"
14 |
15 | conn, err := pgx.Connect(ctx, connStr)
16 | if err != nil {
17 | fmt.Println(err)
18 | } else {
19 | fmt.Println("Connected to Materialize!")
20 | }
21 |
22 | createSourceSQL := `CREATE SOURCE IF NOT EXISTS counter
23 | FROM LOAD GENERATOR COUNTER
24 | (TICK INTERVAL '500ms')
25 | WITH (SIZE = '3xsmall');`
26 |
27 | _, err = conn.Exec(ctx, createSourceSQL)
28 | if err != nil {
29 | fmt.Println(err)
30 | }
31 |
32 | defer conn.Close(context.Background())
33 | }
34 |
--------------------------------------------------------------------------------
/connection-examples/go/state.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "encoding/json"
5 | "errors"
6 | "fmt"
7 | )
8 |
// SingleValue wraps an arbitrary value so it marshals as {"value": ...},
// matching the key format State.process uses for its map.
type SingleValue struct {
	Value interface{} `json:"value"`
}

// Update is one SUBSCRIBE delta: a value plus how many copies were added
// (diff > 0) or removed (diff < 0).
// NOTE(review): the fields are unexported, so these json tags are inert with
// encoding/json — confirm whether they were meant to be exported.
type Update struct {
	value interface{} `json:"value"`
	diff int64 `json:"diff"`
}

// State tracks the current multiset of values seen on a SUBSCRIBE stream.
type State struct {
	// state maps a JSON-serialized value to its multiplicity.
	state map[string]int64
	// timestamp of the most recently applied batch.
	timestamp int64
	// valid becomes false permanently once a batch arrives out of order.
	valid bool
	// history of processed updates (see NewState).
	history []Update
}
24 |
25 | func NewState(collectHistory bool) *State {
26 | state := make(map[string]int64)
27 | history := []Update{}
28 |
29 | return &State{
30 | state: state,
31 | timestamp: 0,
32 | valid: true,
33 | history: history,
34 | }
35 | }
36 |
// getState expands the multiset into a flat list: each distinct value appears
// once per unit of its multiplicity. Map keys are parsed back from the JSON
// produced by process (a marshalled SingleValue), so the payload sits under
// the "value" field; keys that fail to parse are skipped with the error printed.
func (s *State) getState() []interface{} {
	list := []interface{}{}

	for key, value := range s.state {
		clone := make(map[string]interface{})
		err := json.Unmarshal([]byte(key), &clone)
		if err != nil {
			fmt.Println(err);
			continue
		}

		// Repeat the value `multiplicity` times.
		for i := int64(0); i < value; i++ {
			list = append(list, clone["value"])
		}
	}

	return list
}
55 |
// getHistory returns the accumulated update log (may be nil or empty when no
// history has been collected).
func (s *State) getHistory() []Update {
	return s.history
}
59 |
60 | func (s *State) validate(timestamp int64) error {
61 | if !s.valid {
62 | return errors.New("Invalid state.")
63 | } else if timestamp < s.timestamp {
64 | s.valid = false
65 | return errors.New("Invalid timestamp.")
66 | }
67 | return nil
68 | }
69 |
// process applies a single update: the value is wrapped in SingleValue and
// JSON-serialized to form the map key, then its multiplicity is adjusted by
// diff. Entries that drop to zero or below are removed. The update is also
// logged when a history slice exists.
func (s *State) process(update Update) {
	var sv = SingleValue { Value: update.value }
	value, err := json.Marshal(sv)
	// fmt.Println(sv, value);
	if err != nil {
		// Unserializable values are skipped with the error printed.
		fmt.Println(err);
		return
	}

	count, ok := s.state[string(value)]
	if !ok {
		count = 0
	}

	count += update.diff

	if count <= 0 {
		delete(s.state, string(value))
	} else {
		s.state[string(value)] = count
	}

	if s.history != nil {
		s.history = append(s.history, update)
	}
}
96 |
97 | func (s *State) Update(updates []Update, timestamp int64) error {
98 | if len(updates) > 0 {
99 | err := s.validate(timestamp)
100 | if err != nil {
101 | return err
102 | }
103 | s.timestamp = timestamp
104 | for _, update := range updates {
105 | s.process(update)
106 | }
107 | }
108 | return nil
109 | }
110 |
--------------------------------------------------------------------------------
/connection-examples/go/subscribe.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "fmt"
6 |
7 | "github.com/jackc/pgx/v4"
8 | "database/sql"
9 | )
10 |
// Sum mirrors one row of the counter_sum view.
// NOTE(review): the field is unexported, so encoding/json would ignore
// both the field and its tag; export it if JSON (un)marshaling is
// actually needed. (The type is currently unused in this file.)
type Sum struct {
	sum float64 `json:"sum"`
}
14 |
15 | func main() {
16 |
17 | ctx := context.Background()
18 | connStr := "postgres://MATERIALIZE_USERNAME:APP_SPECIFIC_PASSWORD@MATERIALIZE_HOST:6875/materialize?sslmode=require"
19 |
20 | conn, err := pgx.Connect(ctx, connStr)
21 | if err != nil {
22 | fmt.Println(err)
23 | } else {
24 | fmt.Println("Connected to Materialize!")
25 | }
26 |
27 | tx, err := conn.Begin(ctx)
28 | if err != nil {
29 | fmt.Println(err)
30 | return
31 | }
32 | defer tx.Rollback(ctx)
33 |
34 | _, err = tx.Exec(ctx, "DECLARE c CURSOR FOR SUBSCRIBE (SELECT sum FROM counter_sum) WITH (PROGRESS);")
35 | if err != nil {
36 | fmt.Println(err)
37 | return
38 | }
39 |
40 | // Define a struct to hold the data returned from the query
41 | type subscribeResult struct {
42 | MzTimestamp int64
43 | MzProgress bool
44 | MzDiff sql.NullInt64
45 | Sum sql.NullFloat64
46 | }
47 |
48 | state := NewState(false)
49 | var buffer []Update
50 | for {
51 | rows, err := tx.Query(ctx, "FETCH ALL c")
52 | if err != nil {
53 | fmt.Println(err)
54 | tx.Rollback(ctx)
55 | return
56 | }
57 |
58 | for rows.Next() {
59 | var r subscribeResult
60 |
61 | if err := rows.Scan(&r.MzTimestamp, &r.MzProgress, &r.MzDiff, &r.Sum); err != nil {
62 | fmt.Println(err)
63 | tx.Rollback(ctx)
64 | return
65 | }
66 |
67 | if r.MzProgress {
68 | state.Update(buffer, r.MzTimestamp)
69 | fmt.Println(state.getState())
70 |
71 | // Clean buffer
72 | buffer = []Update{}
73 | } else {
74 | var update = Update{ diff: r.MzDiff.Int64, value: map[string]float64{"sum": r.Sum.Float64}}
75 | buffer = append(buffer, update)
76 | }
77 | }
78 | }
79 |
80 | err = tx.Commit(ctx)
81 | if err != nil {
82 | fmt.Println(err)
83 | }
84 |
85 | defer conn.Close(context.Background())
86 | }
87 |
--------------------------------------------------------------------------------
/connection-examples/go/view.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "fmt"
6 |
7 | "github.com/jackc/pgx/v4"
8 | )
9 |
10 | func main() {
11 |
12 | ctx := context.Background()
13 | connStr := "postgres://MATERIALIZE_USERNAME:APP_SPECIFIC_PASSWORD@MATERIALIZE_HOST:6875/materialize?sslmode=require"
14 |
15 | conn, err := pgx.Connect(ctx, connStr)
16 | if err != nil {
17 | fmt.Println(err)
18 | } else {
19 | fmt.Println("Connected to Materialize!")
20 | }
21 |
22 | createViewSQL := `CREATE MATERIALIZED VIEW IF NOT EXISTS counter_sum AS
23 | SELECT sum(counter)
24 | FROM counter;`
25 | _, err = conn.Exec(ctx, createViewSQL)
26 | if err != nil {
27 | fmt.Println(err)
28 | }
29 |
30 | defer conn.Close(context.Background())
31 | }
32 |
--------------------------------------------------------------------------------
/connection-examples/java/README.md:
--------------------------------------------------------------------------------
1 | # Materialize + Java Example
2 |
3 | You connect to Materialize the same way you connect to [PostgreSQL with the JDBC driver](https://jdbc.postgresql.org/documentation/head/connect.html).
4 |
5 | ### Examples:
6 |
7 | - [Connection](./connection.java)
8 | - [Stream](./subscribe.java)
9 | - [Query](./query.java)
10 | - [Insert data into tables](./insert.java)
11 | - [Manage sources](./source.java)
12 | - [Manage Views](./view.java)
--------------------------------------------------------------------------------
/connection-examples/java/connection.java:
--------------------------------------------------------------------------------
1 | import java.sql.Connection;
2 | import java.sql.DriverManager;
3 | import java.sql.SQLException;
4 | import java.util.Properties;
5 |
public class App {

    private final String url = "jdbc:postgresql://MATERIALIZE_HOST:6875/materialize";
    private final String user = "MATERIALIZE_USERNAME";
    private final String password = "MATERIALIZE_PASSWORD";

    /**
     * Open an SSL connection to Materialize.
     *
     * @return the open Connection, or null when the attempt fails
     */
    public Connection connect() {
        Properties props = new Properties();
        props.setProperty("user", user);
        props.setProperty("password", password);
        props.setProperty("ssl","true");

        try {
            Connection conn = DriverManager.getConnection(url, props);
            System.out.println("Connected to Materialize successfully!");
            return conn;
        } catch (SQLException e) {
            System.out.println(e.getMessage());
            return null;
        }
    }

    public static void main(String[] args) {
        new App().connect();
    }
}
--------------------------------------------------------------------------------
/connection-examples/java/insert.java:
--------------------------------------------------------------------------------
1 | import java.sql.Connection;
2 | import java.sql.DriverManager;
3 | import java.sql.SQLException;
4 | import java.util.Properties;
5 | import java.sql.ResultSet;
6 | import java.sql.Statement;
7 | import java.sql.PreparedStatement;
8 |
public class App {

    private final String url = "jdbc:postgresql://MATERIALIZE_HOST:6875/materialize";
    private final String user = "MATERIALIZE_USERNAME";
    private final String password = "MATERIALIZE_PASSWORD";

    /**
     * Connect to Materialize over SSL.
     *
     * @return a Connection object
     * @throws SQLException if the connection cannot be established
     */
    public Connection connect() throws SQLException {
        Properties props = new Properties();
        props.setProperty("user", user);
        props.setProperty("password", password);
        props.setProperty("ssl","true");

        return DriverManager.getConnection(url, props);

    }

    /**
     * Insert one row into the countries table using a parameterized
     * statement (server-side binding; no manual quoting).
     */
    public void insert() {

        String code = "GH";
        String name = "Ghana";

        // try-with-resources closes the statement and connection even if
        // executeUpdate throws (previously the statement leaked on error).
        try (Connection conn = connect();
             PreparedStatement st = conn.prepareStatement("INSERT INTO countries(code, name) VALUES(?, ?)")) {
            st.setString(1, code);
            st.setString(2, name);
            // Renamed from the misleading "rowsDeleted".
            int rowsInserted = st.executeUpdate();
            System.out.println(rowsInserted + " rows inserted.");
        } catch (SQLException ex) {
            System.out.println(ex.getMessage());
        }
    }

    public static void main(String[] args) {
        App app = new App();
        app.insert();
    }
}
--------------------------------------------------------------------------------
/connection-examples/java/query.java:
--------------------------------------------------------------------------------
1 | import java.sql.Connection;
2 | import java.sql.DriverManager;
3 | import java.sql.SQLException;
4 | import java.util.Properties;
5 | import java.sql.ResultSet;
6 | import java.sql.Statement;
7 |
public class App {

    private final String url = "jdbc:postgresql://MATERIALIZE_HOST:6875/materialize";
    private final String user = "MATERIALIZE_USERNAME";
    private final String password = "MATERIALIZE_PASSWORD";

    /**
     * Connect to Materialize over SSL.
     *
     * @return a Connection object
     * @throws SQLException if the connection cannot be established
     */
    public Connection connect() throws SQLException {
        Properties props = new Properties();
        props.setProperty("user", user);
        props.setProperty("password", password);
        props.setProperty("ssl","true");

        return DriverManager.getConnection(url, props);

    }

    /** Run a one-off SELECT against a view and print one column per row. */
    public void query() {

        String sql = "SELECT * FROM my_view";

        // All three resources are released by try-with-resources.
        try (Connection conn = connect();
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery(sql)) {
            while (rs.next()) {
                System.out.println(rs.getString("my_column"));
            }
        } catch (SQLException ex) {
            System.out.println(ex.getMessage());
        }
    }

    public static void main(String[] args) {
        new App().query();
    }
}
--------------------------------------------------------------------------------
/connection-examples/java/source.java:
--------------------------------------------------------------------------------
1 | import java.sql.Connection;
2 | import java.sql.DriverManager;
3 | import java.sql.SQLException;
4 | import java.util.Properties;
5 | import java.sql.ResultSet;
6 | import java.sql.Statement;
7 | import java.sql.PreparedStatement;
8 |
public class App {

    private final String url = "jdbc:postgresql://MATERIALIZE_HOST:6875/materialize";
    private final String user = "MATERIALIZE_USERNAME";
    private final String password = "MATERIALIZE_PASSWORD";

    /**
     * Connect to Materialize over SSL.
     *
     * @return a Connection object
     * @throws SQLException if the connection cannot be established
     */
    public Connection connect() throws SQLException {
        Properties props = new Properties();
        props.setProperty("user", user);
        props.setProperty("password", password);
        props.setProperty("ssl","true");

        return DriverManager.getConnection(url, props);

    }

    /**
     * Create a load-generator source. The Statement now lives in
     * try-with-resources so it is closed even when execute throws
     * (previously it leaked on error).
     */
    public void source() {

        String SQL = "CREATE SOURCE counter FROM "
                + "LOAD GENERATOR COUNTER";

        try (Connection conn = connect();
             Statement st = conn.createStatement()) {
            st.execute(SQL);
            System.out.println("Source created.");
        } catch (SQLException ex) {
            System.out.println(ex.getMessage());
        }
    }

    public static void main(String[] args) {
        App app = new App();
        app.source();
    }
}
--------------------------------------------------------------------------------
/connection-examples/java/subscribe.java:
--------------------------------------------------------------------------------
1 | import java.sql.Connection;
2 | import java.sql.DriverManager;
3 | import java.sql.SQLException;
4 | import java.util.Properties;
5 | import java.sql.ResultSet;
6 | import java.sql.Statement;
7 |
public class App {

    private final String url = "jdbc:postgresql://MATERIALIZE_HOST:6875/materialize";
    private final String user = "MATERIALIZE_USERNAME";
    private final String password = "MATERIALIZE_PASSWORD";

    /**
     * Connect to Materialize over SSL.
     *
     * @return a Connection object
     * @throws SQLException if the connection cannot be established
     */
    public Connection connect() throws SQLException {
        Properties props = new Properties();
        props.setProperty("user", user);
        props.setProperty("password", password);
        props.setProperty("ssl","true");

        return DriverManager.getConnection(url, props);

    }

    /**
     * Stream changes from my_view with SUBSCRIBE. FETCH ALL blocks until
     * rows are pending, then returns all of them.
     */
    public void subscribe() {
        try (Connection conn = connect();
             Statement stmt = conn.createStatement()) {

            // SUBSCRIBE must run inside a transaction.
            stmt.execute("BEGIN");
            stmt.execute("DECLARE c CURSOR FOR SUBSCRIBE my_view");
            while (true) {
                // Bug fix: the original used `if (rs.next())`, printing
                // only the first row of every batch and leaking the
                // ResultSet each iteration.
                try (ResultSet rs = stmt.executeQuery("FETCH ALL c")) {
                    while (rs.next()) {
                        System.out.println(rs.getString(1) + " " + rs.getString(2) + " " + rs.getString(3));
                    }
                }
            }
        } catch (SQLException ex) {
            System.out.println(ex.getMessage());
        }
    }

    public static void main(String[] args) {
        App app = new App();
        app.subscribe();
    }
}
--------------------------------------------------------------------------------
/connection-examples/java/view.java:
--------------------------------------------------------------------------------
1 | import java.sql.Connection;
2 | import java.sql.DriverManager;
3 | import java.sql.SQLException;
4 | import java.util.Properties;
5 | import java.sql.ResultSet;
6 | import java.sql.Statement;
7 | import java.sql.PreparedStatement;
8 |
public class App {

    private final String url = "jdbc:postgresql://MATERIALIZE_HOST:6875/materialize";
    private final String user = "MATERIALIZE_USERNAME";
    private final String password = "MATERIALIZE_PASSWORD";

    /**
     * Connect to Materialize over SSL.
     *
     * @return a Connection object
     * @throws SQLException if the connection cannot be established
     */
    public Connection connect() throws SQLException {
        Properties props = new Properties();
        props.setProperty("user", user);
        props.setProperty("password", password);
        props.setProperty("ssl","true");

        return DriverManager.getConnection(url, props);

    }

    /**
     * Create a view that extracts typed columns from raw JSON text. The
     * Statement now lives in try-with-resources so it is closed even
     * when execute throws (previously it leaked on error).
     */
    public void view() {
        String SQL = "CREATE VIEW market_orders_2 AS "
            + "SELECT "
            + " val->>'symbol' AS symbol, "
            + " (val->'bid_price')::float AS bid_price "
            + "FROM (SELECT text::jsonb AS val FROM market_orders_raw_2)";

        try (Connection conn = connect();
             Statement st = conn.createStatement()) {
            st.execute(SQL);
            System.out.println("View created.");
        } catch (SQLException ex) {
            System.out.println(ex.getMessage());
        }
    }

    public static void main(String[] args) {
        App app = new App();
        app.view();
    }
}
--------------------------------------------------------------------------------
/connection-examples/lua/README.md:
--------------------------------------------------------------------------------
1 | # Materialize + Lua Example
2 |
3 | This example uses [luasql](https://keplerproject.github.io/luasql/index.html)
4 | and was tested on Lua `v5.4`
5 |
6 | Install the Postgres driver using LuaRocks:
7 |
8 | `luarocks install luasql-postgres`
9 |
There are a few helper functions in `utils.lua` for iterating over rows
and dumping a table to stdout.
12 |
13 | ### Examples:
14 |
15 | - [Connection](./connection.lua)
16 | - [Stream](./subscribe.lua)
17 | - [Query](./query.lua)
18 | - [Insert data into tables](./insert.lua)
19 | - [Manage sources](./source.lua)
20 | - [Manage Views](./view.lua)
21 |
--------------------------------------------------------------------------------
/connection-examples/lua/connection.lua:
--------------------------------------------------------------------------------
-- Connect to Materialize via the LuaSQL Postgres driver; assert aborts
-- the script with the driver's error message if either step fails.
local driver = require "luasql.postgres"
local env = assert (driver.postgres())
local con = assert(env:connect("postgresql://MATERIALIZE_USERNAME:MATERIALIZE_PASSWORD@MATERIALIZE_HOST:6875/materialize?sslmode=require"))
4 |
5 |
--------------------------------------------------------------------------------
/connection-examples/lua/insert.lua:
--------------------------------------------------------------------------------
-- Create the countries table, insert a few sample rows, and print the
-- resulting row count.
local utils = require("utils")
local driver = require "luasql.postgres"
local env = assert (driver.postgres())
local con = assert(env:connect("postgresql://MATERIALIZE_USERNAME:MATERIALIZE_PASSWORD@MATERIALIZE_HOST:6875/materialize?sslmode=require"))

assert (con:execute([[
    CREATE TABLE IF NOT EXISTS countries (name TEXT, code TEXT);
]]))

-- Sample rows to insert.
local list = {
    { name="United States", code="US", },
    { name="Canada", code="CA", },
    { name="Mexico", code="MX", },
    { name="Germany", code="DE", },
}

-- NOTE(review): values are spliced into the SQL with string.format.
-- Fine for these hard-coded literals, but unsafe for untrusted input
-- (no escaping or parameter binding).
for _, p in pairs (list) do
    local _ = assert (con:execute(string.format([[
        INSERT INTO countries
        VALUES ('%s', '%s')]], p.name, p.code)
    ))
end

-- Make the inserts durable (autocommit was not changed, but an explicit
-- commit keeps the example portable).
con:commit()

for count in utils.rows(con, 'SELECT COUNT(*) FROM countries;') do
    print(string.format("%s", count))
end
29 |
--------------------------------------------------------------------------------
/connection-examples/lua/query.lua:
--------------------------------------------------------------------------------
-- Query the avg_bid view once and print each row as "symbol:avg".
local utils = require("utils")
local driver = require "luasql.postgres"
local env = assert (driver.postgres())
local con = assert(env:connect("postgresql://MATERIALIZE_USERNAME:MATERIALIZE_PASSWORD@MATERIALIZE_HOST:6875/materialize?sslmode=require"))

-- Consistency fix: reuse the shared row iterator from utils.lua instead
-- of redefining an identical local copy here (insert.lua and
-- subscribe.lua already use utils.rows).
for symbol, avg in utils.rows(con, "SELECT * FROM avg_bid") do
    print(string.format("%s:%s", symbol, avg))
end

con:close()
env:close()
18 |
--------------------------------------------------------------------------------
/connection-examples/lua/source.lua:
--------------------------------------------------------------------------------
-- Create a load-generator source (if missing) and list all sources.
local dump = require('utils').dump
local driver = require "luasql.postgres"
local env = assert (driver.postgres())
local con = assert(env:connect("postgresql://MATERIALIZE_USERNAME:MATERIALIZE_PASSWORD@MATERIALIZE_HOST:6875/materialize?sslmode=require"))

-- Robustness fix: assert on execute so a failed CREATE SOURCE aborts
-- loudly (the sibling scripts assert on execute as well; previously a
-- failure here was silently ignored).
assert (con:execute[[
    CREATE SOURCE IF NOT EXISTS counter
    FROM LOAD GENERATOR COUNTER
    (TICK INTERVAL '500ms')
    WITH (SIZE = '3xsmall')
]])

local cur = assert (con:execute"SHOW SOURCES")
-- Fetch rows as records keyed by column name until exhausted.
local row = cur:fetch({}, 'a')
while row do
    print(dump(row))
    row = cur:fetch({}, 'a')
end

cur:close()
con:close()
env:close()
23 |
--------------------------------------------------------------------------------
/connection-examples/lua/state.lua:
--------------------------------------------------------------------------------
1 | local json = require "cjson"
2 | local State = {}
3 |
-- Build a fresh State. History is only tracked when collectHistory is
-- truthy; otherwise it stays nil and process skips the bookkeeping.
function State:new(collectHistory)
    local instance = {
        state = {},
        timestamp = 0,
        valid = true,
        history = collectHistory and {} or nil,
    }
    self.__index = self
    return setmetatable(instance, self)
end
16 |
-- Flatten the multiset into a list: each decoded value appears once per
-- unit of its current count.
function State:getState()
    local values = {}

    for encoded, count in pairs(self.state) do
        local decoded = json.decode(encoded)['value']
        for _ = 1, count do
            values[#values + 1] = decoded
        end
    end

    return values
end
29 |
-- Return the list of processed updates, or nil when history collection
-- is disabled.
function State:getHistory()
    return self.history
end
33 |
-- Ensure the state is usable and the timestamp never moves backwards.
-- A regressing timestamp marks the state invalid before raising.
function State:validate(timestamp)
    if not self.valid then
        error("Invalid state.")
    end
    -- error() above aborts, so a plain `if` replaces the elseif.
    if tonumber(timestamp) < self.timestamp then
        print("Invalid timestamp.")
        self.valid = false
        error(string.format("Update with timestamp (%d) is lower than the last timestamp (%d). Invalid state.", timestamp, self.timestamp))
    end
end
43 |
-- Fold one update into the multiset keyed by the JSON encoding of its
-- value; entries whose count drops to zero or below are removed.
function State:process(update)
    local key = json.encode({ value = update.value })
    local total = (self.state[key] or 0) + update.diff

    -- Assign nil to delete the entry when the count is exhausted.
    self.state[key] = (total > 0) and total or nil

    if self.history then
        self.history[#self.history + 1] = update
    end
end
59 |
-- Apply a batch of updates at the given timestamp. Empty batches are
-- ignored and do not advance the clock.
function State:update(updates, timestamp)
    if #updates == 0 then
        return
    end
    self:validate(timestamp)
    self.timestamp = timestamp
    for _, u in ipairs(updates) do
        self:process(u)
    end
end
69 |
70 | return State
71 |
--------------------------------------------------------------------------------
/connection-examples/lua/subscribe.lua:
--------------------------------------------------------------------------------
-- Stream changes from counter_sum and maintain a local State replica:
-- data rows are buffered per timestamp, and each progress row (which
-- closes a timestamp) applies the buffered diffs atomically.
local utils = require "utils"
local driver = require "luasql.postgres"
local env = assert (driver.postgres())
local State = require("state")
local con = assert(env:connect("postgresql://MATERIALIZE_USERNAME:MATERIALIZE_PASSWORD@MATERIALIZE_HOST:6875/materialize?sslmode=require"))
con:setautocommit(false)

assert (con:execute("DECLARE c CURSOR FOR SUBSCRIBE (SELECT sum FROM counter_sum) WITH (PROGRESS);"))

-- Bug fixes relative to the original:
--  * `updated` was an undeclared global; it is now a local.
--  * state/buffer were recreated on every outer iteration, discarding
--    all previously accumulated diffs. They now persist across FETCH
--    batches (matching the nodejs example).
local updated = false
local buffer = {}
local state = State:new(false)

while(true) do
    for mz_timestamp, mz_progressed, mz_diff, sum in utils.rows(con,"FETCH ALL c") do
        -- When a progress row is detected, apply the buffered values
        if mz_progressed ~= 'f' then
            if updated then
                updated = false;

                -- Update state
                state:update(buffer, mz_timestamp);
                buffer = {};

                -- Print state
                print("Sum: ", table.concat(state:getState(), ','));
            end
        else
            -- Data row: buffer it until the timestamp closes
            updated = true
            local update = {
                value = sum,
                diff = tonumber(mz_diff)
            }
            table.insert(buffer, update);
        end
    end
end

-- Unreachable while the loop above runs forever; kept for symmetry with
-- the other examples (the duplicated con:close() was removed).
con:commit()
con:close()
env:close()
49 |
--------------------------------------------------------------------------------
/connection-examples/lua/utils.lua:
--------------------------------------------------------------------------------
local utils = {}

-- Iterator factory: run sql_statement on connection and yield one row
-- per call until the cursor is exhausted.
function utils.rows(connection, sql_statement)
    local cursor = assert(connection:execute(sql_statement))
    return function()
        return cursor:fetch()
    end
end

-- Render any value for printing; tables are serialized recursively.
-- Adapted from:
-- https://stackoverflow.com/questions/9168058/how-to-dump-a-table-to-console
function utils.dump(o)
    if type(o) ~= 'table' then
        return tostring(o)
    end
    local s = '{ '
    for k, v in pairs(o) do
        if type(k) ~= 'number' then k = '"'..k..'"' end
        s = s .. '['..k..'] = ' .. utils.dump(v) .. ','
    end
    return s .. '} '
end

return utils
28 |
--------------------------------------------------------------------------------
/connection-examples/lua/views.lua:
--------------------------------------------------------------------------------
-- Create the counter_sum materialized view (if missing) and list views.
local dump = require('utils').dump
local driver = require "luasql.postgres"
local env = assert (driver.postgres())
local con = assert(env:connect("postgresql://MATERIALIZE_USERNAME:MATERIALIZE_PASSWORD@MATERIALIZE_HOST:6875/materialize?sslmode=require"))

-- Robustness fix: assert on execute so a failed CREATE aborts loudly
-- instead of silently continuing to SHOW VIEWS.
assert (con:execute[[
    CREATE MATERIALIZED VIEW IF NOT EXISTS counter_sum AS
    SELECT sum(counter)
    FROM counter
]])

local cur = assert (con:execute"SHOW VIEWS")
-- Fetch rows as records keyed by column name until exhausted.
local row = cur:fetch({}, 'a')
while row do
    print(dump(row))
    row = cur:fetch({}, 'a')
end

cur:close()
con:close()
env:close()
22 |
--------------------------------------------------------------------------------
/connection-examples/nodejs/README.md:
--------------------------------------------------------------------------------
1 | # Materialize + NodeJS Example
2 |
3 | You connect to Materialize the same way you connect to [PostgreSQL with `node-postgres`](https://node-postgres.com/features/connecting).
4 |
5 | Install the dependencies:
6 |
7 | ```
8 | npm install pg
9 | ```
10 |
11 | ### Examples:
12 |
13 | - [Connection](./connection.js)
14 | - [Stream](./subscribe.js)
15 | - [Query](./query.js)
16 | - [Insert data into tables](./insert.js)
17 | - [Manage sources](./source.js)
18 | - [Manage Views](./view.js)
--------------------------------------------------------------------------------
/connection-examples/nodejs/connection.js:
--------------------------------------------------------------------------------
const { Client } = require('pg');

// Connection settings for the Materialize region (SSL required).
const client = new Client({
  user: "MATERIALIZE_USERNAME",
  password: "MATERIALIZE_PASSWORD",
  host: "MATERIALIZE_HOST",
  port: 6875,
  database: 'materialize',
  ssl: true
});

/*
  Alternatively, you can use the following syntax:
  const client = new Client('postgres://materialize@localhost:6875/materialize');
*/

async function main() {
  await client.connect();
  /* Work with Materialize */
}

// Robustness fix: report connection failures instead of crashing with an
// unhandled promise rejection.
main().catch(console.error);
23 |
--------------------------------------------------------------------------------
/connection-examples/nodejs/insert.js:
--------------------------------------------------------------------------------
const { Client } = require('pg');

// Bug fix: the credential placeholders were unquoted identifiers, which
// throws a ReferenceError at load time. The sibling examples quote them.
const client = new Client({
  user: "MATERIALIZE_USERNAME",
  password: "MATERIALIZE_PASSWORD",
  host: "MATERIALIZE_HOST",
  port: 6875,
  database: 'materialize',
  ssl: true
});

// Parameterized insert: $1/$2 are bound server-side, no manual quoting.
const text = 'INSERT INTO countries(code, name) VALUES($1, $2);';
const values = ['GH', 'GHANA'];

async function main() {
  await client.connect();
  const res = await client.query(text, values);
  console.log(res);
}

// Surface failures instead of an unhandled promise rejection.
main().catch(console.error);
--------------------------------------------------------------------------------
/connection-examples/nodejs/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "dependencies": {
3 | "pg": "^8.7.3"
4 | }
5 | }
6 |
--------------------------------------------------------------------------------
/connection-examples/nodejs/query.js:
--------------------------------------------------------------------------------
const { Client } = require('pg');

// Connection settings for the Materialize region (SSL required).
const client = new Client({
  user: "MATERIALIZE_USERNAME",
  password: "MATERIALIZE_PASSWORD",
  host: "MATERIALIZE_HOST",
  port: 6875,
  database: 'materialize',
  ssl: true
});

// Run a one-off query against the counter_sum view and print its rows.
async function main() {
  await client.connect();
  const { rows } = await client.query('SELECT * FROM counter_sum');
  console.log(rows);
}

main();
--------------------------------------------------------------------------------
/connection-examples/nodejs/source.js:
--------------------------------------------------------------------------------
const { Client } = require('pg');

// Connection settings for the Materialize region (SSL required).
const client = new Client({
  user: "MATERIALIZE_USERNAME",
  password: "MATERIALIZE_PASSWORD",
  host: "MATERIALIZE_HOST",
  port: 6875,
  database: 'materialize',
  ssl: true
});

// Create a load-generator source; IF NOT EXISTS keeps the script
// idempotent across runs.
async function main() {
  await client.connect();
  const result = await client.query(
    `CREATE SOURCE IF NOT EXISTS counter
    FROM LOAD GENERATOR COUNTER
    (TICK INTERVAL '500ms')
    WITH (SIZE = '3xsmall');`
  );
  console.log(result);
}

main();
--------------------------------------------------------------------------------
/connection-examples/nodejs/state.js:
--------------------------------------------------------------------------------
// Tracks a multiset of JSON-encoded values driven by {value, diff}
// updates coming from a SUBSCRIBE stream.
class State {
  #state;
  #timestamp;
  #valid;
  #history;

  constructor(collectHistory) {
    this.#state = new Map();
    this.#timestamp = 0;
    this.#valid = true;
    // History stays undefined unless explicitly requested.
    if (collectHistory) {
      this.#history = [];
    }
  }

  // Flatten the multiset: each value appears once per unit of its count.
  getState() {
    const list = [];

    for (const [key, count] of this.#state) {
      const value = JSON.parse(key);
      for (let i = 0; i < count; i++) {
        list.push(value);
      }
    }

    return list;
  }

  // All processed updates, or undefined when history is disabled.
  getHistory() {
    return this.#history;
  }

  // Reject updates once invalid; invalidate on timestamp regression.
  #validate(timestamp) {
    if (!this.#valid) {
      throw new Error("Invalid state.");
    }
    if (timestamp < this.#timestamp) {
      console.error("Invalid timestamp.");
      this.#valid = false;
      throw new Error(
        `Update with timestamp (${timestamp}) is lower than the last timestamp (${
          this.#timestamp
        }). Invalid state.`
      );
    }
  }

  // Fold one update into the multiset keyed by the JSON encoding of its
  // value; entries whose count drops to zero or below are removed.
  #process({ value: rawValue, diff }) {
    const key = JSON.stringify(rawValue);
    const count = (this.#state.get(key) ?? 0) + diff;

    if (count <= 0) {
      this.#state.delete(key);
    } else {
      this.#state.set(key, count);
    }

    if (this.#history) {
      this.#history.push({ value: rawValue, diff });
    }
  }

  // Apply a batch at the given timestamp; empty batches are no-ops.
  update(updates, timestamp) {
    if (updates.length === 0) {
      return;
    }
    this.#validate(timestamp);
    this.#timestamp = timestamp;
    for (const u of updates) {
      this.#process(u);
    }
  }
}

module.exports = State;
--------------------------------------------------------------------------------
/connection-examples/nodejs/subscribe.js:
--------------------------------------------------------------------------------
1 | const { Client } = require("pg");
2 | const State = require("./state");
3 |
4 | async function main() {
5 | const client = new Client({
6 | user: "MATERIALIZE_USERNAME",
7 | password: "MATERIALIZE_PASSWORD",
8 | host: "MATERIALIZE_HOST",
9 | port: 6875,
10 | database: 'materialize',
11 | ssl: true
12 | });
13 | await client.connect();
14 |
15 | await client.query("BEGIN");
16 | await client.query("DECLARE c CURSOR FOR SUBSCRIBE counter_sum WITH (PROGRESS)");
17 |
18 | let buffer = [];
19 | const state = new State();
20 |
21 | // Loop indefinitely
22 | while (true) {
23 | const { rows } = await client.query('FETCH ALL c');
24 | rows.forEach(row => {
25 | // Map row fields
26 | const {
27 | mz_timestamp: ts,
28 | mz_progressed: progress,
29 | mz_diff: diff,
30 | sum,
31 | } = row;
32 |
33 | // When a progress is detected, get the last values
34 | if (progress) {
35 | if (buffer.length > 0) {
36 | try {
37 | state.update(buffer, ts);
38 | } catch (err) {
39 | console.error(err);
40 | } finally {
41 | console.log("State: ", state.getState());
42 | buffer.splice(0, buffer.length);
43 | }
44 | }
45 | } else {
46 | buffer.push({ value: { sum }, diff });
47 | }
48 | });
49 | }
50 | }
51 |
52 | main();
--------------------------------------------------------------------------------
/connection-examples/nodejs/view.js:
--------------------------------------------------------------------------------
1 | const { Client } = require("pg");
2 |
3 | const client = new Client({
4 | user: "MATERIALIZE_USERNAME",
5 | password: "MATERIALIZE_PASSWORD",
6 | host: "MATERIALIZE_HOST",
7 | port: 6875,
8 | database: "materialize",
9 | ssl: true,
10 | });
11 |
12 | async function main() {
13 | await client.connect();
14 | const res = await client.query(
15 | `CREATE MATERIALIZED VIEW IF NOT EXISTS counter_sum AS
16 | SELECT sum(counter)
17 | FROM counter;`
18 | );
19 | console.log(res);
20 | }
21 |
22 | main();
23 |
--------------------------------------------------------------------------------
/connection-examples/php/README.md:
--------------------------------------------------------------------------------
1 | # Materialize + PHP Example
2 |
3 | You connect to Materialize the same way you [connect to PostgreSQL with `PDO_PGSQL`](https://www.php.net/manual/en/ref.pdo-pgsql.connection.php).
4 |
5 | ### Examples:
6 |
7 | - [Connection](./connection.php)
8 | - [Stream](./subscribe.php)
9 | - [Query](./query.php)
10 | - [Insert data into tables](./insert.php)
11 | - [Manage sources](./source.php)
12 | - [Manage Views](./views.php)
--------------------------------------------------------------------------------
/connection-examples/php/connection.php:
--------------------------------------------------------------------------------
1 | PDO::ERRMODE_EXCEPTION]
14 | );
15 | } catch (PDOException $e) {
16 | die($e->getMessage());
17 | }
18 | }
19 |
20 | $connection = connect('MATERIALIZE_HOST', 6875, 'materialize', 'MATERIALIZE_USERNAME', 'MATERIALIZE_PASSWORD');
21 |
22 | echo 'Connected to Materialize!';
23 |
--------------------------------------------------------------------------------
/connection-examples/php/insert.php:
--------------------------------------------------------------------------------
1 | prepare($sql);
7 | $statement->execute(['United States', 'US']);
8 | $statement->execute(['Canada', 'CA']);
9 | $statement->execute(['Mexico', 'MX']);
10 | $statement->execute(['Germany', 'DE']);
11 |
12 | $countStmt = "SELECT COUNT(*) FROM countries";
13 | $count = $connection->query($countStmt);
14 | while (($row = $count->fetch(PDO::FETCH_ASSOC)) !== false) {
15 | var_dump($row);
16 | }
--------------------------------------------------------------------------------
/connection-examples/php/query.php:
--------------------------------------------------------------------------------
1 | query($sql);
7 |
8 | while (($row = $statement->fetch(PDO::FETCH_ASSOC)) !== false) {
9 | var_dump($row);
10 | }
--------------------------------------------------------------------------------
/connection-examples/php/source.php:
--------------------------------------------------------------------------------
1 | prepare($sql);
11 | $statement->execute();
12 |
13 | $sources = "SHOW SOURCES";
14 | $statement = $connection->query($sources);
15 | $result = $statement->fetchAll(PDO::FETCH_ASSOC);
16 | var_dump($result);
--------------------------------------------------------------------------------
/connection-examples/php/state.php:
--------------------------------------------------------------------------------
1 | value = $value;
9 | $this->diff = $diff;
10 | }
11 |
12 | public function getValue() {
13 | return $this->value;
14 | }
15 |
16 | public function getDiff() {
17 | return $this->diff;
18 | }
19 | }
20 |
class State {
    /** @var array Map of JSON-encoded value => positive occurrence count. */
    private $state;
    /** @var int Timestamp of the last applied batch of updates. */
    private $timestamp;
    /** @var bool Set to false permanently once an out-of-order update is seen. */
    private $valid;
    /** @var array|null Every processed Update, or null when history is disabled. */
    private $history;

    /**
     * Maintains the materialized contents of a SUBSCRIBE stream as a
     * counted multiset keyed by JSON-encoded row value.
     *
     * @param bool $collectHistory Keep every processed Update in $history.
     */
    public function __construct($collectHistory = false) {
        $this->state = array();
        $this->timestamp = 0;
        $this->valid = true;

        // History is disabled (null) unless explicitly requested.
        $this->history = null;
        if ($collectHistory) {
            $this->history = array();
        }
    }

    /** @return array|null The processed updates, or null when history is disabled. */
    public function get_history() {
        return $this->history;
    }

    /**
     * Expand the counted state back into a flat list of decoded rows.
     *
     * @return array
     */
    public function get_state() {
        $list = array();

        foreach ($this->state as $key => $value) {
            $clone = json_decode($key);
            // Append the decoded row once per counted occurrence.
            // NOTE(review): the same decoded object is appended $value times,
            // so callers must not mutate individual entries of the result.
            $i = 0;
            while ($i < $value) {
                $list[] = $clone;
                $i++;
            }
        }

        return $list;
    }

    /**
     * Throw if the state is already invalid or the timestamp goes backwards.
     *
     * @param int $timestamp
     * @throws Exception
     */
    public function validate($timestamp) {
        if (!$this->valid) {
            throw new Exception("Invalid state.");
        } elseif ($timestamp < $this->timestamp) {
            echo "Invalid timestamp.";
            $this->valid = false;
            throw new Exception("Update with timestamp ($timestamp) is lower than the last timestamp ({$this->timestamp}). Invalid state.");
        }
    }

    /**
     * Apply a single update to the counted state.
     *
     * @param Update $update
     */
    public function process(Update $update) {
        $value = json_encode($update->getValue());
        $diff = $update->getDiff();

        if (isset($this->state[$value])) {
            $count = $this->state[$value] + $diff;
        } else {
            $count = $diff;
        }

        if ($count <= 0) {
            // A non-positive count removes the value entirely
            // (unset on a missing key is a no-op in PHP, so this is safe).
            unset($this->state[$value]);
        } else {
            $this->state[$value] = $count;
        }

        // Add the update to the history array
        if ($this->history !== null) {
            $this->history[] = $update;
        }
    }

    /**
     * Validate the timestamp, then apply a batch of updates.
     *
     * @param Update[] $updates
     * @param int $timestamp
     */
    public function update($updates, $timestamp) {
        if (count($updates) > 0) {
            $this->validate($timestamp);
            $this->timestamp = $timestamp;

            foreach ($updates as $update) {
                $this->process($update);
            }
        }
    }
}
101 |
--------------------------------------------------------------------------------
/connection-examples/php/subscribe.php:
--------------------------------------------------------------------------------
1 | beginTransaction();
8 | // Declare a cursor
9 | $statement = $connection->prepare('DECLARE c CURSOR FOR SUBSCRIBE (SELECT sum FROM counter_sum) WITH (PROGRESS);');
10 | // Execute the statement
11 | $statement->execute();
12 |
13 | // Create a new State object
14 | $state = new State(true);
15 |
16 | // Buffer updates
17 | $buffer = array();
18 |
19 | /* Fetch all of the remaining rows in the result set */
20 | while (true) {
21 | $subscribe = $connection->prepare('FETCH ALL c');
22 | $subscribe->execute();
23 | $result = $subscribe->fetchAll(PDO::FETCH_ASSOC);
24 |
25 | // Iterate over the results using a foreach loop
26 | foreach ($result as $row) {
27 | // Access the values of each column in the row using the column name
28 | $ts = $row['mz_timestamp'];
29 | $progressed = $row['mz_progressed'];
30 | $diff = $row['mz_diff'];
31 | $sum = $row['sum'];
32 |
33 | if ($progressed == 1) {
34 | $state->update($buffer, $ts);
35 | $buffer = array();
36 | print_r($state->get_state());
37 | } else {
38 | array_push($buffer, new Update(
39 | array('sum' => $sum),
40 | $diff
41 | ));
42 | }
43 | }
44 | }
--------------------------------------------------------------------------------
/connection-examples/php/views.php:
--------------------------------------------------------------------------------
1 | prepare($sql);
10 | $statement->execute();
11 |
12 | $views = "SHOW VIEWS";
13 | $statement = $connection->query($views);
14 | $result = $statement->fetchAll(PDO::FETCH_ASSOC);
15 | var_dump($result);
--------------------------------------------------------------------------------
/connection-examples/python/README.md:
--------------------------------------------------------------------------------
1 | # Materialize + Python Example
2 |
3 | You connect to Materialize the same way you [connect to PostgreSQL with `psycopg2`](https://www.psycopg.org/docs/usage.html).
4 |
5 | To install [`psycopg2`](https://pypi.org/project/psycopg2/) run:
6 |
7 | ```
8 | pip install psycopg2
9 | ```
10 |
11 | ### Examples:
12 |
13 | - [Connection](./connection.py)
14 | - [Stream](./subscribe.py)
15 | - [Query](./query.py)
16 | - [Insert data into tables](./insert.py)
17 | - [Manage sources](./source.py)
18 | - [Manage Views](./view.py)
19 |
20 | ### `pg8000` Example
21 |
22 | Alternatively, you can use [`pg8000`](https://pypi.org/project/pg8000/) to connect to Materialize.
23 |
24 | To install [`pg8000`](https://pypi.org/project/pg8000/) run:
25 |
26 | ```
27 | pip install pg8000
28 | ```
29 |
30 | - [pg8000](./pg8000)
--------------------------------------------------------------------------------
/connection-examples/python/connection.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3

# Connect to Materialize with psycopg2.
# Replace the MATERIALIZE_* placeholders with real credentials before running.

import psycopg2
import sys  # NOTE(review): unused in this snippet; kept for parity with the other examples

# libpq-style keyword/value DSN; TLS is required (sslmode=require).
dsn = "user=MATERIALIZE_USERNAME password=MATERIALIZE_PASSWORD host=MATERIALIZE_HOST port=6875 dbname=materialize sslmode=require"
# Alternative syntax:
# dsn = "postgresql://MATERIALIZE_USERNAME:MATERIALIZE_PASSWORD@MATERIALIZE_HOST:6875/materialize?sslmode=require"
conn = psycopg2.connect(dsn)
10 |
--------------------------------------------------------------------------------
/connection-examples/python/insert.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3

# Insert a batch of rows into the countries table with psycopg2,
# then read the row count back.

import psycopg2
import sys  # NOTE(review): unused in this snippet; kept for parity with the other examples

dsn = "user=MATERIALIZE_USERNAME password=MATERIALIZE_PASSWORD host=MATERIALIZE_HOST port=6875 dbname=materialize sslmode=require"
conn = psycopg2.connect(dsn)

countries = [
    ('United States', 'US'),
    ('Canada', 'CA'),
    ('Mexico', 'MX'),
    ('Germany', 'DE'),
]

# Context-managed cursor + a single executemany() replaces the original
# four separate execute() calls and the manually closed cursor.
with conn.cursor() as cur:
    cur.executemany("INSERT INTO countries (name, code) VALUES (%s, %s)", countries)
conn.commit()

with conn.cursor() as cur:
    cur.execute("SELECT COUNT(*) FROM countries;")
    print(cur.fetchone())

conn.close()
--------------------------------------------------------------------------------
/connection-examples/python/pg8000/README.md:
--------------------------------------------------------------------------------
1 | # Materialize + Python `pg8000` Example
2 |
3 | You can use [`pg8000`](https://pypi.org/project/pg8000/) to connect to Materialize.
4 |
5 | To install [`pg8000`](https://pypi.org/project/pg8000/) run:
6 |
7 | ```
8 | pip install pg8000
9 | ```
10 |
11 |
12 | ### Examples:
13 |
14 | - [Connection](./connection.py)
15 | - [Stream](./subscribe.py)
16 | - [Query](./query.py)
17 | - [Insert data into tables](./insert.py)
18 | - [Manage sources](./source.py)
19 | - [Manage Views](./view.py)
20 |
--------------------------------------------------------------------------------
/connection-examples/python/pg8000/connection.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3

# Connect to Materialize with pg8000 over TLS.
# Replace the MATERIALIZE_* placeholders with real credentials before running.

import pg8000.native
import ssl

# Default context verifies the server certificate against system CAs.
ssl_context = ssl.create_default_context()

# NOTE(review): the native submodule is imported but the DB-API entry point
# `pg8000.connect` is what is called (importing the submodule also binds
# `pg8000`) — confirm which API is intended.
conn = pg8000.connect(host="MATERIALIZE_HOST", port=6875, user="MATERIALIZE_USERNAME", password="MATERIALIZE_PASSWORD", database="materialize", ssl_context=ssl_context)
9 |
--------------------------------------------------------------------------------
/connection-examples/python/pg8000/insert.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3

# Insert rows into the countries table with pg8000, then read them back.

import pg8000.native
import ssl

ssl_context = ssl.create_default_context()

conn = pg8000.connect(host="MATERIALIZE_HOST", port=6875, user="MATERIALIZE_USERNAME", password="MATERIALIZE_PASSWORD", database="materialize", ssl_context=ssl_context)

# Insert data
# NOTE(review): values are hard-coded literals here; use parameterized
# statements for anything derived from untrusted input.
print('Insert data')
insert_query = "INSERT INTO countries (name, code) VALUES ('United States', 'US')"
conn.run(insert_query)
insert_query = "INSERT INTO countries (name, code) VALUES ('Canada', 'CA')"
conn.run(insert_query)
print('Select data')
countries = conn.run("SELECT * FROM countries;")
print(countries)
--------------------------------------------------------------------------------
/connection-examples/python/pg8000/query.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3

# Query the countries table with pg8000 and print all rows.

import pg8000.native
import ssl

ssl_context = ssl.create_default_context()

conn = pg8000.connect(host="MATERIALIZE_HOST", port=6875, user="MATERIALIZE_USERNAME", password="MATERIALIZE_PASSWORD", database="materialize", ssl_context=ssl_context)

# conn.run returns the full result set eagerly as a list of rows.
countries = conn.run("SELECT * FROM countries;")
print(countries)
12 |
--------------------------------------------------------------------------------
/connection-examples/python/pg8000/source.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3

# Create a load-generator source with pg8000 and list existing sources.

import pg8000.native
import ssl

ssl_context = ssl.create_default_context()

conn = pg8000.connect(host="MATERIALIZE_HOST", port=6875, user="MATERIALIZE_USERNAME", password="MATERIALIZE_PASSWORD", database="materialize", ssl_context=ssl_context)

# Counter source ticking every 500ms, on a 3xsmall replica.
conn.run("""CREATE SOURCE IF NOT EXISTS counter
FROM LOAD GENERATOR COUNTER
(TICK INTERVAL '500ms')
WITH (SIZE = '3xsmall');""")
clusters = conn.run("SHOW SOURCES")  # NOTE(review): variable holds sources, not clusters
print(clusters)
--------------------------------------------------------------------------------
/connection-examples/python/pg8000/state.py:
--------------------------------------------------------------------------------
from typing import TypeVar, List, Union
import json

T = TypeVar('T')

class Update:
    """A single change event from a SUBSCRIBE stream: a value and its count delta."""

    def __init__(self, value: T, diff: int):
        self.value = value
        self.diff = diff

    def __getitem__(self, key: str):
        # State.process() reads updates by subscript (update['value'],
        # update['diff']); supporting the same protocol here makes Update
        # instances interchangeable with the plain dicts callers pass in.
        return getattr(self, key)

class State:
    """Maintains the current multiset of rows emitted by a SUBSCRIBE stream.

    Keys of ``state`` are JSON-encoded row values; values are occurrence
    counts. ``update()`` applies a batch of ``{'value': ..., 'diff': ...}``
    updates at a given timestamp.
    """

    def __init__(self, collect_history: bool = False):
        self.state = {}
        self.timestamp = 0
        self.valid = True
        # history keeps every processed update when enabled, otherwise None.
        self.history = [] if collect_history else None

    def get_state(self) -> List[T]:
        """Expand the counted state back into a flat list of decoded rows."""
        result = []
        for key, value in self.state.items():
            clone = json.loads(key)
            # NOTE: the same decoded object is appended `value` times;
            # callers must not mutate individual rows of the result.
            for _ in range(int(value)):
                result.append(clone)
        return result

    def get_history(self):
        """Return the processed updates, or None when history is disabled."""
        return self.history

    def validate(self, timestamp: int):
        """Raise if the state is already invalid or the timestamp goes backwards."""
        if not self.valid:
            raise Exception("Invalid state.")
        elif timestamp < self.timestamp:
            print("Invalid timestamp.")
            self.valid = False
            raise Exception(f"Update with timestamp ({timestamp}) is lower than the last timestamp ({self.timestamp}). Invalid state.")

    def process(self, update: Update):
        """Apply a single update to the counted state."""
        value = json.dumps(update['value'])
        count = self.state.get(value, 0) + update['diff']

        if count <= 0:
            # Bug fix: pop() tolerates a retraction for a value that is not
            # present; the original `del self.state[value]` raised KeyError.
            self.state.pop(value, None)
        else:
            self.state[value] = count

        if self.history is not None:
            self.history.append(update)

    def update(self, updates: List[Update], timestamp: int):
        """Validate the timestamp, then apply a batch of updates."""
        if len(updates) > 0:
            self.validate(timestamp)
            self.timestamp = timestamp
            for update in updates:
                self.process(update)
55 |
--------------------------------------------------------------------------------
/connection-examples/python/pg8000/subscribe.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3

# Stream changes from counter_sum with pg8000 + SUBSCRIBE, maintaining a
# local State that mirrors the view's contents.

import pg8000.native
import ssl
from state import State

ssl_context = ssl.create_default_context()

conn = pg8000.connect(host="MATERIALIZE_HOST", port=6875, user="MATERIALIZE_USERNAME", password="MATERIALIZE_PASSWORD", database="materialize", ssl_context=ssl_context)

# SUBSCRIBE cursors must live inside a transaction.
conn.run("BEGIN")
conn.run("DECLARE c CURSOR FOR SUBSCRIBE (SELECT sum FROM counter_sum) WITH (PROGRESS);")
state = State()
updated = False

# infinite loop to keep the cursor open
while True:
    results = conn.run("FETCH ALL FROM c")
    for row in results:
        # Map row fields
        ts = row[0]
        progress = row[1]
        diff = row[2]

        # When a progress is detected, get the last values
        if progress:
            if updated:
                updated = False
                print(state.get_state())
        else:
            rowData = { "sum": int(row[3]) }

            # Update the state with the last data
            updated = True
            try:
                state.update([{
                    'value': rowData,
                    'diff': float(diff),
                }], float(ts))
            except Exception as err:
                print(err)
42 |
43 |
44 |
--------------------------------------------------------------------------------
/connection-examples/python/pg8000/view.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3

# Create the counter_sum materialized view with pg8000 and read it back.

import pg8000.native
import ssl

ssl_context = ssl.create_default_context()

conn = pg8000.connect(host="MATERIALIZE_HOST", port=6875, user="MATERIALIZE_USERNAME", password="MATERIALIZE_PASSWORD", database="materialize", ssl_context=ssl_context)

print('Create a view: counter_sum')
create_view_query = """CREATE MATERIALIZED VIEW IF NOT EXISTS counter_sum AS
SELECT sum(counter)
FROM counter;"""
conn.run(create_view_query)

print('Select data from view')
# Bug fix: query the view created above (counter_sum); the original
# selected from an unrelated relation, country_codes.
counter_sum = conn.run("SELECT * FROM counter_sum;")
print(counter_sum)
--------------------------------------------------------------------------------
/connection-examples/python/query.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3

# Run a one-off SELECT against Materialize with psycopg2 and print each row.

import psycopg2
import sys  # NOTE(review): unused in this snippet

dsn = "user=MATERIALIZE_USERNAME password=MATERIALIZE_PASSWORD host=MATERIALIZE_HOST port=6875 dbname=materialize sslmode=require"
conn = psycopg2.connect(dsn)

# Context-managed cursor: closed automatically on exit.
with conn.cursor() as cur:
    cur.execute("SELECT * FROM my_view;")
    # psycopg2 cursors are iterable; each row is a tuple.
    for row in cur:
        print(row)
--------------------------------------------------------------------------------
/connection-examples/python/source.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3

# Create a load-generator source in Materialize and list existing sources.

import psycopg2
import sys  # NOTE(review): unused in this snippet

dsn = "user=MATERIALIZE_USERNAME password=MATERIALIZE_PASSWORD host=MATERIALIZE_HOST port=6875 dbname=materialize sslmode=require"
conn = psycopg2.connect(dsn)
# autocommit: the DDL below takes effect immediately, no explicit COMMIT.
conn.autocommit = True

with conn.cursor() as cur:
    # Counter source ticking every 500ms, on a 3xsmall replica.
    cur.execute("""CREATE SOURCE IF NOT EXISTS counter
    FROM LOAD GENERATOR COUNTER
    (TICK INTERVAL '500ms')
    WITH (SIZE = '3xsmall');""")

with conn.cursor() as cur:
    cur.execute("SHOW SOURCES")
    # Only the first result row is printed.
    print(cur.fetchone())
--------------------------------------------------------------------------------
/connection-examples/python/state.py:
--------------------------------------------------------------------------------
from typing import TypeVar, List, Union
import json

T = TypeVar('T')

class Update:
    """A single change event from a SUBSCRIBE stream: a value and its count delta."""

    def __init__(self, value: T, diff: int):
        self.value = value
        self.diff = diff

    def __getitem__(self, key: str):
        # State.process() reads updates by subscript (update['value'],
        # update['diff']); supporting the same protocol here makes Update
        # instances interchangeable with the plain dicts callers pass in.
        return getattr(self, key)

class State:
    """Maintains the current multiset of rows emitted by a SUBSCRIBE stream.

    Keys of ``state`` are JSON-encoded row values; values are occurrence
    counts. ``update()`` applies a batch of ``{'value': ..., 'diff': ...}``
    updates at a given timestamp.
    """

    def __init__(self, collect_history: bool = False):
        self.state = {}
        self.timestamp = 0
        self.valid = True
        # history keeps every processed update when enabled, otherwise None.
        self.history = [] if collect_history else None

    def get_state(self) -> List[T]:
        """Expand the counted state back into a flat list of decoded rows."""
        result = []
        for key, value in self.state.items():
            clone = json.loads(key)
            # NOTE: the same decoded object is appended `value` times;
            # callers must not mutate individual rows of the result.
            for _ in range(int(value)):
                result.append(clone)
        return result

    def get_history(self):
        """Return the processed updates, or None when history is disabled."""
        return self.history

    def validate(self, timestamp: int):
        """Raise if the state is already invalid or the timestamp goes backwards."""
        if not self.valid:
            raise Exception("Invalid state.")
        elif timestamp < self.timestamp:
            print("Invalid timestamp.")
            self.valid = False
            raise Exception(f"Update with timestamp ({timestamp}) is lower than the last timestamp ({self.timestamp}). Invalid state.")

    def process(self, update: Update):
        """Apply a single update to the counted state."""
        value = json.dumps(update['value'])
        count = self.state.get(value, 0) + update['diff']

        if count <= 0:
            # Bug fix: pop() tolerates a retraction for a value that is not
            # present; the original `del self.state[value]` raised KeyError.
            self.state.pop(value, None)
        else:
            self.state[value] = count

        if self.history is not None:
            self.history.append(update)

    def update(self, updates: List[Update], timestamp: int):
        """Validate the timestamp, then apply a batch of updates."""
        if len(updates) > 0:
            self.validate(timestamp)
            self.timestamp = timestamp
            for update in updates:
                self.process(update)
55 |
--------------------------------------------------------------------------------
/connection-examples/python/subscribe-psycopg3.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3

# Stream changes from counter_sum with psycopg (v3) + SUBSCRIBE, using
# cursor.stream() instead of an explicit DECLARE/FETCH loop.

import psycopg
import sys  # NOTE(review): unused in this snippet
from state import State

dsn = "user=MATERIALIZE_USERNAME password=MATERIALIZE_PASSWORD host=MATERIALIZE_HOST port=6875 dbname=materialize sslmode=require"
conn = psycopg.connect(dsn)

with conn.cursor() as cur:
    state = State()
    updated = False
    # stream() yields rows as the server produces them, indefinitely;
    # PROGRESS adds mz_progressed marker rows at consistent timestamps.
    for row in cur.stream("SUBSCRIBE (SELECT sum FROM counter_sum) WITH (PROGRESS);"):
        # Map row fields
        ts = row[0]
        progress = row[1]
        diff = row[2]

        # When a progress is detected, get the last values
        if progress:
            if updated:
                updated = False
                print(state.get_state())
        else:
            rowData = { "sum": int(row[3]) }

            # Update the state with the last data
            updated = True
            try:
                state.update([{
                    'value': rowData,
                    'diff': float(diff),
                }], float(ts))
            except Exception as err:
                print(err)
--------------------------------------------------------------------------------
/connection-examples/python/subscribe.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3

# Stream changes from counter_sum with psycopg2 + SUBSCRIBE, maintaining a
# local State that mirrors the view's contents.

import psycopg2
import sys  # NOTE(review): unused in this snippet
from state import State

dsn = "user=MATERIALIZE_USERNAME password=MATERIALIZE_PASSWORD host=MATERIALIZE_HOST port=6875 dbname=materialize sslmode=require"
conn = psycopg2.connect(dsn)

with conn.cursor() as cur:
    # PROGRESS adds mz_progressed marker rows at consistent timestamps.
    cur.execute("DECLARE c CURSOR FOR SUBSCRIBE (SELECT sum FROM counter_sum) WITH (PROGRESS);")

    state = State()
    updated = False
    while True:
        cur.execute("FETCH ALL c")
        for row in cur:
            # Map row fields
            ts = row[0]
            progress = row[1]
            diff = row[2]

            # When a progress is detected, get the last values
            if progress:
                if updated:
                    updated = False
                    print(state.get_state())
            else:
                rowData = { "sum": int(row[3]) }

                # Update the state with the last data
                updated = True
                try:
                    state.update([{
                        'value': rowData,
                        'diff': float(diff),
                    }], float(ts))
                except Exception as err:
                    print(err)
--------------------------------------------------------------------------------
/connection-examples/python/view.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3

# Create the counter_sum materialized view and list existing views.

import psycopg2
import sys  # NOTE(review): unused in this snippet

dsn = "user=MATERIALIZE_USERNAME password=MATERIALIZE_PASSWORD host=MATERIALIZE_HOST port=6875 dbname=materialize sslmode=require"
conn = psycopg2.connect(dsn)
# autocommit: the DDL below takes effect immediately, no explicit COMMIT.
conn.autocommit = True

with conn.cursor() as cur:
    cur.execute("""CREATE MATERIALIZED VIEW IF NOT EXISTS counter_sum AS
    SELECT sum(counter)
    FROM counter;""")

with conn.cursor() as cur:
    cur.execute("SHOW VIEWS")
    # Only the first result row is printed.
    print(cur.fetchone())
--------------------------------------------------------------------------------
/connection-examples/ruby/README.md:
--------------------------------------------------------------------------------
1 | # Materialize + Ruby Example
2 |
3 | You connect to Materialize the same way you [connect to PostgreSQL with `pg`](https://github.com/ged/ruby-pg). If you don’t have a `pg` gem, you can install it with:
4 |
5 | ```
6 | gem install pg
7 | ```
8 |
9 | ### Examples:
10 |
11 | - [Connection](./connection.rb)
12 | - [Stream](./subscribe.rb)
13 | - [Query](./query.rb)
14 | - [Insert data into tables](./insert.rb)
15 | - [Manage sources](./source.rb)
16 | - [Manage Views](./view.rb)
17 |
--------------------------------------------------------------------------------
/connection-examples/ruby/connection.rb:
--------------------------------------------------------------------------------
require 'pg'

# Open a TLS-required connection to Materialize.
# Replace the MATERIALIZE_* placeholders with real credentials.
conn = PG.connect(
  host: "MATERIALIZE_HOST",
  port: 6875,
  dbname: "materialize",
  user: "MATERIALIZE_USERNAME",
  password: "MATERIALIZE_PASSWORD",
  sslmode: 'require'
)
--------------------------------------------------------------------------------
/connection-examples/ruby/insert.rb:
--------------------------------------------------------------------------------
require 'pg'

conn = PG.connect(
  host: "MATERIALIZE_HOST",
  port: 6875,
  dbname: "materialize",
  user: "MATERIALIZE_USERNAME",
  password: "MATERIALIZE_PASSWORD",
  sslmode: 'require'
)

# NOTE(review): the value is a hard-coded literal; use exec_params for
# anything derived from untrusted input.
conn.exec("INSERT INTO my_table (my_column) VALUES ('some_value')")

# Read the table back and print every row (rows are hashes keyed by column name).
res = conn.exec('SELECT * FROM my_table')

res.each do |row|
  puts row
end
--------------------------------------------------------------------------------
/connection-examples/ruby/query.rb:
--------------------------------------------------------------------------------
require 'pg'

conn = PG.connect(
  host: "MATERIALIZE_HOST",
  port: 6875,
  dbname: "materialize",
  user: "MATERIALIZE_USERNAME",
  password: "MATERIALIZE_PASSWORD",
  sslmode: 'require'
)

# One-off query against the counter_sum view; rows print as hashes.
res = conn.exec('SELECT * FROM counter_sum')

res.each do |row|
  puts row
end
--------------------------------------------------------------------------------
/connection-examples/ruby/source.rb:
--------------------------------------------------------------------------------
require 'pg'

conn = PG.connect(
  host: "MATERIALIZE_HOST",
  port: 6875,
  dbname: "materialize",
  user: "MATERIALIZE_USERNAME",
  password: "MATERIALIZE_PASSWORD",
  sslmode: 'require'
)

# Create a source
# Counter source ticking every 500ms, on a 3xsmall replica.
src = conn.exec(
  "CREATE SOURCE IF NOT EXISTS counter
  FROM LOAD GENERATOR COUNTER
  (TICK INTERVAL '500ms')
  WITH (SIZE = '3xsmall')
  "
);

puts src.inspect

# Show the source
res = conn.exec("SHOW SOURCES")
res.each do |row|
  puts row
end
--------------------------------------------------------------------------------
/connection-examples/ruby/state.rb:
--------------------------------------------------------------------------------
# JSON.parse / JSON.generate are used below but 'json' was never required;
# loading this file standalone raised NameError for JSON.
require 'json'

# Maintains the materialized contents of a SUBSCRIBE stream as a counted
# multiset keyed by JSON-encoded row value.
class State
  Update = Struct.new(:value, :diff)

  def initialize(collect_history)
    @state = {}          # JSON-encoded value => positive occurrence count
    @timestamp = 0       # timestamp of the last applied batch
    @valid = true        # false permanently after an out-of-order update
    @history = collect_history ? [] : nil
  end

  # Expand the counted state back into a flat (frozen) list of decoded rows.
  # NOTE(review): the same decoded object is pushed `value` times, so
  # callers must not mutate individual entries.
  def get_state
    list = []

    @state.each do |key, value|
      clone = JSON.parse(key)
      value.times { list << clone }
    end

    list.freeze
  end

  # The processed updates, or nil when history is disabled.
  def get_history
    @history
  end

  private

  # Raise if the state is already invalid or the timestamp goes backwards.
  def validate(timestamp)
    raise "Invalid state." unless @valid

    if timestamp < @timestamp
      puts "Invalid timestamp."
      @valid = false
      raise "Update with timestamp (#{timestamp}) is lower than the last timestamp (#{@timestamp}). Invalid state."
    end
  end

  # Apply a single update to the counted state.
  def process(update)
    # Count value starts as a NaN
    value = JSON.generate(update[:value])
    count = @state[value].to_i + update[:diff]

    if count <= 0
      @state.delete(value)
    else
      @state[value] = count
    end

    @history&.push(update)
  end

  public

  # Validate the timestamp, then apply a batch of updates.
  def update(updates, timestamp)
    return if updates.empty?

    validate(timestamp)
    @timestamp = timestamp
    updates.each { |update| process(update) }
  end
end
62 |
--------------------------------------------------------------------------------
/connection-examples/ruby/subscribe.rb:
--------------------------------------------------------------------------------
require 'pg'
require './state'

conn = PG.connect(
  host: "MATERIALIZE_HOST",
  port: 6875,
  dbname: "materialize",
  user: "MATERIALIZE_USERNAME",
  password: "MATERIALIZE_PASSWORD",
  sslmode: 'require'
)
# SUBSCRIBE cursors must live inside a transaction.
conn.exec('BEGIN')
conn.exec('DECLARE c CURSOR FOR SUBSCRIBE (SELECT sum FROM counter_sum) WITH (PROGRESS)')

updated = false
state = State.new(false)
buffer = []   # updates accumulated until the next progress marker

# Loop indefinitely
loop do
  conn.exec('FETCH c') do |result|
    result.each do |row|
      # Map row fields
      ts = row["mz_timestamp"]
      progress = row["mz_progressed"]
      diff = row["mz_diff"]
      rowData = { sum: row["sum"] }

      # When a progress is detected, get the state
      # (libpq returns booleans as the string 't'/'f')
      if progress == 't'
        if updated
          updated = false

          state.update(buffer, ts.to_i)
          buffer = []
          puts state.get_state
        end
      else
        # Update the state with the last data
        updated = true
        buffer.push({ value: rowData, diff: diff.to_i })
      end
    end
  end
end
46 |
--------------------------------------------------------------------------------
/connection-examples/ruby/view.rb:
--------------------------------------------------------------------------------
require 'pg'

conn = PG.connect(
  host: "MATERIALIZE_HOST",
  port: 6875,
  dbname: "materialize",
  user: "MATERIALIZE_USERNAME",
  password: "MATERIALIZE_PASSWORD",
  sslmode: 'require'
)

# Create a view
view = conn.exec(
  "CREATE MATERIALIZED VIEW IF NOT EXISTS counter_sum AS
  SELECT sum(counter)
  FROM counter;"
);
puts view.inspect

# Show the view
res = conn.exec("SHOW VIEWS")
res.each do |row|
  puts row
end
--------------------------------------------------------------------------------
/connection-examples/rust/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "rust-materialize"
3 | version = "0.1.0"
4 | edition = "2021"
5 |
6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
7 |
8 | [dependencies]
9 |
10 | postgres = "0.19.3"
--------------------------------------------------------------------------------
/connection-examples/rust/README.md:
--------------------------------------------------------------------------------
1 | # Materialize + Rust Example
2 |
3 | You can connect to Materialize the same way you connect to PostgreSQL, using the [`postgres` crate](https://crates.io/crates/postgres).
4 |
5 | Add the dependencies:
6 |
7 | ```
8 | [dependencies]
9 |
10 | postgres = "0.19.3"
11 | ```
12 |
13 | Run examples:
14 | ```bash
15 | cargo run
16 | ```
17 | ### Examples:
18 |
19 | - [Connection](./src/connection.rs)
20 | - [Stream](./src/subscribe.rs)
21 | - [Query](./src/query.rs)
22 | - [Insert data into tables](./src/insert.rs)
23 | - [Manage sources](./src/source.rs)
24 | - [Manage Views](./src/view.rs)
--------------------------------------------------------------------------------
/connection-examples/rust/src/connection.rs:
--------------------------------------------------------------------------------
1 | use postgres::{Client, NoTls, Error};
2 |
3 | /// Create a client using localhost
4 | pub(crate) fn create_client() -> Result {
5 | let config = "postgres://materialize@localhost:6875/materialize";
6 | Client::connect(config, NoTls)
7 | }
8 |
9 | // ----------------------------------
10 | // Alternative way to create a client
11 | // ----------------------------------
12 | // pub(crate) fn create_client_with_config() -> Result {
13 | // Config::new()
14 | // .host("localhost")
15 | // .port(6875)
16 | // .dbname("materialize")
17 | // .user("materialize")
18 | // .connect(NoTls)
19 | // }
--------------------------------------------------------------------------------
/connection-examples/rust/src/insert.rs:
--------------------------------------------------------------------------------
1 | use postgres::{Error};
2 |
3 | use crate::connection::create_client;
4 |
5 | /// Insert data into the table
6 | pub(crate) fn insert() -> Result {
7 | let mut client = create_client().expect("Error creating client.");
8 |
9 | let code = "GH";
10 | let name = "Ghana";
11 |
12 | client.execute("INSERT INTO countries(code, name) VALUES($1, $2)", &[&code, &name])
13 | }
--------------------------------------------------------------------------------
/connection-examples/rust/src/main.rs:
--------------------------------------------------------------------------------
use insert::insert;
use query::run_query;
use source::create_source;
use table::create_table;
use crate::view::create_materialized_view;
use subscribe::subscribe;

mod insert;
mod query;
mod source;
mod subscribe;
mod view;
mod connection;
mod table;


// Run every example in dependency order: source -> view -> table -> insert,
// then a one-off query followed by a never-returning SUBSCRIBE loop.
fn main() {
    create_source().expect("Error creating source.");
    println!("Source created.");
    create_materialized_view().expect("Error creating view.");
    println!("View created.");
    create_table().expect("Error creating table.");
    println!("Table created.");
    insert().expect("Error running insert.");
    println!("Data inserted.");

    println!("Running query: ");
    run_query();

    // subscribe() loops forever; nothing after this line executes.
    println!("Running subscribe: ");
    subscribe();
}
33 |
--------------------------------------------------------------------------------
/connection-examples/rust/src/query.rs:
--------------------------------------------------------------------------------
1 | use crate::connection::create_client;
2 |
3 | /// Run a query over the table.
4 | pub(crate) fn run_query () {
5 | let mut client = create_client().expect("Error creating client.");
6 |
7 | let results = client.query("SELECT code, name FROM countries;", &[]).expect("Error running query.");
8 |
9 | for row in results {
10 | println!("{:} - {:}", row.get::(0), row.get::(1));
11 | };
12 | }
13 |
--------------------------------------------------------------------------------
/connection-examples/rust/src/source.rs:
--------------------------------------------------------------------------------
1 | use postgres::{Error};
2 |
3 | use crate::connection::create_client;
4 |
5 | /// Creates a PUBNUB source
6 | pub(crate) fn create_source() -> Result {
7 | let mut client = create_client().expect("Error creating client.");
8 |
9 | client.execute("
10 | CREATE SOURCE IF NOT EXISTS market_orders_raw FROM PUBNUB
11 | SUBSCRIBE KEY 'sub-c-4377ab04-f100-11e3-bffd-02ee2ddab7fe'
12 | CHANNEL 'pubnub-market-orders'
13 | ", &[])
14 | }
--------------------------------------------------------------------------------
/connection-examples/rust/src/subscribe.rs:
--------------------------------------------------------------------------------
1 | use crate::connection::create_client;
2 |
3 | /// Run a subscribe over the PUBNUB materialized view
4 | pub(crate) fn subscribe() {
5 | let mut client = create_client().expect("Error creating client.");
6 | let mut transaction = client.transaction().expect("Error creating transaction.");
7 | transaction.execute("DECLARE c CURSOR FOR SUBSCRIBE (SELECT symbol, bid_price::text FROM market_orders) WITH (SNAPSHOT = false);", &[]).expect("Error creating cursor.");
8 |
9 | loop {
10 | let results = transaction.query("FETCH ALL c;", &[]).expect("Error running fetch.");
11 | for row in results {
12 | println!("{:} - {:}", row.get::(2), row.get::(3));
13 | }
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
/connection-examples/rust/src/table.rs:
--------------------------------------------------------------------------------
1 | use postgres::{Error};
2 |
3 | use crate::connection::create_client;
4 |
5 | /// Create a simple table
6 | pub(crate) fn create_table() -> Result {
7 | let mut client = create_client().expect("Error creating client.");
8 |
9 | client.execute("
10 | CREATE TABLE IF NOT EXISTS countries(code TEXT, name TEXT);
11 | ", &[])
12 | }
--------------------------------------------------------------------------------
/connection-examples/rust/src/view.rs:
--------------------------------------------------------------------------------
1 | use postgres::{Error};
2 |
3 | use crate::connection::create_client;
4 |
5 | /// Creates a materialized view over the PUBNUB source
6 | pub(crate) fn create_materialized_view() -> Result {
7 | let mut client = create_client().expect("Error creating client.");
8 |
9 | client.execute("
10 | CREATE MATERIALIZED VIEW IF NOT EXISTS market_orders AS
11 | SELECT
12 | val->>'symbol' AS symbol,
13 | (val->'bid_price')::float AS bid_price
14 | FROM (SELECT text::jsonb AS val FROM market_orders_raw)
15 | ", &[])
16 | }
--------------------------------------------------------------------------------
/connection-examples/scripts/init.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | ##
4 | # Run the file with bash init.sh
5 | ##
6 |
7 | # File list
8 | files="connection insert query source subscribe views"
9 |
# Ask for a language/framework name and file extension, then scaffold an
# example folder containing one empty file per template name plus a README.
# All expansions are quoted so names containing spaces survive intact
# (e.g. "Node JS" slugifies to "node-js" instead of just "node").
function init() {
    read -p "The name of the language or framework: [eg. python] " name
    while [[ -z ${name} ]]; do
        read -p "The name of the language or framework: [eg. python] " name
    done
    echo "${name}"
    slug=$(slugify "${name}")
    echo "Folder name: ${slug}"

    # Abort rather than clobber an existing example
    if [[ -d ${slug} ]]; then
        echo "Folder already exists: ${slug}"
        exit 1
    fi

    read -p "The file extension: [eg. py] " extension
    while [[ -z ${extension} ]]; do
        read -p "The file extension: [eg. py] " extension
    done
    echo "File extension: ${extension}"

    mkdir "${slug}"
    echo "Folder created: ${slug}"

    # Create one empty file per template name (files is intentionally
    # unquoted here: it is a whitespace-separated list to iterate).
    for file in ${files}; do
        touch "${slug}/${file}.${extension}"
        echo "File created: ${slug}/${file}.${extension}"
    done
    echo "# Materialize + ${name} Example" > "${slug}/README.md"

    echo "Folder structure:"
    ls -l "${slug}"

    echo "Complete!"

}
48 |
# Slugify a name: lowercase, collapse runs of non-alphanumerics to '-',
# and trim leading/trailing dashes. "$1" is quoted so a multi-word name
# passed as one argument is slugified as a whole.
function slugify() {
    echo "$1" | tr '[:upper:]' '[:lower:]' | sed -E 's/[^a-z0-9]+/-/g' | sed -E 's/^-+|-+$//g'
}
53 |
54 | # Init the script
55 | init
56 |
--------------------------------------------------------------------------------
/connection-examples/typescript/README.md:
--------------------------------------------------------------------------------
1 | # Materialize + TypeScript Example
2 |
3 | You connect to Materialize the same way you connect to [PostgreSQL with `node-postgres`](https://node-postgres.com/features/connecting).
4 |
5 | Install dependencies and build:
6 | ```bash
7 | # Requires npm
8 | $ npm run build # npm i && tsc
9 | ```
10 |
11 | Run examples
12 | ```bash
13 | $ node "./dist/connection.js"
14 | $ node "./dist/source.js"
15 | $ node "./dist/view.js"
16 | $ node "./dist/subscribe.js"
17 | $ node "./dist/query.js"
18 | $ node "./dist/insert.js"
19 | ```
20 |
21 | ### Examples:
22 |
23 | - [Connection](./src/connection.ts)
24 | - [Stream](./src/subscribe.ts)
25 | - [Query](./src/query.ts)
- [Insert data into tables](./src/insert.ts)
- [Manage sources](./src/source.ts)
- [Manage Views](./src/view.ts)
--------------------------------------------------------------------------------
/connection-examples/typescript/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "ts-mz-connector",
3 | "version": "0.0.1",
4 | "main": "./dist/index.js",
5 | "type": "module",
6 | "files": [
7 | "./dist",
8 | "./src"
9 | ],
  "description": "Materialize connector example for TypeScript",
11 | "scripts": {
12 | "build": "npm i && tsc"
13 | },
14 | "devDependencies": {
15 | "@types/pg": "8.6.5",
16 | "ts-node": "10.9.1",
17 | "tslib": "2.4.0",
18 | "typescript": "4.7.4"
19 | },
20 | "dependencies": {
21 | "pg": "8.7.3"
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/connection-examples/typescript/src/connection.ts:
--------------------------------------------------------------------------------
1 | // node 14+ cjs named exports not found in pg
2 | import pkg from 'pg';
3 | const { Client } = pkg;
4 |
5 | const client = new Client({
6 | user: "MATERIALIZE_USERNAME",
7 | database: "materialize",
8 | password: "APP_SPECIFIC_PASSWORD",
9 | host: "MATERIALIZE_HOST",
10 | port: 6875,
11 | ssl: true
12 | });
13 |
14 | /*
15 | Alternatively, you can use the connection string format:
16 | const client = new Client('postgres://materialize@localhost:6875/materialize');
17 | */
18 |
19 | async function main() {
20 | try {
21 | await client.connect();
22 |
23 | /* Work with Materialize */
24 | } catch(e) {
25 | console.log("error: ", e);
26 | } finally {
27 | await client.end();
28 | }
29 | }
30 |
31 | await main();
--------------------------------------------------------------------------------
/connection-examples/typescript/src/insert.ts:
--------------------------------------------------------------------------------
1 | // node 14+ cjs named exports not found in pg
2 | import pkg from 'pg';
3 | const { Client } = pkg;
4 |
5 | const client = new Client({
6 | user: "MATERIALIZE_USERNAME",
7 | database: "materialize",
8 | password: "APP_SPECIFIC_PASSWORD",
9 | host: "MATERIALIZE_HOST",
10 | port: 6875,
11 | ssl: true
12 | });
13 |
14 | // const createTable = 'CREATE TABLE countries (code text NOT NULL, name text NOT NULL);';
15 | const text = 'INSERT INTO countries(code, name) VALUES($1, $2);';
16 | const values = ['GH', 'GHANA'];
17 |
18 | async function main() {
19 | try {
20 | await client.connect();
21 | const res = await client.query(text, values);
22 | console.log(res);
23 | } catch(e) {
24 | console.log("error: ", e);
25 | } finally {
26 | await client.end();
27 | }
28 | }
29 |
30 | await main();
--------------------------------------------------------------------------------
/connection-examples/typescript/src/query.ts:
--------------------------------------------------------------------------------
1 | // node 14+ cjs named exports not found in pg
2 | import pkg from 'pg';
3 | const { Client } = pkg;
4 |
5 | const client = new Client({
6 | user: "MATERIALIZE_USERNAME",
7 | database: "materialize",
8 | password: "APP_SPECIFIC_PASSWORD",
9 | host: "MATERIALIZE_HOST",
10 | port: 6875,
11 | ssl: true
12 | });
13 |
14 | async function main() {
15 | try {
16 | await client.connect();
17 | const res = await client.query('SELECT * FROM counter_sum');
18 | console.log(res.rows);
19 | } catch(e) {
20 | console.log("error: ", e);
21 | } finally {
22 | client.end();
23 | }
24 | };
25 |
26 | await main();
--------------------------------------------------------------------------------
/connection-examples/typescript/src/source.ts:
--------------------------------------------------------------------------------
1 | // node 14+ cjs named exports not found in pg
2 | import pkg from 'pg';
3 | const { Client } = pkg;
4 |
5 | const client = new Client({
6 | user: "MATERIALIZE_USERNAME",
7 | database: "materialize",
8 | password: "APP_SPECIFIC_PASSWORD",
9 | host: "MATERIALIZE_HOST",
10 | port: 6875,
11 | ssl: true
12 | });
13 |
14 | async function main() {
15 | try {
16 | await client.connect();
17 | const res = await client.query(
18 | `CREATE SOURCE IF NOT EXISTS counter
19 | FROM LOAD GENERATOR COUNTER
20 | (TICK INTERVAL '500ms')
21 | WITH (SIZE = '3xsmall');`
22 | );
23 | console.log(res);
24 | } catch(e) {
25 | console.log("error: ", e);
26 | } finally {
27 | client.end();
28 | }
29 | }
30 |
31 | await main();
--------------------------------------------------------------------------------
/connection-examples/typescript/src/state.ts:
--------------------------------------------------------------------------------
1 | export interface Update {
2 | value: T;
3 | diff: number;
4 | }
5 |
6 | export default class State {
7 | private state: Map;
8 | private timestamp: number;
9 | private valid: boolean;
10 | private history: Array> | undefined;
11 |
12 | constructor(collectHistory?: boolean) {
13 | this.state = new Map();
14 | this.timestamp = 0;
15 | this.valid = true;
16 | if (collectHistory) {
17 | this.history = [];
18 | }
19 | }
20 |
21 | getState(): Readonly> {
22 | const list: Array = new Array();
23 |
24 | Array.from(this.state.entries()).forEach(([key, value]) => {
25 | const clone = JSON.parse(key);
26 | let i = 0;
27 | while (i< value) {
28 | list.push(clone);
29 | i++;
30 | };
31 | });
32 |
33 | return list;
34 | }
35 |
36 | getHistory(): Array> | undefined {
37 | return this.history;
38 | }
39 |
40 | private validate(timestamp: number) {
41 | if (!this.valid) {
42 | throw new Error("Invalid state.");
43 | } else if (timestamp < this.timestamp) {
44 | console.error("Invalid timestamp.");
45 | this.valid = false;
46 | throw new Error(
47 | `Update with timestamp (${timestamp}) is lower than the last timestamp (${
48 | this.timestamp
49 | }). Invalid state.`
50 | );
51 | }
52 | }
53 |
54 | private process({ value: _value, diff }: Update) {
55 | // Count value starts as a NaN
56 | const value = JSON.stringify(_value);
57 | const count = this.state.has(value) ? (this.state.get(value) as number + diff) : diff;
58 |
59 | if (count <= 0) {
60 | this.state.delete(value);
61 | } else {
62 | this.state.set(value, count);
63 | }
64 |
65 |
66 | if (this.history) {
67 | this.history.push({ value: _value, diff });
68 | }
69 | }
70 |
71 | update(updates: Array>, timestamp: number) {
72 | if (updates.length > 0) {
73 | this.validate(timestamp);
74 | this.timestamp = timestamp;
75 | updates.forEach(this.process.bind(this));
76 | }
77 | }
78 | };
79 |
--------------------------------------------------------------------------------
/connection-examples/typescript/src/subscribe.ts:
--------------------------------------------------------------------------------
1 | // node 14+ cjs named exports not found in pg
2 | import pkg from 'pg';
3 | import State, { Update } from './state';
4 | const { Client } = pkg;
5 |
6 | const client = new Client({
7 | user: "MATERIALIZE_USERNAME",
8 | database: "materialize",
9 | password: "APP_SPECIFIC_PASSWORD",
10 | host: "MATERIALIZE_HOST",
11 | port: 6875,
12 | ssl: true
13 | });
14 |
15 | interface CounterSum {
16 | sum: number;
17 | }
18 |
19 | async function main() {
20 | try {
21 | await client.connect();
22 |
23 | await client.query('BEGIN');
24 | await client.query('DECLARE c CURSOR FOR SUBSCRIBE (SELECT sum FROM counter_sum) WITH (PROGRESS);');
25 |
26 | const state = new State();
27 | const buffer: Array> = [];
28 |
29 | // Loop indefinitely
30 | while (true) {
31 | const { rows } = await client.query('FETCH ALL c');
32 | rows.forEach(row => {
33 | // Map row fields
34 | const {
35 | mz_timestamp: ts,
36 | mz_progressed: progress,
37 | mz_diff: diff,
38 | sum,
39 | } = row;
40 |
41 | // When a progress is detected, get the last values
42 | if (progress) {
43 | if (buffer.length > 0) {
44 | try {
45 | state.update(buffer, ts);
46 | } catch (err) {
47 | console.error(err);
48 | } finally {
49 | buffer.splice(0, buffer.length);
50 | }
51 | }
52 | } else {
53 | buffer.push({ value: { sum }, diff });
54 | }
55 | });
56 | }
57 | } catch (e) {
58 | console.log("error: ", e);
59 | } finally {
60 | client.end(); // only on error
61 | }
62 | }
63 |
64 | await main();
--------------------------------------------------------------------------------
/connection-examples/typescript/src/view.ts:
--------------------------------------------------------------------------------
1 | // node 14+ cjs named exports not found in pg
2 | import pkg from 'pg';
3 | const { Client } = pkg;
4 |
5 | const client = new Client({
6 | user: "MATERIALIZE_USERNAME",
7 | database: "materialize",
8 | password: "APP_SPECIFIC_PASSWORD",
9 | host: "MATERIALIZE_HOST",
10 | port: 6875,
11 | ssl: true
12 | });
13 |
14 | async function main() {
15 | try {
16 | await client.connect();
17 | const res = await client.query(
18 | `CREATE MATERIALIZED VIEW IF NOT EXISTS counter_sum AS
19 | SELECT sum(counter)
20 | FROM counter;`
21 | );
22 | console.log(res);
23 | } catch (e) {
24 | console.log("error: ", e);
25 | } finally {
26 | client.end();
27 | }
28 | }
29 |
30 | await main();
--------------------------------------------------------------------------------
/connection-examples/typescript/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "strict": true,
4 | "target": "ES2022",
5 | "module": "ES2022",
6 | "incremental": true,
7 | "sourceMap": true,
8 | "noUnusedLocals": true,
9 | "moduleResolution": "Node",
10 | "esModuleInterop": true,
11 | "importHelpers": true,
12 | "allowSyntheticDefaultImports": true,
13 | "forceConsistentCasingInFileNames": true,
14 | "declaration": true,
15 | "outDir": "dist",
16 | "declarationMap": true,
17 | "useUnknownInCatchVariables": false
18 | },
19 | "include": ["src/**/*"],
20 | "exclude": ["node_modules"]
21 | }
22 |
--------------------------------------------------------------------------------
/dbt-get-started/.gitignore:
--------------------------------------------------------------------------------
1 | target/
2 | dbt_modules/
3 | logs/
4 | dbt_packages
5 |
--------------------------------------------------------------------------------
/dbt-get-started/.sqlfluff:
--------------------------------------------------------------------------------
1 | [sqlfluff]
2 | templater = dbt
3 |
--------------------------------------------------------------------------------
/dbt-get-started/.sqlfluffignore:
--------------------------------------------------------------------------------
1 | dbt_packages/
2 | macros/
3 | target/
4 |
--------------------------------------------------------------------------------
/dbt-get-started/dbt_project.yml:
--------------------------------------------------------------------------------
1 | name: 'mz_get_started'
2 | version: '1.0.0'
3 | config-version: 2
4 |
5 | profile: 'mz_get_started'
6 |
7 | model-paths: ['models']
8 |
9 | target-path: 'target'
10 | clean-targets:
11 | - 'target'
12 | - 'dbt_modules'
13 |
14 | tests:
15 | mz_get_started:
16 | +store_failures: true
17 | +schema: 'etl_failure'
18 |
--------------------------------------------------------------------------------
/dbt-get-started/models/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MaterializeInc/demos/a8ecb5a24b09c53153942612c80f713b8f909335/dbt-get-started/models/.DS_Store
--------------------------------------------------------------------------------
/dbt-get-started/models/ecommerce.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 |
3 | models:
4 | - name: purchases
      description: 'Purchase records captured from the upstream shop database.'
6 | columns:
7 | - name: id
8 | description: ''
9 | - name: user_id
10 | description: ''
11 | - name: item_id
12 | description: ''
13 | - name: status
14 | description: ''
15 | - name: quantity
16 | description: ''
17 | - name: purchase_price
18 | description: ''
19 | - name: deleted
20 | description: ''
21 | - name: created_at
22 | description: ''
23 | - name: updated_at
24 | description: ''
25 | - name: items
26 | description: ''
27 | columns:
28 | - name: id
29 | description: ''
30 | - name: name
31 | description: ''
32 | - name: category
33 | description: ''
34 | - name: price
35 | description: ''
36 | - name: inventory
37 | description: ''
38 | - name: inventory_updated_at
39 | description: ''
40 | - name: created_at
41 | description: ''
42 | - name: updated_at
43 | description: ''
44 | - name: item_purchases
45 | description: ''
46 | columns:
47 | - name: item_id
48 | description: ''
49 | - name: items_sold
50 | description: ''
51 | - name: revenue
52 | description: ''
53 | - name: orders
54 | description: ''
55 | - name: latest_order
56 | description: ''
57 | - name: item_summary
58 | description: ''
59 | columns:
60 | - name: item_id
61 | description: ''
62 | - name: item_name
63 | description: ''
64 | tests:
65 | - not_null
66 | - name: item_category
67 | description: ''
68 | - name: items_sold
69 | description: ''
70 | - name: revenue
71 | description: ''
72 | - name: orders
73 | description: ''
74 | - name: latest_order
75 | description: ''
76 | - name: item_summary_5min
77 | description: ''
78 | columns:
79 | - name: item_id
80 | description: ''
81 | - name: item_name
82 | description: ''
83 | - name: item_category
84 | description: ''
85 | - name: items_sold
86 | description: ''
87 | - name: revenue
88 | description: ''
89 | - name: orders
90 | description: ''
91 | - name: latest_order
92 | description: ''
93 |
--------------------------------------------------------------------------------
/dbt-get-started/models/marts/item_summary_5min.sql:
--------------------------------------------------------------------------------
{{ config(materialized='view') }}

-- Rolling 5-minute window over item_summary: keeps only items whose most
-- recent order falls within the last 5 minutes. mz_now() makes this a
-- temporal filter that stays up to date as time advances.
SELECT
    item_id,
    item_name,
    item_category,
    items_sold,
    revenue,
    orders,
    latest_order
FROM {{ ref('item_summary') }}
WHERE
    mz_now() >= latest_order
    AND mz_now() < latest_order + INTERVAL '5' MINUTE
15 |
--------------------------------------------------------------------------------
/dbt-get-started/models/sources/items.sql:
--------------------------------------------------------------------------------
{{ config(materialized='source', indexes=[{'columns': ['id']}]) }}

-- Kafka source over the CDC topic for the MySQL `shop.items` table.
-- Messages are Avro-decoded via the Confluent Schema Registry connection;
-- ENVELOPE DEBEZIUM interprets change events as inserts/updates/deletes.
CREATE SOURCE {{ this }}
FROM KAFKA CONNECTION {{ target.schema }}.kafka_connection (
    TOPIC 'mysql.shop.items'
)
FORMAT AVRO
USING CONFLUENT SCHEMA REGISTRY CONNECTION {{ target.schema }}.csr_connection
ENVELOPE DEBEZIUM
WITH (SIZE = '3xsmall')
11 |
--------------------------------------------------------------------------------
/dbt-get-started/models/sources/purchases.sql:
--------------------------------------------------------------------------------
{{ config(materialized='source') }}

-- Kafka source over the CDC topic for the MySQL `shop.purchases` table.
-- Messages are Avro-decoded via the Confluent Schema Registry connection;
-- ENVELOPE DEBEZIUM interprets change events as inserts/updates/deletes.
CREATE SOURCE {{ this }}
FROM KAFKA CONNECTION {{ target.schema }}.kafka_connection (
    TOPIC 'mysql.shop.purchases'
)
FORMAT AVRO
USING CONFLUENT SCHEMA REGISTRY CONNECTION {{ target.schema }}.csr_connection
ENVELOPE DEBEZIUM
WITH (SIZE = '3xsmall')
11 |
--------------------------------------------------------------------------------
/dbt-get-started/models/staging/item_purchases.sql:
--------------------------------------------------------------------------------
{{ config(materialized='view', indexes=[{'columns': ['item_id']}]) }}

-- Per-item purchase aggregates: units sold, revenue, order count, and the
-- timestamp of the most recent order.
SELECT
    item_id,
    SUM(quantity) AS items_sold,
    SUM(purchase_price) AS revenue,
    COUNT(id) AS orders,
    MAX(created_at::timestamp) AS latest_order
FROM {{ ref('purchases') }}
GROUP BY item_id
11 |
--------------------------------------------------------------------------------
/dbt-get-started/models/staging/item_summary.sql:
--------------------------------------------------------------------------------
{{ config(materialized='materializedview') }}

-- Enriches the per-item purchase aggregates with item name and category
-- from the items source, maintained incrementally as a materialized view.
SELECT
    ip.item_id AS item_id,
    i.name AS item_name,
    i.category AS item_category,
    ip.latest_order AS latest_order,
    SUM(ip.items_sold) AS items_sold,
    SUM(ip.revenue) AS revenue,
    SUM(ip.orders) AS orders
FROM {{ ref('item_purchases') }} AS ip
INNER JOIN {{ ref('items') }} AS i ON ip.item_id = i.id
GROUP BY item_id, item_name, item_category, latest_order
14 |
--------------------------------------------------------------------------------
/dbt-get-started/profiles.yml:
--------------------------------------------------------------------------------
1 | mz_get_started:
2 | outputs:
3 | dev:
4 | type: materialize
5 | host: "{{ env_var('MZ_HOST') }}"
6 | port: 6875
7 | user: "{{ env_var('MZ_USER') }}"
8 | password: "{{ env_var('MZ_PASSWORD') }}"
9 | database: materialize
10 | cluster: quickstart
11 | schema: qck
12 | target: dev
--------------------------------------------------------------------------------
/ecommerce-redpanda/.env.example:
--------------------------------------------------------------------------------
# Local MySQL instance details
2 | MYSQL_PASSWORD=I957DO9cYXp6JDEv
3 |
--------------------------------------------------------------------------------
/ecommerce-redpanda/compose.yaml:
--------------------------------------------------------------------------------
1 | services:
2 | mysql:
3 | image: mysql/mysql-server:8.0.27
4 | ports:
5 | - 3306:3306
6 | environment:
7 | - MYSQL_ROOT_PASSWORD=${MYSQL_PASSWORD}
8 | - MYSQL_USER=mysqluser
9 | - MYSQL_PASSWORD=${MYSQL_PASSWORD}
10 | volumes:
11 | - ../ecommerce/mysql/mysql.cnf:/etc/mysql/conf.d
12 | - ../ecommerce/mysql/mysql_bootstrap.sql:/docker-entrypoint-initdb.d/mysql_bootstrap.sql
13 | healthcheck: {test: mysql -p$$MYSQL_PASSWORD -e 'select 1', interval: 1s, start_period: 60s}
14 | redpanda:
15 | image: docker.vectorized.io/vectorized/redpanda:v21.11.2
16 | command:
17 | - redpanda start
18 | - --overprovisioned
19 | - --smp 1
20 | - --memory 1G
21 | - --reserve-memory 0M
22 | - --node-id 0
23 | - --check=false
24 | - --kafka-addr 0.0.0.0:9092
25 | - --advertise-kafka-addr ${EXTERNAL_IP:-redpanda}:9092
26 | - --pandaproxy-addr 0.0.0.0:8082
27 | - --advertise-pandaproxy-addr ${EXTERNAL_IP:-redpanda}:8082
28 | - --set redpanda.enable_transactions=true
29 | - --set redpanda.enable_idempotence=true
30 | ports:
31 | - 9092:9092
32 | - 8081:8081
33 | - 8082:8082
34 | healthcheck: {test: curl -f localhost:9644/v1/status/ready, interval: 1s, start_period: 30s}
35 | debezium:
36 | build: ./connect
37 | environment:
38 | BOOTSTRAP_SERVERS: ${EXTERNAL_IP:-redpanda}:9092
39 | GROUP_ID: 1
40 | CONFIG_STORAGE_TOPIC: connect_configs
41 | OFFSET_STORAGE_TOPIC: connect_offsets
42 | KEY_CONVERTER: io.confluent.connect.avro.AvroConverter
43 | VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter
44 | CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: http://redpanda:8081
45 | CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://redpanda:8081
46 | volumes:
47 | - ${PWD}/mysql:/data
48 | ports:
49 | - 8083:8083
50 | healthcheck: {test: curl -f localhost:8083, interval: 1s, start_period: 120s}
51 | depends_on:
52 | redpanda: {condition: service_healthy}
53 | mysql: {condition: service_healthy}
54 | debezium_deploy:
55 | image: debezium/connect:2.4
56 | depends_on:
57 | debezium: {condition: service_healthy}
58 | environment:
59 | - MYSQL_PASSWORD=${MYSQL_PASSWORD}
60 | - KAFKA_ADDR=${EXTERNAL_IP:-redpanda}:9092
61 | volumes:
62 | - ./mysql/mysql_dbz.sh:/mysql_dbz.sh
63 | entrypoint: [bash, -c, /mysql_dbz.sh]
64 | metabase:
65 | image: materialize/metabase:1.0.3
66 | ports:
67 | - 3030:3000
68 | loadgen:
69 | build: ../ecommerce/loadgen
70 | init: true
71 | environment:
72 | - MYSQL_PASSWORD=${MYSQL_PASSWORD}
73 | - KAFKA_ADDR=${EXTERNAL_IP:-redpanda}:9092
74 | - CONFLUENT_BROKER_HOST=${EXTERNAL_IP:-redpanda}:9092
75 | depends_on:
76 | mysql: {condition: service_healthy}
77 | debezium: {condition: service_healthy}
78 | redpanda: {condition: service_healthy}
79 |
--------------------------------------------------------------------------------
/ecommerce-redpanda/connect/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debezium/connect-base:2.4
2 |
3 | #
4 | # Set up the plugins directory ...
5 | #
6 | ENV CONFLUENT_VERSION=7.0.1 \
7 | AVRO_VERSION=1.10.1 \
8 | GUAVA_VERSION=31.0.1-jre
9 |
10 | RUN docker-maven-download confluent kafka-connect-avro-converter "$CONFLUENT_VERSION" fd03a1436f29d39e1807e2fb6f8e415a && \
11 | docker-maven-download confluent kafka-connect-avro-data "$CONFLUENT_VERSION" d27f30e9eca4ef1129289c626e9ce1f1 && \
12 | docker-maven-download confluent kafka-avro-serializer "$CONFLUENT_VERSION" c72420603422ef54d61f493ca338187c && \
13 | docker-maven-download confluent kafka-schema-serializer "$CONFLUENT_VERSION" 9c510db58119ef66d692ae172d5b1204 && \
14 | docker-maven-download confluent kafka-schema-registry-client "$CONFLUENT_VERSION" 7449df1f5c9a51c3e82e776eb7814bf1 && \
15 | docker-maven-download confluent common-config "$CONFLUENT_VERSION" aab5670de446af5b6f10710e2eb86894 && \
16 | docker-maven-download confluent common-utils "$CONFLUENT_VERSION" 74bf5cc6de2748148f5770bccd83a37c && \
17 | docker-maven-download central org/apache/avro avro "$AVRO_VERSION" 35469fee6d74ecbadce4773bfe3a204c && \
18 | docker-maven-download central com/google/guava guava "$GUAVA_VERSION" bb811ca86cba6506cca5d415cd5559a7
19 |
20 | # https://github.com/debezium/container-images/blob/main/connect/2.4/Dockerfile
21 | LABEL maintainer="Debezium Community"
22 |
23 | ENV DEBEZIUM_VERSION="2.4.0.Final" \
24 | MAVEN_REPO_CENTRAL="" \
25 | MAVEN_REPOS_ADDITIONAL="" \
26 | MAVEN_DEP_DESTINATION=$KAFKA_CONNECT_PLUGINS_DIR \
27 | MONGODB_MD5=a22784387e0ec8a6abb1606c2c365cb2 \
28 | MYSQL_MD5=4bff262afc9678f5cbc3be6315b8e71e \
29 | POSTGRES_MD5=b42c9e208410f39ad1ad09778b1e3f03 \
30 | SQLSERVER_MD5=9b8bf3c62a7c22c465a32fa27b3cffb5 \
31 | ORACLE_MD5=21699814400860457dc2334b165882e6 \
32 | DB2_MD5=0727d7f2d1deeacef39e230acac835a8 \
33 | SPANNER_MD5=186b07595e914e9139941889fd675044 \
34 | VITESS_MD5=3b4d24c8c9898df060c408a13fd3429f \
35 | JDBC_MD5=77c5cb9adf932ab17c041544f4ade357 \
36 | KCRESTEXT_MD5=25c0353f5a7304b3c4780a20f0f5d0af \
37 | SCRIPTING_MD5=53a3661e7a9877744f4a30d6483d7957
38 |
39 | RUN docker-maven-download debezium mongodb "$DEBEZIUM_VERSION" "$MONGODB_MD5" && \
40 | docker-maven-download debezium mysql "$DEBEZIUM_VERSION" "$MYSQL_MD5" && \
41 | docker-maven-download debezium postgres "$DEBEZIUM_VERSION" "$POSTGRES_MD5" && \
42 | docker-maven-download debezium sqlserver "$DEBEZIUM_VERSION" "$SQLSERVER_MD5" && \
43 | docker-maven-download debezium oracle "$DEBEZIUM_VERSION" "$ORACLE_MD5" && \
44 | docker-maven-download debezium-additional db2 db2 "$DEBEZIUM_VERSION" "$DB2_MD5" && \
45 | docker-maven-download debezium-additional jdbc jdbc "$DEBEZIUM_VERSION" "$JDBC_MD5" && \
46 | docker-maven-download debezium-additional spanner spanner "$DEBEZIUM_VERSION" "$SPANNER_MD5" && \
47 | docker-maven-download debezium-additional vitess vitess "$DEBEZIUM_VERSION" "$VITESS_MD5" && \
48 | docker-maven-download debezium-optional connect-rest-extension "$DEBEZIUM_VERSION" "$KCRESTEXT_MD5" && \
49 | docker-maven-download debezium-optional scripting "$DEBEZIUM_VERSION" "$SCRIPTING_MD5"
50 |
--------------------------------------------------------------------------------
/ecommerce-redpanda/demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MaterializeInc/demos/a8ecb5a24b09c53153942612c80f713b8f909335/ecommerce-redpanda/demo.png
--------------------------------------------------------------------------------
/ecommerce-redpanda/mysql/mysql_dbz.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Register (or update) the Debezium MySQL connector through the Kafka
# Connect REST API. PUT against /connectors/<name>/config is idempotent:
# it creates the connector if missing or reconfigures it if present.
# MYSQL_PASSWORD and KAFKA_ADDR are injected by the compose environment.

echo "Deploying Debezium MySQL connector"

curl -s -X PUT -H "Content-Type:application/json" http://debezium:8083/connectors/register-mysql/config \
    -d '{
    "connector.class": "io.debezium.connector.mysql.MySqlConnector",
    "database.hostname": "mysql",
    "database.port": 3306,
    "database.user": "debezium",
    "database.password": "'"${MYSQL_PASSWORD}"'",
    "database.server.id": "223344",
    "database.allowPublicKeyRetrieval": true,
    "database.history.kafka.bootstrap.servers":"'"$KAFKA_ADDR"'",
    "database.history.kafka.topic": "mysql-history",
    "schema.history.internal.kafka.bootstrap.servers": "'"$KAFKA_ADDR"'",
    "schema.history.internal.kafka.topic": "mysql-internal-history",
    "database.include.list": "shop",
    "topic.prefix": "dbserver1",
    "time.precision.mode": "connect",
    "include.schema.changes": false
}'
--------------------------------------------------------------------------------
/ecommerce/.env.example:
--------------------------------------------------------------------------------
1 | # Confluent Cloud Details
2 | CONFLUENT_BROKER_HOST=
3 | CONFLUENT_API_KEY=
4 | CONFLUENT_API_SECRET=
5 | # The Confluent Schema Registry URL must include https://
6 | CONFLUENT_SCHEMA_REGISTRY_URL=
7 | CONFLUENT_SCHEMA_REGISTRY_API_KEY=
8 | CONFLUENT_SCHEMA_REGISTRY_API_SECRET=
9 |
# Local MySQL instance details
11 | MYSQL_PASSWORD=I957DO9cYXp6JDEv
--------------------------------------------------------------------------------
/ecommerce/.gitignore:
--------------------------------------------------------------------------------
1 | .env
2 |
--------------------------------------------------------------------------------
/ecommerce/compose.yaml:
--------------------------------------------------------------------------------
1 | services:
2 | mysql:
3 | image: mysql/mysql-server:8.0.27
4 | ports:
5 | - 3306:3306
6 | environment:
7 | - MYSQL_ROOT_PASSWORD=${MYSQL_PASSWORD}
8 | - MYSQL_USER=mysqluser
9 | - MYSQL_PASSWORD=${MYSQL_PASSWORD}
10 | volumes:
11 | - ./mysql/mysql.cnf:/etc/mysql/conf.d
12 | - ./mysql/mysql_bootstrap.sql:/docker-entrypoint-initdb.d/mysql_bootstrap.sql
13 | healthcheck: {test: mysql -p$$MYSQL_PASSWORD -e 'select 1', interval: 1s, start_period: 60s}
14 | debezium:
15 | image: debezium/connect:1.9
16 | environment:
17 | BOOTSTRAP_SERVERS: "${CONFLUENT_BROKER_HOST}"
18 | GROUP_ID: 1
19 | CONFIG_STORAGE_TOPIC: connect_configs
20 | OFFSET_STORAGE_TOPIC: connect_offsets
21 | KEY_CONVERTER: io.confluent.connect.avro.AvroConverter
22 | VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter
23 | CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: "${CONFLUENT_SCHEMA_REGISTRY_URL}"
24 | CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: "${CONFLUENT_SCHEMA_REGISTRY_URL}"
25 | CONNECT_KEY_CONVERTER_BASIC_AUTH_CREDENTIALS_SOURCE: "USER_INFO"
26 | CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_BASIC_AUTH_USER_INFO: "${CONFLUENT_SCHEMA_REGISTRY_API_KEY}:${CONFLUENT_SCHEMA_REGISTRY_API_SECRET}"
27 | CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter
28 | CONNECT_VALUE_CONVERTER_BASIC_AUTH_CREDENTIALS_SOURCE: "USER_INFO"
29 | CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_BASIC_AUTH_USER_INFO: "${CONFLUENT_SCHEMA_REGISTRY_API_KEY}:${CONFLUENT_SCHEMA_REGISTRY_API_SECRET}"
30 | CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: '3'
31 | CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: '3'
32 | CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: '3'
33 | CONNECT_REQUEST_TIMEOUT_MS: "20000"
34 | CONNECT_RETRY_BACKOFF_MS: "500"
35 | CONNECT_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM: "https"
36 | CONNECT_SASL_MECHANISM: "PLAIN"
37 | CONNECT_SECURITY_PROTOCOL: "SASL_SSL"
38 | CONNECT_SASL_JAAS_CONFIG: "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"${CONFLUENT_API_KEY}\" password=\"${CONFLUENT_API_SECRET}\";"
39 | #
40 | CONNECT_CONSUMER_SECURITY_PROTOCOL: "SASL_SSL"
41 | CONNECT_CONSUMER_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM: "https"
42 | CONNECT_CONSUMER_SASL_MECHANISM: "PLAIN"
43 | CONNECT_CONSUMER_SASL_JAAS_CONFIG: "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"${CONFLUENT_API_KEY}\" password=\"${CONFLUENT_API_SECRET}\";"
44 | CONNECT_CONSUMER_REQUEST_TIMEOUT_MS: "20000"
45 | CONNECT_CONSUMER_RETRY_BACKOFF_MS: "500"
46 | #
47 | CONNECT_PRODUCER_SECURITY_PROTOCOL: "SASL_SSL"
48 | CONNECT_PRODUCER_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM: "https"
49 | CONNECT_PRODUCER_SASL_MECHANISM: "PLAIN"
50 | CONNECT_PRODUCER_SASL_JAAS_CONFIG: "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"${CONFLUENT_API_KEY}\" password=\"${CONFLUENT_API_SECRET}\";"
51 | CONNECT_PRODUCER_REQUEST_TIMEOUT_MS: "20000"
52 | CONNECT_PRODUCER_RETRY_BACKOFF_MS: "500"
53 | volumes:
54 | - ${PWD}/mysql:/data
55 | ports:
56 | - 8083:8083
57 | healthcheck: {test: curl -f localhost:8083, interval: 1s, start_period: 120s}
58 | depends_on:
59 | mysql: {condition: service_healthy}
60 | debezium_deploy:
61 | image: debezium/connect:1.9
62 | depends_on:
63 | debezium: {condition: service_healthy}
64 | environment:
65 | - MYSQL_PASSWORD=${MYSQL_PASSWORD}
66 | - CONFLUENT_BROKER_HOST=${CONFLUENT_BROKER_HOST}
67 | - CONFLUENT_SCHEMA_REGISTRY_URL=${CONFLUENT_SCHEMA_REGISTRY_URL}
68 | - CONFLUENT_SCHEMA_REGISTRY_API_KEY=${CONFLUENT_SCHEMA_REGISTRY_API_KEY}
69 | - CONFLUENT_SCHEMA_REGISTRY_API_SECRET=${CONFLUENT_SCHEMA_REGISTRY_API_SECRET}
70 | - CONFLUENT_API_KEY=${CONFLUENT_API_KEY}
71 | - CONFLUENT_API_SECRET=${CONFLUENT_API_SECRET}
72 | volumes:
73 | - ./mysql/mysql_dbz.sh:/mysql_dbz.sh
74 | entrypoint: [bash, -c, /mysql_dbz.sh]
75 | loadgen:
76 | build: loadgen
77 | init: true
78 | environment:
79 | - MYSQL_PASSWORD=${MYSQL_PASSWORD}
80 | - CONFLUENT_BROKER_HOST=${CONFLUENT_BROKER_HOST}
81 | - CONFLUENT_SCHEMA_REGISTRY_URL=${CONFLUENT_SCHEMA_REGISTRY_URL}
82 | - CONFLUENT_SCHEMA_REGISTRY_API_KEY=${CONFLUENT_SCHEMA_REGISTRY_API_KEY}
83 | - CONFLUENT_SCHEMA_REGISTRY_API_SECRET=${CONFLUENT_SCHEMA_REGISTRY_API_SECRET}
84 | - CONFLUENT_API_KEY=${CONFLUENT_API_KEY}
85 | - CONFLUENT_API_SECRET=${CONFLUENT_API_SECRET}
86 | depends_on:
87 | mysql: {condition: service_healthy}
88 | debezium: {condition: service_healthy}
89 | metabase:
90 | image: metabase/metabase:v0.41.5
91 | depends_on:
92 | debezium: {condition: service_healthy}
93 | ports:
94 | - 3030:3000
--------------------------------------------------------------------------------
/ecommerce/demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MaterializeInc/demos/a8ecb5a24b09c53153942612c80f713b8f909335/ecommerce/demo.png
--------------------------------------------------------------------------------
/ecommerce/loadgen/Dockerfile:
--------------------------------------------------------------------------------
FROM python:3.9.9-bullseye

WORKDIR /workdir

# Copy and install requirements before the application code so Docker's
# layer cache skips the pip install when only source files change.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY . .

# Run the load generator as PID 1 (compose sets init: true for signal handling).
ENTRYPOINT ["python", "generate_load.py"]
11 |
--------------------------------------------------------------------------------
/ecommerce/loadgen/clear_purchases.py:
--------------------------------------------------------------------------------
"""Delete e-commerce purchases older than a fixed retention window.

Connects to the demo MySQL instance (connection settings come from the
MYSQL_* environment variables, with demo defaults) and removes rows from
shop.purchases whose updated_at is older than `purchaseRetentionDays`.
"""
import os

from mysql.connector import Error, connect

# CONFIG
mysqlHost = os.getenv("MYSQL_HOST", "mysql")
mysqlPort = os.getenv("MYSQL_PORT", "3306")
mysqlUser = os.getenv("MYSQL_USER", "mysqluser")
mysqlPass = os.getenv("MYSQL_PASSWORD", "I957DO9cYXp6JDEv")
purchaseRetentionDays = 3

try:
    # The context managers close the cursor and connection automatically,
    # so no explicit connection.close() is needed.
    with connect(
        host=mysqlHost,
        # BUG FIX: MYSQL_PORT was read from the environment but never
        # passed to connect(), so a non-default port was silently ignored.
        port=int(mysqlPort),
        user=mysqlUser,
        password=mysqlPass,
    ) as connection:
        with connection.cursor() as cursor:
            # BUG FIX: the original concatenated str + int
            # ("..." + purchaseRetentionDays + "...") which raises
            # TypeError; use an f-string instead.
            print(f"Deleting purchases older than {purchaseRetentionDays} days...")
            # Delete old purchases (older than purchaseRetentionDays days).
            # Interpolation is safe here: the value is a local int constant,
            # not user input.
            cursor.execute(
                f"DELETE FROM shop.purchases WHERE updated_at < DATE_SUB(NOW(), INTERVAL {purchaseRetentionDays} DAY)"
            )
            # rowcount reflects the rows affected by the DELETE above; this
            # replaces the extra "SELECT ROW_COUNT()" round-trip.
            deleted_count = cursor.rowcount
            connection.commit()
            print(f"Deleted {deleted_count} purchases")

except Error as e:
    print(e)
--------------------------------------------------------------------------------
/ecommerce/loadgen/requirements.txt:
--------------------------------------------------------------------------------
1 | barnum==0.5.1
2 | kafka-python==2.0.2
3 | mysql-connector-python==8.0.27
4 | noise==1.2.2
5 |
--------------------------------------------------------------------------------
/ecommerce/mysql/mysql.cnf:
--------------------------------------------------------------------------------
1 | [mysqld]
2 | server-id = 223344
3 | log_bin = mysql-bin
4 | expire_logs_days = 1
5 | binlog_format = row
6 |
--------------------------------------------------------------------------------
/ecommerce/mysql/mysql_bootstrap.sql:
--------------------------------------------------------------------------------
-- Bootstrap the demo `shop` database: application user grants, a dedicated
-- replication user for Debezium, and the three core tables.
CREATE DATABASE IF NOT EXISTS shop;
USE shop;

GRANT ALL PRIVILEGES ON shop.* TO 'mysqluser';

-- Debezium needs its own user with binlog/replication privileges to stream
-- change events out of MySQL.
CREATE USER 'debezium' IDENTIFIED WITH mysql_native_password BY 'I957DO9cYXp6JDEv';

GRANT SELECT, RELOAD, SHOW DATABASES, REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO 'debezium';

FLUSH PRIVILEGES;

CREATE TABLE IF NOT EXISTS shop.users
(
    id SERIAL PRIMARY KEY,
    email VARCHAR(255),
    is_vip BOOLEAN DEFAULT FALSE,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    updated_at DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
);

CREATE TABLE IF NOT EXISTS shop.items
(
    id SERIAL PRIMARY KEY,
    name VARCHAR(100),
    category VARCHAR(100),
    price DECIMAL(7,2),
    inventory INT,
    inventory_updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    updated_at DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
);

CREATE TABLE IF NOT EXISTS shop.purchases
(
    id SERIAL PRIMARY KEY,
    -- BUG FIX: the referenced tables are shop.users / shop.items, not
    -- user / item. The typo never failed at runtime only because MySQL
    -- parses but ignores column-level REFERENCES clauses.
    user_id BIGINT UNSIGNED REFERENCES users(id),
    item_id BIGINT UNSIGNED REFERENCES items(id),
    status TINYINT UNSIGNED DEFAULT 1,
    quantity INT UNSIGNED DEFAULT 1,
    purchase_price DECIMAL(12,2),
    deleted BOOLEAN DEFAULT FALSE,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    updated_at DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
);
45 |
--------------------------------------------------------------------------------
/ecommerce/mysql/mysql_dbz.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Register the Debezium MySQL source connector with the Kafka Connect
# instance running in the `debezium` container. Using PUT against
# /connectors/<name>/config makes the call idempotent: it creates the
# connector on first run and updates it on re-runs.

echo "Deploying Debezium MySQL connector"

# The JSON payload is single-quoted; each ${VAR} is spliced in by briefly
# closing the single-quoted string ('"${VAR}"') so the shell expands the
# environment variables passed in by compose (MYSQL_PASSWORD, CONFLUENT_*).
curl -s -X PUT -H "Content-Type:application/json" http://debezium:8083/connectors/register-mysql/config \
    -d '{
        "connector.class": "io.debezium.connector.mysql.MySqlConnector",
        "database.hostname": "mysql",
        "database.port": 3306,
        "database.user": "debezium",
        "database.password": "'"${MYSQL_PASSWORD}"'",
        "database.server.name": "mysql",
        "database.server.id": "223344",
        "database.allowPublicKeyRetrieval": true,
        "database.history.kafka.bootstrap.servers":"'"${CONFLUENT_BROKER_HOST}"'",
        "database.history.consumer.security.protocol": "SASL_SSL",
        "database.history.consumer.sasl.mechanism": "PLAIN",
        "database.history.consumer.sasl.jaas.config": "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"'${CONFLUENT_API_KEY}'\" password=\"'${CONFLUENT_API_SECRET}'\";",
        "database.history.producer.security.protocol": "SASL_SSL",
        "database.history.producer.sasl.mechanism": "PLAIN",
        "database.history.producer.sasl.jaas.config": "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"'${CONFLUENT_API_KEY}'\" password=\"'${CONFLUENT_API_SECRET}'\";",
        "database.history.kafka.topic": "mysql-history",
        "database.include.list": "shop",
        "time.precision.mode": "connect",
        "include.schema.changes": false
    }'
27 |
--------------------------------------------------------------------------------
/integrations/datadog/datadog/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM gcr.io/datadoghq/agent:7.44.0
2 | ADD conf.d/openmetrics.yaml /etc/datadog-agent/conf.d/openmetrics.yaml
--------------------------------------------------------------------------------
/integrations/datadog/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: '2'
2 |
3 | services:
4 | datadog:
5 | build: datadog
6 | pid: host
7 | environment:
8 | - DD_API_KEY=
9 | - DD_SITE=datadoghq.com
10 | - DD_LOGS_CONFIG_CONTAINER_COLLECT_ALL=true
11 | volumes:
12 | - /var/run/docker.sock:/var/run/docker.sock
13 | - /proc/:/host/proc/:ro
14 | - /sys/fs/cgroup:/host/sys/fs/cgroup:ro
15 | sql-exporter:
16 | image: justwatch/sql_exporter:latest
17 | ports:
18 | - 9237:9237
19 | environment:
20 | CONFIG: /config/config.yaml
21 | volumes:
22 | - ./config.yaml:/config/config.yaml
--------------------------------------------------------------------------------
/integrations/grafana/cloud/.gitignore:
--------------------------------------------------------------------------------
1 | config.yml
2 | .env
3 |
--------------------------------------------------------------------------------
/integrations/grafana/cloud/agent.yaml:
--------------------------------------------------------------------------------
1 | metrics:
2 | global:
3 | scrape_interval: 60s
4 | configs:
5 | - name: hosted-prometheus
6 | scrape_configs:
7 | - job_name: node
8 | static_configs:
9 | - targets: ['sql-exporter:9237']
10 | remote_write:
11 | - url:
12 | basic_auth:
13 | username:
14 | password:
--------------------------------------------------------------------------------
/integrations/grafana/cloud/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: '2'
2 |
3 | services:
4 | agent:
5 | image: grafana/agent:latest
6 | volumes:
7 | - ./agent.yaml:/etc/agent-config/agent.yaml
8 | entrypoint:
9 | - /bin/grafana-agent
10 | - -server.http.address=0.0.0.0:12345
11 | - -config.file=/etc/agent-config/agent.yaml
12 | - -metrics.wal-directory=/tmp/agent/wal
13 | - -enable-features=integrations-next
14 | - -config.expand-env
15 | - -config.enable-read-api
16 | ports:
17 | - "12345:12345"
18 | sql-exporter:
19 | image: justwatch/sql_exporter:latest
20 | ports:
21 | - 9237:9237
22 | environment:
23 | CONFIG: /config/config.yml
24 | volumes:
25 | - ./config.yml:/config/config.yml
26 |
--------------------------------------------------------------------------------
/integrations/grafana/local/.gitignore:
--------------------------------------------------------------------------------
1 | config.yml
2 | .env
3 |
--------------------------------------------------------------------------------
/integrations/grafana/local/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: '2'
2 |
3 | services:
4 | grafana:
5 | image: grafana/grafana-enterprise:8.2.0
6 | ports:
7 | - 3000:3000
8 | environment:
9 | GF_AUTH_ANONYMOUS_ENABLED: "true"
10 | GF_AUTH_ANONYMOUS_ORG_ROLE: "Admin"
11 | volumes:
12 | - ./misc/dashboard.yaml:/etc/grafana/provisioning/dashboards/main.yaml
13 | - ./misc/datasources:/etc/grafana/provisioning/datasources
14 | - ./misc/dashboards:/var/lib/grafana/dashboards
15 | prometheus:
16 | image: prom/prometheus:v2.30.3
17 | ports:
18 | - 9090:9090
19 | volumes:
20 | - ./prometheus.yml:/etc/prometheus/prometheus.yml
21 | sql-exporter:
22 | image: justwatch/sql_exporter:latest
23 | ports:
24 | - 9237:9237
25 | environment:
26 | CONFIG: /config/config.yml
27 | volumes:
28 | - ./config.yml:/config/config.yml
29 |
--------------------------------------------------------------------------------
/integrations/grafana/local/misc/dashboard.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: 1
2 |
3 | providers:
4 | - name: "Materialize Dashboard Example"
5 | orgId: 1
6 | type: file
7 | disableDeletion: false
8 | updateIntervalSeconds: 10
9 | allowUiUpdates: false
10 | options:
11 | path: /var/lib/grafana/dashboards
12 | foldersFromFilesStructure: true
13 |
--------------------------------------------------------------------------------
/integrations/grafana/local/misc/datasources/prometheus.yml:
--------------------------------------------------------------------------------
1 | apiVersion: 1
2 | datasources:
3 | - name: Prometheus
4 | type: prometheus
5 | isDefault: true
6 | url: http://prometheus:9090
7 |
--------------------------------------------------------------------------------
/integrations/grafana/local/prometheus.yml:
--------------------------------------------------------------------------------
1 | global:
2 | scrape_interval: 15s
3 | scrape_configs:
4 | - job_name: sql_exporter
5 | scrape_interval: 15s
6 | static_configs:
7 | - targets: ['sql-exporter:9237']
8 | labels:
9 | instance: sql_exporter
10 |
--------------------------------------------------------------------------------
/integrations/terraform/.gitignore:
--------------------------------------------------------------------------------
1 | # Local .terraform directories
2 | **/.terraform/*
3 |
4 | # .tfstate files
5 | *.tfstate
6 | *.tfstate.*
7 |
8 | locals.tf
9 | # Crash log files
10 | crash.log
11 | crash.*.log
12 |
13 | # Exclude all .tfvars files, which are likely to contain sensitive data, such as
14 | # password, private keys, and other secrets. These should not be part of version
15 | # control as they are data points which are potentially sensitive and subject
16 | # to change depending on the environment.
17 | *.tfvars
18 | *.tfvars.json
19 |
20 | # Ignore override files as they are usually used to override resources locally and so
21 | # are not checked in
22 | override.tf
23 | override.tf.json
24 | *_override.tf
25 | *_override.tf.json
26 |
27 | # Include override files you do wish to add to version control using negated pattern
28 | # !example_override.tf
29 |
30 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
31 | # example: *tfplan*
32 |
33 | # Ignore CLI configuration files
34 | .terraformrc
35 | terraform.rc
36 | .terraform.lock.hcl
37 |
--------------------------------------------------------------------------------
/integrations/terraform/README.md:
--------------------------------------------------------------------------------
1 | # Materialize Terraform Provider Demos
2 |
3 | ## Overview
4 |
5 | This is a collection of demos that show how to use the [Materialize Terraform provider](https://registry.terraform.io/providers/MaterializeInc/materialize/latest/docs) to provision Materialize resources.
6 |
7 | ## Demos
8 |
9 | | Demo | Description |
10 | | ---------------------------------- | ----------------------------------------------------------------------- |
11 | | [MSK PrivateLink](msk-privatelink) | Create an AWS PrivateLink connection between Materialize and Amazon MSK |
12 | | [HashiCorp Vault](hashicorp-vault) | Create a Materialize secret from a HashiCorp Vault secret |
13 | | [EC2 SSH Bastion](ec2-ssh-bastion) | Create an EC2 instance that can be used as an SSH bastion |
14 | | [Secret Stores](secret-stores) | Integrate Materialize with various secret management tools |
15 |
16 | ## Prerequisites
17 |
18 | - [Terraform](https://www.terraform.io/downloads.html) 1.0.3 or later
19 | - [Materialize](https://console.materialize.com/) account
20 | - [AWS](https://aws.amazon.com/) account
21 | - [`aws`](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) CLI
22 | - [`psql`](https://materialize.com/docs/integrations/sql-clients/#installation-instructions-for-psql) installed
23 |
24 | ## Running the demos
25 |
26 | For each demo, follow the instructions in the demo's README. All demos assume that you have `psql`, `terraform` and `aws` CLIs installed and configured.
27 |
28 | ### AWS Configuration
29 |
30 | - Make sure that you've configured your AWS authentication as described in the [AWS provider documentation](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#authentication-and-configuration).
31 |
32 | For example, if you are using AWS SSO, you can use the following `aws` command:
33 |
34 | ```bash
35 | aws sso login --profile your-aws-profile
36 | ```
37 |
38 | - If you have a default AWS profile, you can skip this step. Otherwise, set the `AWS_PROFILE` environment variable to the desired profile:
39 |
40 | ```bash
41 | export AWS_PROFILE=your-aws-profile
42 | ```
43 |
44 | - If you have a default AWS configuration, you can skip this step. Otherwise, set the `AWS_CONFIG_FILE` environment variable to the desired configuration file:
45 |
46 | ```bash
47 | export AWS_CONFIG_FILE= # eg. ["~/.aws/config"]
48 | ```
49 |
50 | - All demos use the `us-east-1` region by default. To use a different region, set the `AWS_REGION` environment variable to the desired region:
51 |
52 | ```bash
53 | export AWS_REGION=us-west-2
54 | ```
55 |
56 | ### Materialize Configuration
57 |
58 | - Get your Materialize host, username, and password from the [Materialize console](https://console.materialize.com/).
59 | - Configure the Materialize Terraform provider by adding the following block to your Terraform project:
60 |
61 | ```hcl
62 | # Configuration-based authentication
63 | provider "materialize" {
64 | host = var.materialize_hostname # optionally use MZ_HOST env var
65 | username = var.materialize_username # optionally use MZ_USER env var
66 | password = var.materialize_password # optionally use MZ_PW env var
67 | port = var.materialize_port # optionally use MZ_PORT env var
68 | database = var.materialize_database # optionally use MZ_DATABASE env var
69 | }
70 | ```
71 |
--------------------------------------------------------------------------------
/integrations/terraform/ec2-ssh-bastion/.gitignore:
--------------------------------------------------------------------------------
1 | # Local .terraform directories
2 | **/.terraform/*
3 |
4 | # .tfstate files
5 | *.tfstate
6 | *.tfstate.*
7 |
8 | locals.tf
9 | # Crash log files
10 | crash.log
11 | crash.*.log
12 |
13 | # Exclude all .tfvars files, which are likely to contain sensitive data, such as
14 | # password, private keys, and other secrets. These should not be part of version
15 | # control as they are data points which are potentially sensitive and subject
16 | # to change depending on the environment.
17 | *.tfvars
18 | *.tfvars.json
19 |
20 | # Ignore override files as they are usually used to override resources locally and so
21 | # are not checked in
22 | override.tf
23 | override.tf.json
24 | *_override.tf
25 | *_override.tf.json
26 |
27 | # Include override files you do wish to add to version control using negated pattern
28 | # !example_override.tf
29 |
30 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
31 | # example: *tfplan*
32 |
33 | # Ignore CLI configuration files
34 | .terraformrc
35 | terraform.rc
36 | .terraform.lock.hcl
37 |
--------------------------------------------------------------------------------
/integrations/terraform/ec2-ssh-bastion/main.tf:
--------------------------------------------------------------------------------
# Define the Materialize provider
terraform {
  required_providers {
    materialize = {
      source  = "MaterializeInc/materialize"
      version = ">= 0.1.0"
    }
    # null = {
    #   source  = "hashicorp/null"
    #   version = "3.2.1"
    # }
  }
}

# Include aws provider
provider "aws" {
  region = "us-east-1"
}

# Include Materialize provider
# Credentials come from locals.tf (gitignored — see .gitignore).
provider "materialize" {
  host     = local.materialize_host
  username = local.materialize_username
  password = local.materialize_password
  port     = 6875
  database = "materialize"
}

# Get the Materialize egress IPs
data "materialize_egress_ips" "all" {}

# Use the materialize ssh module
module "ssh_bastion" {
  source  = "MaterializeInc/ec2-ssh-bastion/aws"
  version = "0.1.0"

  aws_region     = local.aws_region
  vpc_id         = local.vpc_id
  subnet_id      = local.subnet_id
  ssh_public_key = local.ssh_public_key
  # Each egress IP becomes a /32 CIDR so the bastion's security group only
  # admits SSH traffic originating from Materialize.
  mz_egress_ips = [for ip in data.materialize_egress_ips.all.egress_ips : "${ip}/32"]
}

# Create an SSH connection in Materialize
resource "materialize_connection_ssh_tunnel" "example_ssh_connection" {
  name        = "ssh_example_connection"
  schema_name = "public"
  host        = module.ssh_bastion.ssh_bastion_server.public_ip
  port        = 22
  user        = "ubuntu"
}

# Upload the example_ssh_connection.public_key_1 to the EC2 ssh bastion server
# (kept disabled — the manual command is emitted by the upload_ssh_key output below)
# resource "null_resource" "upload_ssh_key" {
#   provisioner "remote-exec" {
#     connection {
#       host        = module.ssh_bastion.ssh_bastion_server.public_ip
#       user        = "ubuntu"
#       private_key = file("${local.ssh_private_key}")
#     }

#     inline = ["echo 'connected!'"]
#   }
#   provisioner "local-exec" {
#     command = "ssh -i ${local.ssh_private_key} ubuntu@${module.ssh_bastion.ssh_bastion_server.public_ip} 'echo ${materialize_connection_ssh_tunnel.example_ssh_connection.public_key_1} >> ~/.ssh/authorized_keys'"
#   }
# }

output "ssh_connection_details" {
  value = materialize_connection_ssh_tunnel.example_ssh_connection
}

# Output instructions on how to upload the ssh key
# (Materialize generates the tunnel keypair; its public key must be authorized
# on the bastion before the tunnel can be used.)
output "upload_ssh_key" {
  value = "# To upload the SSH key to the EC2 bastion server run the following command: \n\n ssh -i ${local.ssh_private_key} ubuntu@${module.ssh_bastion.ssh_bastion_server.public_ip} 'echo ${materialize_connection_ssh_tunnel.example_ssh_connection.public_key_1} >> ~/.ssh/authorized_keys'"
}
77 |
--------------------------------------------------------------------------------
/integrations/terraform/hashicorp-vault/main.tf:
--------------------------------------------------------------------------------
# Demo: create a Materialize secret whose value is read from HashiCorp Vault,
# then use it as the password of a PostgreSQL connection.
terraform {
  required_providers {
    vault = {
      source  = "hashicorp/vault"
      version = "~> 3.15"
    }
    materialize = {
      source  = "MaterializeInc/materialize"
      version = ">= 0.1.0"
    }
  }
}

# Use development mode to experiment
# https://developer.hashicorp.com/vault/docs/concepts/dev-server
provider "vault" {
  address = "http://localhost:8200"
  token   = var.vault_token
}

provider "materialize" {
  host     = var.materialize_hostname # optionally use MZ_HOST env var
  username = var.materialize_username # optionally use MZ_USER env var
  password = var.materialize_password # optionally use MZ_PW env var
  port     = var.materialize_port     # optionally use MZ_PORT env var
  database = var.materialize_database # optionally use MZ_DATABASE env var
}

# Provider inputs; materialize_password authenticates the provider itself —
# the Postgres password below comes from Vault instead.
variable "materialize_hostname" {}
variable "materialize_username" {}
variable "materialize_password" {}
variable "materialize_port" {
  default = 6875
}
variable "materialize_database" {
  default = "materialize"
}
variable "vault_token" {}

# Read the KV secret at secret/materialize from Vault.
data "vault_generic_secret" "materialize_password" {
  path = "secret/materialize"
}

# Materialize SECRET object holding the `pgpass` value pulled from Vault.
resource "materialize_secret" "example_secret" {
  name  = "pgpass"
  value = data.vault_generic_secret.materialize_password.data["pgpass"]
}

# Create a PostgreSQL Connection
resource "materialize_connection_postgres" "example_postgres_connection" {
  name = "example_postgres_connection"
  host = "instance.foo000.us-west-1.rds.amazonaws.com"
  port = 5432
  user {
    text = "pguser"
  }
  # Reference the secret by name rather than embedding the value.
  password {
    name          = materialize_secret.example_secret.name
    database_name = "materialize"
    schema_name   = "public"
  }
  database = "pgdatabase"
}
64 |
--------------------------------------------------------------------------------
/integrations/terraform/msk-privatelink/main.tf:
--------------------------------------------------------------------------------
# Demo: expose an Amazon MSK cluster to Materialize over AWS PrivateLink.
# Define the Materialize provider
terraform {
  required_providers {
    materialize = {
      source  = "MaterializeInc/materialize"
      version = ">= 0.1.0"
    }
  }
}

# Include the AWS provider
provider "aws" {
  region = "us-east-1"
}

# Include the Materialize provider
# Credentials come from locals.tf (gitignored — see .gitignore).
provider "materialize" {
  host     = local.materialize_host
  username = local.materialize_username
  password = local.materialize_password
  port     = 6875
  database = "materialize"
}

# Use the MSK PrivateLink module
# Creates the NLB + VPC endpoint service fronting the MSK brokers.
module "msk" {
  source              = "MaterializeInc/msk-privatelink/aws"
  version             = "0.1.3"
  mz_msk_cluster_name = local.mz_msk_cluster_name
  mz_msk_cluster_port = local.mz_msk_cluster_port
  mz_msk_vpc_id       = local.mz_msk_vpc_id
  aws_region          = local.aws_region
}

# Create a PrivateLink connection in Materialize
resource "materialize_connection_aws_privatelink" "example_privatelink_connection" {
  name               = "example_privatelink_connection"
  schema_name        = "public"
  service_name       = module.msk.mz_msk_endpoint_service.service_name
  availability_zones = module.msk.mz_msk_azs
}

# Add the Materialize allowed principal to the AWS VPC Endpoint Service
# (the principal is generated by Materialize when the connection is created)
resource "aws_vpc_endpoint_service_allowed_principal" "example_privatelink_connection" {
  vpc_endpoint_service_id = module.msk.mz_msk_endpoint_service.id
  principal_arn           = materialize_connection_aws_privatelink.example_privatelink_connection.principal
}

# Finally, go to your AWS account and approve the VPC Endpoint Service connection
--------------------------------------------------------------------------------
/integrations/terraform/sentinel-policies/.gitignore:
--------------------------------------------------------------------------------
1 | tfplan.binary
2 | tfplan.json
3 | result.json
4 |
--------------------------------------------------------------------------------
/integrations/terraform/sentinel-policies/enforce-cluster-size.sentinel:
--------------------------------------------------------------------------------
# Sentinel policy: restrict Materialize clusters to small, inexpensive sizes.
import "tfplan"

# Cluster sizes this policy permits.
allowed_sizes = [
  "3xsmall",
  "2xsmall",
  "xsmall",
  "small",
]

# Pass only when every materialize_cluster instance in the plan either uses
# an allowed size or is being destroyed (destroys carry no applied size).
main = rule {
  all tfplan.resources.materialize_cluster as _, instances {
    all instances as _, r {
      r.applied.size in allowed_sizes or r.destroy
    }
  }
}
17 |
--------------------------------------------------------------------------------
/integrations/terraform/sentinel-policies/main.tf:
--------------------------------------------------------------------------------
# Minimal configuration used to exercise the cluster-size Sentinel/OPA
# policies against a Terraform Cloud workspace.
terraform {
  required_providers {
    materialize = {
      source  = "MaterializeInc/materialize"
      version = ">= 0.3.0"
    }
  }

  # Fill in your Terraform Cloud organization and workspace before running.
  cloud {
    organization = ""

    workspaces {
      name = ""
    }
  }
}

# NOTE(review): this uses `user` while the other demos pass `username` to the
# materialize provider — confirm which argument name provider >= 0.3.0 expects.
provider "materialize" {
  host     = var.materialize_hostname
  user     = var.materialize_user
  password = var.materialize_password
  database = "materialize"
}

variable "materialize_hostname" {}
variable "materialize_user" {}
variable "materialize_password" {}

# "3xsmall" is in the policy's allowed list, so a plan for this passes.
resource "materialize_cluster" "example_cluster" {
  name = "example"
  size = "3xsmall"
}
--------------------------------------------------------------------------------
/integrations/terraform/sentinel-policies/restrict_materialize_cluster_size.rego:
--------------------------------------------------------------------------------
# OPA/Rego policy: deny Terraform plans that create or resize a
# materialize_cluster to anything outside the allowed size list.
# Input is expected to be a `terraform show -json` plan.
package terraform

import input as tfplan

# Define the allowed sizes for Materialize clusters
allowed_sizes := {
	"3xsmall",
	"2xsmall",
	"xsmall",
	"small"
}

# A violation occurs if a materialize_cluster is found with a size that is not allowed
violation[{"msg": msg, "resource": resource}] {
	resource := tfplan.resource_changes[_]
	resource.type == "materialize_cluster"
	# Only evaluate when a post-change size exists (skips pure deletions).
	resource.change.after.size
	not allowed_sizes[resource.change.after.size]

	msg := sprintf(
		"Materialize cluster '%s' has size '%s' which is not in the allowed list: %v",
		[resource.address, resource.change.after.size, allowed_sizes]
	)
}

# The main rule checks for any violations
# (deny is non-empty iff at least one violation fired — standard conftest shape)
deny[msg] {
	violation[{"msg": msg, "resource": _}]
}
30 |
--------------------------------------------------------------------------------
/streamlit-subscribe/.env.example:
--------------------------------------------------------------------------------
1 | export WEBHOOK_URL=
2 | export WEBHOOK_SECRET='some-secret-value'
3 |
--------------------------------------------------------------------------------
/streamlit-subscribe/requirements.txt:
--------------------------------------------------------------------------------
1 | altair==4.2.0
2 | pandas==1.5.2
3 | psycopg==3.1.4
4 | streamlit==1.27.0
5 |
--------------------------------------------------------------------------------
/streamlit-subscribe/sensors.json:
--------------------------------------------------------------------------------
1 | {
2 | "_meta": {
3 | "topic": "sensors"
4 | },
5 | "sensor_id": "faker.datatype.number({ max: 100, min: 1})",
6 | "timestamp": "faker.date.between('2020-01-01T00:00:00.000Z', '2030-01-01T00:00:00.000Z')",
7 | "location": {
8 | "latitude": "faker.datatype.number({ max: 90, min: -90})",
9 | "longitude": "faker.datatype.number({ max: 180, min: -180})"
10 | },
11 | "temperature": "faker.datatype.float({ min: 20, max: 95 })"
12 | }
13 |
--------------------------------------------------------------------------------
/streamlit-subscribe/subscribe.py:
--------------------------------------------------------------------------------
"""Streamlit app that live-charts a Materialize SUBSCRIBE feed.

A background thread streams rows from `SUBSCRIBE simple_sensor_summary`
into a queue; the Streamlit main loop drains the queue, accumulates rows
in session state, and re-renders an Altair scatter chart.
"""
import streamlit as st
import psycopg
import altair as alt
import threading
import queue
import pandas as pd
import os
from collections import defaultdict

DATABASE_URL = os.environ['DATABASE_URL']
updates_queue = queue.Queue()

def fetch_data():
    """Stream SUBSCRIBE output into updates_queue, forever.

    Each row is an UPSERT-envelope tuple:
    (mz_timestamp, mz_state, key, total_records, avg_temperature, latest_timestamp).
    """
    conn = psycopg.connect(DATABASE_URL)
    with conn.cursor() as cur:
        for row in cur.stream("SUBSCRIBE simple_sensor_summary ENVELOPE UPSERT (KEY (sensor_id));"):
            updates_queue.put(row)

# Create a background thread to fetch data. daemon=True so the thread does
# not keep the process alive when Streamlit tears the script down.
thread = threading.Thread(target=fetch_data, daemon=True)
thread.start()

# Initialize the session state
if 'data' not in st.session_state:
    st.session_state.data = defaultdict(list)

chart_placeholder = st.empty()

while True:
    if not updates_queue.empty():
        # BUG FIX: the original called updates_queue.get() twice per
        # iteration, silently discarding every other update. Take exactly
        # one item per pass.
        update = updates_queue.get()
        if "Error" in update:
            st.error(update)
        else:
            # Append data to session state
            st.session_state.data['mz_timestamp'].append(int(update[0]))
            st.session_state.data['mz_state'].append(update[1])
            st.session_state.data['key'].append(update[2])
            st.session_state.data['total_records'].append(update[3])
            st.session_state.data['avg_temperature'].append(float(update[4]))  # Convert string to float
            st.session_state.data['latest_timestamp'].append(update[5])

            # Convert session state to DataFrame for Altair
            df = pd.DataFrame(st.session_state.data)

            # Update chart
            chart = alt.Chart(df).mark_point().encode(
                x='mz_timestamp:T',
                y='avg_temperature:Q',
                color='key:N'
            )
            chart_placeholder.altair_chart(chart, use_container_width=True)
--------------------------------------------------------------------------------