├── node-example-app ├── .dockerignore ├── config │ └── default.json ├── db │ ├── knex.js │ ├── migrations │ │ ├── schemas │ │ │ ├── Purchase.js │ │ │ ├── Product.js │ │ │ ├── Address.js │ │ │ ├── CartItem.js │ │ │ ├── PurchaseItem.js │ │ │ └── User.js │ │ ├── 20240103125711_default.js │ │ └── 20240113143901_products-carts-purchases.js │ └── seeds │ │ ├── 06_cartItem.js │ │ ├── 00_deleteAll.js │ │ ├── 04_purchase.js │ │ ├── 01_address.js │ │ ├── 03_product.js │ │ ├── 05_purchaseItems.js │ │ └── 02_users.js ├── Dockerfile ├── routes │ ├── index.js │ ├── purchase │ │ └── index.js │ ├── cart │ │ └── index.js │ ├── address │ │ └── index.js │ └── user │ │ └── index.js ├── index.js ├── middleware │ └── auth.js ├── knexfile.js ├── package.json ├── controller │ ├── purchase │ │ └── index.js │ ├── address │ │ └── index.js │ ├── cart │ │ └── index.js │ └── user │ │ └── index.js └── instrumentation.js ├── complete_local_setup.png ├── .gitignore ├── infra ├── postgres-json-stdout │ ├── Dockerfile │ ├── docker-entrypoint.sh │ └── README.md ├── grafana-dashboard-provider.yaml ├── fluent-bit │ └── fluent-bit.conf ├── pgtraceconnector-collector-config.yaml ├── otelcol-custom │ ├── Dockerfile │ ├── contrib-config.yaml │ └── manifest.yaml ├── prometheus.yaml ├── grafana-datasources.yaml ├── postgresql.conf ├── docker-compose.yaml ├── otel-collector-config.yaml ├── grafana-dashboards │ └── example-dashboard.json └── postgresql.conf.sample ├── gcp-infra ├── otel-collector-config.yaml ├── .terraform.lock.hcl ├── docker-compose.yaml ├── main.tf ├── terraform.tfstate.backup └── terraform.tfstate └── README.md /node-example-app/.dockerignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | npm-debug.log 3 | Dockerfile 4 | .dockerignore -------------------------------------------------------------------------------- /node-example-app/config/default.json: -------------------------------------------------------------------------------- 1 | { 2 | "jwtSecret": "zfdsagehwhrttznjcklvxijgqerngmre" 3 | } -------------------------------------------------------------------------------- /complete_local_setup.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elessar-ch/sql-tracing/HEAD/complete_local_setup.png -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | .env 3 | infra/data/ 4 | sql.log 5 | credentials/ 6 | .terraform/ 7 | infra-gcp/terraform.tfstate 8 | sql-log/ 9 | *.log -------------------------------------------------------------------------------- /node-example-app/db/knex.js: -------------------------------------------------------------------------------- 1 | const environment = process.env.NODE_ENV || 'development' 2 | const config = require('../knexfile')[environment] 3 | 4 | module.exports = require('knex')(config) -------------------------------------------------------------------------------- /infra/postgres-json-stdout/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM postgres:16.1-bookworm 2 | 3 | RUN mv /usr/local/bin/docker-entrypoint.sh /usr/local/bin/docker-entrypoint-orig.sh 4 | COPY docker-entrypoint.sh /usr/local/bin/ 5 | -------------------------------------------------------------------------------- /infra/grafana-dashboard-provider.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | providers: 4 | - name: dashboards 5 | type: file 6 | updateIntervalSeconds: 30 7 | options: 8 | path: /etc/dashboards/ 9 | foldersFromFilesStructure: true -------------------------------------------------------------------------------- /infra/postgres-json-stdout/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | source docker-entrypoint-orig.sh 4 | 5 | touch /var/log/postgresql/pg_log.json && chmod 666 /var/log/postgresql/pg_log.json 6 | ln -sf /proc/1/fd/1 /var/log/postgresql/pg_log.json 7 | 8 | if ! _is_sourced; then 9 | _main "$@" 10 | fi -------------------------------------------------------------------------------- /node-example-app/db/migrations/schemas/Purchase.js: -------------------------------------------------------------------------------- 1 | const purchaseSchema = (table) => { 2 | table.increments('id').primary().unique() 3 | table.integer('user_id').references('id').inTable('user').notNullable() 4 | table.timestamp('datetime').notNullable() 5 | table.timestamps(true, true) 6 | } 7 | 8 | module.exports = purchaseSchema; -------------------------------------------------------------------------------- /node-example-app/db/migrations/schemas/Product.js: -------------------------------------------------------------------------------- 1 | const productSchema = (table) => { 2 | table.increments('id').primary(); 3 | table.string('name'); 4 | table.string('description'); 5 | table.decimal('price'); 6 | table.string('image'); 7 | table.integer('stock'); 8 | table.timestamps(true, true); 9 | } 10 | 11 | module.exports = productSchema; -------------------------------------------------------------------------------- /node-example-app/db/migrations/schemas/Address.js: -------------------------------------------------------------------------------- 1 | const addressSchema = (table) => { 2 | table.increments('id').primary().unique() 3 | table.string('street').notNullable().unique() 4 | table.string('suburb').notNullable() 5 | table.string('city').notNullable() 6 | table.string('code').notNullable() 7 | table.timestamps(true, true) 8 | } 9 | 10 | module.exports = addressSchema; -------------------------------------------------------------------------------- /node-example-app/db/migrations/schemas/CartItem.js: -------------------------------------------------------------------------------- 1 | const cartItemSchema = (table) => { 2 | table.increments('id').primary(); 3 | table.integer('product_id').references('id').inTable('product').notNullable(); 4 | table.integer('user_id').references('id').inTable('user').notNullable(); 5 | table.integer('quantity'); 6 | table.timestamps(true, true); 7 | } 8 | 9 | module.exports = cartItemSchema; -------------------------------------------------------------------------------- /node-example-app/db/seeds/06_cartItem.js: -------------------------------------------------------------------------------- 1 | exports.seed = function (knex) { 2 | //insert seed entries 3 | return knex('cartItem').insert([ 4 | { 5 | id: 1, 6 | user_id: 2, 7 | product_id: 1, 8 | quantity: 1, 9 | } 10 | ]).then(function () { 11 | return knex.schema.raw('ALTER SEQUENCE "cartItem_id_seq" RESTART WITH 4') 12 | }); 13 | } -------------------------------------------------------------------------------- /node-example-app/db/migrations/schemas/PurchaseItem.js: 
-------------------------------------------------------------------------------- 1 | const purchaseItemSchema = (table) => { 2 | table.increments('id').primary().unique() 3 | table.integer('purchase_id').references('id').inTable('purchase').notNullable() 4 | table.integer('product_id').references('id').inTable('product').notNullable() 5 | table.integer('quantity').notNullable() 6 | table.timestamps(true, true) 7 | } 8 | 9 | module.exports = purchaseItemSchema; -------------------------------------------------------------------------------- /node-example-app/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:20-alpine3.19 2 | 3 | RUN mkdir -p /home/node/app/node_modules && chown -R node:node /home/node/app 4 | 5 | WORKDIR /home/node/app 6 | 7 | COPY --chown=node:node package*.json ./ 8 | 9 | # RUN chown -R node:node /home/node/app 10 | 11 | USER node 12 | 13 | RUN npm install 14 | 15 | COPY --chown=node:node . . 16 | 17 | EXPOSE 3000 18 | 19 | CMD [ "node", "--require", "./instrumentation.js", "index.js" ] -------------------------------------------------------------------------------- /node-example-app/routes/index.js: -------------------------------------------------------------------------------- 1 | const express = require('express'); 2 | const router = express.Router(); 3 | const auth = require('../middleware/auth'); 4 | 5 | router.use('/user', require('./user')); 6 | router.use('/address', require('./address')); 7 | // router.use('/product', require('./product')); 8 | router.use('/cart', auth, require('./cart')); 9 | router.use('/purchase', auth, require('./purchase')); 10 | 11 | router.get('/', (req, res) => { 12 | res.send('Welcome to new API'); 13 | }); 14 | 15 | module.exports = router; -------------------------------------------------------------------------------- /gcp-infra/otel-collector-config.yaml: -------------------------------------------------------------------------------- 1 | receivers: 2 | otlp: 3 | protocols: 4 | grpc: 5 | http: 6 | exporters: 7 | debug: 8 | verbosity: detailed 9 | googlecloud: 10 | log: 11 | default_log_name: opentelemetry.io/collector-exported-log 12 | service: 13 | pipelines: 14 | traces: 15 | receivers: [otlp] 16 | exporters: [debug, googlecloud] 17 | metrics: 18 | receivers: [otlp] 19 | exporters: [debug] 20 | logs: 21 | receivers: [otlp] 22 | exporters: [debug] -------------------------------------------------------------------------------- /node-example-app/db/seeds/00_deleteAll.js: -------------------------------------------------------------------------------- 1 | exports.seed = function (knex) { 2 | // Inserts seed entries 3 | return knex('purchaseItem').del() 4 | .then(function () { 5 | return knex('purchase').del() 6 | }).then(function () { 7 | return knex('cartItem').del() 8 | }).then(function () { 9 | return knex('product').del() 10 | }).then(function () { 11 | return knex('user').del() 12 | }).then(function () { 13 | return knex('address').del() 14 | }); 15 | }; -------------------------------------------------------------------------------- /node-example-app/routes/purchase/index.js: -------------------------------------------------------------------------------- 1 | const express = require('express'); 2 | const router = express.Router(); 3 | 4 | const purchaseController = require('../../controller/purchase/'); 5 | 6 | router.get('/', purchaseController.getUserPurchases); // Gets all of the logged in user's purchases 7 | 8 | router.get('/:id', purchaseController.getPurchase); // 
Get's a specific purchase by id 9 | 10 | router.get('/:id/items', purchaseController.getPurchasedItems); // Get's the items for a specific purchase by the purchase id 11 | 12 | module.exports = router; -------------------------------------------------------------------------------- /node-example-app/index.js: -------------------------------------------------------------------------------- 1 | const express = require('express') 2 | const cors = require('cors') 3 | const helmet = require('helmet') 4 | const routes = require('./routes') 5 | 6 | require('dotenv').config() 7 | 8 | const port = process.env.PORT || 3000; 9 | 10 | const app = express() 11 | app.use(express.json({ limit: '5MB', extended: true })); 12 | app.use(cors()) 13 | app.use(helmet()) 14 | 15 | app.get('/', (req, res) => { 16 | res.send('Hello from express server') 17 | }) 18 | 19 | // Routes goes here 20 | app.use('/api', routes); 21 | 22 | app.listen(port, () => console.log(`Listening on port ${port}.`)) -------------------------------------------------------------------------------- /node-example-app/routes/cart/index.js: -------------------------------------------------------------------------------- 1 | const express = require('express'); 2 | const router = express.Router(); 3 | 4 | const cartController = require('../../controller/cart/'); 5 | 6 | router.get('/', cartController.getUserCart); // Gets all of the products & quantities in the user's cart 7 | 8 | router.post('/', cartController.addToCart); // Adds a product to the user's cart 9 | 10 | router.delete('/:id', cartController.removeFromCart); // Removes a product from the user's cart 11 | 12 | router.post('/order', cartController.order); // Orders all of the products in the user's cart 13 | 14 | module.exports = router; -------------------------------------------------------------------------------- /infra/fluent-bit/fluent-bit.conf: -------------------------------------------------------------------------------- 1 | [INPUT] 2 | Name forward 3 | Listen 0.0.0.0 4 | Port 24224 5 | 6 | [OUTPUT] 7 | Name opentelemetry 8 | Match * 9 | Host otel-collector 10 | Port 4318 11 | Metrics_uri /v1/metrics 12 | Logs_uri /v1/logs 13 | Traces_uri /v1/traces 14 | Log_response_payload True 15 | Tls Off 16 | Tls.verify Off 17 | # add user-defined labels 18 | add_label app fluent-bit 19 | add_label color blue -------------------------------------------------------------------------------- /node-example-app/db/migrations/20240103125711_default.js: -------------------------------------------------------------------------------- 1 | /** 2 | * @param { import("knex").Knex } knex 3 | * @returns { Promise } 4 | */ 5 | exports.up = function (knex) { 6 | return knex.schema 7 | .createTable('address', require('./schemas/Address')) // Address Schema 8 | .createTable('user', require('./schemas/User')) // User Schema 9 | }; 10 | 11 | /** 12 | * @param { import("knex").Knex } knex 13 | * @returns { Promise } 14 | */ 15 | exports.down = function (knex) { 16 | return knex.schema 17 | .dropTableIfExists('user') 18 | .dropTableIfExists('address') 19 | }; 20 | -------------------------------------------------------------------------------- /node-example-app/routes/address/index.js: -------------------------------------------------------------------------------- 1 | const express = require('express'); 2 | const router = express.Router(); 3 | const auth = require('../../middleware/auth'); 4 | 5 | const addressController = require('../../controller/address'); 6 | 7 | router.get('/', auth, 
addressController.getAddressesAuth); // Gets all addresses if user's logged in 8 | 9 | router.put('/:id', addressController.updateAddress); // Updates address by id (recommended to add restrictions ex: admin) 10 | router.delete('/:id', addressController.removeAddress); // Deletes address by id (recommended to add restrictions ex: admin) 11 | 12 | module.exports = router; -------------------------------------------------------------------------------- /infra/pgtraceconnector-collector-config.yaml: -------------------------------------------------------------------------------- 1 | receivers: 2 | otlp: 3 | protocols: 4 | grpc: 5 | endpoint: 0.0.0.0:4317 6 | http: 7 | endpoint: 0.0.0.0:4318 8 | 9 | exporters: 10 | debug: 11 | verbosity: detailed 12 | otlp: 13 | endpoint: otel-collector:4317 14 | tls: 15 | insecure: true 16 | 17 | processors: 18 | batch: 19 | 20 | connectors: 21 | pgtrace: 22 | attribute_name: db.name 23 | 24 | service: 25 | pipelines: 26 | traces: 27 | receivers: [pgtrace] 28 | exporters: [debug, otlp] 29 | logs: 30 | receivers: [otlp] 31 | exporters: [pgtrace, debug] -------------------------------------------------------------------------------- /node-example-app/db/migrations/schemas/User.js: -------------------------------------------------------------------------------- 1 | const userSchema = (table) => { 2 | table.increments('id').primary().unique() 3 | table.string('name').notNullable() 4 | table.string('surname').notNullable() 5 | table.string('email').unique().notNullable() 6 | table.string('gender').notNullable() 7 | table.date('DOB').notNullable() 8 | table.string('contact_number') 9 | table.string('role').notNullable().defaultTo('guests') 10 | table.string('password').notNullable() 11 | table.integer('address_id').references('id').inTable('address').notNullable() 12 | table.timestamps(true, true) 13 | } 14 | 15 | module.exports = userSchema; -------------------------------------------------------------------------------- /node-example-app/db/seeds/04_purchase.js: -------------------------------------------------------------------------------- 1 | //seeds for purchase table 2 | exports.seed = function (knex) { 3 | // Inserts seed entries 4 | return knex('purchase').insert([ 5 | { 6 | id: 1, 7 | user_id: 2, 8 | datetime: "2023-06-29 12:00:00", 9 | }, 10 | { 11 | id: 2, 12 | user_id: 2, 13 | datetime: "2023-06-30 12:00:00", 14 | }, 15 | { 16 | id: 3, 17 | user_id: 3, 18 | datetime: "2023-06-30 12:00:00", 19 | } 20 | ]).then(function () { 21 | return knex.schema.raw('ALTER SEQUENCE purchase_id_seq RESTART WITH 4') 22 | }); 23 | } 24 | -------------------------------------------------------------------------------- /node-example-app/db/seeds/01_address.js: -------------------------------------------------------------------------------- 1 | exports.seed = function (knex) { 2 | // Inserts seed entries 3 | return knex('address').insert([ 4 | { 5 | id: 1, 6 | street: "first", 7 | suburb: "sub", 8 | city: "city", 9 | code: '0001' 10 | }, 11 | { 12 | id: 2, 13 | street: "10 Downing Street", 14 | suburb: "Westminster", 15 | city: "London", 16 | code: 'SW1A 2AA' 17 | }, 18 | { 19 | id: 3, 20 | street: "1 Main Street", 21 | suburb: "New York", 22 | city: "New York", 23 | code: '10001' 24 | }, 25 | ]).then(function () { 26 | return knex.schema.raw('ALTER SEQUENCE address_id_seq RESTART WITH 4') 27 | }); 28 | }; -------------------------------------------------------------------------------- /node-example-app/middleware/auth.js: 
-------------------------------------------------------------------------------- 1 | const jwt = require('jsonwebtoken'); 2 | const config = require('config'); 3 | 4 | module.exports = function (req, res, next) { 5 | // Get token from header 6 | const token = req.header('x-auth-token'); 7 | 8 | // Making sure the token is valid and calls the data 9 | // Check if not token 10 | 11 | if (!token) { 12 | return res.status(401).json({ msg: 'No token, authorisation denied' }); 13 | } 14 | 15 | try { 16 | const decoded = jwt.verify(token, config.get("jwtSecret")); 17 | 18 | req.user = decoded.user; 19 | 20 | next(); 21 | 22 | } catch (err) { 23 | res.status(401).json({ msg: "Token is not valid" }); 24 | } 25 | }; -------------------------------------------------------------------------------- /infra/postgres-json-stdout/README.md: -------------------------------------------------------------------------------- 1 | # Description 2 | 3 | This image allows logging in JSON format to `stdout`. 4 | 5 | ## Usage 6 | 7 | Should use the following settings in `postgresql.conf`. 8 | 9 | ``` 10 | logging_collector = on # Enable capturing of stderr, jsonlog, 11 | log_destination = 'jsonlog' 12 | log_directory = '/var/log/postgresql' 13 | log_filename = 'pg_log' 14 | ``` 15 | 16 | Use the following command and volume to use the `postgresql.conf`. 17 | ``` 18 | command: 19 | - "postgres" 20 | - "-c" 21 | - "config_file=/etc/postgresql/postgresql.conf" 22 | volumes: 23 | - "./your-postgresql.conf:/etc/postgresql/postgresql.conf" 24 | - ... 25 | ``` 26 | 27 | ## Caveats 28 | Some non-json log messages will also be written to `stdout`. -------------------------------------------------------------------------------- /infra/otelcol-custom/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.21.6-alpine3.19 as build 2 | RUN apk --update add ca-certificates wget 3 | 4 | WORKDIR / 5 | 6 | RUN wget -O /ocb https://github.com/open-telemetry/opentelemetry-collector/releases/download/cmd%2Fbuilder%2Fv0.92.0/ocb_0.92.0_linux_amd64 && \ 7 | chmod +x /ocb 8 | 9 | COPY manifest.yaml /manifest.yaml 10 | RUN CGO_ENABLED=0 /ocb --config /manifest.yaml 11 | 12 | FROM scratch 13 | 14 | ARG USER_UID=10001 15 | USER ${USER_UID} 16 | 17 | COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt 18 | 19 | COPY --from=build /_build/otelcol-custom /otelcol-custom 20 | 21 | COPY contrib-config.yaml /etc/otelcol-contrib/config.yaml 22 | ENTRYPOINT ["/otelcol-custom"] 23 | CMD ["--config", "/etc/otelcol-contrib/config.yaml"] 24 | EXPOSE 4317 55678 55679 -------------------------------------------------------------------------------- /node-example-app/db/migrations/20240113143901_products-carts-purchases.js: -------------------------------------------------------------------------------- 1 | /** 2 | * @param { import("knex").Knex } knex 3 | * @returns { Promise } 4 | */ 5 | exports.up = function (knex) { 6 | return knex.schema 7 | .createTable('product', require('./schemas/Product')) 8 | .createTable('purchase', require('./schemas/Purchase')) 9 | .createTable('purchaseItem', require('./schemas/PurchaseItem')) 10 | .createTable('cartItem', require('./schemas/CartItem')) 11 | }; 12 | 13 | /** 14 | * @param { import("knex").Knex } knex 15 | * @returns { Promise } 16 | */ 17 | exports.down = function (knex) { 18 | return knex.schema 19 | .dropTableIfExists('purchaseItem') 20 | .dropTableIfExists('purchase') 21 | .dropTableIfExists('cartItem') 22 | 
.dropTableIfExists('product') 23 | }; 24 | -------------------------------------------------------------------------------- /node-example-app/knexfile.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | development: { 3 | client: 'pg', 4 | connection: { 5 | database: process.env.DATABASE_NAME || 'knexdb', 6 | user: process.env.POSTGRES_USER || 'knexuser', 7 | password: process.env.POSTGRES_USER_PW || 'knexpw', 8 | port: process.env.POSTGRES_PORT || 5432, 9 | host: process.env.POSTGRES_HOST || 'localhost' 10 | }, 11 | migrations: { 12 | directory: './db/migrations' 13 | }, 14 | seeds: { 15 | directory: './db/seeds' 16 | }, 17 | }, 18 | production: { 19 | client: 'pg', 20 | connection: process.env.DATABASE_URL, 21 | migrations: { 22 | directory: './db/migrations' 23 | }, 24 | seeds: { 25 | directory: './db/seeds' 26 | }, 27 | ssl: { 28 | rejectUnauthorized: false 29 | } 30 | } 31 | }; -------------------------------------------------------------------------------- /node-example-app/routes/user/index.js: -------------------------------------------------------------------------------- 1 | const express = require('express'); 2 | const router = express.Router(); 3 | 4 | const auth = require('../../middleware/auth'); 5 | 6 | const userController = require('../../controller/user') 7 | 8 | router.get('/', auth, userController.getUsersAuth); // Gets all users public data if user's signed in 9 | 10 | router.get('/admin', auth, userController.getUsersAdmin); // Gets all users private data only if admin [api/users/admin] 11 | 12 | router.get('/whoami', auth, userController.getUserData); // Get's loggedin users data [api/users/whoami] 13 | 14 | router.post('/register', userController.addUser); // Register user [api/users/register] 15 | 16 | router.post('/login', userController.login); // Logs in and returns access token and users ID [api/users/login] 17 | 18 | router.put('/update', auth, userController.updateUser); // Updates users data if user's signed in [api/users/update] 19 | router.delete('/delete', auth, userController.removeUser) // Deletes user thats signed in 20 | 21 | module.exports = router; -------------------------------------------------------------------------------- /infra/prometheus.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 15s # By default, scrape targets every 15 seconds. 3 | 4 | # Attach these labels to any time series or alerts when communicating with 5 | # external systems (federation, remote storage, Alertmanager). 6 | external_labels: 7 | monitor: 'codelab-monitor' 8 | 9 | # A scrape configuration containing exactly one endpoint to scrape: 10 | # Here it's Prometheus itself. 11 | scrape_configs: 12 | # The job name is added as a label `job=` to any timeseries scraped from this config. 13 | - job_name: 'prometheus' 14 | 15 | # Override the global default and scrape targets from this job every 5 seconds. 16 | scrape_interval: 5s 17 | 18 | static_configs: 19 | - targets: ['localhost:9090'] 20 | - job_name: 'otelcol' 21 | # Override the global default and scrape targets from this job every 5 seconds. 
22 | scrape_interval: 5s 23 | static_configs: 24 | - targets: ['otel-collector:8888', 'otel-collector:8889'] 25 | labels: 26 | group: 'production' -------------------------------------------------------------------------------- /node-example-app/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "node-example-app", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1", 8 | "seed": "./node_modules/.bin/knex seed:run", 9 | "migrate": "./node_modules/.bin/knex migrate:latest" 10 | }, 11 | "keywords": [], 12 | "author": "", 13 | "license": "ISC", 14 | "dependencies": { 15 | "@opentelemetry/api": "^1.7.0", 16 | "@opentelemetry/auto-instrumentations-node": "^0.40.2", 17 | "@opentelemetry/exporter-metrics-otlp-proto": "^0.46.0", 18 | "@opentelemetry/exporter-trace-otlp-proto": "^0.46.0", 19 | "@opentelemetry/instrumentation-knex": "^0.32.4", 20 | "@opentelemetry/sdk-metrics": "^1.19.0", 21 | "@opentelemetry/sdk-node": "^0.46.0", 22 | "bcryptjs": "^2.4.3", 23 | "config": "^3.3.9", 24 | "cors": "^2.8.5", 25 | "dotenv": "^16.3.1", 26 | "express": "^4.18.2", 27 | "helmet": "^7.1.0", 28 | "jsonwebtoken": "^9.0.2", 29 | "knex": "^3.1.0", 30 | "pg": "^8.11.3" 31 | } 32 | } -------------------------------------------------------------------------------- /node-example-app/db/seeds/03_product.js: -------------------------------------------------------------------------------- 1 | exports.seed = function (knex) { 2 | // Inserts example seed entries for product table 3 | return knex('product').insert([ 4 | { 5 | id: 1, 6 | name: "FUJOMOTO", 7 | description: "Coffee table, black-brown", 8 | price: 9.99, 9 | image: "https://images.example.com/FUJOMOTO.jpg", 10 | stock: 10, 11 | }, 12 | { 13 | id: 2, 14 | name: "FJÄLLERÖN", 15 | description: "Armchair, outdoor, white", 16 | price: 19.99, 17 | image: "https://images.example.com/.jpg", 18 | stock: 15, 19 | }, 20 | { 21 | id: 3, 22 | name: "SOLLBERGET", 23 | description: "Conference chair, black/silver-colour", 24 | price: 29.99, 25 | image: "https://images.example.com/FJÄLLBERGET.jpg", 26 | stock: 20, 27 | }, 28 | { 29 | id: 4, 30 | name: "BONOLAMPI", 31 | description: "Table lamp, white", 32 | price: 39.99, 33 | image: "https://images.example.com/BONOLAMPI.jpg", 34 | stock: 25, 35 | }, 36 | ]).then(function () { 37 | return knex.schema.raw('ALTER SEQUENCE product_id_seq RESTART WITH 4') 38 | }); 39 | }; -------------------------------------------------------------------------------- /gcp-infra/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 | 4 | provider "registry.terraform.io/hashicorp/google" { 5 | version = "4.51.0" 6 | constraints = "4.51.0" 7 | hashes = [ 8 | "h1:7JFdiV9bvV6R+AeWzvNbVeoega481sJY3PqtIbrwTsM=", 9 | "zh:001bf7478e495d497ffd4054453c97ab4dd3e6a24d46496d51d4c8094e95b2b1", 10 | "zh:19db72113552dd295854a99840e85678d421312708e8329a35787fff1baeed8b", 11 | "zh:42c3e629ace225a2cb6cf87b8fabeaf1c56ac8eca6a77b9e3fc489f3cc0a9db5", 12 | "zh:50b930755c4b1f8a01c430d8f688ea79de0b0198c87511baa3a783e360d7e624", 13 | "zh:5acd67f0aafff5ad59e179543cccd1ffd48d69b98af0228506403b8d8193b340", 14 | "zh:70128d57b4b4bf07df941172e6af15c4eda8396af5cc2b0128c906983c7b7fad", 15 | "zh:7905fac0ba2becf0e97edfcd4224e57466b04f960f36a3ec654a0a3c2ffececb", 16 | "zh:79b4cc760305cd77c1ff841f789184f808b8052e8f4faa5cb8d518e4c13beb22", 17 | "zh:c7aebd7d7dd2b29de28e382500d36fae8b4d8a192cf05e41ea29c66f1251acfc", 18 | "zh:d8b4494b13ef5af65d3afedf05bf7565918f1e31ad68ae0df81f5c3b12baf519", 19 | "zh:e6e68ef6881bc3312db50c9fd761f226f34d7834b64f90d96616b7ca6b1daf34", 20 | "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", 21 | ] 22 | } 23 | -------------------------------------------------------------------------------- /node-example-app/db/seeds/05_purchaseItems.js: -------------------------------------------------------------------------------- 1 | //seeds for purchase table 2 | exports.seed = function (knex) { 3 | // Inserts seed entries 4 | return knex('purchaseItem').insert([ 5 | { 6 | id: 1, 7 | purchase_id: 1, 8 | product_id: 1, 9 | quantity: 1, 10 | }, 11 | { 12 | id: 2, 13 | purchase_id: 1, 14 | product_id: 2, 15 | quantity: 2, 16 | }, 17 | { 18 | id: 3, 19 | purchase_id: 2, 20 | product_id: 2, 21 | quantity: 3, 22 | }, 23 | { 24 | id: 4, 25 | purchase_id: 2, 26 | product_id: 3, 27 | quantity: 4, 28 | }, 29 | { 30 | id: 5, 31 | purchase_id: 2, 32 | product_id: 4, 33 | quantity: 1, 34 | }, 35 | { 36 | id: 6, 37 | purchase_id: 3, 38 | product_id: 1, 39 | quantity: 1, 40 | }, 41 | { 42 | id: 7, 43 | purchase_id: 3, 44 | product_id: 2, 45 | quantity: 2, 46 | }, 47 | ]).then(function () { 48 | return knex.schema.raw('ALTER SEQUENCE "purchaseItem_id_seq" RESTART WITH 8') 49 | }); 50 | } -------------------------------------------------------------------------------- /node-example-app/db/seeds/02_users.js: -------------------------------------------------------------------------------- 1 | const bcrypt = require('bcryptjs'); 2 | 3 | exports.seed = function (knex) { 4 | return knex('user').insert( 5 | [ 6 | { 7 | id: 1, 8 | name: 'admin', 9 | surname: 'admin', 10 | gender: 'admin', 11 | email: 'admin@gmail.io', 12 | DOB: "2002-06-30", 13 | contact_number: '0987654321', 14 | role: 'admin', 15 | password: bcrypt.hashSync('Password', bcrypt.genSaltSync(10)), 16 | address_id: 1, 17 | }, 18 | { 19 | id: 2, 20 | name: 'James', 21 | surname: 'Bond', 22 | gender: 'male', 23 | email: 'jamesbond@example.com', 24 | DOB: "2002-06-30", 25 | contact_number: '0987654321', 26 | role: 'customer', 27 | password: bcrypt.hashSync('Password', bcrypt.genSaltSync(10)), 28 | address_id: 2, 29 | }, 30 | { 31 | id: 3, 32 | name: 'John', 33 | surname: 'Wick', 34 | gender: 'other', 35 | email: 'johnwick@example.com', 36 | DOB: "2002-06-30", 37 | contact_number: '0987654321', 38 | role: 'customer', 39 | password: bcrypt.hashSync('Password', bcrypt.genSaltSync(10)), 40 | address_id: 3, 41 | } 42 | ]).then(function () { 43 | return knex.schema.raw('ALTER SEQUENCE user_id_seq RESTART WITH 4') 44 | }); 45 | }; 
-------------------------------------------------------------------------------- /gcp-infra/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | # Use postgres/example user/password credentials 2 | version: '3.1' 3 | 4 | services: 5 | node-example-app: 6 | build: ../node-example-app/ 7 | restart: always 8 | environment: 9 | DATABASE_NAME: knexdb 10 | POSTGRES_USER: knexuser 11 | POSTGRES_USER_PW: knexpw 12 | POSTGRES_PORT: 5432 13 | POSTGRES_HOST: 14 | NODE_ENV: development 15 | OTEL_COLLECTOR_TRACES_URL: 'http://otel-collector:4318/v1/traces' 16 | OTEL_COLLECTOR_METRICS_URL: 'http://otel-collector:4318/v1/metrics' 17 | ports: 18 | - 3000:3000 19 | networks: 20 | - otel 21 | adminer: 22 | image: adminer 23 | restart: always 24 | ports: 25 | - 8080:8080 26 | networks: 27 | - adminer-network 28 | otel-collector: 29 | image: otel/opentelemetry-collector-contrib 30 | environment: 31 | GOOGLE_APPLICATION_CREDENTIALS: "/etc/otel/key.json" 32 | volumes: 33 | - ./otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml 34 | - ./credentials/sql-trace-otel.json:/etc/otel/key.json 35 | ports: 36 | - 1888:1888 # pprof extension 37 | - 8888:8888 # Prometheus metrics exposed by the Collector 38 | - 8889:8889 # Prometheus exporter metrics 39 | - 13133:13133 # health_check extension 40 | - 4317:4317 # OTLP gRPC receiver 41 | - 4318:4318 # OTLP http receiver 42 | - 55679:55679 # zpages extension 43 | networks: 44 | - prometheus 45 | - otel 46 | - jaeger 47 | networks: 48 | adminer-network: 49 | otel: 50 | jaeger: 51 | prometheus: 52 | volumes: 53 | prometheus-volume: 54 | external: false -------------------------------------------------------------------------------- /infra/otelcol-custom/contrib-config.yaml: -------------------------------------------------------------------------------- 1 | # To limit exposure to denial of service attacks, change the host in endpoints below from 0.0.0.0 to a specific network interface. 
2 | # See https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security-best-practices.md#safeguards-against-denial-of-service-attacks 3 | 4 | extensions: 5 | health_check: 6 | pprof: 7 | endpoint: 0.0.0.0:1777 8 | zpages: 9 | endpoint: 0.0.0.0:55679 10 | 11 | receivers: 12 | otlp: 13 | protocols: 14 | grpc: 15 | endpoint: 0.0.0.0:4317 16 | http: 17 | endpoint: 0.0.0.0:4318 18 | 19 | opencensus: 20 | endpoint: 0.0.0.0:55678 21 | 22 | # Collect own metrics 23 | prometheus: 24 | config: 25 | scrape_configs: 26 | - job_name: 'otel-collector' 27 | scrape_interval: 10s 28 | static_configs: 29 | - targets: ['0.0.0.0:8888'] 30 | 31 | jaeger: 32 | protocols: 33 | grpc: 34 | endpoint: 0.0.0.0:14250 35 | thrift_binary: 36 | endpoint: 0.0.0.0:6832 37 | thrift_compact: 38 | endpoint: 0.0.0.0:6831 39 | thrift_http: 40 | endpoint: 0.0.0.0:14268 41 | 42 | zipkin: 43 | endpoint: 0.0.0.0:9411 44 | 45 | processors: 46 | batch: 47 | 48 | exporters: 49 | debug: 50 | verbosity: detailed 51 | 52 | service: 53 | 54 | pipelines: 55 | 56 | traces: 57 | receivers: [otlp, opencensus, jaeger, zipkin] 58 | processors: [batch] 59 | exporters: [debug] 60 | 61 | metrics: 62 | receivers: [otlp, opencensus, prometheus] 63 | processors: [batch] 64 | exporters: [debug] 65 | 66 | logs: 67 | receivers: [otlp] 68 | processors: [batch] 69 | exporters: [debug] 70 | 71 | extensions: [health_check, pprof, zpages] -------------------------------------------------------------------------------- /node-example-app/controller/purchase/index.js: -------------------------------------------------------------------------------- 1 | //controller file for purchases 2 | const knex = require('../../db/knex'); 3 | 4 | // Gets all purchases for the user 5 | const getUserPurchases = async (req, res) => { 6 | try { 7 | await knex.from('purchase').select('id', 'datetime').where('user_id', req.user.id).then((purchases) => { 8 | res.send(purchases) 9 | }) 10 | } catch (err) { 11 | console.error(err.message); 12 | res.status(500).send("Server Error"); 13 | } 14 | } 15 | 16 | // Gets a specific purchase for the user 17 | const getPurchase = async (req, res) => { 18 | console.log(req.user.id) 19 | const purchaseId = parseInt(req.params.id) 20 | try { 21 | await knex.from('purchase').select('id', 'user_id', 'datetime').where({ 'id': purchaseId, 'user_id': req.user.id }).then((purchases) => { 22 | if (purchases.length === 0) { 23 | return res.status(404).json({ msg: 'Purchase not found!' 
}); 24 | } 25 | res.send(purchases[0]) 26 | }) 27 | } catch (err) { 28 | console.error(err.message); 29 | res.status(500).send("Server Error"); 30 | } 31 | } 32 | 33 | // Gets a specific purchase for the user including the list of purchased items 34 | const getPurchasedItems = async (req, res) => { 35 | const purchaseId = parseInt(req.params.id) 36 | try { 37 | await knex.from('purchaseItem').join('product', 'purchaseItem.product_id', '=', 'product.id').select('purchaseItem.id', 'purchaseItem.purchase_id', 'purchaseItem.product_id', 'purchaseItem.quantity', 'product.name', 'product.price').where('purchase_id', purchaseId).then((purchasedItems) => { 38 | res.send(purchasedItems) 39 | }) 40 | } catch (err) { 41 | console.error(err.message); 42 | res.status(500).send("Server Error"); 43 | } 44 | } 45 | 46 | module.exports = { 47 | getUserPurchases, 48 | getPurchase, 49 | getPurchasedItems, 50 | } -------------------------------------------------------------------------------- /node-example-app/instrumentation.js: -------------------------------------------------------------------------------- 1 | /*instrumentation.js*/ 2 | const opentelemetry = require('@opentelemetry/sdk-node'); 3 | const { 4 | getNodeAutoInstrumentations, 5 | } = require('@opentelemetry/auto-instrumentations-node'); 6 | const { 7 | OTLPTraceExporter, 8 | } = require('@opentelemetry/exporter-trace-otlp-proto'); 9 | const { 10 | OTLPMetricExporter, 11 | } = require('@opentelemetry/exporter-metrics-otlp-proto'); 12 | const { PeriodicExportingMetricReader } = require('@opentelemetry/sdk-metrics'); 13 | const { PgInstrumentation } = require('@opentelemetry/instrumentation-pg'); 14 | const { KnexInstrumentation } = require('@opentelemetry/instrumentation-knex'); 15 | const { ExpressInstrumentation } = require('@opentelemetry/instrumentation-express'); 16 | 17 | require('dotenv').config() 18 | 19 | const sdk = new opentelemetry.NodeSDK({ 20 | traceExporter: new OTLPTraceExporter({ 21 | // optional - default url is http://localhost:4318/v1/traces 22 | url: process.env.OTEL_COLLECTOR_TRACES_URL || 'http://localhost:4318/v1/traces', 23 | // optional - collection of custom headers to be sent with each request, empty by default 24 | headers: {}, 25 | }), 26 | metricReader: new PeriodicExportingMetricReader({ 27 | exporter: new OTLPMetricExporter({ 28 | url: process.env.OTEL_COLLECTOR_TRACES_URL || 'http://localhost:4318/v1/metrics', // url is optional and can be omitted - default is http://localhost:4318/v1/metrics 29 | headers: {}, // an optional object containing custom headers to be sent with each request 30 | concurrencyLimit: 1, // an optional limit on pending requests 31 | }), 32 | }), 33 | instrumentations: [ 34 | getNodeAutoInstrumentations(), 35 | new KnexInstrumentation( 36 | { 37 | maxQueryLength: 100, 38 | } 39 | ), 40 | new PgInstrumentation({ 41 | addSqlCommenterCommentToQueries: true, 42 | }), 43 | new ExpressInstrumentation(), 44 | ], 45 | }); 46 | sdk.start(); -------------------------------------------------------------------------------- /infra/grafana-datasources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | datasources: 3 | - prometheus: 4 | access: proxy 5 | basicAuth: false 6 | editable: true 7 | name: Prometheus 8 | orgId: 1 9 | type: prometheus 10 | url: http://prometheus:9090 11 | - name: Loki 12 | type: loki 13 | uid: DzuPFa9P2 14 | access: proxy 15 | orgId: 1 16 | url: http://loki:3100 17 | basicAuth: false 18 | isDefault: true 19 | version: 1 20 
| editable: true 21 | derivedFields: 22 | - datasourceUid: EbPG8fYoz 23 | matcherRegex: "traceid\u0022: \u0022([A-Za-z0-9]{32})" 24 | name: trace_id 25 | url: '$${__value.raw}' 26 | urlDisplayLabel: 'View Traces' 27 | - name: Jaeger 28 | type: jaeger 29 | uid: EbPG8fYoz 30 | url: http://jaeger:16686 31 | access: proxy 32 | basicAuth: false 33 | # basicAuthUser: my_user 34 | readOnly: false 35 | isDefault: false 36 | editable: true 37 | jsonData: 38 | tracesToLogsV2: 39 | # Field with an internal link pointing to a logs data source in Grafana. 40 | # datasourceUid value must match the uid value of the logs data source. 41 | datasourceUid: 'DzuPFa9P2' 42 | spanStartTimeShift: '-5m' 43 | spanEndTimeShift: '5m' 44 | filterByTraceID: false 45 | filterBySpanID: false 46 | customQuery: trace 47 | query: '{exporter="OTLP"} | json | traceid=`$${__trace.traceId}`' 48 | tags: 49 | - traceid 50 | - spanid 51 | - key: traceID 52 | value: attributes_trace_id 53 | # tracesToMetrics: 54 | # datasourceUid: 'prom' 55 | # spanStartTimeShift: '1h' 56 | # spanEndTimeShift: '-1h' 57 | # tags: [{ key: 'service.name', value: 'service' }, { key: 'job' }] 58 | # queries: 59 | # - name: 'Sample query' 60 | # query: 'sum(rate(traces_spanmetrics_latency_bucket{$$__tags}[5m]))' 61 | nodeGraph: 62 | enabled: true 63 | traceQuery: 64 | timeShiftEnabled: true 65 | spanStartTimeShift: '1h' 66 | spanEndTimeShift: '-1h' 67 | spanBar: 68 | type: 'None' 69 | # secureJsonData: 70 | # basicAuthPassword: my_password -------------------------------------------------------------------------------- /gcp-infra/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | google = { 4 | source = "hashicorp/google" 5 | version = "4.51.0" 6 | } 7 | } 8 | } 9 | 10 | provider "google" { 11 | credentials = file("./credentials/sql-trace-terraform.json") 12 | 13 | project = "sql-trace" 14 | region = "europe-west6" 15 | zone = "europe-west6-c" 16 | } 17 | 18 | resource "google_sql_database_instance" "main" { 19 | name = "trace-test" 20 | database_version = "POSTGRES_15" 21 | region = "europe-west6" 22 | 23 | settings { 24 | tier = "db-f1-micro" 25 | 26 | disk_type = "PD_HDD" 27 | disk_size = 10 28 | disk_autoresize = true 29 | 30 | availability_type = "ZONAL" 31 | 32 | backup_configuration { 33 | enabled = false 34 | } 35 | 36 | ip_configuration { 37 | ipv4_enabled = true 38 | require_ssl = false 39 | 40 | authorized_networks { 41 | value = "213.55.243.210/32" 42 | } 43 | } 44 | 45 | insights_config { 46 | query_insights_enabled = true 47 | record_application_tags = true 48 | } 49 | } 50 | } 51 | 52 | resource "google_sql_database" "knexdb" { 53 | name = "knexdb" 54 | instance = google_sql_database_instance.main.name 55 | } 56 | 57 | resource "google_sql_user" "knexuser" { 58 | name = "knexuser" 59 | instance = google_sql_database_instance.main.name 60 | password = "knexpw" 61 | } 62 | 63 | resource "google_service_account" "opentelemetry" { 64 | account_id = "opentelemetry" 65 | display_name = "OpenTelemetry Service Account" 66 | project = "sql-trace" 67 | } 68 | 69 | resource "google_project_iam_member" "opentelemetry_cloudtraceagent" { 70 | project = "sql-trace" 71 | role = "roles/cloudtrace.agent" 72 | member = "serviceAccount:${google_service_account.opentelemetry.email}" 73 | } 74 | 75 | resource "google_project_iam_member" "opentelemetry_monitoringmetricwriter" { 76 | project = "sql-trace" 77 | role = "roles/monitoring.metricWriter" 78 | member = 
"serviceAccount:${google_service_account.opentelemetry.email}" 79 | } 80 | 81 | resource "google_project_iam_member" "opentelemetry_logginglogwriter" { 82 | project = "sql-trace" 83 | role = "roles/logging.logWriter" 84 | member = "serviceAccount:${google_service_account.opentelemetry.email}" 85 | } 86 | -------------------------------------------------------------------------------- /node-example-app/controller/address/index.js: -------------------------------------------------------------------------------- 1 | const knex = require('../../db/knex'); 2 | 3 | // Gets all addresses 4 | const getAddressesAuth = async (req, res) => { 5 | try { 6 | 7 | const user = await knex.select().from('users').where('id', req.user.id).then((user) => { return user[0] }) 8 | 9 | if (user.id === req.user.id) { 10 | await knex.from('address').select('street', 'suburb', 'city', 'code').then((addresses) => { 11 | res.send(addresses) 12 | }) 13 | } 14 | } catch (err) { 15 | console.error(err.message); 16 | res.status(500).send("Server Error"); 17 | } 18 | } 19 | 20 | // Updates address by ID 21 | const updateAddress = async (req, res) => { 22 | try { 23 | const { 24 | street, 25 | suburb, 26 | city, 27 | code, 28 | } = req.body; 29 | 30 | const address = {}; 31 | 32 | if (street) address.street = street; 33 | if (suburb) address.suburb = suburb; 34 | if (city) address.city = city; 35 | if (code) address.code = code; 36 | address.updated_at = new Date(), 37 | 38 | await knex('address').where('id', req.params.id) 39 | .update(address).then(() => { 40 | knex.select() 41 | .from('address').where('id', req.params.id).then((address) => { 42 | res.send(address[0]) 43 | }) 44 | }) 45 | 46 | } catch (err) { 47 | console.error(err.message); 48 | res.status(500).send("Server Error"); 49 | } 50 | } 51 | 52 | // Remove address by ID 53 | const removeAddress = async (req, res) => { 54 | try { 55 | let exists = await knex.select().from('address').where('id', req.params.id).then((address) => { return address[0] }); 56 | if (!exists) { 57 | return res.status(400).json({ msg: 'Address not found!' 
}); 58 | } 59 | 60 | if (exists.id === req.params.id) { 61 | knex('address').where('id', req.params.id) 62 | .del().then(function () { 63 | res.json({ msg: 'Address deleted' }); 64 | }) 65 | } 66 | } catch (err) { 67 | console.error(err.message); 68 | res.status(500).send("Server Error"); 69 | } 70 | } 71 | 72 | module.exports = { 73 | getAddressesAuth, 74 | updateAddress, 75 | removeAddress 76 | } -------------------------------------------------------------------------------- /node-example-app/controller/cart/index.js: -------------------------------------------------------------------------------- 1 | //controller for cart items 2 | const knex = require('../../db/knex'); 3 | 4 | // Gets all the items currently in the current user's cart 5 | const getUserCart = async (req, res) => { 6 | try { 7 | await knex.from('cartItem').join('product', 'cartItem.product_id', '=', 'product.id').select('cartItem.id', 'cartItem.product_id', 'cartItem.quantity', 'product.name', 'product.price').where('user_id', req.user.id).then((cartItems) => { 8 | res.send(cartItems) 9 | }) 10 | } catch (err) { 11 | console.error(err.message); 12 | res.status(500).send("Server Error"); 13 | } 14 | } 15 | 16 | const addToCart = async (req, res) => { 17 | const { productId, quantity } = req.body; 18 | try { 19 | await knex.from('cartItem').select('id', 'user_id', 'product_id', 'quantity').where({ 'user_id': req.user.id, 'product_id': productId }).then((cartItems) => { 20 | if (cartItems.length === 0) { 21 | knex('cartItem').insert({ 'user_id': req.user.id, 'product_id': productId, 'quantity': quantity }).then(() => { 22 | res.send({ msg: 'Item added to cart!' }) 23 | }) 24 | } else { 25 | knex('cartItem').where({ 'user_id': req.user.id, 'product_id': productId }).update({ 'quantity': cartItems[0].quantity + quantity }).then(() => { 26 | res.send({ msg: 'Item added to cart!' }) 27 | }) 28 | } 29 | }) 30 | } catch (err) { 31 | console.error(err.message); 32 | res.status(500).send("Server Error"); 33 | } 34 | } 35 | 36 | const removeFromCart = async (req, res) => { 37 | const cartItemId = parseInt(req.params.id) 38 | try { 39 | await knex.from('cartItem').select('id', 'user_id', 'product_id', 'quantity').where({ 'id': cartItemId, 'user_id': req.user.id }).then((cartItems) => { 40 | if (cartItems.length === 0) { 41 | return res.status(404).json({ msg: 'Item not found!' }); 42 | } 43 | knex('cartItem').where({ 'id': cartItemId, 'user_id': req.user.id }).del().then(() => { 44 | res.send({ msg: 'Item removed from cart!' }) 45 | }) 46 | }) 47 | } catch (err) { 48 | console.error(err.message); 49 | res.status(500).send("Server Error"); 50 | } 51 | } 52 | 53 | const order = async (req, res) => { 54 | try { 55 | await knex.transaction(async (trx) => { 56 | const purchases = await trx('purchase').insert({ 'user_id': req.user.id, 'datetime': (new Date()).toISOString() }).returning('id') 57 | 58 | const cartItems = await trx.from('cartItem').select('id', 'user_id', 'product_id', 'quantity').where({ 'user_id': req.user.id }) 59 | 60 | const purchaseItems = cartItems.map((cartItem) => { 61 | return { 'purchase_id': purchases[0].id, 'product_id': cartItem.product_id, 'quantity': cartItem.quantity } 62 | }) 63 | await trx('purchaseItem').insert(purchaseItems) 64 | 65 | await trx('cartItem').where('user_id', req.user.id).del() 66 | }) 67 | res.send({ msg: 'Order placed!' 
}) 68 | } catch (err) { 69 | console.error(err.message); 70 | res.status(500).send("Server Error"); 71 | } 72 | } 73 | 74 | module.exports = { 75 | getUserCart, 76 | addToCart, 77 | removeFromCart, 78 | order, 79 | } -------------------------------------------------------------------------------- /infra/postgresql.conf: -------------------------------------------------------------------------------- 1 | # ----------------------------- 2 | # PostgreSQL configuration file 3 | # ----------------------------- 4 | # 5 | # This file consists of lines of the form: 6 | # 7 | # name = value 8 | # 9 | # (The "=" is optional.) Whitespace may be used. Comments are introduced with 10 | # "#" anywhere on a line. The complete list of parameter names and allowed 11 | # values can be found in the PostgreSQL documentation. 12 | # 13 | # The commented-out settings shown in this file represent the default values. 14 | # Re-commenting a setting is NOT sufficient to revert it to the default value; 15 | # you need to reload the server. 16 | # 17 | # This file is read on server startup and when the server receives a SIGHUP 18 | # signal. If you edit the file on a running system, you have to SIGHUP the 19 | # server for the changes to take effect, run "pg_ctl reload", or execute 20 | # "SELECT pg_reload_conf()". Some parameters, which are marked below, 21 | # require a server shutdown and restart to take effect. 22 | # 23 | # Any parameter can also be given as a command-line option to the server, e.g., 24 | # "postgres -c log_connections=on". Some parameters can be changed at run time 25 | # with the "SET" SQL command. 26 | # 27 | # Memory units: B = bytes Time units: us = microseconds 28 | # kB = kilobytes ms = milliseconds 29 | # MB = megabytes s = seconds 30 | # GB = gigabytes min = minutes 31 | # TB = terabytes h = hours 32 | # d = days 33 | 34 | 35 | #------------------------------------------------------------------------------ 36 | # CONNECTIONS AND AUTHENTICATION 37 | #------------------------------------------------------------------------------ 38 | 39 | # - Connection Settings - 40 | 41 | listen_addresses = '*' 42 | 43 | #------------------------------------------------------------------------------ 44 | # REPORTING AND LOGGING 45 | #------------------------------------------------------------------------------ 46 | 47 | 48 | # - Where to Log - 49 | 50 | logging_collector = on # Enable capturing of stderr, jsonlog, 51 | log_destination = 'jsonlog' 52 | log_directory = '/var/log/postgresql' 53 | log_filename = 'pg_log' 54 | 55 | # - When to Log - 56 | log_min_duration_statement = 0 57 | 58 | # - What to Log - 59 | 60 | log_statement = 'mod' # none, ddl, mod, all 61 | log_duration = on 62 | log_line_prefix = '%m: [%l-1] user=%u,db=%d,app=%a,client=%h,query_id=%Q ' 63 | 64 | #------------------------------------------------------------------------------ 65 | # CUSTOMIZED OPTIONS 66 | #------------------------------------------------------------------------------ 67 | 68 | # - Enable pg_stat_statements extension to capture detailed statistics for each command 69 | # - Enable auto_explain extension to log the execution plans of slow statements 70 | shared_preload_libraries = 'pg_stat_statements, auto_explain' 71 | 72 | pg_stat_statements.max = 10000 73 | pg_stat_statements.track = top 74 | 75 | auto_explain.log_min_duration = 0 # 10ms 76 | auto_explain.log_format = 'json' 77 | auto_explain.log_analyze = true 78 | auto_explain.log_buffers = true 79 | # auto_explain.sample_rate = .25 80 | # 
auto_explain.log_level = 'NOTICE' 81 | 82 | 83 | #------------------------------------------------------------------------------ 84 | # STATISTICS 85 | #------------------------------------------------------------------------------ 86 | 87 | # - Monitoring - 88 | 89 | compute_query_id = auto 90 | log_statement_stats = on 91 | #log_parser_stats = off 92 | #log_planner_stats = off 93 | #log_executor_stats = off 94 | 95 | -------------------------------------------------------------------------------- /infra/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | # Use postgres/example user/password credentials 2 | version: '3.1' 3 | 4 | services: 5 | node-example-app: 6 | build: ../node-example-app/ 7 | restart: always 8 | environment: 9 | DATABASE_NAME: knexdb 10 | POSTGRES_USER: knexuser 11 | POSTGRES_USER_PW: knexpw 12 | POSTGRES_PORT: 5432 13 | POSTGRES_HOST: db 14 | NODE_ENV: development 15 | OTEL_COLLECTOR_TRACES_URL: 'http://otel-collector:4318/v1/traces' 16 | OTEL_COLLECTOR_METRICS_URL: 'http://otel-collector:4318/v1/metrics' 17 | ports: 18 | - 3000:3000 19 | networks: 20 | - db 21 | - otel 22 | db: 23 | build: postgres-json-stdout/ 24 | restart: always 25 | environment: 26 | POSTGRES_PASSWORD: example 27 | PGDATA: /var/lib/postgresql/data/pgdata 28 | command: 29 | - "postgres" 30 | - "-c" 31 | - "config_file=/etc/postgresql/postgresql.conf" 32 | ports: 33 | - 5432:5432 34 | volumes: 35 | - "./data/knexappdb:/var/lib/postgresql/data/pgdata" 36 | - "./postgresql.conf:/etc/postgresql/postgresql.conf" 37 | networks: 38 | - adminer-network 39 | - otel 40 | - fluent-bit 41 | - db 42 | logging: 43 | driver: "fluentd" 44 | options: 45 | tag: docker.logify 46 | fluentd-address: 127.0.0.1:24224 47 | depends_on: 48 | - fluent-bit 49 | attach: false 50 | adminer: 51 | image: adminer 52 | restart: always 53 | ports: 54 | - 8080:8080 55 | networks: 56 | - adminer-network 57 | otel-collector: 58 | build: otelcol-custom/ 59 | volumes: 60 | - ./otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml 61 | ports: 62 | - 1888:1888 # pprof extension 63 | # - 8888:8888 # Prometheus metrics exposed by the Collector 64 | - 8889:8889 # Prometheus exporter metrics 65 | - 13133:13133 # health_check extension 66 | - 4317:4317 # OTLP gRPC receiver 67 | - 4318:4318 # OTLP http receiver 68 | - 4319:4319 # OTLP/example gRPC receiver 69 | - 55679:55679 # zpages extension 70 | networks: 71 | - prometheus 72 | - otel 73 | - jaeger 74 | - fluent-bit 75 | - loki 76 | pgtrace-collector: 77 | image: elessar1ch/pgtraceconnector:v0.0.1 78 | volumes: 79 | - ./pgtraceconnector-collector-config.yaml:/etc/otelcol-contrib/config.yaml 80 | ports: 81 | - 4317 # OTLP gRPC receiver 82 | networks: 83 | - otel 84 | fluent-bit: 85 | image: fluent/fluent-bit:latest 86 | volumes: 87 | - ./fluent-bit:/fluent-bit/etc 88 | - /var/run/docker.sock:/var/run/docker.sock 89 | command: ["fluent-bit", "-c", "/fluent-bit/etc/fluent-bit.conf"] 90 | container_name: fluent-bit 91 | ports: 92 | - 24224:24224 93 | networks: 94 | - fluent-bit 95 | depends_on: 96 | - otel-collector 97 | loki: 98 | image: grafana/loki:2.9.4 99 | ports: 100 | - "3100:3100" 101 | command: -config.file=/etc/loki/local-config.yaml 102 | networks: 103 | - loki 104 | jaeger: 105 | image: jaegertracing/all-in-one:1.52 106 | environment: 107 | COLLECTOR_OTLP_ENABLED: true 108 | ports: 109 | - 16686:16686 110 | # - 14317:4317 111 | # - 14318:4318 112 | networks: 113 | - jaeger 114 | prometheus: 115 | image: 
prom/prometheus:v2.49.1 116 | volumes: 117 | - ./prometheus.yaml:/etc/prometheus/prometheus.yml 118 | - prometheus-volume:/prometheus 119 | ports: 120 | - 9090:9090 121 | networks: 122 | - prometheus 123 | grafana: 124 | environment: 125 | - GF_PATHS_PROVISIONING=/etc/grafana/provisioning 126 | - GF_AUTH_ANONYMOUS_ENABLED=true 127 | - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin 128 | volumes: 129 | - ./grafana-datasources.yaml:/etc/grafana/provisioning/datasources/ds.yaml 130 | - ./grafana-dashboard-provider.yaml:/etc/grafana/provisioning/dashboards/provider.yaml 131 | - ./grafana-dashboards:/etc/dashboards/ 132 | image: grafana/grafana:10.3.1 133 | ports: 134 | - "13000:3000" 135 | networks: 136 | - loki 137 | - prometheus 138 | - jaeger 139 | networks: 140 | adminer-network: 141 | otel: 142 | jaeger: 143 | prometheus: 144 | fluent-bit: 145 | db: 146 | loki: 147 | volumes: 148 | prometheus-volume: 149 | external: false -------------------------------------------------------------------------------- /infra/otel-collector-config.yaml: -------------------------------------------------------------------------------- 1 | receivers: 2 | otlp: 3 | protocols: 4 | grpc: 5 | http: 6 | postgresql: 7 | endpoint: db:5432 8 | transport: tcp 9 | username: oteluser 10 | password: otelpw 11 | databases: 12 | - knexdb 13 | collection_interval: 10s 14 | tls: 15 | insecure: true 16 | sqlquery/pgstatstatements: 17 | driver: postgres 18 | datasource: "host=db port=5432 user=oteluser password=otelpw sslmode=disable database=knexdb" 19 | queries: 20 | - sql: "select * from pg_stat_statements WHERE CAST(dbid AS text) = '16391' AND CAST(userid AS text) = '16392' AND toplevel = true" 21 | metrics: 22 | - metric_name: pg_stat_statements_total_exec_time 23 | value_column: total_exec_time 24 | attribute_columns: 25 | - queryid 26 | - userid 27 | - dbid 28 | value_type: double 29 | monotonic: true 30 | data_type: sum 31 | - metric_name: pg_stat_statements_stddev_exec_time 32 | value_column: stddev_exec_time 33 | attribute_columns: 34 | - queryid 35 | - userid 36 | - dbid 37 | value_type: double 38 | data_type: gauge 39 | - metric_name: pg_stat_statements_min_exec_time 40 | value_column: min_exec_time 41 | attribute_columns: 42 | - queryid 43 | - userid 44 | - dbid 45 | value_type: double 46 | data_type: gauge 47 | - metric_name: pg_stat_statements_max_exec_time 48 | value_column: max_exec_time 49 | attribute_columns: 50 | - queryid 51 | - userid 52 | - dbid 53 | value_type: double 54 | data_type: gauge 55 | - metric_name: pg_stat_statements_mean_exec_time 56 | value_column: mean_exec_time 57 | attribute_columns: 58 | - queryid 59 | - userid 60 | - dbid 61 | value_type: double 62 | data_type: gauge 63 | - metric_name: pg_stat_statements_calls 64 | value_column: calls 65 | attribute_columns: 66 | - queryid 67 | - userid 68 | - dbid 69 | value_type: int 70 | data_type: sum 71 | monotonic: true 72 | - metric_name: pg_stat_statements_rows 73 | value_column: rows 74 | attribute_columns: 75 | - queryid 76 | - userid 77 | - dbid 78 | value_type: int 79 | data_type: sum 80 | processors: 81 | logstransform/fluentbitlog: 82 | operators: 83 | - type: json_parser 84 | if: 'body.log matches "^{.*}$"' 85 | parse_from: body.log 86 | timestamp: 87 | parse_from: attributes.timestamp 88 | layout_type: strptime 89 | layout: '%Y-%m-%d %H:%M:%S.%L %Z' 90 | - type: regex_parser 91 | regex: "traceparent='00-(?P\\S*)-(?P\\S*)-(?P\\d*)'" 92 | parse_from: body.log 93 | trace: 94 | trace_id: 95 | parse_from: attributes.trace_id 96 | span_id: 97 | 
parse_from: attributes.span_id 98 | trace_flags: 99 | parse_from: attributes.trace_flags 100 | filter/knexuser: 101 | logs: 102 | include: 103 | match_type: strict 104 | record_attributes: 105 | - key: user 106 | value: knexuser 107 | attributes/loki: 108 | actions: 109 | - action: insert 110 | key: loki.attribute.labels 111 | value: dbname, user 112 | # resource/loki: 113 | # attributes: 114 | # - action: insert 115 | # key: loki.resource.labels 116 | # value: service.name, service.namespace 117 | exporters: 118 | debug: 119 | verbosity: detailed 120 | otlp/jaeger: 121 | endpoint: jaeger:4317 122 | tls: 123 | insecure: true 124 | otlp/pgtrace: 125 | endpoint: pgtrace-collector:4317 126 | tls: 127 | insecure: true 128 | prometheus: 129 | endpoint: "0.0.0.0:8889" 130 | send_timestamps: true 131 | metric_expiration: 180m 132 | enable_open_metrics: true 133 | add_metric_suffixes: false 134 | resource_to_telemetry_conversion: 135 | enabled: true 136 | loki: 137 | endpoint: http://loki:3100/loki/api/v1/push 138 | tls: 139 | insecure: true 140 | connectors: 141 | forward: 142 | service: 143 | pipelines: 144 | traces: 145 | receivers: [otlp] 146 | exporters: [otlp/jaeger] 147 | metrics: 148 | receivers: [otlp, postgresql, sqlquery/pgstatstatements] 149 | exporters: [prometheus] 150 | logs/postgres: 151 | receivers: [otlp] 152 | processors: [logstransform/fluentbitlog, filter/knexuser] 153 | exporters: [otlp/pgtrace, forward] 154 | logs/loki: 155 | receivers: [forward] 156 | processors: [attributes/loki] 157 | exporters: [loki, debug] 158 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # SQL Tracing Example 2 | This repository contains a demonstration environment for a prototype that extracts traces from PostgreSQL logs. 3 | The demonstration can be run using `docker compose` since all components are available as containers. 4 | 5 | The following is a high-level overview of the components and flows of the environment: 6 | 7 | ![high-level overview of the test environment](./complete_local_setup.png "High-level overview of the test environment") 8 | 9 | ## Initial Setup 10 | To start the environment, navigate to the `infra` directory and run 11 | ```sh 12 | docker compose up 13 | ``` 14 | 15 | Connect to `Adminer` to set up the database via the [Adminer UI](http://localhost:8080/?pgsql=db&username=postgres). Enter `db` in the server field and use the user `postgres` and password `example` to log in. 16 | 17 | Once connected, create a database `knexdb`. Select the database and choose the "SQL Command" link to enter SQL commands, or use this link: [SQL Command on Adminer](http://localhost:8080/?pgsql=db&username=postgres&db=knexdb&ns=public&sql=). 18 | 19 | Enter the following SQL statements into the field and execute them: 20 | ```sql 21 | -- On database knexdb 22 | CREATE USER knexuser WITH PASSWORD 'knexpw'; 23 | GRANT ALL PRIVILEGES ON SCHEMA public TO knexuser; 24 | 25 | CREATE USER oteluser WITH PASSWORD 'otelpw'; 26 | 27 | GRANT pg_monitor TO oteluser; 28 | 29 | CREATE EXTENSION pg_stat_statements; 30 | ``` 31 | 32 | The database should now be ready, and the application and the OpenTelemetry Collector should be able to connect. However, the schema still needs to be created and the example data loaded.
Use the following commands to do so: 33 | 34 | ```sh 35 | docker run -e POSTGRES_HOST=host.docker.internal -e DATABASE_NAME=knexdb -e POSTGRES_USER=knexuser -e POSTGRES_USER_PW=knexpw -e POSTGRES_PORT=5432 --entrypoint npm infra-node-example-app run migrate 36 | 37 | docker run -e POSTGRES_HOST=host.docker.internal -e DATABASE_NAME=knexdb -e POSTGRES_USER=knexuser -e POSTGRES_USER_PW=knexpw -e POSTGRES_PORT=5432 --entrypoint npm infra-node-example-app run seed 38 | ``` 39 | 40 | ## Interacting with the example node application 41 | The example node application is a very simple model of a web shop backend. 42 | It has login and authentication functionality and provides the ability to add 43 | products to a shopping cart, as well as an order action on that cart, which clears 44 | the cart and moves the items into a purchase table. 45 | 46 | You can request an authentication token for a user using the following command: 47 | ```sh 48 | curl -v -H "Content-Type: application/json" -X POST --data '{"email": "johnwick@example.com", "password": "Password"}' http://localhost:3000/api/user/login 49 | ``` 50 | 51 | You will receive a response of the following form: 52 | ```json 53 | {"id":3,"token":"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyIjp7ImlkIjozfSwiaWF0IjoxNzA2NjQyMjM5LCJleHAiOjE3MDY4MTUwMzl9.1daPjANrgqZPiu9S7xfGZIOZW7-EpLy-mrDuk21Bu7M"} 54 | ``` 55 | 56 | Copy the token and set it as your `AUTHTOKEN` environment variable: 57 | ```sh 58 | AUTHTOKEN=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyIjp7ImlkIjozfSwiaWF0IjoxNzA2NjQyMjM5LCJleHAiOjE3MDY4MTUwMzl9.1daPjANrgqZPiu9S7xfGZIOZW7-EpLy-mrDuk21Bu7M 59 | ``` 60 | 61 | You can now run the following requests: 62 | 63 | Query your cart: 64 | ```sh 65 | curl -v -H "x-auth-token: $AUTHTOKEN" http://localhost:3000/api/cart 66 | ``` 67 | 68 | Add an item to your cart: 69 | ```sh 70 | curl -v -H "x-auth-token: $AUTHTOKEN" -H "Content-Type: application/json" -X POST --data '{"productId":1,"quantity":1}' http://localhost:3000/api/cart 71 | ``` 72 | 73 | Order the contents of your cart: 74 | ```sh 75 | curl -v -H "x-auth-token: $AUTHTOKEN" -X POST http://localhost:3000/api/cart/order 76 | ``` 77 | 78 | View your past "purchases": 79 | ```sh 80 | curl -v -H "x-auth-token: $AUTHTOKEN" http://localhost:3000/api/purchase 81 | ``` 82 | 83 | View the items of a specific purchase: 84 | ```sh 85 | curl -v -H "x-auth-token: $AUTHTOKEN" http://localhost:3000/api/purchase/4/items 86 | ``` 87 | 88 | 89 | ## Viewing Telemetry 90 | - Traces can be viewed in Jaeger at [http://localhost:16686](http://localhost:16686) 91 | - Metrics can be viewed in Prometheus at [http://localhost:9090/](http://localhost:9090/) 92 | - Logs can be viewed using the `docker logs <container name>` command 93 | 94 | 95 | ## Running the Project Using Cloud SQL on GCP 96 | 97 | The infrastructure for GCP is located in the `gcp-infra` folder. 98 | 99 | ### Prerequisites 100 | You must have an account on GCP and a project called `sql-trace`.
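If you do not already have such a project, one way to create it is with the `gcloud` CLI. The commands below are only a sketch: they assume the `gcloud` CLI is installed and authenticated, and since GCP project IDs are globally unique, the ID `sql-trace` may already be taken, in which case you need a different ID and must adjust the Terraform configuration accordingly. A billing account must also be linked to the project before Cloud SQL instances can be created.
```sh
# Sketch only: create the GCP project and make it the active gcloud configuration.
gcloud projects create sql-trace --name="sql-trace"
gcloud config set project sql-trace
```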
101 | 102 | Enable the following APIs: 103 | - https://console.cloud.google.com/marketplace/product/google/iam.googleapis.com 104 | - https://console.cloud.google.com/marketplace/product/google/compute.googleapis.com 105 | - https://console.cloud.google.com/marketplace/product/google/sqladmin.googleapis.com 106 | - https://console.cloud.google.com/marketplace/product/google/cloudtrace.googleapis.com 107 | - https://console.cloud.google.com/apis/library/cloudresourcemanager.googleapis.com 108 | 109 | Create a service account for Terraform and a key with the right permissions, as documented at 110 | [https://developer.hashicorp.com/terraform/tutorials/gcp-get-started/google-cloud-platform-build](https://developer.hashicorp.com/terraform/tutorials/gcp-get-started/google-cloud-platform-build). 111 | The service account must have the role `Project IAM Admin` on the project. 112 | Download the service account credentials and store them in `./credentials/sql-trace-terraform.json`. 113 | 114 | 115 | 116 | Install Terraform on your machine. 117 | 118 | ### Provisioning the infrastructure 119 | 120 | Before provisioning the infrastructure, change the IP address that is allowed inbound access to the PostgreSQL instance in `main.tf`. 121 | 122 | 123 | Use `terraform init` and `terraform apply` to provision the infrastructure on GCP. 124 | 125 | Set the instance IP of your PostgreSQL database in Cloud SQL in the `docker-compose.yaml` (replacing the instance IP placeholder). 126 | 127 | Now run the following command to spin up the infrastructure: 128 | ```sh 129 | docker compose up 130 | ``` 131 | 132 | Run the following commands to finish setting up the database. Before running them, replace `<INSTANCE_IP>` with the IP of your PostgreSQL instance in Cloud SQL. 133 | 134 | ```sh 135 | docker run -e POSTGRES_HOST=<INSTANCE_IP> -e DATABASE_NAME=knexdb -e POSTGRES_USER=knexuser -e POSTGRES_USER_PW=knexpw -e POSTGRES_PORT=5432 --entrypoint npm infra-node-example-app run migrate 136 | 137 | docker run -e POSTGRES_HOST=<INSTANCE_IP> -e DATABASE_NAME=knexdb -e POSTGRES_USER=knexuser -e POSTGRES_USER_PW=knexpw -e POSTGRES_PORT=5432 --entrypoint npm infra-node-example-app run seed 138 | ``` -------------------------------------------------------------------------------- /node-example-app/controller/user/index.js: -------------------------------------------------------------------------------- 1 | const jwt = require('jsonwebtoken'); 2 | const config = require('config'); 3 | const bcrypt = require('bcryptjs'); 4 | const knex = require('../../db/knex'); 5 | 6 | // Reusable function (Gets address with street number/name and area code) 7 | const getAddress = async (street, code) => { 8 | return await knex.select().from('addresses').where('street', street).where('code', code).then((address) => { return address[0] }) 9 | } 10 | 11 | // Reusable function (Gets user by email, checks if email is in the database) 12 | const getUser = async (email) => { 13 | return await knex.select().from('user').where('email', email).then((user) => { return user[0] }) 14 | } 15 | 16 | // Add new user to users table.
Route: [api/users/register] 17 | // NOTE: Upon registration we'll first create address if it's not in our database already (Avoid duplicates) 18 | const addUser = async (req, res) => { 19 | try { 20 | let { 21 | street, 22 | suburb, 23 | city, 24 | code, 25 | name, 26 | surname, 27 | email, 28 | gender, 29 | DOB, 30 | contact_number, 31 | role, 32 | password 33 | } = req.body; 34 | 35 | const addressExists = await getAddress(street, code); // Checks if address is in our database 36 | 37 | if (!addressExists) { // If address isn't found new address gets inserted into addressess table 38 | 39 | let newAddress = { 40 | street, 41 | suburb, 42 | city, 43 | code 44 | } 45 | 46 | 47 | await knex('addresses').insert(newAddress); // Inserts address 48 | 49 | let emailExists = await getUser(email); // Checks if user exists by email(Unique) 50 | 51 | const latest = await getAddress(street, code); // Gets address by street and code field so that we can attach address_id to user 52 | 53 | if (!emailExists) { // If user isn't found new user gets inserted into users table 54 | let newUser = { 55 | name, 56 | surname, 57 | email, 58 | gender, 59 | DOB, 60 | contact_number, 61 | role, 62 | password, 63 | address_id: latest.id 64 | } 65 | 66 | const salt = await bcrypt.genSalt(10); 67 | 68 | newUser.password = await bcrypt.hash(password, salt); // Hashes our password for security 69 | 70 | await knex('user').insert(newUser); // Inserts new user 71 | 72 | res.send({ msg: "User successfully registered!" }) 73 | } else { 74 | res.send({ msg: "User email already registered!" }) // Condition if user is already in the database 75 | } 76 | 77 | } else { 78 | // Condition if address is already in the database 79 | let emailExists = await getUser(email); // Checks if user exists 80 | 81 | const latest = await getAddress(street, code); // Gets Address users trying to link to user data 82 | 83 | if (!emailExists) { 84 | let newUser = { 85 | name, 86 | surname, 87 | email, 88 | gender, 89 | DOB, 90 | contact_number, 91 | role, 92 | password, 93 | address_id: latest.id 94 | } 95 | await knex('user').insert(newUser) // Inserts users 96 | res.send({ msg: "Address already stored!, User successfully registered!" }) 97 | } else { 98 | // Condition if address and user is already in the database 99 | res.send({ msg: "User email & address already registered!" }) 100 | } 101 | } 102 | 103 | } catch (err) { 104 | console.error(err.message); 105 | res.status(500).send("Server Error"); 106 | } 107 | } 108 | 109 | // Logins in user and return id and access token. Route: [api/users/login] 110 | const login = async (req, res) => { 111 | try { 112 | 113 | const { email, password } = req.body; 114 | 115 | let user = await knex.select().from('user').where('email', email).then((user) => { return user[0] }) // Checks for user in the database 116 | 117 | if (!user) { 118 | // Condition if user not found 119 | return res.status(400).json({ msg: 'Email not found!' 
}); 120 | } 121 | 122 | const isMatch = await bcrypt.compare(password, user.password); // Hashes password and compares it to password in users data 123 | 124 | if (!isMatch) { 125 | // Condition if password isn't correct 126 | return res.status(400).json({ msg: "Invalid Credentials" }); 127 | } 128 | 129 | const payload = { // This is what will be returned if have correct token (User id get's returned so that we can search user within our database) 130 | user: { 131 | id: user.id, 132 | }, 133 | }; 134 | 135 | jwt.sign( 136 | payload, 137 | config.get("jwtSecret"), // Gets secret in default.json (config handles the functionality to fetch secret) 138 | { 139 | expiresIn: '2d', // 2 days 140 | }, 141 | (err, token) => { 142 | if (err) throw err; 143 | res.json({ id: user.id, token }); // Returna user's ID and access token 144 | } 145 | ); 146 | 147 | } catch (err) { 148 | console.error(err.message); 149 | res.status(500).send("Server Error"); 150 | } 151 | } 152 | 153 | /** WON'T RE EXPLAIN ABOVE FUNCTIONALITY GOING FORWARD **/ 154 | 155 | // Get's all users data that's public for any logged in users. Route: [api/users] 156 | const getUsersAuth = async (req, res) => { 157 | try { 158 | 159 | const user = await knex.select().from('user').where('id', req.user.id).then((user) => { return user[0] }) 160 | 161 | if (user.id === req.user.id) { 162 | await knex.from('user').select('name', 'surname').then((users) => { 163 | res.send(users) 164 | }) 165 | } 166 | } catch (err) { 167 | console.error(err.message); 168 | res.status(500).send("Server Error"); 169 | } 170 | } 171 | 172 | // Gets all user data even private fields (id, address_id, gender, DOB, password hash and role). Route: [api/users/admin] 173 | const getUsersAdmin = async (req, res) => { 174 | try { 175 | const user = await knex.select().from('user').where('id', req.user.id).then((user) => { return user[0] }) 176 | 177 | if (user.role === 'admin') { // Checks if user is an admin (Needs to be admin) 178 | await knex.select().from('user').then((users) => { 179 | res.send(users) 180 | }) 181 | } else { 182 | res.status(401); 183 | return res.send("Access Rejected, Not Authorized"); 184 | } 185 | } catch (err) { 186 | console.error(err.message); 187 | res.status(500).send("Server Error"); 188 | } 189 | } 190 | 191 | // Get's logged in users data. Route: [api/users/whoami] 192 | const getUserData = async (req, res) => { 193 | try { 194 | 195 | const user = await knex 196 | .from('user') 197 | .select('id', 'name', 'surname', 'gender', 'email', 'DOB', 'contact_number', 'address_id', 'role') // Returns only theses fields 198 | .where('id', req.user.id) 199 | .then((user) => { return user[0] }) 200 | 201 | if (user.id === req.user.id) { 202 | res.send(user); 203 | } else { 204 | res.status(401); 205 | return res.send("Access Rejected, Not Authorized"); 206 | } 207 | } catch (err) { 208 | console.error(err.message); 209 | res.status(500).send("Server Error"); 210 | } 211 | } 212 | 213 | // Updates user data by id. 
Route: [api/users/:id] 214 | const updateUser = async (req, res) => { 215 | try { 216 | 217 | const user = await knex.select().from('user').where('id', req.user.id).then((user) => { return user[0] }) 218 | 219 | if (user.id === req.user.id) { 220 | const { 221 | name, 222 | surname, 223 | password, 224 | contact_number 225 | } = req.body; 226 | 227 | /** Constructing object to insert **/ 228 | const user = {} 229 | if (name) user.name = name; // If name was included in req.body, it will overwrite name to be req.body.name value (Applies to all) 230 | if (surname) user.surname = surname; 231 | if (password) { 232 | const salt = await bcrypt.genSalt(10); 233 | user.password = await bcrypt.hash(password, salt); 234 | }; 235 | if (contact_number) user.contact_number = contact_number; 236 | await knex('user').where('id', req.user.id) 237 | .update(user).then(() => { // Updates user by ID (req.user.id auth user) 238 | knex.select('name', 'surname', 'contact_number', 'gender', 'DOB').from('user').where('id', req.user.id).then((user) => { 239 | res.send(user[0]) 240 | }) 241 | }) 242 | } 243 | } catch (err) { 244 | console.error(err.message); 245 | res.status(500).send("Server Error"); 246 | } 247 | } 248 | 249 | // Deletes user data by id. Route: [api/users/:id] 250 | const removeUser = async (req, res) => { 251 | try { 252 | const user = await knex.select().from('user').where('id', req.user.id).then((user) => { return user[0] }) 253 | 254 | if (user.id === req.user.id) { 255 | knex('user').where('id', req.user.id) 256 | .del().then(function () { // Deletes user by ID (req.user.id auth user) 257 | res.json({ msg: 'User removed!' }); 258 | }) 259 | } else { 260 | res.status(401); 261 | return res.send("Access Rejected, Not Authorized"); 262 | } 263 | } catch (err) { 264 | console.error(err.message); 265 | res.status(500).send("Server Error"); 266 | } 267 | } 268 | 269 | module.exports = { 270 | addUser, 271 | login, 272 | getUsersAuth, 273 | getUser, 274 | getUsersAdmin, 275 | getUserData, 276 | updateUser, 277 | removeUser 278 | } -------------------------------------------------------------------------------- /gcp-infra/terraform.tfstate.backup: -------------------------------------------------------------------------------- 1 | { 2 | "version": 4, 3 | "terraform_version": "1.6.6", 4 | "serial": 17, 5 | "lineage": "e3d5401e-1595-9802-72ea-1d04ecfd2d92", 6 | "outputs": {}, 7 | "resources": [ 8 | { 9 | "mode": "managed", 10 | "type": "google_project_iam_member", 11 | "name": "opentelemetry_cloudtraceagent", 12 | "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", 13 | "instances": [ 14 | { 15 | "schema_version": 0, 16 | "attributes": { 17 | "condition": [], 18 | "etag": "BwYOvHnkPMc=", 19 | "id": "sql-trace/roles/cloudtrace.agent/serviceAccount:opentelemetry@sql-trace.iam.gserviceaccount.com", 20 | "member": "serviceAccount:opentelemetry@sql-trace.iam.gserviceaccount.com", 21 | "project": "sql-trace", 22 | "role": "roles/cloudtrace.agent" 23 | }, 24 | "sensitive_attributes": [], 25 | "private": "bnVsbA==", 26 | "dependencies": [ 27 | "google_service_account.opentelemetry" 28 | ] 29 | } 30 | ] 31 | }, 32 | { 33 | "mode": "managed", 34 | "type": "google_project_iam_member", 35 | "name": "opentelemetry_logginglogwriter", 36 | "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", 37 | "instances": [ 38 | { 39 | "schema_version": 0, 40 | "attributes": { 41 | "condition": [], 42 | "etag": "BwYOvH2bQoo=", 43 | "id": 
"sql-trace/roles/logging.logWriter/serviceAccount:opentelemetry@sql-trace.iam.gserviceaccount.com", 44 | "member": "serviceAccount:opentelemetry@sql-trace.iam.gserviceaccount.com", 45 | "project": "sql-trace", 46 | "role": "roles/logging.logWriter" 47 | }, 48 | "sensitive_attributes": [], 49 | "private": "bnVsbA==", 50 | "dependencies": [ 51 | "google_service_account.opentelemetry" 52 | ] 53 | } 54 | ] 55 | }, 56 | { 57 | "mode": "managed", 58 | "type": "google_project_iam_member", 59 | "name": "opentelemetry_monitoringmetricwriter", 60 | "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", 61 | "instances": [ 62 | { 63 | "schema_version": 0, 64 | "attributes": { 65 | "condition": [], 66 | "etag": "BwYOvH2bQoo=", 67 | "id": "sql-trace/roles/monitoring.metricWriter/serviceAccount:opentelemetry@sql-trace.iam.gserviceaccount.com", 68 | "member": "serviceAccount:opentelemetry@sql-trace.iam.gserviceaccount.com", 69 | "project": "sql-trace", 70 | "role": "roles/monitoring.metricWriter" 71 | }, 72 | "sensitive_attributes": [], 73 | "private": "bnVsbA==", 74 | "dependencies": [ 75 | "google_service_account.opentelemetry" 76 | ] 77 | } 78 | ] 79 | }, 80 | { 81 | "mode": "managed", 82 | "type": "google_service_account", 83 | "name": "opentelemetry", 84 | "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", 85 | "instances": [ 86 | { 87 | "schema_version": 0, 88 | "attributes": { 89 | "account_id": "opentelemetry", 90 | "description": "", 91 | "disabled": false, 92 | "display_name": "OpenTelemetry Service Account", 93 | "email": "opentelemetry@sql-trace.iam.gserviceaccount.com", 94 | "id": "projects/sql-trace/serviceAccounts/opentelemetry@sql-trace.iam.gserviceaccount.com", 95 | "member": "serviceAccount:opentelemetry@sql-trace.iam.gserviceaccount.com", 96 | "name": "projects/sql-trace/serviceAccounts/opentelemetry@sql-trace.iam.gserviceaccount.com", 97 | "project": "sql-trace", 98 | "timeouts": null, 99 | "unique_id": "100548714395297123812" 100 | }, 101 | "sensitive_attributes": [], 102 | "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDB9fQ==" 103 | } 104 | ] 105 | }, 106 | { 107 | "mode": "managed", 108 | "type": "google_sql_database", 109 | "name": "knexdb", 110 | "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", 111 | "instances": [ 112 | { 113 | "schema_version": 0, 114 | "attributes": { 115 | "charset": "UTF8", 116 | "collation": "en_US.UTF8", 117 | "deletion_policy": "DELETE", 118 | "id": "projects/sql-trace/instances/trace-test/databases/knexdb", 119 | "instance": "trace-test", 120 | "name": "knexdb", 121 | "project": "sql-trace", 122 | "self_link": "https://sqladmin.googleapis.com/sql/v1beta4/projects/sql-trace/instances/trace-test/databases/knexdb", 123 | "timeouts": null 124 | }, 125 | "sensitive_attributes": [], 126 | "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjoxMjAwMDAwMDAwMDAwLCJkZWxldGUiOjEyMDAwMDAwMDAwMDAsInVwZGF0ZSI6MTIwMDAwMDAwMDAwMH19", 127 | "dependencies": [ 128 | "google_sql_database_instance.main" 129 | ] 130 | } 131 | ] 132 | }, 133 | { 134 | "mode": "managed", 135 | "type": "google_sql_database_instance", 136 | "name": "main", 137 | "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", 138 | "instances": [ 139 | { 140 | "schema_version": 0, 141 | "attributes": { 142 | "available_maintenance_versions": [], 143 | "clone": [], 144 | "connection_name": "sql-trace:europe-west6:trace-test", 145 | "database_version": "POSTGRES_15", 146 | 
"deletion_protection": true, 147 | "encryption_key_name": null, 148 | "first_ip_address": "34.65.214.45", 149 | "id": "trace-test", 150 | "instance_type": "CLOUD_SQL_INSTANCE", 151 | "ip_address": [ 152 | { 153 | "ip_address": "34.65.214.45", 154 | "time_to_retire": "", 155 | "type": "PRIMARY" 156 | }, 157 | { 158 | "ip_address": "34.65.19.118", 159 | "time_to_retire": "", 160 | "type": "OUTGOING" 161 | } 162 | ], 163 | "maintenance_version": "POSTGRES_15_4.R20230830.01_07", 164 | "master_instance_name": "", 165 | "name": "trace-test", 166 | "private_ip_address": "", 167 | "project": "sql-trace", 168 | "public_ip_address": "34.65.214.45", 169 | "region": "europe-west6", 170 | "replica_configuration": [], 171 | "restore_backup_context": [], 172 | "root_password": null, 173 | "self_link": "https://sqladmin.googleapis.com/sql/v1beta4/projects/sql-trace/instances/trace-test", 174 | "server_ca_cert": [ 175 | { 176 | "cert": "-----BEGIN CERTIFICATE-----\nMIIDfzCCAmegAwIBAgIBADANBgkqhkiG9w0BAQsFADB3MS0wKwYDVQQuEyQ0MjI4\nNDBiYi1lMWU0LTQwNjktODI3ZS1lNDBhOGNiNjdmMDIxIzAhBgNVBAMTGkdvb2ds\nZSBDbG91ZCBTUUwgU2VydmVyIENBMRQwEgYDVQQKEwtHb29nbGUsIEluYzELMAkG\nA1UEBhMCVVMwHhcNMjQwMTA5MTkyNzI1WhcNMzQwMTA2MTkyODI1WjB3MS0wKwYD\nVQQuEyQ0MjI4NDBiYi1lMWU0LTQwNjktODI3ZS1lNDBhOGNiNjdmMDIxIzAhBgNV\nBAMTGkdvb2dsZSBDbG91ZCBTUUwgU2VydmVyIENBMRQwEgYDVQQKEwtHb29nbGUs\nIEluYzELMAkGA1UEBhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB\nAQC1uwzQ/6BsqBzv+37+p2lplWFFwtWf25/pRTSmLCyItIOeVdd9uUpVWQlOi3zA\nknMHB8Vz2rFQjQQiUWFArm0JvrnawJy2g6nyMgtnNUcSBl9vjzvnCpFQFGsk0nm9\nrYRVM4iX5T4yF5DnQn7QY9jP3GgHUZWSMGfD2KefZ4XZ0RAEVJjAxdTkXf3W4kSs\nL5DVOE8YOv1gWK+X+qZjCHpNY9ot5W9TUi4Edz2spzVguvhYRdmnshbjbANtD2Td\nIAn7ohtYYGWvI16wByJnPH8t9OcWlAPvr2CKT/wVVWI5VZz1Tycntzm2k1wdtjHN\nystftagp5Dyoxyf8fJ5g1Pi7AgMBAAGjFjAUMBIGA1UdEwEB/wQIMAYBAf8CAQAw\nDQYJKoZIhvcNAQELBQADggEBAGCM4TUou01QEMWxXqwLWgwRDl4HHfz7PKqRur1a\ntvCuUTUTHKi8kcmWS92bamzjvqP37MBCJX4MWHgm6ll4UTYgc35B/FAN320cj91U\nqU3jMAKsjLK1bKh7PJYounSa0YhgoZd0f4nLLT/sUKOpuR+sjgM4x2uDMZHRr63+\nfN0l0obn82X6yLUkLlv7x2oh1N5l8AjUYY8/Z/G2C/UEXLBPX2xen/BMRmIGF9yZ\nqNuq4vTHx5qgZiREu3kNkIRy1fSxacuLVqNltldSk2lnQj6/MGhzpGnLbH17bCBM\nWiz15nV3eBlL0onX785Ev69Bz8z6qR2Hc5lt3BTW+PMVVic=\n-----END CERTIFICATE-----", 177 | "common_name": "C=US,O=Google\\, Inc,CN=Google Cloud SQL Server CA,dnQualifier=422840bb-e1e4-4069-827e-e40a8cb67f02", 178 | "create_time": "2024-01-09T19:27:25.759Z", 179 | "expiration_time": "2034-01-06T19:28:25.759Z", 180 | "sha1_fingerprint": "59b659c01c0c64a5e35992c17130e5300ee44c23" 181 | } 182 | ], 183 | "service_account_email_address": "p510127747834-nyx2mu@gcp-sa-cloud-sql.iam.gserviceaccount.com", 184 | "settings": [ 185 | { 186 | "activation_policy": "ALWAYS", 187 | "active_directory_config": [], 188 | "availability_type": "ZONAL", 189 | "backup_configuration": [ 190 | { 191 | "backup_retention_settings": [ 192 | { 193 | "retained_backups": 7, 194 | "retention_unit": "COUNT" 195 | } 196 | ], 197 | "binary_log_enabled": false, 198 | "enabled": false, 199 | "location": "", 200 | "point_in_time_recovery_enabled": false, 201 | "start_time": "04:00", 202 | "transaction_log_retention_days": 7 203 | } 204 | ], 205 | "collation": "", 206 | "connector_enforcement": "NOT_REQUIRED", 207 | "database_flags": [], 208 | "deletion_protection_enabled": false, 209 | "deny_maintenance_period": [], 210 | "disk_autoresize": true, 211 | "disk_autoresize_limit": 0, 212 | "disk_size": 10, 213 | "disk_type": "PD_HDD", 214 | "insights_config": [], 215 | "ip_configuration": [ 216 | { 217 | "allocated_ip_range": "", 
218 | "authorized_networks": [ 219 | { 220 | "expiration_time": "", 221 | "name": "", 222 | "value": "213.55.243.210/32" 223 | } 224 | ], 225 | "ipv4_enabled": true, 226 | "private_network": "", 227 | "require_ssl": false 228 | } 229 | ], 230 | "location_preference": [ 231 | { 232 | "follow_gae_application": "", 233 | "secondary_zone": "", 234 | "zone": "europe-west6-c" 235 | } 236 | ], 237 | "maintenance_window": [], 238 | "password_validation_policy": [], 239 | "pricing_plan": "PER_USE", 240 | "sql_server_audit_config": [], 241 | "tier": "db-f1-micro", 242 | "time_zone": "", 243 | "user_labels": {}, 244 | "version": 13 245 | } 246 | ], 247 | "timeouts": null 248 | }, 249 | "sensitive_attributes": [], 250 | "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjoyNDAwMDAwMDAwMDAwLCJkZWxldGUiOjE4MDAwMDAwMDAwMDAsInVwZGF0ZSI6MTgwMDAwMDAwMDAwMH19" 251 | } 252 | ] 253 | }, 254 | { 255 | "mode": "managed", 256 | "type": "google_sql_user", 257 | "name": "knexuser", 258 | "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", 259 | "instances": [ 260 | { 261 | "schema_version": 1, 262 | "attributes": { 263 | "deletion_policy": null, 264 | "host": "", 265 | "id": "knexuser//trace-test", 266 | "instance": "trace-test", 267 | "name": "knexuser", 268 | "password": "knexpw", 269 | "password_policy": [], 270 | "project": "sql-trace", 271 | "sql_server_user_details": [], 272 | "timeouts": null, 273 | "type": "" 274 | }, 275 | "sensitive_attributes": [], 276 | "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6NjAwMDAwMDAwMDAwLCJ1cGRhdGUiOjYwMDAwMDAwMDAwMH0sInNjaGVtYV92ZXJzaW9uIjoiMSJ9", 277 | "dependencies": [ 278 | "google_sql_database_instance.main" 279 | ] 280 | } 281 | ] 282 | } 283 | ], 284 | "check_results": null 285 | } 286 | -------------------------------------------------------------------------------- /gcp-infra/terraform.tfstate: -------------------------------------------------------------------------------- 1 | { 2 | "version": 4, 3 | "terraform_version": "1.6.6", 4 | "serial": 19, 5 | "lineage": "e3d5401e-1595-9802-72ea-1d04ecfd2d92", 6 | "outputs": {}, 7 | "resources": [ 8 | { 9 | "mode": "managed", 10 | "type": "google_project_iam_member", 11 | "name": "opentelemetry_cloudtraceagent", 12 | "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", 13 | "instances": [ 14 | { 15 | "schema_version": 0, 16 | "attributes": { 17 | "condition": [], 18 | "etag": "BwYOvH2bQoo=", 19 | "id": "sql-trace/roles/cloudtrace.agent/serviceAccount:opentelemetry@sql-trace.iam.gserviceaccount.com", 20 | "member": "serviceAccount:opentelemetry@sql-trace.iam.gserviceaccount.com", 21 | "project": "sql-trace", 22 | "role": "roles/cloudtrace.agent" 23 | }, 24 | "sensitive_attributes": [], 25 | "private": "bnVsbA==", 26 | "dependencies": [ 27 | "google_service_account.opentelemetry" 28 | ] 29 | } 30 | ] 31 | }, 32 | { 33 | "mode": "managed", 34 | "type": "google_project_iam_member", 35 | "name": "opentelemetry_logginglogwriter", 36 | "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", 37 | "instances": [ 38 | { 39 | "schema_version": 0, 40 | "attributes": { 41 | "condition": [], 42 | "etag": "BwYOvH2bQoo=", 43 | "id": "sql-trace/roles/logging.logWriter/serviceAccount:opentelemetry@sql-trace.iam.gserviceaccount.com", 44 | "member": "serviceAccount:opentelemetry@sql-trace.iam.gserviceaccount.com", 45 | "project": "sql-trace", 46 | "role": "roles/logging.logWriter" 47 | }, 48 | 
"sensitive_attributes": [], 49 | "private": "bnVsbA==", 50 | "dependencies": [ 51 | "google_service_account.opentelemetry" 52 | ] 53 | } 54 | ] 55 | }, 56 | { 57 | "mode": "managed", 58 | "type": "google_project_iam_member", 59 | "name": "opentelemetry_monitoringmetricwriter", 60 | "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", 61 | "instances": [ 62 | { 63 | "schema_version": 0, 64 | "attributes": { 65 | "condition": [], 66 | "etag": "BwYOvH2bQoo=", 67 | "id": "sql-trace/roles/monitoring.metricWriter/serviceAccount:opentelemetry@sql-trace.iam.gserviceaccount.com", 68 | "member": "serviceAccount:opentelemetry@sql-trace.iam.gserviceaccount.com", 69 | "project": "sql-trace", 70 | "role": "roles/monitoring.metricWriter" 71 | }, 72 | "sensitive_attributes": [], 73 | "private": "bnVsbA==", 74 | "dependencies": [ 75 | "google_service_account.opentelemetry" 76 | ] 77 | } 78 | ] 79 | }, 80 | { 81 | "mode": "managed", 82 | "type": "google_service_account", 83 | "name": "opentelemetry", 84 | "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", 85 | "instances": [ 86 | { 87 | "schema_version": 0, 88 | "attributes": { 89 | "account_id": "opentelemetry", 90 | "description": "", 91 | "disabled": false, 92 | "display_name": "OpenTelemetry Service Account", 93 | "email": "opentelemetry@sql-trace.iam.gserviceaccount.com", 94 | "id": "projects/sql-trace/serviceAccounts/opentelemetry@sql-trace.iam.gserviceaccount.com", 95 | "member": "serviceAccount:opentelemetry@sql-trace.iam.gserviceaccount.com", 96 | "name": "projects/sql-trace/serviceAccounts/opentelemetry@sql-trace.iam.gserviceaccount.com", 97 | "project": "sql-trace", 98 | "timeouts": null, 99 | "unique_id": "100548714395297123812" 100 | }, 101 | "sensitive_attributes": [], 102 | "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDB9fQ==" 103 | } 104 | ] 105 | }, 106 | { 107 | "mode": "managed", 108 | "type": "google_sql_database", 109 | "name": "knexdb", 110 | "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", 111 | "instances": [ 112 | { 113 | "schema_version": 0, 114 | "attributes": { 115 | "charset": "UTF8", 116 | "collation": "en_US.UTF8", 117 | "deletion_policy": "DELETE", 118 | "id": "projects/sql-trace/instances/trace-test/databases/knexdb", 119 | "instance": "trace-test", 120 | "name": "knexdb", 121 | "project": "sql-trace", 122 | "self_link": "https://sqladmin.googleapis.com/sql/v1beta4/projects/sql-trace/instances/trace-test/databases/knexdb", 123 | "timeouts": null 124 | }, 125 | "sensitive_attributes": [], 126 | "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjoxMjAwMDAwMDAwMDAwLCJkZWxldGUiOjEyMDAwMDAwMDAwMDAsInVwZGF0ZSI6MTIwMDAwMDAwMDAwMH19", 127 | "dependencies": [ 128 | "google_sql_database_instance.main" 129 | ] 130 | } 131 | ] 132 | }, 133 | { 134 | "mode": "managed", 135 | "type": "google_sql_database_instance", 136 | "name": "main", 137 | "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", 138 | "instances": [ 139 | { 140 | "schema_version": 0, 141 | "attributes": { 142 | "available_maintenance_versions": [], 143 | "clone": [], 144 | "connection_name": "sql-trace:europe-west6:trace-test", 145 | "database_version": "POSTGRES_15", 146 | "deletion_protection": true, 147 | "encryption_key_name": null, 148 | "first_ip_address": "34.65.214.45", 149 | "id": "trace-test", 150 | "instance_type": "CLOUD_SQL_INSTANCE", 151 | "ip_address": [ 152 | { 153 | "ip_address": "34.65.214.45", 154 | 
"time_to_retire": "", 155 | "type": "PRIMARY" 156 | }, 157 | { 158 | "ip_address": "34.65.19.118", 159 | "time_to_retire": "", 160 | "type": "OUTGOING" 161 | } 162 | ], 163 | "maintenance_version": "POSTGRES_15_4.R20230830.01_07", 164 | "master_instance_name": "", 165 | "name": "trace-test", 166 | "private_ip_address": "", 167 | "project": "sql-trace", 168 | "public_ip_address": "34.65.214.45", 169 | "region": "europe-west6", 170 | "replica_configuration": [], 171 | "restore_backup_context": [], 172 | "root_password": null, 173 | "self_link": "https://sqladmin.googleapis.com/sql/v1beta4/projects/sql-trace/instances/trace-test", 174 | "server_ca_cert": [ 175 | { 176 | "cert": "-----BEGIN CERTIFICATE-----\nMIIDfzCCAmegAwIBAgIBADANBgkqhkiG9w0BAQsFADB3MS0wKwYDVQQuEyQ0MjI4\nNDBiYi1lMWU0LTQwNjktODI3ZS1lNDBhOGNiNjdmMDIxIzAhBgNVBAMTGkdvb2ds\nZSBDbG91ZCBTUUwgU2VydmVyIENBMRQwEgYDVQQKEwtHb29nbGUsIEluYzELMAkG\nA1UEBhMCVVMwHhcNMjQwMTA5MTkyNzI1WhcNMzQwMTA2MTkyODI1WjB3MS0wKwYD\nVQQuEyQ0MjI4NDBiYi1lMWU0LTQwNjktODI3ZS1lNDBhOGNiNjdmMDIxIzAhBgNV\nBAMTGkdvb2dsZSBDbG91ZCBTUUwgU2VydmVyIENBMRQwEgYDVQQKEwtHb29nbGUs\nIEluYzELMAkGA1UEBhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB\nAQC1uwzQ/6BsqBzv+37+p2lplWFFwtWf25/pRTSmLCyItIOeVdd9uUpVWQlOi3zA\nknMHB8Vz2rFQjQQiUWFArm0JvrnawJy2g6nyMgtnNUcSBl9vjzvnCpFQFGsk0nm9\nrYRVM4iX5T4yF5DnQn7QY9jP3GgHUZWSMGfD2KefZ4XZ0RAEVJjAxdTkXf3W4kSs\nL5DVOE8YOv1gWK+X+qZjCHpNY9ot5W9TUi4Edz2spzVguvhYRdmnshbjbANtD2Td\nIAn7ohtYYGWvI16wByJnPH8t9OcWlAPvr2CKT/wVVWI5VZz1Tycntzm2k1wdtjHN\nystftagp5Dyoxyf8fJ5g1Pi7AgMBAAGjFjAUMBIGA1UdEwEB/wQIMAYBAf8CAQAw\nDQYJKoZIhvcNAQELBQADggEBAGCM4TUou01QEMWxXqwLWgwRDl4HHfz7PKqRur1a\ntvCuUTUTHKi8kcmWS92bamzjvqP37MBCJX4MWHgm6ll4UTYgc35B/FAN320cj91U\nqU3jMAKsjLK1bKh7PJYounSa0YhgoZd0f4nLLT/sUKOpuR+sjgM4x2uDMZHRr63+\nfN0l0obn82X6yLUkLlv7x2oh1N5l8AjUYY8/Z/G2C/UEXLBPX2xen/BMRmIGF9yZ\nqNuq4vTHx5qgZiREu3kNkIRy1fSxacuLVqNltldSk2lnQj6/MGhzpGnLbH17bCBM\nWiz15nV3eBlL0onX785Ev69Bz8z6qR2Hc5lt3BTW+PMVVic=\n-----END CERTIFICATE-----", 177 | "common_name": "C=US,O=Google\\, Inc,CN=Google Cloud SQL Server CA,dnQualifier=422840bb-e1e4-4069-827e-e40a8cb67f02", 178 | "create_time": "2024-01-09T19:27:25.759Z", 179 | "expiration_time": "2034-01-06T19:28:25.759Z", 180 | "sha1_fingerprint": "59b659c01c0c64a5e35992c17130e5300ee44c23" 181 | } 182 | ], 183 | "service_account_email_address": "p510127747834-nyx2mu@gcp-sa-cloud-sql.iam.gserviceaccount.com", 184 | "settings": [ 185 | { 186 | "activation_policy": "ALWAYS", 187 | "active_directory_config": [], 188 | "availability_type": "ZONAL", 189 | "backup_configuration": [ 190 | { 191 | "backup_retention_settings": [ 192 | { 193 | "retained_backups": 7, 194 | "retention_unit": "COUNT" 195 | } 196 | ], 197 | "binary_log_enabled": false, 198 | "enabled": false, 199 | "location": "", 200 | "point_in_time_recovery_enabled": false, 201 | "start_time": "04:00", 202 | "transaction_log_retention_days": 7 203 | } 204 | ], 205 | "collation": "", 206 | "connector_enforcement": "NOT_REQUIRED", 207 | "database_flags": [], 208 | "deletion_protection_enabled": false, 209 | "deny_maintenance_period": [], 210 | "disk_autoresize": true, 211 | "disk_autoresize_limit": 0, 212 | "disk_size": 10, 213 | "disk_type": "PD_HDD", 214 | "insights_config": [ 215 | { 216 | "query_insights_enabled": true, 217 | "query_plans_per_minute": 0, 218 | "query_string_length": 1024, 219 | "record_application_tags": true, 220 | "record_client_address": false 221 | } 222 | ], 223 | "ip_configuration": [ 224 | { 225 | "allocated_ip_range": "", 226 | "authorized_networks": [ 227 | { 
228 | "expiration_time": "", 229 | "name": "", 230 | "value": "213.55.243.210/32" 231 | } 232 | ], 233 | "ipv4_enabled": true, 234 | "private_network": "", 235 | "require_ssl": false 236 | } 237 | ], 238 | "location_preference": [ 239 | { 240 | "follow_gae_application": "", 241 | "secondary_zone": "", 242 | "zone": "europe-west6-c" 243 | } 244 | ], 245 | "maintenance_window": [], 246 | "password_validation_policy": [], 247 | "pricing_plan": "PER_USE", 248 | "sql_server_audit_config": [], 249 | "tier": "db-f1-micro", 250 | "time_zone": "", 251 | "user_labels": {}, 252 | "version": 15 253 | } 254 | ], 255 | "timeouts": null 256 | }, 257 | "sensitive_attributes": [], 258 | "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjoyNDAwMDAwMDAwMDAwLCJkZWxldGUiOjE4MDAwMDAwMDAwMDAsInVwZGF0ZSI6MTgwMDAwMDAwMDAwMH19" 259 | } 260 | ] 261 | }, 262 | { 263 | "mode": "managed", 264 | "type": "google_sql_user", 265 | "name": "knexuser", 266 | "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", 267 | "instances": [ 268 | { 269 | "schema_version": 1, 270 | "attributes": { 271 | "deletion_policy": null, 272 | "host": "", 273 | "id": "knexuser//trace-test", 274 | "instance": "trace-test", 275 | "name": "knexuser", 276 | "password": "knexpw", 277 | "password_policy": [], 278 | "project": "sql-trace", 279 | "sql_server_user_details": [], 280 | "timeouts": null, 281 | "type": "" 282 | }, 283 | "sensitive_attributes": [], 284 | "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6NjAwMDAwMDAwMDAwLCJ1cGRhdGUiOjYwMDAwMDAwMDAwMH0sInNjaGVtYV92ZXJzaW9uIjoiMSJ9", 285 | "dependencies": [ 286 | "google_sql_database_instance.main" 287 | ] 288 | } 289 | ] 290 | } 291 | ], 292 | "check_results": null 293 | } 294 | -------------------------------------------------------------------------------- /infra/otelcol-custom/manifest.yaml: -------------------------------------------------------------------------------- 1 | dist: 2 | module: github.com/open-telemetry/opentelemetry-collector-releases/contrib 3 | name: otelcol-custom 4 | description: Custom OpenTelemetry Collector Distribution 5 | version: 0.92.0 6 | output_path: ./_build 7 | otelcol_version: 0.92.0 8 | 9 | extensions: 10 | - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.92.0 11 | - gomod: go.opentelemetry.io/collector/extension/ballastextension v0.92.0 12 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/asapauthextension v0.92.0 13 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/awsproxy v0.92.0 14 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/basicauthextension v0.92.0 15 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/bearertokenauthextension v0.92.0 16 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/headerssetterextension v0.92.0 17 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.92.0 18 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/httpforwarder v0.92.0 19 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/jaegerremotesampling v0.92.0 20 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/oauth2clientauthextension v0.92.0 21 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.92.0 22 | - gomod: 
github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.92.0 23 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.92.0 24 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.92.0 25 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.92.0 26 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/oidcauthextension v0.92.0 27 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.92.0 28 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.92.0 29 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.92.0 30 | import: github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage 31 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.92.0 32 | import: github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/dbstorage 33 | 34 | exporters: 35 | - gomod: go.opentelemetry.io/collector/exporter/debugexporter v0.92.0 36 | - gomod: go.opentelemetry.io/collector/exporter/loggingexporter v0.92.0 37 | - gomod: go.opentelemetry.io/collector/exporter/otlpexporter v0.92.0 38 | - gomod: go.opentelemetry.io/collector/exporter/otlphttpexporter v0.92.0 39 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/alibabacloudlogserviceexporter v0.92.0 40 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awscloudwatchlogsexporter v0.92.0 41 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsemfexporter v0.92.0 42 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awskinesisexporter v0.92.0 43 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter v0.92.0 44 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awss3exporter v0.92.0 45 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/azuredataexplorerexporter v0.92.0 46 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/azuremonitorexporter v0.92.0 47 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/carbonexporter v0.92.0 48 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/cassandraexporter v0.92.0 49 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/clickhouseexporter v0.92.0 50 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/coralogixexporter v0.92.0 51 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.92.0 52 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datasetexporter v0.92.0 53 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter v0.92.0 54 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/elasticsearchexporter v0.92.0 55 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/f5cloudexporter v0.92.0 56 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/fileexporter v0.92.0 57 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/googlecloudexporter v0.92.0 58 | - gomod: 
github.com/open-telemetry/opentelemetry-collector-contrib/exporter/googlecloudpubsubexporter v0.92.0 59 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/googlemanagedprometheusexporter v0.92.0 60 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/honeycombmarkerexporter v0.92.0 61 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/influxdbexporter v0.92.0 62 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/instanaexporter v0.92.0 63 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.92.0 64 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter v0.92.0 65 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logicmonitorexporter v0.92.0 66 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/logzioexporter v0.92.0 67 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/lokiexporter v0.92.0 68 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/mezmoexporter v0.92.0 69 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/opencensusexporter v0.92.0 70 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/opensearchexporter v0.92.0 71 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.92.0 72 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.92.0 73 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/pulsarexporter v0.92.0 74 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.92.0 75 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sentryexporter v0.92.0 76 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter v0.92.0 77 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/skywalkingexporter v0.92.0 78 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/splunkhecexporter v0.92.0 79 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sumologicexporter v0.92.0 80 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/syslogexporter v0.92.0 81 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/tencentcloudlogserviceexporter v0.92.0 82 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.92.0 83 | 84 | processors: 85 | - gomod: go.opentelemetry.io/collector/processor/batchprocessor v0.92.0 86 | - gomod: go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.92.0 87 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.92.0 88 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.92.0 89 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/datadogprocessor v0.92.0 90 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatorateprocessor v0.92.0 91 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.92.0 92 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.92.0 93 | - gomod: 
github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.92.0 94 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/logstransformprocessor v0.92.0 95 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.92.0 96 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor v0.92.0 97 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricstransformprocessor v0.92.0 98 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.92.0 99 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/redactionprocessor v0.92.0 100 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/remotetapprocessor v0.92.0 101 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.92.0 102 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.92.0 103 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.92.0 104 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/servicegraphprocessor v0.92.0 105 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanmetricsprocessor v0.92.0 106 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.92.0 107 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/sumologicprocessor v0.92.0 108 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.92.0 109 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.92.0 110 | 111 | receivers: 112 | - gomod: go.opentelemetry.io/collector/receiver/otlpreceiver v0.92.0 113 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/activedirectorydsreceiver v0.92.0 114 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/aerospikereceiver v0.92.0 115 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachereceiver v0.92.0 116 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachesparkreceiver v0.92.0 117 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscloudwatchreceiver v0.92.0 118 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.92.0 119 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver v0.92.0 120 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsfirehosereceiver v0.92.0 121 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver v0.92.0 122 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/azureblobreceiver v0.92.0 123 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/azureeventhubreceiver v0.92.0 124 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/azuremonitorreceiver v0.92.0 125 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/bigipreceiver v0.92.0 126 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/carbonreceiver v0.92.0 127 | - gomod: 
github.com/open-telemetry/opentelemetry-collector-contrib/receiver/chronyreceiver v0.92.0 128 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/cloudflarereceiver v0.92.0 129 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/cloudfoundryreceiver v0.92.0 130 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/collectdreceiver v0.92.0 131 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/couchdbreceiver v0.92.0 132 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/datadogreceiver v0.92.0 133 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dockerstatsreceiver v0.92.0 134 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/elasticsearchreceiver v0.92.0 135 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/expvarreceiver v0.92.0 136 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.92.0 137 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filestatsreceiver v0.92.0 138 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filereceiver v0.92.0 139 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/flinkmetricsreceiver v0.92.0 140 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.92.0 141 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudpubsubreceiver v0.92.0 142 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver v0.92.0 143 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/haproxyreceiver v0.92.0 144 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.92.0 145 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/httpcheckreceiver v0.92.0 146 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/influxdbreceiver v0.92.0 147 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/iisreceiver v0.92.0 148 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.92.0 149 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jmxreceiver v0.92.0 150 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/journaldreceiver v0.92.0 151 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver v0.92.0 152 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8seventsreceiver v0.92.0 153 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sobjectsreceiver v0.92.0 154 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkametricsreceiver v0.92.0 155 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.92.0 156 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver v0.92.0 157 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/lokireceiver v0.92.0 158 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/memcachedreceiver v0.92.0 159 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver v0.92.0 160 
| - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbreceiver v0.92.0 161 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mysqlreceiver v0.92.0 162 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/nginxreceiver v0.92.0 163 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/nsxtreceiver v0.92.0 164 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.92.0 165 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/oracledbreceiver v0.92.0 166 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/otlpjsonfilereceiver v0.92.0 167 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/podmanreceiver v0.92.0 168 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver v0.92.0 169 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.92.0 170 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/purefareceiver v0.92.0 171 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/purefbreceiver v0.92.0 172 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/pulsarreceiver v0.92.0 173 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/rabbitmqreceiver v0.92.0 174 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.92.0 175 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/redisreceiver v0.92.0 176 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/riakreceiver v0.92.0 177 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sapmreceiver v0.92.0 178 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver v0.92.0 179 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/simpleprometheusreceiver v0.92.0 180 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/skywalkingreceiver v0.92.0 181 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/snowflakereceiver v0.92.0 182 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/solacereceiver v0.92.0 183 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver v0.92.0 184 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sqlqueryreceiver v0.92.0 185 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sqlserverreceiver v0.92.0 186 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sshcheckreceiver v0.92.0 187 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver v0.92.0 188 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/syslogreceiver v0.92.0 189 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/tcplogreceiver v0.92.0 190 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/udplogreceiver v0.92.0 191 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/vcenterreceiver v0.92.0 192 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/wavefrontreceiver v0.92.0 193 | - gomod: 
github.com/open-telemetry/opentelemetry-collector-contrib/receiver/snmpreceiver v0.92.0 194 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/webhookeventreceiver v0.92.0 195 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/windowseventlogreceiver v0.92.0 196 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/windowsperfcountersreceiver v0.92.0 197 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.92.0 198 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zookeeperreceiver v0.92.0 199 | 200 | connectors: 201 | - gomod: go.opentelemetry.io/collector/connector/forwardconnector v0.92.0 202 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector v0.92.0 203 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.92.0 204 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/connector/routingconnector v0.92.0 205 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/connector/servicegraphconnector v0.92.0 206 | - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.92.0 207 | 208 | # When adding a replace, add a comment before it to document why it's needed and when it can be removed 209 | replaces: 210 | # See https://github.com/google/gnostic/issues/262 211 | - github.com/googleapis/gnostic v0.5.6 => github.com/googleapis/gnostic v0.5.5 212 | # See https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/12322#issuecomment-1185029670 213 | - github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 => github.com/docker/go-connections v0.4.0 214 | # see https://github.com/mattn/go-ieproxy/issues/45 215 | - github.com/mattn/go-ieproxy => github.com/mattn/go-ieproxy v0.0.1 216 | # see https://github.com/openshift/api/pull/1515 217 | - github.com/openshift/api => github.com/openshift/api v0.0.0-20230726162818-81f778f3b3ec -------------------------------------------------------------------------------- /infra/grafana-dashboards/example-dashboard.json: -------------------------------------------------------------------------------- 1 | { 2 | "annotations": { 3 | "list": [ 4 | { 5 | "builtIn": 1, 6 | "datasource": { 7 | "type": "grafana", 8 | "uid": "-- Grafana --" 9 | }, 10 | "enable": true, 11 | "hide": true, 12 | "iconColor": "rgba(0, 211, 255, 1)", 13 | "name": "Annotations & Alerts", 14 | "type": "dashboard" 15 | } 16 | ] 17 | }, 18 | "editable": true, 19 | "fiscalYearStartMonth": 0, 20 | "graphTooltip": 0, 21 | "links": [], 22 | "liveNow": false, 23 | "panels": [ 24 | { 25 | "datasource": { 26 | "type": "prometheus", 27 | "uid": "PBFA97CFB590B2093" 28 | }, 29 | "fieldConfig": { 30 | "defaults": { 31 | "color": { 32 | "mode": "palette-classic" 33 | }, 34 | "custom": { 35 | "axisBorderShow": false, 36 | "axisCenteredZero": false, 37 | "axisColorMode": "text", 38 | "axisLabel": "", 39 | "axisPlacement": "auto", 40 | "barAlignment": 0, 41 | "drawStyle": "line", 42 | "fillOpacity": 0, 43 | "gradientMode": "none", 44 | "hideFrom": { 45 | "legend": false, 46 | "tooltip": false, 47 | "viz": false 48 | }, 49 | "insertNulls": false, 50 | "lineInterpolation": "linear", 51 | "lineWidth": 1, 52 | "pointSize": 5, 53 | "scaleDistribution": { 54 | "type": "linear" 55 | }, 56 | "showPoints": "auto", 57 | "spanNulls": false, 58 | "stacking": { 59 | "group": "A", 60 | "mode": 
"none" 61 | }, 62 | "thresholdsStyle": { 63 | "mode": "off" 64 | } 65 | }, 66 | "mappings": [], 67 | "thresholds": { 68 | "mode": "absolute", 69 | "steps": [ 70 | { 71 | "color": "green", 72 | "value": null 73 | }, 74 | { 75 | "color": "red", 76 | "value": 80 77 | } 78 | ] 79 | }, 80 | "unitScale": true 81 | }, 82 | "overrides": [] 83 | }, 84 | "gridPos": { 85 | "h": 7, 86 | "w": 4, 87 | "x": 0, 88 | "y": 0 89 | }, 90 | "id": 5, 91 | "options": { 92 | "legend": { 93 | "calcs": [], 94 | "displayMode": "list", 95 | "placement": "bottom", 96 | "showLegend": true 97 | }, 98 | "tooltip": { 99 | "mode": "single", 100 | "sort": "none" 101 | } 102 | }, 103 | "targets": [ 104 | { 105 | "datasource": { 106 | "type": "prometheus", 107 | "uid": "PBFA97CFB590B2093" 108 | }, 109 | "disableTextWrap": false, 110 | "editorMode": "builder", 111 | "expr": "pg_stat_statements_calls{queryid=\"-284185876264204213\"}", 112 | "fullMetaSearch": false, 113 | "includeNullMetadata": true, 114 | "legendFormat": "Calls", 115 | "range": true, 116 | "refId": "A", 117 | "useBackend": false 118 | }, 119 | { 120 | "datasource": { 121 | "type": "prometheus", 122 | "uid": "PBFA97CFB590B2093" 123 | }, 124 | "disableTextWrap": false, 125 | "editorMode": "builder", 126 | "expr": "increase(pg_stat_statements_calls{queryid=\"-284185876264204213\"}[$__interval])", 127 | "fullMetaSearch": false, 128 | "hide": false, 129 | "includeNullMetadata": true, 130 | "instant": false, 131 | "legendFormat": "Increase", 132 | "range": true, 133 | "refId": "B", 134 | "useBackend": false 135 | } 136 | ], 137 | "title": "Cart Query", 138 | "type": "timeseries" 139 | }, 140 | { 141 | "datasource": { 142 | "type": "prometheus", 143 | "uid": "PBFA97CFB590B2093" 144 | }, 145 | "fieldConfig": { 146 | "defaults": { 147 | "color": { 148 | "mode": "palette-classic" 149 | }, 150 | "custom": { 151 | "axisBorderShow": false, 152 | "axisCenteredZero": false, 153 | "axisColorMode": "text", 154 | "axisLabel": "", 155 | "axisPlacement": "auto", 156 | "barAlignment": 0, 157 | "drawStyle": "line", 158 | "fillOpacity": 0, 159 | "gradientMode": "none", 160 | "hideFrom": { 161 | "legend": false, 162 | "tooltip": false, 163 | "viz": false 164 | }, 165 | "insertNulls": false, 166 | "lineInterpolation": "linear", 167 | "lineWidth": 1, 168 | "pointSize": 5, 169 | "scaleDistribution": { 170 | "type": "linear" 171 | }, 172 | "showPoints": "auto", 173 | "spanNulls": false, 174 | "stacking": { 175 | "group": "A", 176 | "mode": "none" 177 | }, 178 | "thresholdsStyle": { 179 | "mode": "off" 180 | } 181 | }, 182 | "mappings": [], 183 | "thresholds": { 184 | "mode": "absolute", 185 | "steps": [ 186 | { 187 | "color": "green", 188 | "value": null 189 | }, 190 | { 191 | "color": "red", 192 | "value": 80 193 | } 194 | ] 195 | }, 196 | "unit": "ms", 197 | "unitScale": true 198 | }, 199 | "overrides": [] 200 | }, 201 | "gridPos": { 202 | "h": 7, 203 | "w": 4, 204 | "x": 4, 205 | "y": 0 206 | }, 207 | "id": 8, 208 | "options": { 209 | "legend": { 210 | "calcs": [], 211 | "displayMode": "list", 212 | "placement": "bottom", 213 | "showLegend": true 214 | }, 215 | "tooltip": { 216 | "mode": "single", 217 | "sort": "none" 218 | } 219 | }, 220 | "targets": [ 221 | { 222 | "datasource": { 223 | "type": "prometheus", 224 | "uid": "PBFA97CFB590B2093" 225 | }, 226 | "disableTextWrap": false, 227 | "editorMode": "builder", 228 | "expr": "pg_stat_statements_total_exec_time{queryid=\"-284185876264204213\"}", 229 | "fullMetaSearch": false, 230 | "includeNullMetadata": true, 231 | "legendFormat": 
"Total Exec Time", 232 | "range": true, 233 | "refId": "A", 234 | "useBackend": false 235 | } 236 | ], 237 | "title": "Cart Query", 238 | "type": "timeseries" 239 | }, 240 | { 241 | "datasource": { 242 | "type": "prometheus", 243 | "uid": "PBFA97CFB590B2093" 244 | }, 245 | "fieldConfig": { 246 | "defaults": { 247 | "color": { 248 | "mode": "palette-classic" 249 | }, 250 | "custom": { 251 | "axisBorderShow": false, 252 | "axisCenteredZero": false, 253 | "axisColorMode": "text", 254 | "axisLabel": "", 255 | "axisPlacement": "auto", 256 | "barAlignment": 0, 257 | "drawStyle": "line", 258 | "fillOpacity": 0, 259 | "gradientMode": "none", 260 | "hideFrom": { 261 | "legend": false, 262 | "tooltip": false, 263 | "viz": false 264 | }, 265 | "insertNulls": false, 266 | "lineInterpolation": "linear", 267 | "lineWidth": 1, 268 | "pointSize": 5, 269 | "scaleDistribution": { 270 | "type": "linear" 271 | }, 272 | "showPoints": "auto", 273 | "spanNulls": false, 274 | "stacking": { 275 | "group": "A", 276 | "mode": "none" 277 | }, 278 | "thresholdsStyle": { 279 | "mode": "off" 280 | } 281 | }, 282 | "mappings": [], 283 | "thresholds": { 284 | "mode": "absolute", 285 | "steps": [ 286 | { 287 | "color": "green", 288 | "value": null 289 | }, 290 | { 291 | "color": "red", 292 | "value": 80 293 | } 294 | ] 295 | }, 296 | "unit": "ms", 297 | "unitScale": true 298 | }, 299 | "overrides": [] 300 | }, 301 | "gridPos": { 302 | "h": 7, 303 | "w": 4, 304 | "x": 8, 305 | "y": 0 306 | }, 307 | "id": 7, 308 | "options": { 309 | "legend": { 310 | "calcs": [], 311 | "displayMode": "list", 312 | "placement": "bottom", 313 | "showLegend": true 314 | }, 315 | "tooltip": { 316 | "mode": "single", 317 | "sort": "none" 318 | } 319 | }, 320 | "targets": [ 321 | { 322 | "datasource": { 323 | "type": "prometheus", 324 | "uid": "PBFA97CFB590B2093" 325 | }, 326 | "disableTextWrap": false, 327 | "editorMode": "builder", 328 | "expr": "pg_stat_statements_mean_exec_time{queryid=\"-284185876264204213\"}", 329 | "fullMetaSearch": false, 330 | "includeNullMetadata": true, 331 | "legendFormat": "Mean Exec Time", 332 | "range": true, 333 | "refId": "A", 334 | "useBackend": false 335 | } 336 | ], 337 | "title": "Cart Query", 338 | "type": "timeseries" 339 | }, 340 | { 341 | "datasource": { 342 | "type": "prometheus", 343 | "uid": "PBFA97CFB590B2093" 344 | }, 345 | "fieldConfig": { 346 | "defaults": { 347 | "color": { 348 | "mode": "palette-classic" 349 | }, 350 | "custom": { 351 | "axisBorderShow": false, 352 | "axisCenteredZero": false, 353 | "axisColorMode": "text", 354 | "axisLabel": "", 355 | "axisPlacement": "auto", 356 | "barAlignment": 0, 357 | "drawStyle": "line", 358 | "fillOpacity": 0, 359 | "gradientMode": "none", 360 | "hideFrom": { 361 | "legend": false, 362 | "tooltip": false, 363 | "viz": false 364 | }, 365 | "insertNulls": false, 366 | "lineInterpolation": "linear", 367 | "lineWidth": 1, 368 | "pointSize": 5, 369 | "scaleDistribution": { 370 | "type": "linear" 371 | }, 372 | "showPoints": "auto", 373 | "spanNulls": false, 374 | "stacking": { 375 | "group": "A", 376 | "mode": "none" 377 | }, 378 | "thresholdsStyle": { 379 | "mode": "off" 380 | } 381 | }, 382 | "mappings": [], 383 | "thresholds": { 384 | "mode": "absolute", 385 | "steps": [ 386 | { 387 | "color": "green", 388 | "value": null 389 | }, 390 | { 391 | "color": "red", 392 | "value": 80 393 | } 394 | ] 395 | }, 396 | "unit": "ms", 397 | "unitScale": true 398 | }, 399 | "overrides": [] 400 | }, 401 | "gridPos": { 402 | "h": 7, 403 | "w": 4, 404 | "x": 12, 405 | "y": 0 
406 | }, 407 | "id": 6, 408 | "options": { 409 | "legend": { 410 | "calcs": [], 411 | "displayMode": "list", 412 | "placement": "bottom", 413 | "showLegend": true 414 | }, 415 | "tooltip": { 416 | "mode": "single", 417 | "sort": "none" 418 | } 419 | }, 420 | "targets": [ 421 | { 422 | "datasource": { 423 | "type": "prometheus", 424 | "uid": "PBFA97CFB590B2093" 425 | }, 426 | "disableTextWrap": false, 427 | "editorMode": "builder", 428 | "expr": "pg_stat_statements_stddev_exec_time{queryid=\"-284185876264204213\"}", 429 | "fullMetaSearch": false, 430 | "includeNullMetadata": true, 431 | "legendFormat": "Stddev Exec Time", 432 | "range": true, 433 | "refId": "A", 434 | "useBackend": false 435 | } 436 | ], 437 | "title": "Cart Query", 438 | "type": "timeseries" 439 | }, 440 | { 441 | "datasource": { 442 | "type": "prometheus", 443 | "uid": "PBFA97CFB590B2093" 444 | }, 445 | "fieldConfig": { 446 | "defaults": { 447 | "color": { 448 | "mode": "palette-classic" 449 | }, 450 | "custom": { 451 | "axisBorderShow": false, 452 | "axisCenteredZero": false, 453 | "axisColorMode": "text", 454 | "axisLabel": "", 455 | "axisPlacement": "auto", 456 | "barAlignment": 0, 457 | "drawStyle": "line", 458 | "fillOpacity": 0, 459 | "gradientMode": "none", 460 | "hideFrom": { 461 | "legend": false, 462 | "tooltip": false, 463 | "viz": false 464 | }, 465 | "insertNulls": false, 466 | "lineInterpolation": "linear", 467 | "lineWidth": 1, 468 | "pointSize": 5, 469 | "scaleDistribution": { 470 | "type": "linear" 471 | }, 472 | "showPoints": "auto", 473 | "spanNulls": false, 474 | "stacking": { 475 | "group": "A", 476 | "mode": "none" 477 | }, 478 | "thresholdsStyle": { 479 | "mode": "off" 480 | } 481 | }, 482 | "mappings": [], 483 | "thresholds": { 484 | "mode": "absolute", 485 | "steps": [ 486 | { 487 | "color": "green", 488 | "value": null 489 | }, 490 | { 491 | "color": "red", 492 | "value": 80 493 | } 494 | ] 495 | }, 496 | "unit": "ms", 497 | "unitScale": true 498 | }, 499 | "overrides": [] 500 | }, 501 | "gridPos": { 502 | "h": 7, 503 | "w": 4, 504 | "x": 16, 505 | "y": 0 506 | }, 507 | "id": 9, 508 | "options": { 509 | "legend": { 510 | "calcs": [], 511 | "displayMode": "list", 512 | "placement": "bottom", 513 | "showLegend": true 514 | }, 515 | "tooltip": { 516 | "mode": "single", 517 | "sort": "none" 518 | } 519 | }, 520 | "targets": [ 521 | { 522 | "datasource": { 523 | "type": "prometheus", 524 | "uid": "PBFA97CFB590B2093" 525 | }, 526 | "disableTextWrap": false, 527 | "editorMode": "builder", 528 | "expr": "pg_stat_statements_min_exec_time{queryid=\"-284185876264204213\"}", 529 | "fullMetaSearch": false, 530 | "includeNullMetadata": true, 531 | "legendFormat": "Min Exec Time", 532 | "range": true, 533 | "refId": "A", 534 | "useBackend": false 535 | } 536 | ], 537 | "title": "Cart Query", 538 | "type": "timeseries" 539 | }, 540 | { 541 | "datasource": { 542 | "type": "prometheus", 543 | "uid": "PBFA97CFB590B2093" 544 | }, 545 | "fieldConfig": { 546 | "defaults": { 547 | "color": { 548 | "mode": "palette-classic" 549 | }, 550 | "custom": { 551 | "axisBorderShow": false, 552 | "axisCenteredZero": false, 553 | "axisColorMode": "text", 554 | "axisLabel": "", 555 | "axisPlacement": "auto", 556 | "barAlignment": 0, 557 | "drawStyle": "line", 558 | "fillOpacity": 0, 559 | "gradientMode": "none", 560 | "hideFrom": { 561 | "legend": false, 562 | "tooltip": false, 563 | "viz": false 564 | }, 565 | "insertNulls": false, 566 | "lineInterpolation": "linear", 567 | "lineWidth": 1, 568 | "pointSize": 5, 569 | 
"scaleDistribution": { 570 | "type": "linear" 571 | }, 572 | "showPoints": "auto", 573 | "spanNulls": false, 574 | "stacking": { 575 | "group": "A", 576 | "mode": "none" 577 | }, 578 | "thresholdsStyle": { 579 | "mode": "off" 580 | } 581 | }, 582 | "mappings": [], 583 | "thresholds": { 584 | "mode": "absolute", 585 | "steps": [ 586 | { 587 | "color": "green", 588 | "value": null 589 | }, 590 | { 591 | "color": "red", 592 | "value": 80 593 | } 594 | ] 595 | }, 596 | "unit": "ms", 597 | "unitScale": true 598 | }, 599 | "overrides": [] 600 | }, 601 | "gridPos": { 602 | "h": 7, 603 | "w": 4, 604 | "x": 20, 605 | "y": 0 606 | }, 607 | "id": 10, 608 | "options": { 609 | "legend": { 610 | "calcs": [], 611 | "displayMode": "list", 612 | "placement": "bottom", 613 | "showLegend": true 614 | }, 615 | "tooltip": { 616 | "mode": "single", 617 | "sort": "none" 618 | } 619 | }, 620 | "targets": [ 621 | { 622 | "datasource": { 623 | "type": "prometheus", 624 | "uid": "PBFA97CFB590B2093" 625 | }, 626 | "disableTextWrap": false, 627 | "editorMode": "builder", 628 | "expr": "pg_stat_statements_max_exec_time{queryid=\"-284185876264204213\"}", 629 | "fullMetaSearch": false, 630 | "includeNullMetadata": true, 631 | "legendFormat": "Max Exec Time", 632 | "range": true, 633 | "refId": "A", 634 | "useBackend": false 635 | } 636 | ], 637 | "title": "Cart Query", 638 | "type": "timeseries" 639 | }, 640 | { 641 | "datasource": { 642 | "type": "loki", 643 | "uid": "DzuPFa9P2" 644 | }, 645 | "gridPos": { 646 | "h": 7, 647 | "w": 24, 648 | "x": 0, 649 | "y": 7 650 | }, 651 | "id": 4, 652 | "options": { 653 | "dedupStrategy": "none", 654 | "enableLogDetails": true, 655 | "prettifyLogMessage": false, 656 | "showCommonLabels": false, 657 | "showLabels": false, 658 | "showTime": false, 659 | "sortOrder": "Descending", 660 | "wrapLogMessage": false 661 | }, 662 | "targets": [ 663 | { 664 | "datasource": { 665 | "type": "loki", 666 | "uid": "DzuPFa9P2" 667 | }, 668 | "editorMode": "code", 669 | "expr": "{exporter=\"OTLP\"}", 670 | "queryType": "range", 671 | "refId": "A" 672 | } 673 | ], 674 | "title": "Logs", 675 | "type": "logs" 676 | }, 677 | { 678 | "datasource": { 679 | "type": "jaeger", 680 | "uid": "EbPG8fYoz" 681 | }, 682 | "fieldConfig": { 683 | "defaults": { 684 | "color": { 685 | "mode": "thresholds" 686 | }, 687 | "custom": { 688 | "align": "auto", 689 | "cellOptions": { 690 | "type": "auto" 691 | }, 692 | "inspect": false 693 | }, 694 | "mappings": [], 695 | "thresholds": { 696 | "mode": "absolute", 697 | "steps": [ 698 | { 699 | "color": "green", 700 | "value": null 701 | }, 702 | { 703 | "color": "red", 704 | "value": 80 705 | } 706 | ] 707 | }, 708 | "unitScale": true 709 | }, 710 | "overrides": [] 711 | }, 712 | "gridPos": { 713 | "h": 8, 714 | "w": 24, 715 | "x": 0, 716 | "y": 14 717 | }, 718 | "id": 3, 719 | "options": { 720 | "cellHeight": "sm", 721 | "footer": { 722 | "countRows": false, 723 | "fields": "", 724 | "reducer": [ 725 | "sum" 726 | ], 727 | "show": false 728 | }, 729 | "showHeader": true 730 | }, 731 | "pluginVersion": "10.3.1", 732 | "targets": [ 733 | { 734 | "datasource": { 735 | "type": "jaeger", 736 | "uid": "EbPG8fYoz" 737 | }, 738 | "queryType": "search", 739 | "refId": "A", 740 | "service": "node-example-app", 741 | "tags": "" 742 | } 743 | ], 744 | "title": "Panel Title", 745 | "type": "table" 746 | }, 747 | { 748 | "datasource": { 749 | "type": "jaeger", 750 | "uid": "EbPG8fYoz" 751 | }, 752 | "gridPos": { 753 | "h": 15, 754 | "w": 24, 755 | "x": 0, 756 | "y": 22 757 | }, 758 | 
"id": 2, 759 | "targets": [ 760 | { 761 | "datasource": { 762 | "type": "jaeger", 763 | "uid": "EbPG8fYoz" 764 | }, 765 | "query": "${traceId}", 766 | "refId": "A", 767 | "service": "node-example-app" 768 | } 769 | ], 770 | "title": "Traces", 771 | "type": "traces" 772 | } 773 | ], 774 | "refresh": "", 775 | "schemaVersion": 39, 776 | "tags": [], 777 | "templating": { 778 | "list": [ 779 | { 780 | "current": { 781 | "selected": false, 782 | "text": "e9c98f375e8924031388234984213f92", 783 | "value": "e9c98f375e8924031388234984213f92" 784 | }, 785 | "hide": 0, 786 | "includeAll": false, 787 | "label": "Trace ID", 788 | "multi": false, 789 | "name": "traceId", 790 | "options": [], 791 | "query": "", 792 | "queryValue": "", 793 | "skipUrlSync": false, 794 | "type": "custom" 795 | } 796 | ] 797 | }, 798 | "time": { 799 | "from": "now-30m", 800 | "to": "now" 801 | }, 802 | "timepicker": {}, 803 | "timezone": "", 804 | "title": "Example dashboard", 805 | "uid": "e54998fc-0fd7-4cc4-a577-43dff51a7733", 806 | "version": 1, 807 | "weekStart": "" 808 | } -------------------------------------------------------------------------------- /infra/postgresql.conf.sample: -------------------------------------------------------------------------------- 1 | # ----------------------------- 2 | # PostgreSQL configuration file 3 | # ----------------------------- 4 | # 5 | # This file consists of lines of the form: 6 | # 7 | # name = value 8 | # 9 | # (The "=" is optional.) Whitespace may be used. Comments are introduced with 10 | # "#" anywhere on a line. The complete list of parameter names and allowed 11 | # values can be found in the PostgreSQL documentation. 12 | # 13 | # The commented-out settings shown in this file represent the default values. 14 | # Re-commenting a setting is NOT sufficient to revert it to the default value; 15 | # you need to reload the server. 16 | # 17 | # This file is read on server startup and when the server receives a SIGHUP 18 | # signal. If you edit the file on a running system, you have to SIGHUP the 19 | # server for the changes to take effect, run "pg_ctl reload", or execute 20 | # "SELECT pg_reload_conf()". Some parameters, which are marked below, 21 | # require a server shutdown and restart to take effect. 22 | # 23 | # Any parameter can also be given as a command-line option to the server, e.g., 24 | # "postgres -c log_connections=on". Some parameters can be changed at run time 25 | # with the "SET" SQL command. 26 | # 27 | # Memory units: B = bytes Time units: us = microseconds 28 | # kB = kilobytes ms = milliseconds 29 | # MB = megabytes s = seconds 30 | # GB = gigabytes min = minutes 31 | # TB = terabytes h = hours 32 | # d = days 33 | 34 | 35 | #------------------------------------------------------------------------------ 36 | # FILE LOCATIONS 37 | #------------------------------------------------------------------------------ 38 | 39 | # The default values of these variables are driven from the -D command-line 40 | # option or PGDATA environment variable, represented here as ConfigDir. 41 | 42 | #data_directory = 'ConfigDir' # use data in another directory 43 | # (change requires restart) 44 | #hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file 45 | # (change requires restart) 46 | #ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file 47 | # (change requires restart) 48 | 49 | # If external_pid_file is not explicitly set, no extra PID file is written. 
50 | #external_pid_file = '' # write an extra PID file 51 | # (change requires restart) 52 | 53 | 54 | #------------------------------------------------------------------------------ 55 | # CONNECTIONS AND AUTHENTICATION 56 | #------------------------------------------------------------------------------ 57 | 58 | # - Connection Settings - 59 | 60 | listen_addresses = '*' 61 | # comma-separated list of addresses; 62 | # defaults to 'localhost'; use '*' for all 63 | # (change requires restart) 64 | #port = 5432 # (change requires restart) 65 | #max_connections = 100 # (change requires restart) 66 | #reserved_connections = 0 # (change requires restart) 67 | #superuser_reserved_connections = 3 # (change requires restart) 68 | #unix_socket_directories = '/tmp' # comma-separated list of directories 69 | # (change requires restart) 70 | #unix_socket_group = '' # (change requires restart) 71 | #unix_socket_permissions = 0777 # begin with 0 to use octal notation 72 | # (change requires restart) 73 | #bonjour = off # advertise server via Bonjour 74 | # (change requires restart) 75 | #bonjour_name = '' # defaults to the computer name 76 | # (change requires restart) 77 | 78 | # - TCP settings - 79 | # see "man tcp" for details 80 | 81 | #tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds; 82 | # 0 selects the system default 83 | #tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds; 84 | # 0 selects the system default 85 | #tcp_keepalives_count = 0 # TCP_KEEPCNT; 86 | # 0 selects the system default 87 | #tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds; 88 | # 0 selects the system default 89 | 90 | #client_connection_check_interval = 0 # time between checks for client 91 | # disconnection while running queries; 92 | # 0 for never 93 | 94 | # - Authentication - 95 | 96 | #authentication_timeout = 1min # 1s-600s 97 | #password_encryption = scram-sha-256 # scram-sha-256 or md5 98 | #scram_iterations = 4096 99 | #db_user_namespace = off 100 | 101 | # GSSAPI using Kerberos 102 | #krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab' 103 | #krb_caseins_users = off 104 | #gss_accept_delegation = off 105 | 106 | # - SSL - 107 | 108 | #ssl = off 109 | #ssl_ca_file = '' 110 | #ssl_cert_file = 'server.crt' 111 | #ssl_crl_file = '' 112 | #ssl_crl_dir = '' 113 | #ssl_key_file = 'server.key' 114 | #ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers 115 | #ssl_prefer_server_ciphers = on 116 | #ssl_ecdh_curve = 'prime256v1' 117 | #ssl_min_protocol_version = 'TLSv1.2' 118 | #ssl_max_protocol_version = '' 119 | #ssl_dh_params_file = '' 120 | #ssl_passphrase_command = '' 121 | #ssl_passphrase_command_supports_reload = off 122 | 123 | 124 | #------------------------------------------------------------------------------ 125 | # RESOURCE USAGE (except WAL) 126 | #------------------------------------------------------------------------------ 127 | 128 | # - Memory - 129 | 130 | #shared_buffers = 128MB # min 128kB 131 | # (change requires restart) 132 | #huge_pages = try # on, off, or try 133 | # (change requires restart) 134 | #huge_page_size = 0 # zero for system default 135 | # (change requires restart) 136 | #temp_buffers = 8MB # min 800kB 137 | #max_prepared_transactions = 0 # zero disables the feature 138 | # (change requires restart) 139 | # Caution: it is not advisable to set max_prepared_transactions nonzero unless 140 | # you actively intend to use prepared transactions. 
141 | #work_mem = 4MB # min 64kB 142 | #hash_mem_multiplier = 2.0 # 1-1000.0 multiplier on hash table work_mem 143 | #maintenance_work_mem = 64MB # min 1MB 144 | #autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem 145 | #logical_decoding_work_mem = 64MB # min 64kB 146 | #max_stack_depth = 2MB # min 100kB 147 | #shared_memory_type = mmap # the default is the first option 148 | # supported by the operating system: 149 | # mmap 150 | # sysv 151 | # windows 152 | # (change requires restart) 153 | #dynamic_shared_memory_type = posix # the default is usually the first option 154 | # supported by the operating system: 155 | # posix 156 | # sysv 157 | # windows 158 | # mmap 159 | # (change requires restart) 160 | #min_dynamic_shared_memory = 0MB # (change requires restart) 161 | #vacuum_buffer_usage_limit = 256kB # size of vacuum and analyze buffer access strategy ring; 162 | # 0 to disable vacuum buffer access strategy; 163 | # range 128kB to 16GB 164 | 165 | # - Disk - 166 | 167 | #temp_file_limit = -1 # limits per-process temp file space 168 | # in kilobytes, or -1 for no limit 169 | 170 | # - Kernel Resources - 171 | 172 | #max_files_per_process = 1000 # min 64 173 | # (change requires restart) 174 | 175 | # - Cost-Based Vacuum Delay - 176 | 177 | #vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables) 178 | #vacuum_cost_page_hit = 1 # 0-10000 credits 179 | #vacuum_cost_page_miss = 2 # 0-10000 credits 180 | #vacuum_cost_page_dirty = 20 # 0-10000 credits 181 | #vacuum_cost_limit = 200 # 1-10000 credits 182 | 183 | # - Background Writer - 184 | 185 | #bgwriter_delay = 200ms # 10-10000ms between rounds 186 | #bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables 187 | #bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round 188 | #bgwriter_flush_after = 0 # measured in pages, 0 disables 189 | 190 | # - Asynchronous Behavior - 191 | 192 | #backend_flush_after = 0 # measured in pages, 0 disables 193 | #effective_io_concurrency = 1 # 1-1000; 0 disables prefetching 194 | #maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching 195 | #max_worker_processes = 8 # (change requires restart) 196 | #max_parallel_workers_per_gather = 2 # taken from max_parallel_workers 197 | #max_parallel_maintenance_workers = 2 # taken from max_parallel_workers 198 | #max_parallel_workers = 8 # maximum number of max_worker_processes that 199 | # can be used in parallel operations 200 | #parallel_leader_participation = on 201 | #old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate 202 | # (change requires restart) 203 | 204 | 205 | #------------------------------------------------------------------------------ 206 | # WRITE-AHEAD LOG 207 | #------------------------------------------------------------------------------ 208 | 209 | # - Settings - 210 | 211 | #wal_level = replica # minimal, replica, or logical 212 | # (change requires restart) 213 | #fsync = on # flush data to disk for crash safety 214 | # (turning this off can cause 215 | # unrecoverable data corruption) 216 | #synchronous_commit = on # synchronization level; 217 | # off, local, remote_write, remote_apply, or on 218 | #wal_sync_method = fsync # the default is the first option 219 | # supported by the operating system: 220 | # open_datasync 221 | # fdatasync (default on Linux and FreeBSD) 222 | # fsync 223 | # fsync_writethrough 224 | # open_sync 225 | #full_page_writes = on # recover from partial page writes 226 | #wal_log_hints = off # also do full page writes of non-critical updates 
227 | # (change requires restart) 228 | #wal_compression = off # enables compression of full-page writes; 229 | # off, pglz, lz4, zstd, or on 230 | #wal_init_zero = on # zero-fill new WAL files 231 | #wal_recycle = on # recycle WAL files 232 | #wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers 233 | # (change requires restart) 234 | #wal_writer_delay = 200ms # 1-10000 milliseconds 235 | #wal_writer_flush_after = 1MB # measured in pages, 0 disables 236 | #wal_skip_threshold = 2MB 237 | 238 | #commit_delay = 0 # range 0-100000, in microseconds 239 | #commit_siblings = 5 # range 1-1000 240 | 241 | # - Checkpoints - 242 | 243 | #checkpoint_timeout = 5min # range 30s-1d 244 | #checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0 245 | #checkpoint_flush_after = 0 # measured in pages, 0 disables 246 | #checkpoint_warning = 30s # 0 disables 247 | #max_wal_size = 1GB 248 | #min_wal_size = 80MB 249 | 250 | # - Prefetching during recovery - 251 | 252 | #recovery_prefetch = try # prefetch pages referenced in the WAL? 253 | #wal_decode_buffer_size = 512kB # lookahead window used for prefetching 254 | # (change requires restart) 255 | 256 | # - Archiving - 257 | 258 | #archive_mode = off # enables archiving; off, on, or always 259 | # (change requires restart) 260 | #archive_library = '' # library to use to archive a WAL file 261 | # (empty string indicates archive_command should 262 | # be used) 263 | #archive_command = '' # command to use to archive a WAL file 264 | # placeholders: %p = path of file to archive 265 | # %f = file name only 266 | # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f' 267 | #archive_timeout = 0 # force a WAL file switch after this 268 | # number of seconds; 0 disables 269 | 270 | # - Archive Recovery - 271 | 272 | # These are only used in recovery mode. 273 | 274 | #restore_command = '' # command to use to restore an archived WAL file 275 | # placeholders: %p = path of file to restore 276 | # %f = file name only 277 | # e.g. 'cp /mnt/server/archivedir/%f %p' 278 | #archive_cleanup_command = '' # command to execute at every restartpoint 279 | #recovery_end_command = '' # command to execute at completion of recovery 280 | 281 | # - Recovery Target - 282 | 283 | # Set these only when performing a targeted recovery. 
284 | 285 | #recovery_target = '' # 'immediate' to end recovery as soon as a 286 | # consistent state is reached 287 | # (change requires restart) 288 | #recovery_target_name = '' # the named restore point to which recovery will proceed 289 | # (change requires restart) 290 | #recovery_target_time = '' # the time stamp up to which recovery will proceed 291 | # (change requires restart) 292 | #recovery_target_xid = '' # the transaction ID up to which recovery will proceed 293 | # (change requires restart) 294 | #recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed 295 | # (change requires restart) 296 | #recovery_target_inclusive = on # Specifies whether to stop: 297 | # just after the specified recovery target (on) 298 | # just before the recovery target (off) 299 | # (change requires restart) 300 | #recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID 301 | # (change requires restart) 302 | #recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown' 303 | # (change requires restart) 304 | 305 | 306 | #------------------------------------------------------------------------------ 307 | # REPLICATION 308 | #------------------------------------------------------------------------------ 309 | 310 | # - Sending Servers - 311 | 312 | # Set these on the primary and on any standby that will send replication data. 313 | 314 | #max_wal_senders = 10 # max number of walsender processes 315 | # (change requires restart) 316 | #max_replication_slots = 10 # max number of replication slots 317 | # (change requires restart) 318 | #wal_keep_size = 0 # in megabytes; 0 disables 319 | #max_slot_wal_keep_size = -1 # in megabytes; -1 disables 320 | #wal_sender_timeout = 60s # in milliseconds; 0 disables 321 | #track_commit_timestamp = off # collect timestamp of transaction commit 322 | # (change requires restart) 323 | 324 | # - Primary Server - 325 | 326 | # These settings are ignored on a standby server. 327 | 328 | #synchronous_standby_names = '' # standby servers that provide sync rep 329 | # method to choose sync standbys, number of sync standbys, 330 | # and comma-separated list of application_name 331 | # from standby(s); '*' = all 332 | 333 | # - Standby Servers - 334 | 335 | # These settings are ignored on a primary server. 
336 | 337 | #primary_conninfo = '' # connection string to sending server 338 | #primary_slot_name = '' # replication slot on sending server 339 | #hot_standby = on # "off" disallows queries during recovery 340 | # (change requires restart) 341 | #max_standby_archive_delay = 30s # max delay before canceling queries 342 | # when reading WAL from archive; 343 | # -1 allows indefinite delay 344 | #max_standby_streaming_delay = 30s # max delay before canceling queries 345 | # when reading streaming WAL; 346 | # -1 allows indefinite delay 347 | #wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name 348 | # is not set 349 | #wal_receiver_status_interval = 10s # send replies at least this often 350 | # 0 disables 351 | #hot_standby_feedback = off # send info from standby to prevent 352 | # query conflicts 353 | #wal_receiver_timeout = 60s # time that receiver waits for 354 | # communication from primary 355 | # in milliseconds; 0 disables 356 | #wal_retrieve_retry_interval = 5s # time to wait before retrying to 357 | # retrieve WAL after a failed attempt 358 | #recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery 359 | 360 | # - Subscribers - 361 | 362 | # These settings are ignored on a publisher. 363 | 364 | #max_logical_replication_workers = 4 # taken from max_worker_processes 365 | # (change requires restart) 366 | #max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers 367 | #max_parallel_apply_workers_per_subscription = 2 # taken from max_logical_replication_workers 368 | 369 | 370 | #------------------------------------------------------------------------------ 371 | # QUERY TUNING 372 | #------------------------------------------------------------------------------ 373 | 374 | # - Planner Method Configuration - 375 | 376 | #enable_async_append = on 377 | #enable_bitmapscan = on 378 | #enable_gathermerge = on 379 | #enable_hashagg = on 380 | #enable_hashjoin = on 381 | #enable_incremental_sort = on 382 | #enable_indexscan = on 383 | #enable_indexonlyscan = on 384 | #enable_material = on 385 | #enable_memoize = on 386 | #enable_mergejoin = on 387 | #enable_nestloop = on 388 | #enable_parallel_append = on 389 | #enable_parallel_hash = on 390 | #enable_partition_pruning = on 391 | #enable_partitionwise_join = off 392 | #enable_partitionwise_aggregate = off 393 | #enable_presorted_aggregate = on 394 | #enable_seqscan = on 395 | #enable_sort = on 396 | #enable_tidscan = on 397 | 398 | # - Planner Cost Constants - 399 | 400 | #seq_page_cost = 1.0 # measured on an arbitrary scale 401 | #random_page_cost = 4.0 # same scale as above 402 | #cpu_tuple_cost = 0.01 # same scale as above 403 | #cpu_index_tuple_cost = 0.005 # same scale as above 404 | #cpu_operator_cost = 0.0025 # same scale as above 405 | #parallel_setup_cost = 1000.0 # same scale as above 406 | #parallel_tuple_cost = 0.1 # same scale as above 407 | #min_parallel_table_scan_size = 8MB 408 | #min_parallel_index_scan_size = 512kB 409 | #effective_cache_size = 4GB 410 | 411 | #jit_above_cost = 100000 # perform JIT compilation if available 412 | # and query more expensive than this; 413 | # -1 disables 414 | #jit_inline_above_cost = 500000 # inline small functions if query is 415 | # more expensive than this; -1 disables 416 | #jit_optimize_above_cost = 500000 # use expensive JIT optimizations if 417 | # query is more expensive than this; 418 | # -1 disables 419 | 420 | # - Genetic Query Optimizer - 421 | 422 | #geqo = on 423 | #geqo_threshold = 12 424 | 
#geqo_effort = 5 # range 1-10 425 | #geqo_pool_size = 0 # selects default based on effort 426 | #geqo_generations = 0 # selects default based on effort 427 | #geqo_selection_bias = 2.0 # range 1.5-2.0 428 | #geqo_seed = 0.0 # range 0.0-1.0 429 | 430 | # - Other Planner Options - 431 | 432 | #default_statistics_target = 100 # range 1-10000 433 | #constraint_exclusion = partition # on, off, or partition 434 | #cursor_tuple_fraction = 0.1 # range 0.0-1.0 435 | #from_collapse_limit = 8 436 | #jit = on # allow JIT compilation 437 | #join_collapse_limit = 8 # 1 disables collapsing of explicit 438 | # JOIN clauses 439 | #plan_cache_mode = auto # auto, force_generic_plan or 440 | # force_custom_plan 441 | #recursive_worktable_factor = 10.0 # range 0.001-1000000 442 | 443 | 444 | #------------------------------------------------------------------------------ 445 | # REPORTING AND LOGGING 446 | #------------------------------------------------------------------------------ 447 | 448 | # - Where to Log - 449 | 450 | #log_destination = 'stderr' # Valid values are combinations of 451 | # stderr, csvlog, jsonlog, syslog, and 452 | # eventlog, depending on platform. 453 | # csvlog and jsonlog require 454 | # logging_collector to be on. 455 | 456 | # This is used when logging to stderr: 457 | #logging_collector = off # Enable capturing of stderr, jsonlog, 458 | # and csvlog into log files. Required 459 | # to be on for csvlogs and jsonlogs. 460 | # (change requires restart) 461 | 462 | # These are only used if logging_collector is on: 463 | #log_directory = 'log' # directory where log files are written, 464 | # can be absolute or relative to PGDATA 465 | #log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern, 466 | # can include strftime() escapes 467 | #log_file_mode = 0600 # creation mode for log files, 468 | # begin with 0 to use octal notation 469 | #log_rotation_age = 1d # Automatic rotation of logfiles will 470 | # happen after that time. 0 disables. 471 | #log_rotation_size = 10MB # Automatic rotation of logfiles will 472 | # happen after that much log output. 473 | # 0 disables. 474 | #log_truncate_on_rotation = off # If on, an existing log file with the 475 | # same name as the new log file will be 476 | # truncated rather than appended to. 477 | # But such truncation only occurs on 478 | # time-driven rotation, not on restarts 479 | # or size-driven rotation. Default is 480 | # off, meaning append to existing files 481 | # in all cases. 
482 | 483 | # These are relevant when logging to syslog: 484 | #syslog_facility = 'LOCAL0' 485 | #syslog_ident = 'postgres' 486 | #syslog_sequence_numbers = on 487 | #syslog_split_messages = on 488 | 489 | # This is only relevant when logging to eventlog (Windows): 490 | # (change requires restart) 491 | #event_source = 'PostgreSQL' 492 | 493 | # - When to Log - 494 | 495 | #log_min_messages = warning # values in order of decreasing detail: 496 | # debug5 497 | # debug4 498 | # debug3 499 | # debug2 500 | # debug1 501 | # info 502 | # notice 503 | # warning 504 | # error 505 | # log 506 | # fatal 507 | # panic 508 | 509 | #log_min_error_statement = error # values in order of decreasing detail: 510 | # debug5 511 | # debug4 512 | # debug3 513 | # debug2 514 | # debug1 515 | # info 516 | # notice 517 | # warning 518 | # error 519 | # log 520 | # fatal 521 | # panic (effectively off) 522 | 523 | #log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements 524 | # and their durations, > 0 logs only 525 | # statements running at least this number 526 | # of milliseconds 527 | 528 | #log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements 529 | # and their durations, > 0 logs only a sample of 530 | # statements running at least this number 531 | # of milliseconds; 532 | # sample fraction is determined by log_statement_sample_rate 533 | 534 | #log_statement_sample_rate = 1.0 # fraction of logged statements exceeding 535 | # log_min_duration_sample to be logged; 536 | # 1.0 logs all such statements, 0.0 never logs 537 | 538 | 539 | #log_transaction_sample_rate = 0.0 # fraction of transactions whose statements 540 | # are logged regardless of their duration; 1.0 logs all 541 | # statements from all transactions, 0.0 never logs 542 | 543 | #log_startup_progress_interval = 10s # Time between progress updates for 544 | # long-running startup operations. 545 | # 0 disables the feature, > 0 indicates 546 | # the interval in milliseconds. 547 | 548 | # - What to Log - 549 | 550 | #debug_print_parse = off 551 | #debug_print_rewritten = off 552 | #debug_print_plan = off 553 | #debug_pretty_print = on 554 | #log_autovacuum_min_duration = 10min # log autovacuum activity; 555 | # -1 disables, 0 logs all actions and 556 | # their durations, > 0 logs only 557 | # actions running at least this number 558 | # of milliseconds. 559 | #log_checkpoints = on 560 | #log_connections = off 561 | #log_disconnections = off 562 | #log_duration = off 563 | #log_error_verbosity = default # terse, default, or verbose messages 564 | #log_hostname = off 565 | #log_line_prefix = '%m [%p] ' # special values: 566 | # %a = application name 567 | # %u = user name 568 | # %d = database name 569 | # %r = remote host and port 570 | # %h = remote host 571 | # %b = backend type 572 | # %p = process ID 573 | # %P = process ID of parallel group leader 574 | # %t = timestamp without milliseconds 575 | # %m = timestamp with milliseconds 576 | # %n = timestamp with milliseconds (as a Unix epoch) 577 | # %Q = query ID (0 if none or not computed) 578 | # %i = command tag 579 | # %e = SQL state 580 | # %c = session ID 581 | # %l = session line number 582 | # %s = session start timestamp 583 | # %v = virtual transaction ID 584 | # %x = transaction ID (0 if none) 585 | # %q = stop here in non-session 586 | # processes 587 | # %% = '%' 588 | # e.g. 
'<%u%%%d> ' 589 | #log_lock_waits = off # log lock waits >= deadlock_timeout 590 | #log_recovery_conflict_waits = off # log standby recovery conflict waits 591 | # >= deadlock_timeout 592 | #log_parameter_max_length = -1 # when logging statements, limit logged 593 | # bind-parameter values to N bytes; 594 | # -1 means print in full, 0 disables 595 | #log_parameter_max_length_on_error = 0 # when logging an error, limit logged 596 | # bind-parameter values to N bytes; 597 | # -1 means print in full, 0 disables 598 | #log_statement = 'none' # none, ddl, mod, all 599 | #log_replication_commands = off 600 | #log_temp_files = -1 # log temporary files equal or larger 601 | # than the specified size in kilobytes; 602 | # -1 disables, 0 logs all temp files 603 | #log_timezone = 'GMT' 604 | 605 | # - Process Title - 606 | 607 | #cluster_name = '' # added to process titles if nonempty 608 | # (change requires restart) 609 | #update_process_title = on 610 | 611 | 612 | #------------------------------------------------------------------------------ 613 | # STATISTICS 614 | #------------------------------------------------------------------------------ 615 | 616 | # - Cumulative Query and Index Statistics - 617 | 618 | #track_activities = on 619 | #track_activity_query_size = 1024 # (change requires restart) 620 | #track_counts = on 621 | #track_io_timing = off 622 | #track_wal_io_timing = off 623 | #track_functions = none # none, pl, all 624 | #stats_fetch_consistency = cache # cache, none, snapshot 625 | 626 | 627 | # - Monitoring - 628 | 629 | #compute_query_id = auto 630 | #log_statement_stats = off 631 | #log_parser_stats = off 632 | #log_planner_stats = off 633 | #log_executor_stats = off 634 | 635 | 636 | #------------------------------------------------------------------------------ 637 | # AUTOVACUUM 638 | #------------------------------------------------------------------------------ 639 | 640 | #autovacuum = on # Enable autovacuum subprocess? 'on' 641 | # requires track_counts to also be on. 
642 | #autovacuum_max_workers = 3 # max number of autovacuum subprocesses 643 | # (change requires restart) 644 | #autovacuum_naptime = 1min # time between autovacuum runs 645 | #autovacuum_vacuum_threshold = 50 # min number of row updates before 646 | # vacuum 647 | #autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts 648 | # before vacuum; -1 disables insert 649 | # vacuums 650 | #autovacuum_analyze_threshold = 50 # min number of row updates before 651 | # analyze 652 | #autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum 653 | #autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table 654 | # size before insert vacuum 655 | #autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze 656 | #autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum 657 | # (change requires restart) 658 | #autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age 659 | # before forced vacuum 660 | # (change requires restart) 661 | #autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for 662 | # autovacuum, in milliseconds; 663 | # -1 means use vacuum_cost_delay 664 | #autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for 665 | # autovacuum, -1 means use 666 | # vacuum_cost_limit 667 | 668 | 669 | #------------------------------------------------------------------------------ 670 | # CLIENT CONNECTION DEFAULTS 671 | #------------------------------------------------------------------------------ 672 | 673 | # - Statement Behavior - 674 | 675 | #client_min_messages = notice # values in order of decreasing detail: 676 | # debug5 677 | # debug4 678 | # debug3 679 | # debug2 680 | # debug1 681 | # log 682 | # notice 683 | # warning 684 | # error 685 | #search_path = '"$user", public' # schema names 686 | #row_security = on 687 | #default_table_access_method = 'heap' 688 | #default_tablespace = '' # a tablespace name, '' uses the default 689 | #default_toast_compression = 'pglz' # 'pglz' or 'lz4' 690 | #temp_tablespaces = '' # a list of tablespace names, '' uses 691 | # only default tablespace 692 | #check_function_bodies = on 693 | #default_transaction_isolation = 'read committed' 694 | #default_transaction_read_only = off 695 | #default_transaction_deferrable = off 696 | #session_replication_role = 'origin' 697 | #statement_timeout = 0 # in milliseconds, 0 is disabled 698 | #lock_timeout = 0 # in milliseconds, 0 is disabled 699 | #idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled 700 | #idle_session_timeout = 0 # in milliseconds, 0 is disabled 701 | #vacuum_freeze_table_age = 150000000 702 | #vacuum_freeze_min_age = 50000000 703 | #vacuum_failsafe_age = 1600000000 704 | #vacuum_multixact_freeze_table_age = 150000000 705 | #vacuum_multixact_freeze_min_age = 5000000 706 | #vacuum_multixact_failsafe_age = 1600000000 707 | #bytea_output = 'hex' # hex, escape 708 | #xmlbinary = 'base64' 709 | #xmloption = 'content' 710 | #gin_pending_list_limit = 4MB 711 | #createrole_self_grant = '' # set and/or inherit 712 | 713 | # - Locale and Formatting - 714 | 715 | #datestyle = 'iso, mdy' 716 | #intervalstyle = 'postgres' 717 | #timezone = 'GMT' 718 | #timezone_abbreviations = 'Default' # Select the set of available time zone 719 | # abbreviations. Currently, there are 720 | # Default 721 | # Australia (historical usage) 722 | # India 723 | # You can create your own file in 724 | # share/timezonesets/. 
725 | #extra_float_digits = 1 # min -15, max 3; any value >0 actually 726 | # selects precise output mode 727 | #client_encoding = sql_ascii # actually, defaults to database 728 | # encoding 729 | 730 | # These settings are initialized by initdb, but they can be changed. 731 | #lc_messages = 'C' # locale for system error message 732 | # strings 733 | #lc_monetary = 'C' # locale for monetary formatting 734 | #lc_numeric = 'C' # locale for number formatting 735 | #lc_time = 'C' # locale for time formatting 736 | 737 | #icu_validation_level = warning # report ICU locale validation 738 | # errors at the given level 739 | 740 | # default configuration for text search 741 | #default_text_search_config = 'pg_catalog.simple' 742 | 743 | # - Shared Library Preloading - 744 | 745 | #local_preload_libraries = '' 746 | #session_preload_libraries = '' 747 | #shared_preload_libraries = '' # (change requires restart) 748 | #jit_provider = 'llvmjit' # JIT library to use 749 | 750 | # - Other Defaults - 751 | 752 | #dynamic_library_path = '$libdir' 753 | #extension_destdir = '' # prepend path when loading extensions 754 | # and shared objects (added by Debian) 755 | #gin_fuzzy_search_limit = 0 756 | 757 | 758 | #------------------------------------------------------------------------------ 759 | # LOCK MANAGEMENT 760 | #------------------------------------------------------------------------------ 761 | 762 | #deadlock_timeout = 1s 763 | #max_locks_per_transaction = 64 # min 10 764 | # (change requires restart) 765 | #max_pred_locks_per_transaction = 64 # min 10 766 | # (change requires restart) 767 | #max_pred_locks_per_relation = -2 # negative values mean 768 | # (max_pred_locks_per_transaction 769 | # / -max_pred_locks_per_relation) - 1 770 | #max_pred_locks_per_page = 2 # min 0 771 | 772 | 773 | #------------------------------------------------------------------------------ 774 | # VERSION AND PLATFORM COMPATIBILITY 775 | #------------------------------------------------------------------------------ 776 | 777 | # - Previous PostgreSQL Versions - 778 | 779 | #array_nulls = on 780 | #backslash_quote = safe_encoding # on, off, or safe_encoding 781 | #escape_string_warning = on 782 | #lo_compat_privileges = off 783 | #quote_all_identifiers = off 784 | #standard_conforming_strings = on 785 | #synchronize_seqscans = on 786 | 787 | # - Other Platforms and Clients - 788 | 789 | #transform_null_equals = off 790 | 791 | 792 | #------------------------------------------------------------------------------ 793 | # ERROR HANDLING 794 | #------------------------------------------------------------------------------ 795 | 796 | #exit_on_error = off # terminate session on any error? 797 | #restart_after_crash = on # reinitialize after backend crash? 798 | #data_sync_retry = off # retry or panic on failure to fsync 799 | # data? 800 | # (change requires restart) 801 | #recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+) 802 | 803 | 804 | #------------------------------------------------------------------------------ 805 | # CONFIG FILE INCLUDES 806 | #------------------------------------------------------------------------------ 807 | 808 | # These options allow settings to be loaded from files other than the 809 | # default postgresql.conf. Note that these are directives, not variable 810 | # assignments, so they can usefully be given more than once. 811 | 812 | #include_dir = '...' # include files ending in '.conf' from 813 | # a directory, e.g., 'conf.d' 814 | #include_if_exists = '...' 
# include file only if it exists 815 | #include = '...' # include file 816 | 817 | 818 | #------------------------------------------------------------------------------ 819 | # CUSTOMIZED OPTIONS 820 | #------------------------------------------------------------------------------ 821 | 822 | # Add settings for extensions here --------------------------------------------------------------------------------
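Note: the file above is the stock PostgreSQL 16 sample configuration; the settings actually applied in the local setup presumably live in infra/postgresql.conf, which is mounted alongside it. As a hedged sketch only (not the repository's file), the "CUSTOMIZED OPTIONS" section of a configuration like this one — which exposes pg_stat_statements metrics to Prometheus and ships JSON logs to stdout through the postgres-json-stdout image — would typically add settings along these lines:

# Hypothetical sketch -- see infra/postgresql.conf for the settings actually used.
# pg_stat_statements supplies the per-query calls/exec-time metrics
# plotted by the example Grafana dashboard.
shared_preload_libraries = 'pg_stat_statements'   # (change requires restart)
pg_stat_statements.track = all
compute_query_id = on              # stable queryid labels for dashboard filters

# JSON logging: the postgres-json-stdout entrypoint symlinks
# /var/log/postgresql/pg_log.json to the container's stdout, so jsonlog
# output reaches the container's log stream.
logging_collector = on
log_destination = 'jsonlog'
log_directory = '/var/log/postgresql'
log_filename = 'pg_log'            # jsonlog writes pg_log.json
log_min_duration_statement = 0     # log every statement with its duration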