├── .gitignore ├── .prettierignore ├── README.md ├── docker-compose.yml ├── eslint.config.js ├── grafana ├── dashboards │ └── postgresql-overview.json └── provisioning │ ├── dashboards │ └── dashboards.yml │ └── datasources │ └── datasource.yml ├── index.html ├── opentelemetry └── otel-config.yml ├── package-lock.json ├── package.json ├── prometheus └── prometheus.yml ├── server ├── .dockerignore ├── Dockerfile ├── app.ts ├── controllers │ ├── OAuthController.ts │ ├── monitoringController.ts │ └── userDatabaseController.ts ├── db │ └── db.ts ├── middleware │ └── authMiddleware.ts ├── models │ └── OAuthModel.ts ├── routes │ └── apiRoutes.ts ├── server.ts ├── tests │ └── integrations │ │ └── postgres-exporter.test.ts ├── tracing.ts ├── types │ └── auth.ts └── utils │ └── dockerPostgresExporter.ts ├── src ├── App.tsx ├── Dockerfile ├── components │ ├── ProtectedRoute.tsx │ ├── QueryMonitor │ │ ├── AuthCallback.tsx │ │ ├── AuthPage.tsx │ │ ├── DatabaseHealthMetrics.tsx │ │ ├── GrafanaDashboard.tsx │ │ ├── JaegerDashboard.tsx │ │ ├── index.tsx │ │ └── types.ts │ ├── QueryPerformance │ │ ├── Header.tsx │ │ ├── MetricsTable.tsx │ │ ├── QueryComparisonForm.tsx │ │ ├── QueryComparisonPage.tsx │ │ ├── QueryHistory.tsx │ │ ├── TestQueryForm.tsx │ │ └── TestQueryPage.tsx │ └── assets │ │ ├── QH_Dashboard.png │ │ ├── QH_Login.png │ │ ├── QH_Metrics.png │ │ ├── QH_Query.png │ │ ├── logo_queryhawk.jpg │ │ ├── logo_queryhawk.png │ │ ├── logo_queryhawk.svg │ │ └── logo_queryhawk.tsx ├── hooks │ └── useAuth.ts ├── main.tsx └── vite-env.d.ts ├── tsconfig.app.json ├── tsconfig.json ├── tsconfig.node.json ├── tsconfig.server.json ├── types └── types.ts └── vite.config.ts /.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | pnpm-debug.log* 8 | lerna-debug.log* 9 | 10 | node_modules 11 | dist 12 | dist-ssr 13 | *.local 14 | 15 | # Editor directories and files 16 | .vscode/* 17 | !.vscode/extensions.json 18 | .idea 19 | .DS_Store 20 | *.suo 21 | *.ntvs* 22 | *.njsproj 23 | *.sln 24 | *.sw? 25 | .env 26 | -------------------------------------------------------------------------------- /.prettierignore: -------------------------------------------------------------------------------- 1 | server/utils/dockerPostgresExporter.ts 2 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
<div align="center">
  <img src="src/components/assets/logo_queryhawk.png" alt="logo_queryhawk" />
</div>

<div align="center">
  <h1>QueryHawk</h1>
</div>

<p align="center">
  Get a hawk-eyed look at your query performance.
</p>

## Monitor and Analyze Your SQL Performance and Database Health

QueryHawk monitors and visualizes key SQL metrics to help users improve database and query performance. A central dashboard tracks overall database health, and containers ensure a consistent environment for every user.

<details>
  <summary>Table of Contents</summary>

- [Introduction](#introduction)
- [Features](#features)
- [Initial Set-up and Installation](#initial-set-up-and-installation)
- [Technologies](#technologies)
- [User Interface](#user-interface)
- [QueryHawk Team](#queryhawk-team)
- [Acknowledgements](#acknowledgements)
- [License](#license)

</details>
## Introduction

QueryHawk delivers comprehensive SQL database monitoring and visualization, empowering developers and database administrators to optimize performance and quickly identify bottlenecks. It is built on industry-standard tools, including Prometheus, Grafana, and PostgreSQL Exporter, all containerized with Docker for seamless deployment.

- ✅ Real-time SQL query analysis with millisecond-precision execution metrics
- ✅ Complete visibility into query execution plans with detailed buffer and cache statistics
- ✅ Interactive dashboards for visualizing database health and performance trends
- ✅ Query comparison tool to benchmark and optimize SQL performance
- ✅ Track query execution paths across your entire application with distributed tracing

With QueryHawk's intuitive interface, teams can proactively manage database performance, reduce troubleshooting time, and make data-driven optimization decisions. The containerized architecture ensures easy deployment across development, staging, and production environments.

Gain insights into your SQL databases and enhance how your team approaches database performance optimization with QueryHawk.

## Features

### 🔍 Deep SQL Query Analysis

- Execution Plan Visibility: Analyze `EXPLAIN ANALYZE` results with detailed metrics on planning time, execution time, and resource usage.
- Cache Performance Metrics: Monitor cache hit ratios and buffer statistics to identify memory optimization opportunities.
- Query Comparison: Evaluate startup and total costs for queries to understand their impact on database resources.
- Secure Connection Testing: Connect to any PostgreSQL database with SSL support and connection validation.
- Query Performance Profiling: Test queries before deployment with comprehensive performance metrics (see the sketch after this list).
- Historical Comparison: Store and compare query performance over time to track optimization progress.
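Under the hood, profiling a query amounts to running it under `EXPLAIN (ANALYZE, BUFFERS)` and reading PostgreSQL's structured timing output. A minimal sketch using `pg` (a declared dependency); the helper name and return shape here are illustrative, and the project's real logic lives in the server controllers:

```ts
import pg from 'pg';

// Hypothetical helper: run a statement under EXPLAIN and pull out the
// planning/execution timings that the metrics views display.
export async function profileQuery(connectionString: string, sql: string) {
  const pool = new pg.Pool({ connectionString });
  try {
    // FORMAT JSON returns a machine-readable plan with timing and buffer stats.
    const { rows } = await pool.query(
      `EXPLAIN (ANALYZE, BUFFERS, FORMAT JSON) ${sql}`
    );
    const plan = rows[0]['QUERY PLAN'][0];
    return {
      planningTimeMs: plan['Planning Time'],
      executionTimeMs: plan['Execution Time'],
      sharedHitBlocks: plan.Plan['Shared Hit Blocks'],
    };
  } finally {
    await pool.end();
  }
}
```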
### 📊 Real-time Performance Monitoring

Once connected, QueryHawk displays multiple metrics, including:

- Transaction rate
- Cache hit ratio
- Active connections
- Tuple operations
- Lock metrics
- I/O statistics
- Index usage
- Transaction commits vs. rollbacks
- Long-running queries

### 🫥 Tracing Requests

QueryHawk includes distributed tracing capabilities:

- View the Jaeger dashboard embedded in the UI
- Track request flows through your application
- Identify performance bottlenecks
- Debug slow queries

### 🛠️ Enterprise-Ready Architecture

- Docker-based Deployment: Quickly deploy the entire monitoring stack with Docker Compose.
- Secure Authentication: GitHub OAuth integration for secure user management.
- Dynamic Exporters: Automatically create and manage PostgreSQL exporters, one per connected database (see the sketch after this list).
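The dynamic-exporter flow is roughly: start a `prometheuscommunity/postgres-exporter` container pointed at the user's database, attach it to the monitoring network, and register it as a Prometheus file-service-discovery target. A minimal sketch with dockerode (a declared dependency); the names below are illustrative, and the real implementation lives in `server/utils/dockerPostgresExporter.ts`:

```ts
import Docker from 'dockerode';
import { writeFile } from 'node:fs/promises';

const docker = new Docker({ socketPath: '/var/run/docker.sock' });

// Hypothetical helper: one exporter container per connected user database.
export async function startExporter(userId: string, databaseUri: string) {
  const container = await docker.createContainer({
    Image: 'prometheuscommunity/postgres-exporter',
    name: `postgres-exporter-${userId}`,
    Env: [`DATA_SOURCE_NAME=${databaseUri}`],
    HostConfig: { NetworkMode: 'queryhawk_monitoring_network' },
  });
  await container.start();

  // Register the exporter as a file_sd target so Prometheus discovers it
  // (see the file_sd_configs job in prometheus/prometheus.yml).
  await writeFile(
    `/var/prometheus/postgres_targets/${userId}.yml`,
    `- targets: ['postgres-exporter-${userId}:9187']\n`
  );
  return container.id;
}
```

This is why both the backend and Prometheus mount the shared `prometheus_targets` volume in docker-compose.yml: the backend writes target files, and Prometheus picks them up without a restart.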
## Initial Set-up and Installation

### 🔧 Prerequisites

- Docker and Docker Compose
- PostgreSQL database
- GitHub account (for OAuth)

### 💻 Installation

1. Clone the repository:

   ```bash
   git clone [repository-url]
   cd queryhawk
   ```

2. [Download Docker Desktop](https://www.docker.com/products/docker-desktop), install, and open.

3. Configure environment variables:

   - Create a `.env` file in the root directory
   - Add and configure the following environment variables

   ```
   # Example fields (please update each with your real values)
   POSTGRES_URI=postgresql://username:password@hostname:port/database
   GITHUB_CLIENT_ID=your_github_client_id
   GITHUB_CLIENT_SECRET=your_github_client_secret
   JWT_SECRET=your_jwt_secret
   SUPABASE_URI=your_supabase_uri
   ```

4. Start the services:

   ```bash
   docker-compose up -d
   ```

5. Access the application:

   ```
   Frontend: http://localhost:5173
   ```

## Docker Cheatsheet

Steps to create a container:

1. Build the Docker image:

   ```bash
   docker build -t <image-name>:<tag> .
   # Example:
   docker build -t my-server -f server/Dockerfile .
   ```

2. Verify the image was built:

   ```bash
   docker images
   ```

3. Create and start a new container:

   ```bash
   docker run -p <host-port>:<container-port> --name <container-name> <image-name>
   # Example:
   docker run -p 4002:4001 my-server
   ```

```
Find a container's ID or name:
docker ps

Stop a container:
docker stop <container-id> or docker stop <container-name>

Optionally remove a container after stopping it:
docker rm <container-id> or docker rm <container-name>

Rebuild an image defined in docker-compose.yml:
docker-compose build <service-name>

Start all containers from docker-compose.yml:
docker-compose up

Stop and remove the containers created from docker-compose.yml:
docker-compose down

Stop all containers:
docker stop $(docker ps -aq)

Remove all containers:
docker rm $(docker ps -aq)

Remove all images:
docker rmi $(docker images -q)

Remove all volumes:
docker volume rm $(docker volume ls -q)

Remove unused networks:
docker network prune

Remove all unused containers, networks, and images:
docker system prune -a
```

## Technologies

<div align="center">

![JavaScript](https://img.shields.io/badge/JavaScript-F7DF1E?style=for-the-badge&logo=javascript&logoColor=black)
![TypeScript](https://img.shields.io/badge/TypeScript-007ACC?style=for-the-badge&logo=typescript&logoColor=white)
![HTML](https://img.shields.io/badge/HTML-e85a2e?style=for-the-badge&logo=html5&logoColor=white)
![CSS](https://img.shields.io/badge/CSS-2e35e8?style=for-the-badge&logo=css3&logoColor=white)
![React](https://img.shields.io/badge/React-61DAFB?style=for-the-badge&logo=react&logoColor=black)
![React Router](https://img.shields.io/badge/React_Router-CA4245?style=for-the-badge&logo=react-router&logoColor=white)
![Vite](https://img.shields.io/badge/Vite-646CFF?style=for-the-badge&logo=vite&logoColor=white)
![MUI](https://img.shields.io/badge/MUI-007FFF?style=for-the-badge&logo=mui&logoColor=white)
![Node.js](https://img.shields.io/badge/Node.js-339933?style=for-the-badge&logo=node.js&logoColor=white)
![Express.js](https://img.shields.io/badge/Express.js-000000?style=for-the-badge&logo=express&logoColor=white)
![CORS](https://img.shields.io/badge/CORS-000000?style=for-the-badge&logoColor=white)
![PostgreSQL](https://img.shields.io/badge/PostgreSQL-336791?style=for-the-badge&logo=postgresql&logoColor=white)
![Supabase](https://img.shields.io/badge/Supabase-3ECF8E?style=for-the-badge&logo=supabase&logoColor=white)
![Postman](https://img.shields.io/badge/Postman-ff6c37?style=for-the-badge&logo=postman&logoColor=white)
![Docker](https://img.shields.io/badge/Docker-2496ED?style=for-the-badge&logo=docker&logoColor=white)
![NPM](https://img.shields.io/badge/NPM-%23CB3837.svg?style=for-the-badge&logo=npm&logoColor=white)
![Prometheus](https://img.shields.io/badge/Prometheus-E6522C?style=for-the-badge&logo=prometheus&logoColor=white)
![OpenTelemetry](https://img.shields.io/badge/OpenTelemetry-F57600?style=for-the-badge&logo=OpenTelemetry&logoColor=white)
![Grafana](https://img.shields.io/badge/Grafana-F46800?style=for-the-badge&logo=grafana&logoColor=white)
![JWT](https://img.shields.io/badge/JWT-FFAA33?style=for-the-badge&logo=jsonwebtokens&logoColor=white)
![GitHub OAuth](https://img.shields.io/badge/GitHub_OAuth-181717?style=for-the-badge&logo=github&logoColor=white)
![.env](https://img.shields.io/badge/.env-ECD53F?style=for-the-badge&logoColor=white)
![Dockerode](https://img.shields.io/badge/Dockerode-blue?style=for-the-badge&logo=dockerode&logoColor=white)
![TS-Node](https://img.shields.io/badge/TSNode-blue?style=for-the-badge&logo=ts-node&logoColor=white)
![Nodemon](https://img.shields.io/badge/Nodemon-76D04B?style=for-the-badge&logo=nodemon&logoColor=white)
![ESLint](https://img.shields.io/badge/ESLint-4B32C3?style=for-the-badge&logo=eslint&logoColor=white)
</div>

---

## User Interface
<div align="center">

![Login](/src/components/assets/QH_Login.png)

</div>

---

<div align="center">

![Dashboard](/src/components/assets/QH_Dashboard.png)

</div>

---

<div align="center">

![Query](/src/components/assets/QH_Query.png)

</div>

---

<div align="center">

![Metrics](/src/components/assets/QH_Metrics.png)

</div>
252 | 253 | --- 254 | 255 | ## QueryHawk Team 256 | 257 | [![LinkedIn](https://img.shields.io/badge/LinkedIn-QueryHawk-313544?style=flat&logo=linkedin)](https://www.linkedin.com/company/queryhawk) 258 | 259 | - **Bryan Cabanin** 🐒 [GitHub](https://github.com/Bryancabanin) | [LinkedIn](https://www.linkedin.com/in/bryan-cabanin/) 260 | - **Meagan Lewis** 🦜 [GitHub](https://github.com/meaganlewis) | [LinkedIn](https://www.linkedin.com/in/meaganlewis/) 261 | - **Peter Limburg** 🪶 [GitHub](https://github.com/Peter-Limburg) | [LinkedIn](https://www.linkedin.com/in/peterlimburg/) 262 | - **Moe Na** 🐸 [GitHub](https://github.com/wmoew) | [LinkedIn](https://www.linkedin.com/in/mn1098/) 263 | 264 | ## Acknowledgements 265 | 266 | We would like to thank the following resources that helped make QueryHawk possible: 267 | 268 | - **[Material-UI](https://mui.com/)** - Used for designing UI components 269 | - **[Excalidraw](https://excalidraw.com/)** - Used for designing wireframe and planning 270 | 271 | ## License 272 | 273 | This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details. 274 | 275 | #### [Return to top](#queryhawk) 276 | 277 | --- 278 | 279 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.7' 2 | 3 | #ALL VOLUMES AND COMMANDS AND BUILDS WILL NEED TO BE UPDATED WITH ACCURATE 4 | #LOCATIONS. AND WILL ALSO NEED TO CREATE DOCKER FILES WHERE NEEDED 5 | #FOR EXAMPLE WE'LL NEED A DOCKER FILE IN THE SERVER FOLDER FOR THE BACKEND SERVICE 6 | #and will need to add opentelemetry 7 | 8 | services: # This section defines all the services (containers) we want to run 9 | # Supabase - Our main database and auth service 10 | # Runs PostgreSQL and provides real-time capabilities 11 | #format is host_port:container_port 12 | 13 | # Test database to simulate a user's external PostgreSQL instance 14 | # test_user_db: 15 | # image: postgres:13 16 | # ports: 17 | # - '5433:5432' 18 | # environment: 19 | # POSTGRES_PASSWORD: testpass123 20 | # POSTGRES_USER: testuser 21 | # POSTGRES_DB: testdb 22 | # volumes: 23 | # - ./test-init.sql:/docker-entrypoint-initdb.d/test-init.sql 24 | # networks: 25 | # - queryhawk_monitoring_network # Uses the default network created by docker-compose 26 | 27 | # postgres-exporter: 28 | # image: prometheuscommunity/postgres-exporter 29 | # environment: 30 | # - DATA_SOURCE_NAME=postgresql://testuser:testpass123@test_user_db:5432/testdb?sslmode=disable 31 | # ports: 32 | # - '9187:9187' 33 | # networks: 34 | # - queryhawk_monitoring_network 35 | # depends_on: 36 | # - test_user_db 37 | 38 | # Express Backend - Handles our API and query monitoring 39 | backend: 40 | build: 41 | context: . 
42 | dockerfile: server/Dockerfile # Builds using server folder's Dockerfile 43 | ports: 44 | - '4002:4002' # API will be available on localhost:4002 45 | volumes: 46 | # All these volume mappings enable development with hot reloading 47 | - ./server:/app/server # Maps local server code to container 48 | - ./types:/app/types # Maps local types directory for TypeScript types 49 | - ./package.json:/app/package.json # Maps package.json for npm commands 50 | - ./tsconfig.json:/app/tsconfig.json # Maps TypeScript config 51 | - /app/node_modules # Preserves container's node_modules 52 | - /var/run/docker.sock:/var/run/docker.sock 53 | - prometheus_targets:/var/prometheus/postgres_targets 54 | env_file: 55 | - .env 56 | environment: 57 | - PORT=4002 58 | - DATABASE_URL=${POSTGRES_URI} 59 | - GITHUB_CLIENT_ID=${GITHUB_CLIENT_ID} 60 | - GITHUB_CLIENT_SECRET=${GITHUB_CLIENT_SECRET} 61 | - CORS_ORIGIN=http://localhost:5173 62 | - JWT_SECRET=${JWT_SECRET} 63 | - SUPABASE_URI=${SUPABASE_URI} 64 | - NODE_ENV=development 65 | - OTEL_EXPORTER_OTLP_ENDPOINT=http://otel-collector:4316 # OTLP endpoint for OpenTelemetry 66 | depends_on: 67 | - otel-collector 68 | networks: 69 | - queryhawk_monitoring_network 70 | 71 | jaeger: # This is the name we're giving to our service 72 | image: jaegertracing/all-in-one:latest 73 | #ports section maps ports from container to computer 74 | #format is host_port:container_port 75 | ports: 76 | - '6831:6831/udp' # Jaeger thrift compact protocol 77 | - '6832:6832/udp' # Jaeger thrift binary protocol 78 | - '5778:5778' # Jaeger admin port 79 | - '16686:16686' # Jaeger UI 80 | - '4317:4317' # OTLP gRPC 81 | - '4318:4318' # OTLP HTTP 82 | environment: 83 | - COLLECTOR_OTLP_ENABLED=true 84 | networks: 85 | - queryhawk_monitoring_network 86 | 87 | # Prometheus - Collects and stores our metrics 88 | prometheus: 89 | image: prom/prometheus # Official Prometheus image 90 | ports: 91 | - '9090:9090' # Prometheus web interface 92 | volumes: 93 | - ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml # Mount your config file 94 | - prometheus_data:/prometheus # For prometheus data 95 | - prometheus_targets:/var/prometheus/postgres_targets 96 | command: 97 | - '--config.file=/etc/prometheus/prometheus.yml' 98 | - '--web.enable-lifecycle' 99 | container_name: queryhawk-prometheus-1 100 | networks: 101 | - queryhawk_monitoring_network 102 | 103 | otel-collector: 104 | image: otel/opentelemetry-collector 105 | ports: 106 | - '4316:4316' # OTLP gRPC receiver 107 | - '4319:4319' # OTLP HTTP receiver 108 | - '8889:8889' # Prometheus metrics port 109 | volumes: 110 | #left side is file in my project folder and right is where it appears in the container 111 | - ./opentelemetry/otel-config.yml:/etc/otel/config.yml 112 | command: ['--config=/etc/otel/config.yml'] 113 | depends_on: 114 | - jaeger 115 | - prometheus 116 | networks: 117 | - queryhawk_monitoring_network 118 | # Grafana - Creates dashboards from our metrics 119 | grafana: 120 | image: grafana/grafana # Official Grafana image 121 | ports: 122 | - '3001:3000' # Dashboard available on localhost:3001 123 | environment: 124 | - GF_SECURITY_ALLOW_EMBEDDING=true 125 | - GF_AUTH_ANONYMOUS_ENABLED=true 126 | - GF_AUTH_ANONYMOUS_ORG_ROLE=Viewer 127 | - GF_SECURITY_CORS_ENABLED=true 128 | - GF_SECURITY_CORS_ALLOW_ORIGIN=http://localhost:5173 129 | - GF_AUTH_DISABLE_LOGIN_FORM=true 130 | - GF_AUTH_BASIC_ENABLED=false 131 | - GF_FEATURE_TOGGLES_ENABLE=publicDashboards 132 | - GF_PATHS_PROVISIONING=/etc/grafana/provisioning 133 | volumes: 134 
| # Persist dashboard configurations and data
135 |       - grafana_data:/var/lib/grafana
136 |       - ./grafana/provisioning:/etc/grafana/provisioning
137 |       - ./grafana/dashboards:/var/lib/grafana/dashboards
138 |     depends_on:
139 |       - prometheus # Needs Prometheus to start first
140 |     networks:
141 |       - queryhawk_monitoring_network
142 |
143 |   # React Frontend - Our user interface
144 |   frontend:
145 |     build:
146 |       context: . # Build context is the repo root, where the frontend's dependencies live
147 |       dockerfile: src/Dockerfile # Path to your development Dockerfile
148 |     ports:
149 |       - '5173:5173' # Website available on localhost:5173
150 |     volumes:
151 |       # Hot reloading for development
152 |       - ./:/app/ # Maps local code to container
153 |       - /app/node_modules # Keeps node_modules in container
154 |     environment:
155 |       - NODE_ENV=development
156 |       - VITE_GITHUB_CLIENT_ID=${GITHUB_CLIENT_ID}
157 |       - VITE_BACKEND_URL=http://localhost:4002 # Backend API URL
158 |       - VITE_GRAFANA_URL=http://localhost:3001 # Grafana URL
159 |     tty: true
160 |     stdin_open: true
161 |     depends_on:
162 |       - backend # Ensures API is available first
163 |       - grafana # Ensures Grafana is available first
164 |     networks:
165 |       - queryhawk_monitoring_network
166 |
167 | networks:
168 |   queryhawk_monitoring_network:
169 |     name: queryhawk_monitoring_network
170 |     driver: bridge
171 | # Persistent storage for our services
172 | volumes:
173 |   prometheus_config: # Prometheus configuration files
174 |   prometheus_data: # Metrics storage
175 |   prometheus_targets: # Dynamic postgres exporter targets
176 |   grafana_data: # Dashboard configurations
177 |
178 |
-------------------------------------------------------------------------------- /eslint.config.js: --------------------------------------------------------------------------------
1 | import js from '@eslint/js'
2 | import globals from 'globals'
3 | import reactHooks from 'eslint-plugin-react-hooks'
4 | import reactRefresh from 'eslint-plugin-react-refresh'
5 | import tseslint from 'typescript-eslint'
6 |
7 | export default tseslint.config(
8 |   { ignores: ['dist'] },
9 |   {
10 |     extends: [js.configs.recommended, ...tseslint.configs.recommended],
11 |     files: ['**/*.{ts,tsx}'],
12 |     languageOptions: {
13 |       ecmaVersion: 2020,
14 |       globals: globals.browser,
15 |     },
16 |     plugins: {
17 |       'react-hooks': reactHooks,
18 |       'react-refresh': reactRefresh,
19 |     },
20 |     rules: {
21 |       ...reactHooks.configs.recommended.rules,
22 |       'react-refresh/only-export-components': [
23 |         'warn',
24 |         { allowConstantExport: true },
25 |       ],
26 |     },
27 |   },
28 | )
29 |
-------------------------------------------------------------------------------- /grafana/dashboards/postgresql-overview.json: --------------------------------------------------------------------------------
1 | {
2 |   "annotations": {
3 |     "list": []
4 |   },
5 |   "editable": true,
6 |   "fiscalYearStartMonth": 0,
7 |   "graphTooltip": 0,
8 |   "links": [],
9 |   "liveNow": false,
10 |   "panels": [
11 |     {
12 |       "datasource": {
13 |         "type": "prometheus",
14 |         "uid": "prometheus"
15 |       },
16 |       "fieldConfig": {
17 |         "defaults": {
18 |           "color": {
19 |             "mode": "palette-classic"
20 |           },
21 |           "custom": {
22 |             "axisCenteredZero": false,
23 |             "axisColorMode": "text",
24 |             "axisLabel": "",
25 |             "axisPlacement": "auto",
26 |             "barAlignment": 0,
27 |             "drawStyle": "line",
28 |             "fillOpacity": 10,
29 |             "gradientMode": "none",
30 |             "hideFrom": {
31 |               "legend": false,
32 |               "tooltip": false,
33 |               "viz": false
34 |             },
35 |             "lineInterpolation": "linear",
36 |             "lineWidth": 1,
37 |             "pointSize": 5,
38 |
"scaleDistribution": { 39 | "type": "linear" 40 | }, 41 | "showPoints": "never", 42 | "spanNulls": false, 43 | "stacking": { 44 | "group": "A", 45 | "mode": "none" 46 | }, 47 | "thresholdsStyle": { 48 | "mode": "off" 49 | } 50 | }, 51 | "mappings": [], 52 | "thresholds": { 53 | "mode": "absolute", 54 | "steps": [ 55 | { 56 | "color": "green", 57 | "value": null 58 | } 59 | ] 60 | }, 61 | "unit": "s" 62 | }, 63 | "overrides": [] 64 | }, 65 | "gridPos": { 66 | "h": 8, 67 | "w": 12, 68 | "x": 0, 69 | "y": 0 70 | }, 71 | "id": 1, 72 | "options": { 73 | "legend": { 74 | "calcs": ["mean", "max"], 75 | "displayMode": "table", 76 | "placement": "bottom", 77 | "showLegend": true 78 | }, 79 | "tooltip": { 80 | "mode": "multi", 81 | "sort": "none" 82 | } 83 | }, 84 | "targets": [ 85 | { 86 | "datasource": { 87 | "type": "prometheus", 88 | "uid": "prometheus" 89 | }, 90 | "editorMode": "code", 91 | "expr": "rate(pg_stat_database_xact_commit{datname=~\"$database\"}[$__rate_interval])", 92 | "legendFormat": "{{datname}} - commits", 93 | "range": true, 94 | "refId": "A" 95 | } 96 | ], 97 | "title": "Transaction Rate", 98 | "type": "timeseries" 99 | }, 100 | { 101 | "datasource": { 102 | "type": "prometheus", 103 | "uid": "prometheus" 104 | }, 105 | "fieldConfig": { 106 | "defaults": { 107 | "color": { 108 | "mode": "thresholds" 109 | }, 110 | "mappings": [], 111 | "thresholds": { 112 | "mode": "absolute", 113 | "steps": [ 114 | { 115 | "color": "green", 116 | "value": null 117 | }, 118 | { 119 | "color": "red", 120 | "value": 80 121 | } 122 | ] 123 | }, 124 | "unit": "percent" 125 | }, 126 | "overrides": [] 127 | }, 128 | "gridPos": { 129 | "h": 8, 130 | "w": 12, 131 | "x": 12, 132 | "y": 0 133 | }, 134 | "id": 2, 135 | "options": { 136 | "orientation": "auto", 137 | "reduceOptions": { 138 | "calcs": ["lastNotNull"], 139 | "fields": "", 140 | "values": false 141 | }, 142 | "showThresholdLabels": false, 143 | "showThresholdMarkers": true 144 | }, 145 | "pluginVersion": "10.0.3", 146 | "targets": [ 147 | { 148 | "datasource": { 149 | "type": "prometheus", 150 | "uid": "prometheus" 151 | }, 152 | "editorMode": "code", 153 | "expr": "pg_stat_database_blks_hit{datname=~\"$database\"} / (pg_stat_database_blks_hit{datname=~\"$database\"} + pg_stat_database_blks_read{datname=~\"$database\"}) * 100", 154 | "legendFormat": "{{datname}}", 155 | "range": true, 156 | "refId": "A" 157 | } 158 | ], 159 | "title": "Cache Hit Ratio", 160 | "type": "gauge" 161 | }, 162 | { 163 | "datasource": { 164 | "type": "prometheus", 165 | "uid": "prometheus" 166 | }, 167 | "description": "Shows the current number of active connections to the database. 
Helps monitor connection usage and detect potential connection leaks.", 168 | "fieldConfig": { 169 | "defaults": { 170 | "color": { 171 | "mode": "palette-classic" 172 | }, 173 | "custom": { 174 | "axisCenteredZero": false, 175 | "axisColorMode": "text", 176 | "axisLabel": "", 177 | "axisPlacement": "auto", 178 | "barAlignment": 0, 179 | "drawStyle": "line", 180 | "fillOpacity": 10, 181 | "gradientMode": "none", 182 | "hideFrom": { 183 | "legend": false, 184 | "tooltip": false, 185 | "viz": false 186 | }, 187 | "lineInterpolation": "linear", 188 | "lineWidth": 1, 189 | "pointSize": 5, 190 | "scaleDistribution": { 191 | "type": "linear" 192 | }, 193 | "showPoints": "never", 194 | "spanNulls": false, 195 | "stacking": { 196 | "group": "A", 197 | "mode": "none" 198 | }, 199 | "thresholdsStyle": { 200 | "mode": "off" 201 | } 202 | }, 203 | "mappings": [], 204 | "thresholds": { 205 | "mode": "absolute", 206 | "steps": [ 207 | { 208 | "color": "green", 209 | "value": null 210 | } 211 | ] 212 | } 213 | }, 214 | "overrides": [] 215 | }, 216 | "gridPos": { 217 | "h": 8, 218 | "w": 12, 219 | "x": 0, 220 | "y": 8 221 | }, 222 | "id": 3, 223 | "options": { 224 | "legend": { 225 | "calcs": ["mean", "max"], 226 | "displayMode": "table", 227 | "placement": "bottom", 228 | "showLegend": true 229 | }, 230 | "tooltip": { 231 | "mode": "multi", 232 | "sort": "none" 233 | } 234 | }, 235 | "targets": [ 236 | { 237 | "datasource": { 238 | "type": "prometheus", 239 | "uid": "prometheus" 240 | }, 241 | "editorMode": "code", 242 | "expr": "pg_stat_database_numbackends{datname=~\"$database\"}", 243 | "legendFormat": "{{datname}} - connections", 244 | "range": true, 245 | "refId": "A" 246 | } 247 | ], 248 | "title": "Active Connections", 249 | "type": "timeseries" 250 | }, 251 | { 252 | "datasource": { 253 | "type": "prometheus", 254 | "uid": "prometheus" 255 | }, 256 | "description": "Shows the 95th and 50th percentile of query execution time, helping to identify performance issues.", 257 | "fieldConfig": { 258 | "defaults": { 259 | "color": { 260 | "mode": "palette-classic" 261 | }, 262 | "custom": { 263 | "axisCenteredZero": false, 264 | "axisColorMode": "text", 265 | "axisLabel": "", 266 | "axisPlacement": "auto", 267 | "barAlignment": 0, 268 | "drawStyle": "line", 269 | "fillOpacity": 10, 270 | "gradientMode": "none", 271 | "hideFrom": { 272 | "legend": false, 273 | "tooltip": false, 274 | "viz": false 275 | }, 276 | "lineInterpolation": "linear", 277 | "lineWidth": 1, 278 | "pointSize": 5, 279 | "scaleDistribution": { 280 | "type": "linear" 281 | }, 282 | "showPoints": "never", 283 | "spanNulls": false, 284 | "stacking": { 285 | "group": "A", 286 | "mode": "none" 287 | }, 288 | "thresholdsStyle": { 289 | "mode": "off" 290 | } 291 | }, 292 | "mappings": [], 293 | "thresholds": { 294 | "mode": "absolute", 295 | "steps": [ 296 | { 297 | "color": "green", 298 | "value": null 299 | } 300 | ] 301 | }, 302 | "unit": "ms" 303 | }, 304 | "overrides": [] 305 | }, 306 | "gridPos": { 307 | "h": 8, 308 | "w": 12, 309 | "x": 12, 310 | "y": 8 311 | }, 312 | "id": 4, 313 | "options": { 314 | "legend": { 315 | "calcs": ["mean", "max"], 316 | "displayMode": "table", 317 | "placement": "bottom", 318 | "showLegend": true 319 | }, 320 | "tooltip": { 321 | "mode": "multi", 322 | "sort": "none" 323 | } 324 | }, 325 | "targets": [ 326 | { 327 | "datasource": { 328 | "type": "prometheus", 329 | "uid": "prometheus" 330 | }, 331 | "editorMode": "code", 332 | "expr": "histogram_quantile(0.95, 
sum(rate(pg_stat_statements_exec_time_seconds_bucket{datname=~\"$database\"}[$__rate_interval])) by (le, datname))*1000", 333 | "legendFormat": "{{datname}} - p95", 334 | "range": true, 335 | "refId": "A" 336 | }, 337 | { 338 | "datasource": { 339 | "type": "prometheus", 340 | "uid": "prometheus" 341 | }, 342 | "editorMode": "code", 343 | "expr": "histogram_quantile(0.5, sum(rate(pg_stat_statements_exec_time_seconds_bucket{datname=~\"$database\"}[$__rate_interval])) by (le, datname))*1000", 344 | "legendFormat": "{{datname}} - p50", 345 | "range": true, 346 | "refId": "B" 347 | } 348 | ], 349 | "title": "Query Execution Time", 350 | "type": "timeseries" 351 | }, 352 | { 353 | "datasource": { 354 | "type": "prometheus", 355 | "uid": "prometheus" 356 | }, 357 | "description": "Shows tuple operations (inserts, updates, deletes, fetches) to understand database workload patterns.", 358 | "fieldConfig": { 359 | "defaults": { 360 | "color": { 361 | "mode": "palette-classic" 362 | }, 363 | "custom": { 364 | "axisCenteredZero": false, 365 | "axisColorMode": "text", 366 | "axisLabel": "", 367 | "axisPlacement": "auto", 368 | "barAlignment": 0, 369 | "drawStyle": "line", 370 | "fillOpacity": 10, 371 | "gradientMode": "none", 372 | "hideFrom": { 373 | "legend": false, 374 | "tooltip": false, 375 | "viz": false 376 | }, 377 | "lineInterpolation": "linear", 378 | "lineWidth": 1, 379 | "pointSize": 5, 380 | "scaleDistribution": { 381 | "type": "linear" 382 | }, 383 | "showPoints": "never", 384 | "spanNulls": false, 385 | "stacking": { 386 | "group": "A", 387 | "mode": "none" 388 | }, 389 | "thresholdsStyle": { 390 | "mode": "off" 391 | } 392 | }, 393 | "mappings": [], 394 | "thresholds": { 395 | "mode": "absolute", 396 | "steps": [ 397 | { 398 | "color": "green", 399 | "value": null 400 | } 401 | ] 402 | }, 403 | "unit": "ops" 404 | }, 405 | "overrides": [] 406 | }, 407 | "gridPos": { 408 | "h": 8, 409 | "w": 12, 410 | "x": 0, 411 | "y": 16 412 | }, 413 | "id": 5, 414 | "options": { 415 | "legend": { 416 | "calcs": ["mean", "max"], 417 | "displayMode": "table", 418 | "placement": "bottom", 419 | "showLegend": true 420 | }, 421 | "tooltip": { 422 | "mode": "multi", 423 | "sort": "none" 424 | } 425 | }, 426 | "targets": [ 427 | { 428 | "datasource": { 429 | "type": "prometheus", 430 | "uid": "prometheus" 431 | }, 432 | "editorMode": "code", 433 | "expr": "rate(pg_stat_database_tup_inserted{datname=~\"$database\"}[$__rate_interval])", 434 | "legendFormat": "{{datname}} - inserts", 435 | "range": true, 436 | "refId": "A" 437 | }, 438 | { 439 | "datasource": { 440 | "type": "prometheus", 441 | "uid": "prometheus" 442 | }, 443 | "editorMode": "code", 444 | "expr": "rate(pg_stat_database_tup_updated{datname=~\"$database\"}[$__rate_interval])", 445 | "legendFormat": "{{datname}} - updates", 446 | "range": true, 447 | "refId": "B" 448 | }, 449 | { 450 | "datasource": { 451 | "type": "prometheus", 452 | "uid": "prometheus" 453 | }, 454 | "editorMode": "code", 455 | "expr": "rate(pg_stat_database_tup_deleted{datname=~\"$database\"}[$__rate_interval])", 456 | "legendFormat": "{{datname}} - deletes", 457 | "range": true, 458 | "refId": "C" 459 | }, 460 | { 461 | "datasource": { 462 | "type": "prometheus", 463 | "uid": "prometheus" 464 | }, 465 | "editorMode": "code", 466 | "expr": "rate(pg_stat_database_tup_fetched{datname=~\"$database\"}[$__rate_interval])", 467 | "legendFormat": "{{datname}} - fetches", 468 | "range": true, 469 | "refId": "D" 470 | } 471 | ], 472 | "title": "Tuple Operations", 473 | "type": 
"timeseries" 474 | }, 475 | { 476 | "datasource": { 477 | "type": "prometheus", 478 | "uid": "prometheus" 479 | }, 480 | "description": "Shows lock acquisition metrics to help identify contention issues that may impact performance.", 481 | "fieldConfig": { 482 | "defaults": { 483 | "color": { 484 | "mode": "palette-classic" 485 | }, 486 | "custom": { 487 | "axisCenteredZero": false, 488 | "axisColorMode": "text", 489 | "axisLabel": "", 490 | "axisPlacement": "auto", 491 | "barAlignment": 0, 492 | "drawStyle": "line", 493 | "fillOpacity": 10, 494 | "gradientMode": "none", 495 | "hideFrom": { 496 | "legend": false, 497 | "tooltip": false, 498 | "viz": false 499 | }, 500 | "lineInterpolation": "linear", 501 | "lineWidth": 1, 502 | "pointSize": 5, 503 | "scaleDistribution": { 504 | "type": "linear" 505 | }, 506 | "showPoints": "never", 507 | "spanNulls": false, 508 | "stacking": { 509 | "group": "A", 510 | "mode": "none" 511 | }, 512 | "thresholdsStyle": { 513 | "mode": "off" 514 | } 515 | }, 516 | "mappings": [], 517 | "thresholds": { 518 | "mode": "absolute", 519 | "steps": [ 520 | { 521 | "color": "green", 522 | "value": null 523 | } 524 | ] 525 | } 526 | }, 527 | "overrides": [] 528 | }, 529 | "gridPos": { 530 | "h": 8, 531 | "w": 12, 532 | "x": 12, 533 | "y": 16 534 | }, 535 | "id": 6, 536 | "options": { 537 | "legend": { 538 | "calcs": ["mean", "max"], 539 | "displayMode": "table", 540 | "placement": "bottom", 541 | "showLegend": true 542 | }, 543 | "tooltip": { 544 | "mode": "multi", 545 | "sort": "none" 546 | } 547 | }, 548 | "targets": [ 549 | { 550 | "datasource": { 551 | "type": "prometheus", 552 | "uid": "prometheus" 553 | }, 554 | "editorMode": "code", 555 | "expr": "pg_locks_count{datname=~\"$database\", mode=\"accessexclusivelock\"}", 556 | "legendFormat": "{{datname}} - exclusive", 557 | "range": true, 558 | "refId": "A" 559 | }, 560 | { 561 | "datasource": { 562 | "type": "prometheus", 563 | "uid": "prometheus" 564 | }, 565 | "editorMode": "code", 566 | "expr": "pg_locks_count{datname=~\"$database\", mode=\"exclusivelock\"}", 567 | "legendFormat": "{{datname}} - exclusive row", 568 | "range": true, 569 | "refId": "B" 570 | }, 571 | { 572 | "datasource": { 573 | "type": "prometheus", 574 | "uid": "prometheus" 575 | }, 576 | "editorMode": "code", 577 | "expr": "pg_locks_count{datname=~\"$database\", mode=\"sharelock\"}", 578 | "legendFormat": "{{datname}} - share", 579 | "range": true, 580 | "refId": "C" 581 | }, 582 | { 583 | "datasource": { 584 | "type": "prometheus", 585 | "uid": "prometheus" 586 | }, 587 | "editorMode": "code", 588 | "expr": "pg_locks_count{datname=~\"$database\", mode=\"accesssharelock\"}", 589 | "legendFormat": "{{datname}} - read", 590 | "range": true, 591 | "refId": "D" 592 | } 593 | ], 594 | "title": "Lock Metrics", 595 | "type": "timeseries" 596 | }, 597 | { 598 | "datasource": { 599 | "type": "prometheus", 600 | "uid": "prometheus" 601 | }, 602 | "description": "Shows disk I/O operations to identify potential disk pressure causing performance issues.", 603 | "fieldConfig": { 604 | "defaults": { 605 | "color": { 606 | "mode": "palette-classic" 607 | }, 608 | "custom": { 609 | "axisCenteredZero": false, 610 | "axisColorMode": "text", 611 | "axisLabel": "", 612 | "axisPlacement": "auto", 613 | "barAlignment": 0, 614 | "drawStyle": "line", 615 | "fillOpacity": 10, 616 | "gradientMode": "none", 617 | "hideFrom": { 618 | "legend": false, 619 | "tooltip": false, 620 | "viz": false 621 | }, 622 | "lineInterpolation": "linear", 623 | "lineWidth": 1, 624 | 
"pointSize": 5, 625 | "scaleDistribution": { 626 | "type": "linear" 627 | }, 628 | "showPoints": "never", 629 | "spanNulls": false, 630 | "stacking": { 631 | "group": "A", 632 | "mode": "none" 633 | }, 634 | "thresholdsStyle": { 635 | "mode": "off" 636 | } 637 | }, 638 | "mappings": [], 639 | "thresholds": { 640 | "mode": "absolute", 641 | "steps": [ 642 | { 643 | "color": "green", 644 | "value": null 645 | } 646 | ] 647 | }, 648 | "unit": "Bps" 649 | }, 650 | "overrides": [] 651 | }, 652 | "gridPos": { 653 | "h": 8, 654 | "w": 12, 655 | "x": 0, 656 | "y": 24 657 | }, 658 | "id": 7, 659 | "options": { 660 | "legend": { 661 | "calcs": ["mean", "max"], 662 | "displayMode": "table", 663 | "placement": "bottom", 664 | "showLegend": true 665 | }, 666 | "tooltip": { 667 | "mode": "multi", 668 | "sort": "none" 669 | } 670 | }, 671 | "targets": [ 672 | { 673 | "datasource": { 674 | "type": "prometheus", 675 | "uid": "prometheus" 676 | }, 677 | "editorMode": "code", 678 | "expr": "rate(pg_stat_database_blks_read{datname=~\"$database\"}[$__rate_interval]) * 8192", 679 | "legendFormat": "{{datname}} - read bytes", 680 | "range": true, 681 | "refId": "A" 682 | }, 683 | { 684 | "datasource": { 685 | "type": "prometheus", 686 | "uid": "prometheus" 687 | }, 688 | "editorMode": "code", 689 | "expr": "rate(pg_stat_database_blks_hit{datname=~\"$database\"}[$__rate_interval]) * 8192", 690 | "legendFormat": "{{datname}} - cache hit bytes", 691 | "range": true, 692 | "refId": "B" 693 | } 694 | ], 695 | "title": "I/O Statistics", 696 | "type": "timeseries" 697 | }, 698 | { 699 | "datasource": { 700 | "type": "prometheus", 701 | "uid": "prometheus" 702 | }, 703 | "description": "Shows index vs. sequential scan usage, which helps identify inefficient queries that may be causing CPU spikes.", 704 | "fieldConfig": { 705 | "defaults": { 706 | "color": { 707 | "mode": "palette-classic" 708 | }, 709 | "custom": { 710 | "axisCenteredZero": false, 711 | "axisColorMode": "text", 712 | "axisLabel": "", 713 | "axisPlacement": "auto", 714 | "barAlignment": 0, 715 | "drawStyle": "line", 716 | "fillOpacity": 10, 717 | "gradientMode": "none", 718 | "hideFrom": { 719 | "legend": false, 720 | "tooltip": false, 721 | "viz": false 722 | }, 723 | "lineInterpolation": "linear", 724 | "lineWidth": 1, 725 | "pointSize": 5, 726 | "scaleDistribution": { 727 | "type": "linear" 728 | }, 729 | "showPoints": "never", 730 | "spanNulls": false, 731 | "stacking": { 732 | "group": "A", 733 | "mode": "none" 734 | }, 735 | "thresholdsStyle": { 736 | "mode": "off" 737 | } 738 | }, 739 | "mappings": [], 740 | "thresholds": { 741 | "mode": "absolute", 742 | "steps": [ 743 | { 744 | "color": "green", 745 | "value": null 746 | } 747 | ] 748 | }, 749 | "unit": "ops" 750 | }, 751 | "overrides": [] 752 | }, 753 | "gridPos": { 754 | "h": 8, 755 | "w": 12, 756 | "x": 12, 757 | "y": 24 758 | }, 759 | "id": 8, 760 | "options": { 761 | "legend": { 762 | "calcs": ["mean", "max"], 763 | "displayMode": "table", 764 | "placement": "bottom", 765 | "showLegend": true 766 | }, 767 | "tooltip": { 768 | "mode": "multi", 769 | "sort": "none" 770 | } 771 | }, 772 | "targets": [ 773 | { 774 | "datasource": { 775 | "type": "prometheus", 776 | "uid": "prometheus" 777 | }, 778 | "editorMode": "code", 779 | "expr": "sum(rate(pg_stat_user_tables_idx_scan{datname=~\"$database\"}[$__rate_interval])) by (datname)", 780 | "legendFormat": "{{datname}} - index scans", 781 | "range": true, 782 | "refId": "A" 783 | }, 784 | { 785 | "datasource": { 786 | "type": "prometheus", 787 | 
"uid": "prometheus" 788 | }, 789 | "editorMode": "code", 790 | "expr": "sum(rate(pg_stat_user_tables_seq_scan{datname=~\"$database\"}[$__rate_interval])) by (datname)", 791 | "legendFormat": "{{datname}} - sequential scans", 792 | "range": true, 793 | "refId": "B" 794 | } 795 | ], 796 | "title": "Index Usage", 797 | "type": "timeseries" 798 | }, 799 | { 800 | "datasource": { 801 | "type": "prometheus", 802 | "uid": "prometheus" 803 | }, 804 | "description": "Shows database specific metrics for transaction rollbacks, which can identify failing operations.", 805 | "fieldConfig": { 806 | "defaults": { 807 | "color": { 808 | "mode": "palette-classic" 809 | }, 810 | "custom": { 811 | "axisCenteredZero": false, 812 | "axisColorMode": "text", 813 | "axisLabel": "", 814 | "axisPlacement": "auto", 815 | "barAlignment": 0, 816 | "drawStyle": "line", 817 | "fillOpacity": 10, 818 | "gradientMode": "none", 819 | "hideFrom": { 820 | "legend": false, 821 | "tooltip": false, 822 | "viz": false 823 | }, 824 | "lineInterpolation": "linear", 825 | "lineWidth": 1, 826 | "pointSize": 5, 827 | "scaleDistribution": { 828 | "type": "linear" 829 | }, 830 | "showPoints": "never", 831 | "spanNulls": false, 832 | "stacking": { 833 | "group": "A", 834 | "mode": "none" 835 | }, 836 | "thresholdsStyle": { 837 | "mode": "off" 838 | } 839 | }, 840 | "mappings": [], 841 | "thresholds": { 842 | "mode": "absolute", 843 | "steps": [ 844 | { 845 | "color": "green", 846 | "value": null 847 | } 848 | ] 849 | }, 850 | "unit": "ops" 851 | }, 852 | "overrides": [] 853 | }, 854 | "gridPos": { 855 | "h": 8, 856 | "w": 12, 857 | "x": 0, 858 | "y": 32 859 | }, 860 | "id": 9, 861 | "options": { 862 | "legend": { 863 | "calcs": ["mean", "max"], 864 | "displayMode": "table", 865 | "placement": "bottom", 866 | "showLegend": true 867 | }, 868 | "tooltip": { 869 | "mode": "multi", 870 | "sort": "none" 871 | } 872 | }, 873 | "targets": [ 874 | { 875 | "datasource": { 876 | "type": "prometheus", 877 | "uid": "prometheus" 878 | }, 879 | "editorMode": "code", 880 | "expr": "rate(pg_stat_database_xact_rollback{datname=~\"$database\"}[$__rate_interval])", 881 | "legendFormat": "{{datname}} - rollbacks", 882 | "range": true, 883 | "refId": "A" 884 | }, 885 | { 886 | "datasource": { 887 | "type": "prometheus", 888 | "uid": "prometheus" 889 | }, 890 | "editorMode": "code", 891 | "expr": "rate(pg_stat_database_xact_commit{datname=~\"$database\"}[$__rate_interval])", 892 | "legendFormat": "{{datname}} - commits", 893 | "range": true, 894 | "refId": "B" 895 | } 896 | ], 897 | "title": "Transaction Commits vs Rollbacks", 898 | "type": "timeseries" 899 | }, 900 | { 901 | "datasource": { 902 | "type": "prometheus", 903 | "uid": "prometheus" 904 | }, 905 | "description": "Shows statistics about long-running queries that might be causing performance issues.", 906 | "fieldConfig": { 907 | "defaults": { 908 | "color": { 909 | "mode": "palette-classic" 910 | }, 911 | "custom": { 912 | "axisCenteredZero": false, 913 | "axisColorMode": "text", 914 | "axisLabel": "", 915 | "axisPlacement": "auto", 916 | "barAlignment": 0, 917 | "drawStyle": "line", 918 | "fillOpacity": 10, 919 | "gradientMode": "none", 920 | "hideFrom": { 921 | "legend": false, 922 | "tooltip": false, 923 | "viz": false 924 | }, 925 | "lineInterpolation": "linear", 926 | "lineWidth": 1, 927 | "pointSize": 5, 928 | "scaleDistribution": { 929 | "type": "linear" 930 | }, 931 | "showPoints": "never", 932 | "spanNulls": false, 933 | "stacking": { 934 | "group": "A", 935 | "mode": "none" 936 | }, 937 | 
"thresholdsStyle": { 938 | "mode": "off" 939 | } 940 | }, 941 | "mappings": [], 942 | "thresholds": { 943 | "mode": "absolute", 944 | "steps": [ 945 | { 946 | "color": "green", 947 | "value": null 948 | } 949 | ] 950 | } 951 | }, 952 | "overrides": [] 953 | }, 954 | "gridPos": { 955 | "h": 8, 956 | "w": 12, 957 | "x": 12, 958 | "y": 32 959 | }, 960 | "id": 10, 961 | "options": { 962 | "legend": { 963 | "calcs": ["mean", "max"], 964 | "displayMode": "table", 965 | "placement": "bottom", 966 | "showLegend": true 967 | }, 968 | "tooltip": { 969 | "mode": "multi", 970 | "sort": "none" 971 | } 972 | }, 973 | "targets": [ 974 | { 975 | "datasource": { 976 | "type": "prometheus", 977 | "uid": "prometheus" 978 | }, 979 | "editorMode": "code", 980 | "expr": "pg_stat_activity_count{datname=~\"$database\", state=\"active\"}", 981 | "legendFormat": "{{datname}} - active queries", 982 | "range": true, 983 | "refId": "A" 984 | }, 985 | { 986 | "datasource": { 987 | "type": "prometheus", 988 | "uid": "prometheus" 989 | }, 990 | "editorMode": "code", 991 | "expr": "pg_stat_activity_max_tx_duration{datname=~\"$database\"}", 992 | "legendFormat": "{{datname}} - longest transaction (s)", 993 | "range": true, 994 | "refId": "B" 995 | }, 996 | { 997 | "datasource": { 998 | "type": "prometheus", 999 | "uid": "prometheus" 1000 | }, 1001 | "editorMode": "code", 1002 | "expr": "pg_stat_activity_count{datname=~\"$database\", state=\"idle in transaction\"}", 1003 | "legendFormat": "{{datname}} - idle in transaction", 1004 | "range": true, 1005 | "refId": "C" 1006 | } 1007 | ], 1008 | "title": "Long-Running Queries", 1009 | "type": "timeseries" 1010 | } 1011 | ], 1012 | "refresh": "5s", 1013 | "schemaVersion": 38, 1014 | "style": "dark", 1015 | "tags": ["postgresql", "queryhawk"], 1016 | "templating": { 1017 | "list": [ 1018 | { 1019 | "current": { 1020 | "selected": false, 1021 | "text": "All", 1022 | "value": "$__all" 1023 | }, 1024 | "datasource": { 1025 | "type": "prometheus", 1026 | "uid": "prometheus" 1027 | }, 1028 | "definition": "label_values(pg_stat_database_tup_fetched, datname)", 1029 | "hide": 0, 1030 | "includeAll": true, 1031 | "label": "Database", 1032 | "multi": false, 1033 | "name": "database", 1034 | "options": [], 1035 | "query": { 1036 | "query": "label_values(pg_stat_database_tup_fetched, datname)", 1037 | "refId": "StandardVariableQuery" 1038 | }, 1039 | "refresh": 1, 1040 | "regex": "", 1041 | "skipUrlSync": false, 1042 | "sort": 1, 1043 | "type": "query" 1044 | } 1045 | ] 1046 | }, 1047 | "time": { 1048 | "from": "now-6h", 1049 | "to": "now" 1050 | }, 1051 | "timepicker": {}, 1052 | "timezone": "", 1053 | "title": "PostgreSQL Overview", 1054 | "uid": "postgresql-overview", 1055 | "version": 1, 1056 | "weekStart": "" 1057 | } -------------------------------------------------------------------------------- /grafana/provisioning/dashboards/dashboards.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | # Dashboard Providers Configuration 4 | # Defines how Grafana should load and manage dashboards 5 | providers: 6 | - name: 'QueryHawk' # Unique name for the provider 7 | orgId: 1 # Organization ID in Grafana 8 | type: file # Provider type - loads dashboards from files 9 | disableDeletion: false # Allow dashboard deletion 10 | allowUiUpdates: true # Allow editing dashboards through the UI 11 | options: 12 | # Path where dashboard JSON files are stored 13 | path: /var/lib/grafana/dashboards 14 | # Create folders based on dashboard file 
directory structure
15 |   foldersFromFilesStructure: true
-------------------------------------------------------------------------------- /grafana/provisioning/datasources/datasource.yml: --------------------------------------------------------------------------------
1 | # Grafana API version - required for all provisioning configs
2 | apiVersion: 1
3 | # Data Sources Configuration
4 | # First, remove any existing data sources to prevent duplicates
5 | deleteDatasources:
6 |   - name: Prometheus
7 |     orgId: 1
8 |   - name: PostgreSQL
9 |     orgId: 1
10 | # Define all data sources that should be automatically configured
11 | datasources:
12 |   - name: Prometheus
13 |     type: prometheus
14 |     access: proxy # How Grafana accesses the data source
15 |     uid: prometheus
16 |     url: http://prometheus:9090 # Internal Docker network URL
17 |     isDefault: true # Make this the default data source
18 |     jsonData:
19 |       timeInterval: 15s # Minimum time interval between data points
20 |       queryTimeout: "60s" # Maximum time for queries to run
21 |       httpMethod: "POST" # HTTP method for queries
22 |     editable: true # Allow editing through the UI
23 |     version: 1
24 |   # PostgreSQL Data Source
25 |   - name: PostgreSQL
26 |     type: postgres
27 |     url: supabase:5432
28 |     user: postgres # Database user
29 |     secureJsonData:
30 |       password: ${POSTGRES_PASSWORD}
31 |     jsonData:
32 |       sslmode: "disable" # Disable SSL for local development
33 |       maxOpenConns: 100 # Maximum number of open connections
34 |       maxIdleConns: 100 # Maximum number of idle connections
35 |       connMaxLifetime: 14400 # Maximum connection lifetime in seconds
36 |       postgresVersion: 1300 # PostgreSQL version (13.00)
37 |       timescaledb: false # TimescaleDB support disabled
38 |     editable: true
39 |     version: 1
40 |   # Jaeger Data Source for distributed tracing
41 |   - name: Jaeger
42 |     type: jaeger
43 |     access: proxy
44 |     url: http://jaeger:16686 # Internal Docker network URL for Jaeger
45 |     jsonData:
46 |       nodeGraph:
47 |         enabled: true # Enable node graph visualization
48 |     editable: true
49 |     version: 1
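Grafana is not the only consumer of this Prometheus instance: the backend's monitoring endpoints can read the same data through Prometheus's HTTP query API. A hypothetical sketch (the function name is illustrative; the project's real logic would live in server/controllers/monitoringController.ts):

```ts
// Query Prometheus's HTTP API directly, the same instance Grafana reads from.
// Inside the Docker network the URL is http://prometheus:9090.
export async function queryPrometheus(promql: string): Promise<number | null> {
  const url = new URL('http://prometheus:9090/api/v1/query');
  url.searchParams.set('query', promql);
  const res = await fetch(url);
  const body = await res.json();
  // Instant queries return { data: { result: [{ value: [timestamp, "value"] }] } }.
  const sample = body.data?.result?.[0];
  return sample ? Number(sample.value[1]) : null;
}

// Example: current connection count for one database.
// await queryPrometheus('pg_stat_database_numbackends{datname="testdb"}');
```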
-------------------------------------------------------------------------------- /index.html: --------------------------------------------------------------------------------
1 | <!doctype html>
2 | <html lang="en">
3 |   <head>
4 |     <meta charset="UTF-8" />
5 |     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
6 |     <title>QueryHawk</title>
7 |   </head>
8 |   <body>
9 |     <div id="root"></div>
10 |     <script type="module" src="/src/main.tsx"></script>
11 |   </body>
12 | </html>
13 |
-------------------------------------------------------------------------------- /opentelemetry/otel-config.yml: --------------------------------------------------------------------------------
1 | # Q: how does this file relate to other files?
2 | # A: Your App → Collector → Monitoring Tools
3 | #    (tracing.ts)   (otel-config.yml)   (Jaeger/Prometheus)
4 | # relevant documentation: https://opentelemetry.io/docs/collector/configuration/
5 | # Create folder: opentelemetry/otel-config.yml
6 |
7 | # Receivers: Where data comes in
8 | receivers:
9 |   otlp: # OpenTelemetry protocol
10 |     protocols:
11 |       grpc: # For applications using gRPC
12 |         endpoint: 0.0.0.0:4316 # Port where it listens
13 |         # TLS is commented out; transport encryption isn't needed for local development
14 |         # tls:
15 |         #   cert_file: cert.pem
16 |         #   key_file: cert-key.pem
17 |       http: # For applications using HTTP
18 |         endpoint: 0.0.0.0:4319 # Port where it listens
19 |
20 | # Processors: What to do with the data before sending
21 | processors:
22 |   batch: # Groups data into batches for efficiency
23 |     timeout: 1s # Send at least every second
24 |     send_batch_size: 1024 # Or when we have 1024 items
25 |
26 | # Exporters: Where to send the processed data
27 | exporters:
28 |   otlp: # Sends traces to Jaeger over OTLP
29 |     endpoint: jaeger:4317 # Docker service name & port
30 |     tls:
31 |       insecure: true # Dev setup - don't need security
32 |
33 |   prometheus: # Sends metrics to Prometheus
34 |     endpoint: '0.0.0.0:8889' # Where Prometheus can collect metrics
35 |     namespace: 'queryhawk' # Groups metrics under this name
36 |
37 | # Service: How everything connects together
38 | # Pipelines can be of type traces, metrics, or logs.
39 | # A pipeline is a set of receivers, processors, and exporters, each defined earlier in the file.
40 | service:
41 |   pipelines:
42 |     traces: # Pipeline for trace data
43 |       receivers: [otlp] # Get data from receivers
44 |       processors: [batch] # Process it
45 |       exporters: [otlp] # Send to Jaeger
46 |
47 |     metrics: # Pipeline for metrics
48 |       receivers: [otlp] # Get data from receivers
49 |       processors: [batch] # Process it
50 |       exporters: [prometheus] # Send to Prometheus
51 |
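On the application side of this pipeline sits server/tracing.ts. A minimal sketch of how it might wire up the Node SDK, assuming the OTLP HTTP exporter and the collector's 4319 HTTP receiver configured above (the project's actual tracing.ts may differ):

```ts
import { NodeSDK } from '@opentelemetry/sdk-node';
import { OTLPTraceExporter } from '@opentelemetry/exporter-trace-otlp-http';
import { getNodeAutoInstrumentations } from '@opentelemetry/auto-instrumentations-node';

// Send spans to the collector's OTLP HTTP receiver; the collector then
// batches them and forwards traces to Jaeger per the pipeline above.
const sdk = new NodeSDK({
  serviceName: 'queryhawk-backend', // illustrative service name
  traceExporter: new OTLPTraceExporter({
    url: 'http://otel-collector:4319/v1/traces',
  }),
  // Auto-instruments Express, pg, http, etc. without manual span code.
  instrumentations: [getNodeAutoInstrumentations()],
});

sdk.start();
```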
"^2.8.5", 32 | "dockerode": "^4.0.4", 33 | "dotenv": "^16.4.7", 34 | "express": "^4.21.2", 35 | "jsonwebtoken": "^9.0.2", 36 | "lucide-react": "^0.474.0", 37 | "pg": "^8.13.3", 38 | "prom-client": "^15.1.3", 39 | "react": "^18.3.1", 40 | "react-dom": "^18.3.1", 41 | "react-router-dom": "^7.1.5", 42 | "recharts": "^2.15.1" 43 | }, 44 | "devDependencies": { 45 | "@eslint/js": "^9.17.0", 46 | "@types/cors": "^2.8.17", 47 | "@types/express": "^5.0.0", 48 | "@types/jsonwebtoken": "^9.0.9", 49 | "@types/node": "^22.13.0", 50 | "@types/node-fetch": "^2.6.12", 51 | "@types/pg": "^8.11.11", 52 | "@types/react": "^18.3.18", 53 | "@types/react-dom": "^18.3.5", 54 | "@vitejs/plugin-react": "^4.3.4", 55 | "eslint": "^9.17.0", 56 | "eslint-plugin-react-hooks": "^5.0.0", 57 | "eslint-plugin-react-refresh": "^0.4.16", 58 | "globals": "^15.14.0", 59 | "nodemon": "^3.1.9", 60 | "ts-node": "^10.9.2", 61 | "tsx": "^4.19.2", 62 | "typescript": "~5.6.2", 63 | "typescript-eslint": "^8.18.2", 64 | "vite": "^6.0.5" 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /prometheus/prometheus.yml: -------------------------------------------------------------------------------- 1 | # Global config 2 | global: 3 | scrape_interval: 15s # How often Prometheus will scrape metrics 4 | evaluation_interval: 15s # How often Prometheus will evaluate rules 5 | 6 | # For each job below: 7 | 8 | # job_name: Label for this monitoring task 9 | # static_configs: List of things to watch 10 | # targets: Where to look for metrics 11 | 12 | # Uses Docker service names 13 | # Includes the port where metrics are exposed 14 | 15 | # What Prometheus needs to monitor 16 | scrape_configs: 17 | # Monitor Prometheus itself 18 | - job_name: 'prometheus' 19 | static_configs: 20 | - targets: ['localhost:9090'] 21 | 22 | # Monitor our Express backend to collect metrics about queries 23 | - job_name: 'express' 24 | metrics_path: '/api/metrics' 25 | static_configs: 26 | - targets: ['queryhawk-backend-1:4002'] 27 | # - targets: ['backend:4002'] # Uses Docker service name.Prometheus scrapes metrics from the backend on port 4002 (host port). this wasa backend:4001 but was not working for me (bryan) 28 | 29 | # Configure Prometheus to scrape metrics from PostgreSQL 30 | # The postgres_exporter translates database-specific metrics 31 | # into a format Prometheus can understand and visualize 32 | # We are scrape_config 2 times might need to remove this one. 
33 |
34 |   # - job_name: 'postgresql'
35 |   #   static_configs:
36 |   #     # - targets: ['test_user_db:9187']
37 |   #     - targets: ['queryhawk-postgres-exporter-1:9187']
38 |   - job_name: 'postgres_exporter'
39 |     file_sd_configs:
40 |       - files:
41 |           - /var/prometheus/postgres_targets/*.yml
42 |     relabel_configs:
43 |       - source_labels: [__address__]
44 |         regex: '(.*)'
45 |         target_label: 'instance'
46 |         replacement: '$1'
47 |
48 |   # have to change jaeger
49 |   # OpenTelemetry collector
50 |   - job_name: 'otel-collector'
51 |     static_configs:
52 |       - targets: ['otel-collector:8889'] # Port where collector exposes metrics
53 |
54 | # Global config notes:
55 | # Think of it like setting a timer:
56 |
57 | # scrape_interval:
58 |
59 | # Like a camera taking snapshots of your system.
60 | # Every 15 seconds, Prometheus:
61 |
62 | # Checks all your services
63 | # Collects their metrics
64 | # Stores the numbers
65 |
66 | # If set to 30s, you'd have less detailed data.
67 | # If set to 5s, you'd have more detailed data but use more storage.
68 |
69 | # evaluation_interval:
70 |
71 | # How often Prometheus checks its rules.
72 | # Rules might be like:
73 |
74 | # "Alert if query takes > 1 second"
75 | # "Alert if database is slow"
76 | # "Alert if errors are high"
77 |
78 | # Every 15 seconds it evaluates these rules.
79 |
80 | # To break down what metrics each job will probably collect (we configure the 'what to collect' in other files):
81 |
82 | # Prometheus Self-Monitoring Metrics:
83 |
84 | # prometheus_targets # How many targets are being monitored
85 | # prometheus_notifications_sent # Alert notifications
86 | # prometheus_scrape_duration # How long scrapes take
87 | # prometheus_storage_samples # How many data points stored
88 |
89 | # Express Backend Metrics (What you'll set up):
90 |
91 | # http_request_duration_seconds # How long requests take
92 | # http_requests_total # Number of requests
93 | # query_execution_time_seconds # How long SQL queries take
94 | # query_errors_total # Number of failed queries
95 | # active_connections # Current number of connections
96 | # memory_usage_bytes # Server memory use
97 |
98 | # PostgreSQL/Supabase Metrics:
99 |
100 | # pg_stat_database_tup_fetched # Rows fetched
101 | # pg_stat_database_tup_inserted # Rows inserted
102 | # pg_stat_database_tup_updated # Rows updated
103 | # pg_stat_database_tup_deleted # Rows deleted
104 | # pg_stat_database_conflicts # Query conflicts
105 | # pg_stat_activity_count # Active connections
106 | # pg_stat_bgwriter_buffers # Buffer usage
107 |
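The Express backend metrics listed in the comments above come from the backend instrumenting itself with prom-client (a declared dependency) and exposing the registry at the `/api/metrics` path that the 'express' job scrapes. A minimal sketch of such an endpoint; the metric name is taken from the comments above, but the project's actual wiring in server/ may differ:

```ts
import express from 'express';
import client from 'prom-client';

// Illustrative registry setup: default process metrics plus one custom histogram.
const register = new client.Registry();
client.collectDefaultMetrics({ register });

export const queryDuration = new client.Histogram({
  name: 'query_execution_time_seconds',
  help: 'How long user-submitted SQL queries take',
  registers: [register],
});

const app = express();

// Prometheus scrapes this endpoint every scrape_interval (15s above).
app.get('/api/metrics', async (_req, res) => {
  res.set('Content-Type', register.contentType);
  res.send(await register.metrics());
});

app.listen(4002);
```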
copies all files from ./server into /app container 18 | COPY server/ ./server/ 19 | COPY types/ ./types/ 20 | 21 | #express app runs on port 5000 in container 22 | # EXPOSE command is used to declare which port the container will listen to internally. 23 | EXPOSE 4002 24 | 25 | #execute! uses server script from json file 26 | CMD ["npm", "run", "server"] 27 | 28 | 29 | 30 | # Container port shold match the app port ideally. 31 | 32 | # RUN is like building a house (installing fixtures, painting walls) 33 | # CMD is like what you do when you move in (turning on lights) 34 | 35 | # Think of it as: 36 | 37 | # RUN: "Do this as part of setup" 38 | # CMD: "Do this to start the app" 39 | 40 | # Currently we are using tsx to auto transpile our TS to JS. 41 | # For final product we would need to change tsx to tsc for better perfomance. 42 | # This means our code will be located in a different location then server. 43 | # It will most likely be located in ./dist 44 | # We need to change our tsconfig.json outDir to "./dist" (When project is ready for production). 45 | # docker build -t my-server -f server/Dockerfile . -------------------------------------------------------------------------------- /server/app.ts: -------------------------------------------------------------------------------- 1 | import express, { ErrorRequestHandler, Request, Response } from 'express'; 2 | import cors from 'cors'; 3 | import 'dotenv/config'; 4 | import { ServerError } from '../types/types.ts'; 5 | import apiRoutes from './routes/apiRoutes.ts'; 6 | 7 | const app = express(); 8 | 9 | app.use( 10 | cors({ 11 | origin: 'http://localhost:5173', // Your frontend's URL 12 | methods: ['GET', 'POST', 'DELETE', 'PUT'], 13 | allowedHeaders: ['Content-Type', 'Authorization'], 14 | credentials: true, // Allow cookies and credentials 15 | }) 16 | ); 17 | app.use(express.json()); 18 | 19 | //debugging 20 | app.use((req, res, next) => { 21 | console.log(`[${new Date().toISOString()}] ${req.method} ${req.path}`); 22 | console.log('Headers:', req.headers); 23 | console.log('Body:', req.body); 24 | next(); 25 | }); 26 | 27 | app.use('/api', apiRoutes); 28 | 29 | app.use('*', (req: Request, res: Response) => { 30 | res.status(404).send('Endpoint does not exist.'); 31 | }); 32 | 33 | const errorHandler: ErrorRequestHandler = ( 34 | err: ServerError, 35 | _req, 36 | res, 37 | _next 38 | ) => { 39 | const defaultErr: ServerError = { 40 | log: 'Express error handler caught unknown middleware error', 41 | status: 500, 42 | message: { err: 'An error occurred' }, 43 | }; 44 | const errorObj: ServerError = { ...defaultErr, ...err }; 45 | console.log(errorObj.log); 46 | res.status(errorObj.status).json(errorObj.message); 47 | }; 48 | 49 | app.use(errorHandler); 50 | 51 | export default app; 52 | -------------------------------------------------------------------------------- /server/controllers/OAuthController.ts: -------------------------------------------------------------------------------- 1 | import { Request, Response } from 'express'; 2 | import fetch from 'node-fetch'; 3 | import jwt from 'jsonwebtoken'; 4 | import OAuthModel from '../models/OAuthModel'; 5 | import { 6 | GithubTokenResponse, 7 | GithubUser, 8 | AuthenticatedUser, 9 | } from '../types/auth'; 10 | 11 | const githubClientSecret = process.env.GITHUB_CLIENT_SECRET; 12 | const githubClientId = process.env.GITHUB_CLIENT_ID; 13 | const jwtSecret = process.env.JWT_SECRET; 14 | 15 | interface CustomError extends Error { 16 | statusCode?: number; 17 | } 18 | 19 | interface 
DecodedToken { 20 | userId: number; 21 | username: string; 22 | email: string; 23 | iat: number; 24 | exp: number; 25 | } 26 | 27 | class OAuthController { 28 | // Private method to handle errors consistently throughout the controller 29 | private createError(message: string, statusCode: number = 500): CustomError { 30 | const error: CustomError = new Error(message); 31 | error.statusCode = statusCode; 32 | return error; 33 | } 34 | 35 | // Private method to exchange the authorization code for an access token 36 | private async getAccessToken(code: string): Promise { 37 | const tokenResponse = await fetch( 38 | 'https://github.com/login/oauth/access_token', 39 | { 40 | method: 'POST', 41 | headers: { 42 | 'Content-Type': 'application/json', 43 | Accept: 'application/json', 44 | }, 45 | body: JSON.stringify({ 46 | client_id: githubClientId, 47 | client_secret: githubClientSecret, 48 | code: code, 49 | }), 50 | } 51 | ); 52 | 53 | const tokenData = (await tokenResponse.json()) as GithubTokenResponse; 54 | 55 | console.log('📟 GitHub token response:', { 56 | status: tokenResponse.status, 57 | error: tokenData.error, 58 | error_description: tokenData.error_description, 59 | }); 60 | 61 | if (tokenData.error) { 62 | throw this.createError( 63 | tokenData.error_description || 'Failed to exchange code for token', 64 | 401 65 | ); 66 | } 67 | 68 | if (!tokenData.access_token) { 69 | throw this.createError('No access token received from GitHub', 401); 70 | } 71 | 72 | return tokenData.access_token; 73 | } 74 | 75 | // Private method to fetch user data from GitHub using the access token 76 | private async getGithubUser(accessToken: string): Promise { 77 | const userResponse = await fetch('https://api.github.com/user', { 78 | headers: { 79 | Authorization: `Bearer ${accessToken}`, 80 | Accept: 'application/json', 81 | }, 82 | }); 83 | 84 | if (!userResponse.ok) { 85 | throw this.createError('Failed to fetch user data from GitHub', 401); 86 | } 87 | 88 | return userResponse.json() as Promise; 89 | } 90 | 91 | // Private method to generate a JWT token for our authenticated user 92 | private generateToken(user: AuthenticatedUser): string { 93 | return jwt.sign( 94 | { 95 | userId: user.id, 96 | username: user.username, 97 | email: user.email, 98 | }, 99 | process.env.JWT_SECRET || '', 100 | { expiresIn: '24h' } 101 | ); 102 | } 103 | 104 | // Public method to validate the JWT token 105 | public validateToken(token: string): DecodedToken { 106 | try { 107 | const decoded = jwt.verify(token, jwtSecret) as DecodedToken; 108 | return decoded; 109 | } catch (error) { 110 | throw this.createError('Invalid token', 401); 111 | } 112 | } 113 | 114 | // Public method to handle the OAuth callback 115 | public async handleCallback(req: Request, res: Response): Promise { 116 | try { 117 | const { code, provider } = req.body; 118 | console.log('🐸 Received request body:', req.body); 119 | 120 | // Validate input 121 | if (!code) { 122 | throw this.createError('Authorization code is required', 400); 123 | } 124 | 125 | if (provider !== 'github') { 126 | throw this.createError('Invalid provider', 400); 127 | } 128 | 129 | // Get the access token from GitHub 130 | const accessToken = await this.getAccessToken(code); 131 | 132 | // Get the user data from GitHub 133 | const githubUser = await this.getGithubUser(accessToken); 134 | 135 | // Persist or update the user in the database 136 | const user = await OAuthModel.findOrCreateUser(githubUser); 137 | 138 | // Create our authenticated user object 139 | const 
authenticatedUser: AuthenticatedUser = { 140 | id: user.id, 141 | username: user.username, 142 | email: user.email, 143 | name: user.first_name, 144 | avatarUrl: githubUser.avatar_url, 145 | }; 146 | 147 | // Generate a session token 148 | const token = this.generateToken(authenticatedUser); 149 | 150 | // Send the response 151 | res.json({ 152 | token, 153 | user: authenticatedUser, 154 | }); 155 | } catch (error) { 156 | console.error('Authentication error:', error); 157 | const statusCode = (error as CustomError).statusCode || 500; 158 | res.status(statusCode).json({ 159 | error: (error as Error).message || 'Authentication failed', 160 | }); 161 | } 162 | } 163 | // Added method to get the current user 164 | public async getCurrentUser(userId: number): Promise { 165 | try { 166 | const user = await OAuthModel.findUserById(userId); 167 | 168 | if (!user) { 169 | throw this.createError('User not found', 404); 170 | } 171 | 172 | return { 173 | id: user.id, 174 | username: user.username, 175 | email: user.email, 176 | name: user.first_name, 177 | // Since we don't have GitHub data here, we use a default avatar or null 178 | avatarUrl: null, 179 | }; 180 | } catch (error) { 181 | console.error('Error getting current user:', error); 182 | throw error; 183 | } 184 | } 185 | } 186 | 187 | // Export a singleton instance 188 | export default new OAuthController(); 189 | -------------------------------------------------------------------------------- /server/controllers/monitoringController.ts: -------------------------------------------------------------------------------- 1 | import { NextFunction, Request, Response } from 'express'; 2 | import pkg from 'pg'; 3 | const { Pool } = pkg; 4 | import promClient from 'prom-client'; 5 | 6 | const register = new promClient.Registry(); 7 | 8 | // Basic connection metrics 9 | const dbConnectionGauge = new promClient.Gauge({ 10 | name: 'database_connection_status', 11 | help: 'Current database connection status (1 for connected, 0 for disconnected)', 12 | labelNames: ['datname'], // Changed to match postgres_exporter 13 | }); 14 | 15 | const dbConnectionCounter = new promClient.Counter({ 16 | name: 'database_connection_attempts_total', 17 | help: 'Total number of database connection attempts', 18 | labelNames: ['status'], // This one is fine as is - it's not a postgres metric 19 | }); 20 | 21 | // Database performance metrics 22 | const dbTransactionRate = new promClient.Gauge({ 23 | name: 'pg_stat_database_xact_commit', 24 | help: 'Number of transactions per second', 25 | labelNames: ['datname'], // Changed to match postgres_exporter 26 | }); 27 | 28 | const dbCacheHitRatio = new promClient.Gauge({ 29 | name: 'pg_stat_database_blks_hit', 30 | help: 'Number of blocks hit in cache', 31 | labelNames: ['datname'], // Already correct 32 | }); 33 | 34 | const dbActiveConnections = new promClient.Gauge({ 35 | name: 'database_active_connections', 36 | help: 'Number of active database connections', 37 | labelNames: ['datname'], // Changed to match postgres_exporter 38 | }); 39 | 40 | const dbBlocksRead = new promClient.Gauge({ 41 | name: 'pg_stat_database_blks_read', 42 | help: 'Number of blocks read from disk', 43 | labelNames: ['datname'], 44 | }); 45 | 46 | const queryExecutionTimeHistogram = new promClient.Histogram({ 47 | name: 'query_execution_duration_seconds', 48 | help: 'Histogram of query execution times', 49 | labelNames: ['query_type'], 50 | buckets: [0.1, 0.5, 1, 2, 5, 10], // Adjust buckets as needed 51 | }); 52 | 53 | const queryErrorCounter = new 
promClient.Counter({ 54 | name: 'query_errors_total', 55 | help: 'Total number of query errors', 56 | labelNames: ['query_type', 'error_type'], 57 | }); 58 | 59 | // Register all metrics 60 | register.registerMetric(dbConnectionGauge); 61 | register.registerMetric(dbConnectionCounter); 62 | register.registerMetric(dbTransactionRate); 63 | register.registerMetric(dbCacheHitRatio); 64 | register.registerMetric(dbActiveConnections); 65 | register.registerMetric(dbBlocksRead); 66 | register.registerMetric(queryExecutionTimeHistogram); 67 | register.registerMetric(queryErrorCounter); 68 | 69 | let currentDatabaseUrl: string = ''; 70 | let pool: pkg.Pool | null = null; 71 | let metricsInterval: NodeJS.Timeout | null = null; 72 | 73 | async function collectMetrics(pool: pkg.Pool, databaseUrl: string) { 74 | try { 75 | const client = await pool.connect(); 76 | try { 77 | // Get current database name 78 | const dbNameResult = await client.query( 79 | 'SELECT current_database() as dbname' 80 | ); 81 | const dbName = dbNameResult.rows[0]?.dbname || 'postgres'; 82 | 83 | console.log('Currently collecting metrics from database:', dbName); 84 | console.log( 85 | 'Using connection URL:', 86 | databaseUrl.replace(/:[^:@]+@/, ':****@') 87 | ); 88 | 89 | // Get transaction rate 90 | const txnResult = await client.query(` 91 | SELECT xact_commit + xact_rollback AS total_transactions 92 | FROM pg_stat_database 93 | WHERE datname = current_database(); 94 | `); 95 | const transactionCount = parseFloat( 96 | txnResult.rows[0]?.total_transactions 97 | ); 98 | if (!isNaN(transactionCount)) { 99 | dbTransactionRate.set({ datname: dbName }, transactionCount); 100 | } 101 | 102 | // Get cache hit ratio 103 | const cacheResult = await client.query(` 104 | SELECT 105 | sum(heap_blks_hit) as blocks_hit, 106 | sum(heap_blks_read) as blocks_read 107 | FROM pg_statio_user_tables; 108 | `); 109 | const blocksHit = parseFloat(cacheResult.rows[0]?.blocks_hit); 110 | const blocksRead = parseFloat(cacheResult.rows[0]?.blocks_read); 111 | 112 | if (!isNaN(blocksHit)) { 113 | dbCacheHitRatio.set({ datname: dbName }, blocksHit); 114 | } 115 | if (!isNaN(blocksRead)) { 116 | dbBlocksRead.set({ datname: dbName }, blocksRead); 117 | } 118 | 119 | // Get active connections 120 | const connectionsResult = await client.query(` 121 | SELECT count(*)::integer as count 122 | FROM pg_stat_activity 123 | WHERE state = 'active'; 124 | `); 125 | const activeConnections = parseInt(connectionsResult.rows[0]?.count); 126 | if (!isNaN(activeConnections)) { 127 | dbActiveConnections.set({ datname: dbName }, activeConnections); 128 | } 129 | } finally { 130 | client.release(); 131 | } 132 | } catch (err) { 133 | console.error('Error collecting metrics:', err); 134 | dbConnectionGauge.set({ datname: 'postgres' }, 0); 135 | } 136 | } 137 | 138 | async function cleanup() { 139 | if (metricsInterval) { 140 | clearInterval(metricsInterval); 141 | metricsInterval = null; 142 | } 143 | if (pool) { 144 | await pool.end(); 145 | dbConnectionGauge.set({ datname: 'postgres' }, 0); 146 | } 147 | } 148 | 149 | function setupMetricsCollection(databaseUrl: string) { 150 | metricsInterval = setInterval(() => { 151 | if (pool) { 152 | collectMetrics(pool, databaseUrl).catch((err) => { 153 | console.error('Error in metrics collection interval:', err); 154 | }); 155 | } 156 | }, 15000); 157 | } 158 | 159 | function getErrorMessage(err: Error): string { 160 | if (err.message.includes('SASL')) { 161 | return 'Authentication failed. 
Please check your credentials.'; 162 | } 163 | if (err.message.includes('self-signed certificate')) { 164 | return 'SSL certificate validation failed. Try adding ?sslmode=require to your connection string.'; 165 | } 166 | if (err.message.includes('connect ECONNREFUSED')) { 167 | return 'Could not connect to database. Please check if the database is running and accessible.'; 168 | } 169 | if (err.message.includes('Connection timeout')) { 170 | return 'Connection timed out. Please check your database URL and network connection.'; 171 | } 172 | return err.message; 173 | } 174 | 175 | const monitoringController = { 176 | recordQueryMetrics: (metrics: { 177 | executionTime?: number; 178 | cacheHitRatio?: number; 179 | error?: string; 180 | }) => { 181 | try { 182 | // Record execution time in Prometheus histogram 183 | if (metrics.executionTime) { 184 | queryExecutionTimeHistogram.observe( 185 | { query_type: 'user_query' }, 186 | metrics.executionTime / 1000 // Convert milliseconds to seconds 187 | ); 188 | } 189 | 190 | // Record cache hit ratio 191 | if (metrics.cacheHitRatio !== undefined) { 192 | dbCacheHitRatio.set({ datname: 'postgres' }, metrics.cacheHitRatio); 193 | } 194 | 195 | // Track query errors 196 | if (metrics.error) { 197 | queryErrorCounter.inc({ 198 | query_type: 'user_query', 199 | error_type: metrics.error, 200 | }); 201 | } 202 | } catch (err) { 203 | console.error('Error recording query metrics:', err); 204 | } 205 | }, 206 | 207 | setupMonitoring: async ( 208 | req: Request, 209 | res: Response, 210 | next: NextFunction 211 | ): Promise => { 212 | const { databaseUrl } = req.body; 213 | console.log('API ENDPOINT HIT'); 214 | console.log('Setting up monitoring for:', databaseUrl); 215 | console.log( 216 | 'Request received with database URL:', 217 | databaseUrl.replace(/:[^:@]+@/, ':****@') 218 | ); 219 | console.log('Attempting to connect to database...'); 220 | 221 | if (!databaseUrl) { 222 | dbConnectionCounter.inc({ status: 'failed_missing_url' }); 223 | queryErrorCounter.inc({ 224 | query_type: 'connection', 225 | error_type: 'missing_url', 226 | }); 227 | res.status(400).json({ message: 'Database URI string is required.' }); 228 | return; 229 | } 230 | 231 | // Validate URL format 232 | try { 233 | new URL(databaseUrl); 234 | } catch (err) { 235 | dbConnectionCounter.inc({ status: 'failed_invalid_url' }); 236 | queryErrorCounter.inc({ 237 | query_type: 'connection', 238 | error_type: 'invalid_url', 239 | }); 240 | res.status(400).json({ message: 'Invalid database URL format.' 
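// (For reference, a URL shape that passes this check — taken from the
// integration test elsewhere in this repo:
// postgresql://testuser:testpass123@test_user_db:5432/testdb?sslmode=disable)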
}); 241 | return; 242 | } 243 | 244 | try { 245 | // Clean up existing resources 246 | await cleanup(); 247 | 248 | // Create connection configuration 249 | const config = { 250 | connectionString: databaseUrl, 251 | max: 20, 252 | idleTimeoutMillis: 30000, 253 | connectionTimeoutMillis: 10000, 254 | ssl: { 255 | rejectUnauthorized: false, 256 | sslmode: 'require', 257 | }, 258 | }; 259 | 260 | console.log('Connecting with config:', { 261 | ...config, 262 | connectionString: config.connectionString.replace(/:[^:@]+@/, ':****@'), 263 | }); 264 | 265 | // Create new connection pool 266 | pool = new Pool(config); 267 | 268 | // Test connection with timeout 269 | const connectionTest = await Promise.race([ 270 | pool.connect(), 271 | new Promise((_, reject) => 272 | setTimeout(() => reject(new Error('Connection timeout')), 10000) 273 | ), 274 | ]); 275 | 276 | const client = connectionTest as pkg.PoolClient; 277 | 278 | try { 279 | const result = await client.query('SELECT version()'); 280 | console.log('Database version:', result.rows[0].version); 281 | currentDatabaseUrl = databaseUrl; 282 | 283 | // Update metrics 284 | dbConnectionCounter.inc({ status: 'success' }); 285 | dbConnectionGauge.set({ datname: 'postgres' }, 1); 286 | 287 | // Setup metrics collection 288 | setupMetricsCollection(databaseUrl); 289 | 290 | // Collect metrics immediately 291 | await collectMetrics(pool, databaseUrl); 292 | 293 | res.status(200).json({ 294 | success: true, 295 | message: 'Database monitoring connection established successfully', 296 | url: databaseUrl.replace(/:[^:@]+@/, ':****@'), 297 | }); 298 | } finally { 299 | client.release(); 300 | } 301 | } catch (err) { 302 | console.error('Detailed connection error:', err); 303 | dbConnectionCounter.inc({ status: 'failed' }); 304 | if (databaseUrl) { 305 | dbConnectionGauge.set({ datname: 'postgres' }, 0); 306 | } 307 | 308 | let errorMessage = 'Failed to set up database monitoring.'; 309 | if (err instanceof Error) { 310 | errorMessage = getErrorMessage(err); 311 | // Record the specific error in Prometheus 312 | queryErrorCounter.inc({ 313 | query_type: 'connection', 314 | error_type: errorMessage, 315 | }); 316 | } 317 | 318 | return next({ 319 | log: 'Error in setupMonitoring middleware', 320 | status: 500, 321 | message: { err: errorMessage }, 322 | }); 323 | } 324 | }, 325 | 326 | // Modified getMetrics endpoint 327 | getMetrics: async (_req: Request, res: Response): Promise => { 328 | try { 329 | // Set correct content type for Prometheus 330 | res.set('Content-Type', 'text/plain; version=0.0.4'); 331 | 332 | const metrics = await register.metrics(); // Get metrics from the registry 333 | res.end(metrics); 334 | } catch (err) { 335 | console.error('Error collecting metrics:', err); 336 | res.status(500).send('Error collecting metrics'); 337 | } 338 | }, 339 | }; 340 | 341 | export { register }; 342 | export default monitoringController; 343 | -------------------------------------------------------------------------------- /server/controllers/userDatabaseController.ts: -------------------------------------------------------------------------------- 1 | import { NextFunction, Request, RequestHandler, Response } from 'express'; 2 | import pg from 'pg'; 3 | import monitoringController from './monitoringController'; 4 | 5 | // Creating a pool for our app database to save metrics. 
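// (This pool is created once at module load and shared by every handler in
// this controller; contrast fetchUserMetrics below, which opens a short-lived
// pool against the user's own database and closes it after each query.)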
6 | const appDbPool = new pg.Pool({ 7 | connectionString: process.env.DATABASE_URL, 8 | }); 9 | 10 | type userDatabaseController = { 11 | fetchUserMetrics: RequestHandler; 12 | saveMetricsToDB: RequestHandler; 13 | getSavedQueries: RequestHandler; 14 | }; 15 | 16 | const userDatabaseController: userDatabaseController = { 17 | fetchUserMetrics: async ( 18 | req: Request, 19 | res: Response, 20 | next: NextFunction 21 | ): Promise => { 22 | const { queryName, uri_string, query } = req.body; 23 | 24 | if (!queryName || !uri_string || !query) { 25 | console.log('Missing query name, uri string, or query.'); 26 | res 27 | .status(400) 28 | .json({ message: 'query name, uri string and query is required.' }); 29 | return; 30 | } 31 | const { Pool } = pg; 32 | try { 33 | console.log('Connecting to users database...'); 34 | const userDBPool = new Pool({ 35 | connectionString: uri_string, 36 | ssl: { 37 | rejectUnauthorized: false, // Required for Supabase connections 38 | }, 39 | }); 40 | 41 | // formatted 42 | const result = await userDBPool.query( 43 | 'EXPLAIN (ANALYZE true, COSTS true, SETTINGS true, BUFFERS true, WAL true, SUMMARY true, FORMAT JSON)' + 44 | `${query}` 45 | ); 46 | 47 | // once done with pool we close connection to save resources. 48 | await userDBPool.end(); 49 | 50 | // used to see full result of JSON 51 | // console.log( 52 | // 'EXPLAIN ANALYZE Result:', 53 | // JSON.stringify(result.rows, null, 2) 54 | // ); 55 | 56 | const queryPlan = result.rows[0]['QUERY PLAN'][0]; 57 | 58 | if (!queryPlan) { 59 | monitoringController.recordQueryMetrics({ 60 | error: 'No query plan retrieved', 61 | }); 62 | res.status(500).json({ message: 'Could not retrieve plan data' }); 63 | return; 64 | } 65 | 66 | // // Log the full result for inspection (debugging purposes) 67 | // console.log( 68 | // 'Settings Field:', 69 | // JSON.stringify(queryPlan['Settings'], null, 2) 70 | // ); 71 | 72 | // debugging 73 | // console.log('Query Plan:', JSON.stringify(queryPlan, null, 2)); 74 | 75 | const sharedHitBlocks = queryPlan['Planning']?.['Shared Hit Blocks'] || 0; 76 | const sharedReadBlocks = 77 | queryPlan['Planning']?.['Shared Read Blocks'] || 0; 78 | const cacheHitRatio = 79 | sharedHitBlocks + sharedReadBlocks > 0 80 | ? (sharedHitBlocks / (sharedHitBlocks + sharedReadBlocks)) * 100 81 | : 0; 82 | 83 | const metrics = { 84 | executionTime: queryPlan['Execution Time'], // This is the execution time in milliseconds 85 | planningTime: queryPlan['Planning Time'], // This is the planning time in milliseconds 86 | rowsReturned: queryPlan['Plan']?.['Actual Rows'], // Rows actually returned 87 | actualLoops: queryPlan['Plan']?.['Actual Loops'], // # of loops in the plan 88 | sharedHitBlocks: queryPlan['Planning']?.['Shared Hit Blocks'], 89 | sharedReadBlocks: queryPlan['Planning']?.['Shared Read Blocks'], 90 | workMem: queryPlan['Settings']?.['work_mem'], 91 | cacheHitRatio: cacheHitRatio, 92 | startupCost: queryPlan['Plan']?.['Startup Cost'], 93 | totalCost: queryPlan['Plan']?.['Total Cost'], 94 | }; 95 | 96 | //Record metrics with prometheus 97 | monitoringController.recordQueryMetrics({ 98 | executionTime: metrics.executionTime, 99 | cacheHitRatio: metrics.cacheHitRatio, 100 | }); 101 | 102 | // console.log('Query Metrics:', metrics); 103 | res.locals.queryMetrics = metrics; 104 | res.locals.queryName = queryName; 105 | res.locals.originalQuery = query; 106 | return next(); 107 | } catch (err) { 108 | monitoringController.recordQueryMetrics({ 109 | error: err instanceof Error ? 
err.message : 'Unknown query error', 110 | }); 111 | console.error('Error running query', err); 112 | return next({ 113 | log: 'Error in connectDB middleware', 114 | status: 500, 115 | message: { err: 'Failed to get query metrics from database.' }, 116 | }); 117 | } 118 | }, 119 | 120 | // This method will save the metrics into the user's metrics table 121 | saveMetricsToDB: async ( 122 | req: Request, 123 | res: Response, 124 | next: NextFunction 125 | ): Promise => { 126 | console.log('res.locals before check: ', res.locals); 127 | const { queryName, originalQuery, queryMetrics } = res.locals; // Get metrics from previous middleware 128 | const userId = res.locals.userId; 129 | 130 | if (!queryName || !queryMetrics || !userId || !originalQuery) { 131 | res.status(400).json({ 132 | message: 'Query name, Metrics, userId, or query text are missing.', 133 | }); 134 | return; 135 | } 136 | 137 | try { 138 | const queryResult = await appDbPool.query( 139 | 'INSERT INTO queries (query_name, query_text, user_id, created_at) VALUES ($1, $2, $3, NOW()) RETURNING id', 140 | [queryName, originalQuery, userId] 141 | ); 142 | 143 | const queryId = queryResult.rows[0].id; 144 | // Save the metrics into the database 145 | await appDbPool.query( 146 | `INSERT INTO metrics ( 147 | execution_time, 148 | planning_time, 149 | rows_returned, 150 | actual_loops, 151 | shared_hit_blocks, 152 | shared_read_blocks, 153 | work_mem, 154 | cache_hit_ratio, 155 | startup_cost, 156 | total_cost, 157 | query_id, 158 | created_at 159 | ) VALUES ( 160 | $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, NOW() 161 | )`, 162 | [ 163 | parseFloat(queryMetrics.executionTime) || 0, 164 | parseFloat(queryMetrics.planningTime) || 0, 165 | parseInt(queryMetrics.rowsReturned, 10) || 0, 166 | parseInt(queryMetrics.actualLoops, 10) || 0, 167 | parseInt(queryMetrics.sharedHitBlocks, 10) || 0, 168 | parseInt(queryMetrics.sharedReadBlocks, 10) || 0, 169 | parseInt(queryMetrics.workMem, 10) || 0, 170 | parseFloat(queryMetrics.cacheHitRatio) || 0, 171 | parseFloat(queryMetrics.startupCost) || 0, 172 | parseFloat(queryMetrics.totalCost) || 0, 173 | queryId, 174 | ] 175 | ); 176 | // returning queryMetrics to front end 177 | res.status(200).json(queryMetrics); 178 | } catch (err) { 179 | console.error('Error saving metrics', err); 180 | return next({ 181 | log: 'Error in saveMetricsToDB middleware', 182 | status: 500, 183 | message: { err: 'Failed to save metrics to the database.' 
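// (This { log, status, message } object is the ServerError shape the global
// errorHandler in app.ts expects: it logs `log` server-side and responds
// with `message` at `status`.)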
}, 184 | }); 185 | } 186 | }, 187 | // create getSavedQueries method 188 | getSavedQueries: async ( 189 | req: Request, 190 | res: Response, 191 | next: NextFunction 192 | ): Promise => { 193 | try { 194 | const userId = res.locals.userId; 195 | 196 | if (!userId) { 197 | res.status(400).json({ message: 'User ID is required' }); 198 | return; 199 | } 200 | 201 | // Query to get all saved queries with their metrics for this user 202 | const queryResult = await appDbPool.query( 203 | ` 204 | SELECT 205 | q.id, 206 | q.query_name AS "queryName", 207 | q.query_text AS "queryText", 208 | q.created_at AS "createdAt", 209 | m.execution_time AS "executionTime", 210 | m.planning_time AS "planningTime", 211 | m.rows_returned AS "rowsReturned", 212 | m.actual_loops AS "actualLoops", 213 | m.shared_hit_blocks AS "sharedHitBlocks", 214 | m.shared_read_blocks AS "sharedReadBlocks", 215 | m.work_mem AS "workMem", 216 | m.cache_hit_ratio AS "cacheHitRatio", 217 | m.startup_cost AS "startupCost", 218 | m.total_cost AS "totalCost" 219 | FROM queries q 220 | JOIN metrics m ON q.id = m.query_id 221 | WHERE q.user_id = $1 222 | ORDER BY q.created_at DESC 223 | `, 224 | [userId] 225 | ); 226 | 227 | // Transform results to match frontend expected format 228 | const savedQueries = queryResult.rows.map((row) => ({ 229 | id: row.id, 230 | queryName: row.queryName, 231 | queryText: row.queryText, 232 | createdAt: row.createdAt, 233 | metrics: { 234 | executionTime: parseFloat(row.executionTime), 235 | planningTime: parseFloat(row.planningTime), 236 | rowsReturned: parseInt(row.rowsReturned), 237 | actualLoops: parseInt(row.actualLoops), 238 | sharedHitBlocks: parseInt(row.sharedHitBlocks), 239 | sharedReadBlocks: parseInt(row.sharedReadBlocks), 240 | workMem: parseInt(row.workMem) || 0, 241 | cacheHitRatio: parseFloat(row.cacheHitRatio), 242 | startupCost: parseFloat(row.startupCost), 243 | totalCost: parseFloat(row.totalCost), 244 | }, 245 | })); 246 | 247 | res.status(200).json(savedQueries); 248 | } catch (err) { 249 | console.error('Error fetching saved queries', err); 250 | return next({ 251 | log: 'Error in getSavedQueries middleware', 252 | status: 500, 253 | message: { err: 'Failed to fetch saved queries.' 
}, 254 | }); 255 | } 256 | }, 257 | }; 258 | 259 | export default userDatabaseController; 260 | -------------------------------------------------------------------------------- /server/db/db.ts: -------------------------------------------------------------------------------- 1 | import 'dotenv/config'; 2 | import pg from 'pg'; 3 | 4 | const { Pool } = pg; 5 | 6 | if (!process.env.DATABASE_URL) { 7 | throw new Error('DATABASE_URL environment variable is not set'); 8 | } 9 | 10 | // Debug line 11 | console.log('Attempting database connection with:', process.env.DATABASE_URL); 12 | 13 | const pool = new Pool({ 14 | connectionString: process.env.DATABASE_URL, 15 | ssl: { 16 | rejectUnauthorized: false 17 | } 18 | }); 19 | 20 | export { pool }; 21 | -------------------------------------------------------------------------------- /server/middleware/authMiddleware.ts: -------------------------------------------------------------------------------- 1 | import { Request, Response, NextFunction } from 'express'; 2 | import OAuthController from '../controllers/OAuthController'; 3 | 4 | export const authenticateUser = ( 5 | req: Request, 6 | res: Response, 7 | next: NextFunction 8 | ): void => { 9 | try { 10 | const authHeader = req.headers.authorization; 11 | if (!authHeader) { 12 | res.status(401).json({ error: 'No authorization header' }); 13 | return; 14 | } 15 | 16 | const token = authHeader.split(' ')[1]; // Bearer 17 | if (!token) { 18 | res.status(401).json({ error: 'No token provided' }); 19 | return; 20 | } 21 | 22 | const decodedToken = OAuthController.validateToken(token); 23 | // const user = OAuthController.validateToken(token); 24 | // res.locals.user = user; // Store user info for route handlers 25 | res.locals.user = decodedToken; 26 | res.locals.userId = decodedToken.userId; 27 | next(); 28 | } catch (error) { 29 | console.error('Authentication error:', error); 30 | res.status(401).json({ error: 'Invalid token' }); 31 | return; 32 | } 33 | }; 34 | -------------------------------------------------------------------------------- /server/models/OAuthModel.ts: -------------------------------------------------------------------------------- 1 | import pkg from 'pg'; 2 | const { Pool } = pkg; 3 | import { GithubUser, DbUser } from '../types/auth.js'; 4 | 5 | // Initialize pool with Supabase configuration 6 | const pool = new Pool({ 7 | connectionString: process.env.DATABASE_URL, 8 | ssl: { 9 | rejectUnauthorized: false, // Required for Supabase connections 10 | }, 11 | }); 12 | 13 | // Add connection error handling 14 | pool.on('error', (err) => { 15 | console.error('Unexpected error on idle client', err); 16 | process.exit(-1); 17 | }); 18 | 19 | export class OAuthModel { 20 | async findOrCreateUser(githubUser: GithubUser): Promise { 21 | console.log( 22 | '🔍 Connection string:', 23 | process.env.DATABASE_URL.replace(/:[^:]*@/, ':****@') 24 | ); 25 | const client = await pool.connect(); 26 | 27 | try { 28 | // Debug database connection 29 | console.log('🔌 Connected to database, running diagnostics...'); 30 | 31 | // Get database name 32 | const dbNameResult = await client.query('SELECT current_database()'); 33 | console.log( 34 | '📊 Current database:', 35 | dbNameResult.rows[0].current_database 36 | ); 37 | 38 | // Get schema search path 39 | const schemaResult = await client.query('SHOW search_path'); 40 | console.log('🔍 Search path:', schemaResult.rows[0].search_path); 41 | 42 | // Check if table exists 43 | const tableCheckResult = await client.query(` 44 | SELECT EXISTS ( 45 | SELECT 
1 46 | FROM information_schema.tables 47 | WHERE table_schema = 'public' 48 | AND table_name = 'user_account' 49 | ); 50 | `); 51 | console.log( 52 | '📋 Does user_account table exist?', 53 | tableCheckResult.rows[0].exists 54 | ); 55 | 56 | // Get current user 57 | const userResult = await client.query('SELECT current_user'); 58 | console.log('👤 Connected as user:', userResult.rows[0].current_user); 59 | 60 | // List tables in public schema 61 | const tablesResult = await client.query(` 62 | SELECT table_name 63 | FROM information_schema.tables 64 | WHERE table_schema = 'public' 65 | `); 66 | console.log( 67 | '📑 Tables in public schema:', 68 | tablesResult.rows.map((r) => r.table_name) 69 | ); 70 | 71 | await client.query('BEGIN'); 72 | 73 | // First check if we have a user in the auth.users table with this GitHub email 74 | let authUserId = null; 75 | const sameEmailResult = await client.query( 76 | 'SELECT id FROM auth.users WHERE email = $1', 77 | [githubUser.email] 78 | ); 79 | 80 | if (sameEmailResult.rows.length > 0) { 81 | authUserId = sameEmailResult.rows[0].id; 82 | console.log('✅ Found existing auth user with same email:', authUserId); 83 | } else { 84 | // Create a new user in auth.users if none exists 85 | console.log( 86 | '🆕 Creating new auth user for GitHub user:', 87 | githubUser.login 88 | ); 89 | 90 | const insertAuthUserResult = await client.query( 91 | `INSERT INTO auth.users ( 92 | instance_id, 93 | id, 94 | aud, 95 | role, 96 | email, 97 | encrypted_password, 98 | email_confirmed_at, 99 | recovery_sent_at, 100 | last_sign_in_at, 101 | raw_app_meta_data, 102 | raw_user_meta_data, 103 | created_at, 104 | updated_at, 105 | confirmation_token, 106 | email_change, 107 | email_change_token_new, 108 | recovery_token 109 | ) 110 | VALUES ( 111 | '00000000-0000-0000-0000-000000000000', 112 | gen_random_uuid(), 113 | 'authenticated', 114 | 'authenticated', 115 | $1, 116 | '', 117 | NOW(), 118 | null, 119 | NOW(), 120 | '{"provider": "github", "providers": ["github"]}', 121 | $2, 122 | NOW(), 123 | NOW(), 124 | '', 125 | '', 126 | '', 127 | '' 128 | ) 129 | RETURNING id`, 130 | [ 131 | githubUser.email, 132 | JSON.stringify({ 133 | name: githubUser.name, 134 | avatar_url: githubUser.avatar_url, 135 | github_id: githubUser.id.toString(), 136 | }), 137 | ] 138 | ); 139 | 140 | authUserId = insertAuthUserResult.rows[0].id; 141 | console.log('✅ Created new auth user with ID:', authUserId); 142 | } 143 | 144 | // Check if we have a user in user_account 145 | console.log( 146 | '🔍 Attempting to find user with GitHub ID:', 147 | githubUser.id.toString() 148 | ); 149 | const findResult = await client.query( 150 | 'SELECT * FROM user_account WHERE github_id = $1 OR email = $2', 151 | [githubUser.id.toString(), githubUser.email] 152 | ); 153 | console.log('✅ Find result:', findResult.rows); 154 | 155 | if (findResult.rows[0]) { 156 | // Update existing user's information 157 | console.log('✅ Found existing user, updating...'); 158 | const updateResult = await client.query( 159 | `UPDATE user_account 160 | SET username = $1, 161 | first_name = $2, 162 | last_name = $3, 163 | github_id = $4, 164 | email = COALESCE($5, email), 165 | user_id = $6 166 | WHERE id = $7 167 | RETURNING id, username, email, first_name, last_name, github_id, user_id`, 168 | [ 169 | githubUser.login, 170 | githubUser.name?.split(' ')[0] || '', // First name 171 | githubUser.name?.split(' ').slice(1).join(' ') || '', // Last name 172 | githubUser.id.toString(), 173 | githubUser.email, 174 | authUserId, 175 | 
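// ($5 = githubUser.email just above is wrapped in COALESCE($5, email) in the
// UPDATE, so a GitHub account with a private (null) email does not overwrite
// the address already stored on the row.)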
findResult.rows[0].id, 176 | ] 177 | ); 178 | 179 | await client.query('COMMIT'); 180 | return updateResult.rows[0]; 181 | } 182 | 183 | // Create new user if not found 184 | const insertResult = await client.query( 185 | `INSERT INTO user_account ( 186 | username, 187 | email, 188 | password, 189 | first_name, 190 | last_name, 191 | github_id, 192 | created_at, 193 | user_id 194 | ) 195 | VALUES ($1, $2, $3, $4, $5, $6, CURRENT_TIMESTAMP, $7) 196 | RETURNING id, username, email, first_name, last_name, github_id, user_id`, 197 | [ 198 | githubUser.login, 199 | githubUser.email, 200 | 'github-oauth', 201 | githubUser.name?.split(' ')[0] || '', 202 | githubUser.name?.split(' ').slice(1).join(' ') || '', 203 | githubUser.id.toString(), 204 | authUserId, 205 | ] 206 | ); 207 | 208 | await client.query('COMMIT'); 209 | return insertResult.rows[0]; 210 | } catch (error) { 211 | await client.query('ROLLBACK'); 212 | console.error('Database error:', error); 213 | throw new Error('Failed to create or update user'); 214 | } finally { 215 | client.release(); 216 | } 217 | } 218 | 219 | async findUserById(id: number): Promise { 220 | try { 221 | const result = await pool.query( 222 | 'SELECT id, username, email, first_name, last_name, github_id, user_id FROM user_account WHERE id = $1', 223 | [id] 224 | ); 225 | return result.rows[0] || null; 226 | } catch (error) { 227 | console.error('Database error:', error); 228 | throw new Error('Failed to find user'); 229 | } 230 | } 231 | } 232 | 233 | export default new OAuthModel(); 234 | -------------------------------------------------------------------------------- /server/routes/apiRoutes.ts: -------------------------------------------------------------------------------- 1 | //handles both auth and protected routes 2 | import express, { Request, Response, NextFunction } from 'express'; 3 | import userDatabaseController from '../controllers/userDatabaseController'; 4 | import monitoringController from '../controllers/monitoringController'; 5 | import { authenticateUser } from '../middleware/authMiddleware'; 6 | import OAuthController from '../controllers/OAuthController'; 7 | import { 8 | setDatabaseUriToPostgresExporter, 9 | cleanupExporter, 10 | } from '../utils/dockerPostgresExporter'; 11 | const router = express.Router(); 12 | 13 | // ===== Auth Routes (public) ===== 14 | router.post('/auth/github/callback', (req: Request, res: Response): void => { 15 | OAuthController.handleCallback(req, res); 16 | }); 17 | 18 | // Get current user 19 | router.get( 20 | '/auth/me', 21 | authenticateUser, 22 | async (req: Request, res: Response, next: NextFunction): Promise => { 23 | try { 24 | const userId = res.locals.userId; 25 | const user = await OAuthController.getCurrentUser(userId); 26 | res.status(200).json({ user }); 27 | } catch (error) { 28 | next(error); 29 | } 30 | } 31 | ); 32 | 33 | // Logout endpoint 34 | router.post( 35 | '/auth/logout', 36 | authenticateUser, 37 | (req: Request, res: Response): void => { 38 | try { 39 | res.status(200).json({ message: 'Logged out successfully' }); 40 | } catch (error) { 41 | res.status(500).json({ error: 'Logout failed' }); 42 | } 43 | } 44 | ); 45 | 46 | // Add monitoring routes 47 | router.post('/connect', authenticateUser, monitoringController.setupMonitoring); 48 | 49 | // Add the metrics endpoint 50 | router.get('/metrics', monitoringController.getMetrics); 51 | 52 | // ===== Protected API Routes ===== 53 | router.post( 54 | '/query-metrics', 55 | authenticateUser, // Add authentication middleware 56 | 
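// (Chain order matters here: fetchUserMetrics runs EXPLAIN ANALYZE against
// the user's database and stashes the results on res.locals, then
// saveMetricsToDB persists them and sends the response.)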
userDatabaseController.fetchUserMetrics, 57 | userDatabaseController.saveMetricsToDB 58 | ); 59 | 60 | // Add the route to get saved queries 61 | router.get( 62 | '/saved-queries', 63 | authenticateUser, 64 | userDatabaseController.getSavedQueries 65 | ); 66 | 67 | //Docker exporter routes 68 | router.post( 69 | '/monitoring/start', 70 | authenticateUser, 71 | async (req: Request, res: Response, next: NextFunction): Promise => { 72 | try { 73 | const userId = res.locals.userId; 74 | const { uri_string, port } = req.body; 75 | 76 | if (!uri_string) { 77 | res.status(400).json({ 78 | success: false, 79 | message: 'Missing required field: uri_string is required', 80 | }); 81 | return; 82 | } 83 | 84 | const result = await setDatabaseUriToPostgresExporter({ 85 | userId: userId.toString(), 86 | uri_string, 87 | port, 88 | }); 89 | 90 | res.status(200).json(result); 91 | } catch (error) { 92 | console.error('Error starting monitoring:', error); 93 | next({ 94 | log: 'Error in setupExporter middleware', 95 | status: 500, 96 | message: { 97 | err: 98 | error instanceof Error 99 | ? error.message 100 | : 'Failed to start monitoring', 101 | }, 102 | }); 103 | } 104 | } 105 | ); 106 | 107 | router.post( 108 | '/monitoring/stop', 109 | authenticateUser, 110 | async (req: Request, res: Response, next: NextFunction): Promise => { 111 | try { 112 | const userId = res.locals.userId; 113 | await cleanupExporter(userId.toString()); 114 | 115 | res.status(200).json({ 116 | success: true, 117 | message: 'Monitoring stopped successfully', 118 | }); 119 | } catch (error) { 120 | console.error('Error stopping monitoring:', error); 121 | next({ 122 | log: 'Error in cleanupExporter middleware', 123 | status: 500, 124 | message: { 125 | err: 126 | error instanceof Error 127 | ? 
error.message 128 | : 'Failed to stop monitoring', 129 | }, 130 | }); 131 | } 132 | } 133 | ); 134 | 135 | export default router; 136 | -------------------------------------------------------------------------------- /server/server.ts: -------------------------------------------------------------------------------- 1 | import './tracing'; 2 | import app from './app.ts'; 3 | 4 | const PORT = process.env.PORT || 4002; 5 | 6 | app.listen(PORT, () => { 7 | console.log(`Server running on port ${PORT}`); 8 | }); 9 | -------------------------------------------------------------------------------- /server/tests/integrations/postgres-exporter.test.ts: -------------------------------------------------------------------------------- 1 | await setDatabaseUriToPostgresExporter({ 2 | userId: 'test-user-1', 3 | uri_string: 4 | 'postgresql://testuser:testpass123@test_user_db:5432/testdb?sslmode=disable', 5 | }); 6 | -------------------------------------------------------------------------------- /server/tracing.ts: -------------------------------------------------------------------------------- 1 | // Import core OpenTelemetry packages 2 | import { NodeSDK } from '@opentelemetry/sdk-node'; // Main SDK for Node.js applications 3 | import { getNodeAutoInstrumentations } from '@opentelemetry/auto-instrumentations-node'; // Automatic instrumentation for Node.js libraries 4 | import { OTLPTraceExporter } from '@opentelemetry/exporter-trace-otlp-http'; // Exports traces to your collector (Jaeger in our case) 5 | import { Resource } from '@opentelemetry/resources'; // Adds context/metadata to your traces 6 | import { 7 | ATTR_SERVICE_NAME, 8 | ATTR_SERVICE_VERSION, 9 | } from '@opentelemetry/semantic-conventions'; // Standard naming for resource attributes 10 | import { SimpleSpanProcessor } from '@opentelemetry/sdk-trace-base'; // Processes and exports spans as they are ended 11 | 12 | // Initialize the OpenTelemetry SDK 13 | const sdk = new NodeSDK({ 14 | // Resource: Identifies your application in the traces 15 | resource: new Resource({ 16 | // SERVICE_NAME: How your app will appear in Jaeger UI 17 | [ATTR_SERVICE_NAME]: 'sql-optimizer', 18 | // SERVICE_VERSION: Helps track which version generated the traces 19 | [ATTR_SERVICE_VERSION]: '1.0.0', 20 | // Custom attribute to distinguish development from production 21 | environment: 'development', 22 | }), 23 | 24 | // Trace Exporter: Configures where to send the traces 25 | // In this case, sending to big Docker file, specifically to otel-collector service which represents opentelemetry 26 | traceExporter: new OTLPTraceExporter({ 27 | url: 'http://otel-collector:4318/v1/traces', // Points to Docker service 28 | }), 29 | 30 | // Span Processor: Handles each span (trace segment) as it's completed 31 | // SimpleSpanProcessor: Exports spans immediately (good for development) 32 | // For production, consider BatchSpanProcessor instead 33 | spanProcessor: new SimpleSpanProcessor(new OTLPTraceExporter()), 34 | 35 | // Auto-instrumentations: Automatically traces common Node.js libraries 36 | instrumentations: [ 37 | getNodeAutoInstrumentations({ 38 | // Enable Express instrumentation to track: 39 | // - Route handling 40 | // - Middleware execution 41 | // - Response time 42 | '@opentelemetry/instrumentation-express': { 43 | enabled: true, 44 | }, 45 | // Enable HTTP instrumentation to track: 46 | // - Incoming requests 47 | // - Outgoing requests 48 | // - Response status 49 | '@opentelemetry/instrumentation-http': { 50 | enabled: true, 51 | }, 52 | }), 53 | ], 54 | 
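// (Heads-up: the spanProcessor above wraps a *second* OTLPTraceExporter
// created with no URL, which falls back to the exporter's default endpoint
// (http://localhost:4318/v1/traces) rather than the otel-collector service.
// A sketch of reusing the single configured exporter — an assumption, not
// the original wiring:
//   const exporter = new OTLPTraceExporter({ url: 'http://otel-collector:4318/v1/traces' });
//   const sdk = new NodeSDK({ traceExporter: exporter, spanProcessor: new SimpleSpanProcessor(exporter), ... });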
}); 55 | 56 | // Start the SDK and log status 57 | sdk.start(); 58 | console.log('Tracing initialized'); 59 | 60 | // shutdown handler 61 | // ensures all pending traces are exported before app exits 62 | process.on('SIGTERM', () => { 63 | sdk 64 | .shutdown() 65 | .then(() => console.log('Tracing terminated')) 66 | .catch((error) => console.log('Error terminating tracing', error)) 67 | .finally(() => process.exit(0)); 68 | }); 69 | export default sdk; 70 | 71 | //OpenTelemetry collects 3 types of monitoring data: 72 | //1. Traces - following requests through the system 73 | //2. Metrics - numbers about how your system is performing 74 | //3.Logs - detailed records of what happened 75 | //Example: 76 | // const sdk = new NodeSDK({ 77 | // // Who am I? - Identifies your application 78 | // resource: new Resource({...}), 79 | 80 | // // Where to send traces? - Points to Jaeger 81 | // traceExporter: new OTLPTraceExporter({...}), 82 | 83 | // // How to process traces? - Handles each piece of trace data 84 | // spanProcessor: new SimpleSpanProcessor(...), 85 | 86 | // // What to trace automatically? - Sets up automatic tracking 87 | // instrumentations: [...] 88 | // }); 89 | 90 | //the flow is: 91 | // Your App (tracing.ts) 92 | // → OpenTelemetry Collector (otel-config.yml) 93 | // → Jaeger (for traces) 94 | // → Prometheus (for metrics) 95 | -------------------------------------------------------------------------------- /server/types/auth.ts: -------------------------------------------------------------------------------- 1 | // server/types/auth.ts 2 | 3 | // export interface GithubTokenResponse { 4 | // access_token: string; 5 | // error?: string; 6 | // error_description?: string; 7 | // } 8 | 9 | // export interface GithubUser { 10 | // id: number; 11 | // login: string; 12 | // email: string; 13 | // name: string; 14 | // avatar_url: string; 15 | // } 16 | 17 | // export interface DbUser { 18 | // id: number; 19 | // username: string; 20 | // email: string; 21 | // first_name: string; 22 | // last_name: string; 23 | // github_id?: string; 24 | // user_id?: string; 25 | // } 26 | 27 | // export interface AuthenticatedUser { 28 | // id: number; 29 | // username: string; 30 | // email: string; 31 | // name: string; 32 | // avatarUrl: string | null; 33 | // } 34 | 35 | export interface GithubTokenResponse { 36 | access_token?: string; 37 | token_type?: string; 38 | scope?: string; 39 | error?: string; 40 | error_description?: string; 41 | } 42 | 43 | export interface GithubUser { 44 | id: number; 45 | login: string; 46 | name: string | null; 47 | email: string | null; 48 | avatar_url: string; 49 | [key: string]: any; // For any other properties GitHub might return 50 | } 51 | 52 | export interface AuthenticatedUser { 53 | id: number; 54 | username: string; 55 | email: string | null; 56 | name: string | null; 57 | avatarUrl: string | null; 58 | } 59 | 60 | export interface DbUser { 61 | id: number; 62 | username: string; 63 | email: string | null; 64 | first_name: string | null; 65 | last_name: string | null; 66 | github_id: string; 67 | user_id: string; 68 | [key: string]: any; // For any other fields from the database 69 | } 70 | -------------------------------------------------------------------------------- /server/utils/dockerPostgresExporter.ts: -------------------------------------------------------------------------------- 1 | import Docker from 'dockerode'; 2 | import fs from 'fs/promises'; 3 | import path from 'path'; 4 | import * as yaml from 'js-yaml'; 5 | 6 | const docker = 
new Docker(); 7 | const NETWORK_NAME = 'queryhawk_monitoring_network'; 8 | 9 | interface ExporterConfig { 10 | userId: string; 11 | uri_string: string; 12 | port?: number; 13 | } 14 | 15 | // Helper function to check if directory is writable 16 | async function isDirectoryWritable(dir: string): Promise { 17 | try { 18 | const testFile = path.join(dir, '.write-test'); 19 | await fs.writeFile(testFile, ''); 20 | await fs.unlink(testFile); 21 | return true; 22 | } catch { 23 | return false; 24 | } 25 | } 26 | export const setDatabaseUriToPostgresExporter = async ({ 27 | userId, 28 | uri_string, 29 | port, 30 | }: ExporterConfig) => { 31 | const containerName = `postgres-exporter-${userId}`; 32 | const hostPort = port || (await findAvailablePort(9187, 9999)); 33 | const targetDir = '/var/prometheus/postgres_targets'; 34 | 35 | // Verify directory access 36 | if (!(await isDirectoryWritable(targetDir))) { 37 | throw new Error( 38 | `Directory ${targetDir} is not writable by the backend service` 39 | ); 40 | } 41 | 42 | // checking if network eists before creating any containers 43 | try { 44 | await docker.getNetwork('queryhawk_monitoring_network').inspect(); 45 | } catch (err) { 46 | console.error('Network not found:', err); 47 | throw new Error( 48 | 'Required Docker network not found: queryhawk_monitoring_network' 49 | ); 50 | } 51 | 52 | try { 53 | // Check if container already exists 54 | const existingContainer = docker.getContainer(containerName); 55 | try { 56 | const containerInfo = await existingContainer.inspect(); 57 | if (containerInfo.State.Running) { 58 | console.log( 59 | `Container ${containerName} already running, stopping first` 60 | ); 61 | await existingContainer.stop(); 62 | } 63 | await existingContainer.remove(); 64 | } catch (e) { 65 | // Container doesn't exist, which is fine 66 | // Log the error when inspecting/removing the container 67 | console.error( 68 | `Error inspecting or removing container ${containerName}:`, 69 | e 70 | ); 71 | } 72 | 73 | // Create new container 74 | const container = await docker.createContainer({ 75 | Image: 'prometheuscommunity/postgres-exporter', 76 | name: containerName, 77 | Env: [`DATA_SOURCE_NAME=${uri_string}`], 78 | ExposedPorts: { 79 | '9187/tcp': {}, 80 | }, 81 | HostConfig: { 82 | PortBindings: { 83 | '9187/tcp': [{ HostPort: hostPort.toString() }], 84 | }, 85 | RestartPolicy: { 86 | Name: 'always', 87 | }, 88 | NetworkMode: 'queryhawk_monitoring_network', 89 | }, 90 | Labels: { 91 | 'user.id': userId, 92 | 'exporter.type': 'postgres', 93 | 'com.docker.compose.project': 'queryhawk', //for prometheus discover 94 | 'com.docker.compose.service': 'postgres_exporter', 95 | }, 96 | }); 97 | 98 | await container.start(); 99 | 100 | const yamlContent = `- targets: 101 | - "postgres-exporter-${userId}:9187" 102 | labels: 103 | user_id: "${userId}" 104 | instance: "postgres-exporter-${userId}" 105 | `; 106 | 107 | // Ensure target directory exists 108 | await fs.mkdir(targetDir, { recursive: true }); 109 | 110 | console.log('YAML Content (with visible whitespace):'); 111 | console.log( 112 | yamlContent 113 | .split('\n') 114 | .map((line) => `"${line}"`) 115 | .join('\n') 116 | ); 117 | // Write the YAML content to the file 118 | await fs.writeFile(path.join(targetDir, `${userId}.yml`), yamlContent); 119 | 120 | // console.log('Generated YAML:', yamlContent); 121 | const writtenContent = await fs.readFile( 122 | path.join(targetDir, `${userId}.yml`), 123 | 'utf8' 124 | ); 125 | console.log('Written Content (with visible whitespace):'); 
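// (The target file is read back and logged purely as a debugging aid, to
// confirm the exact bytes Prometheus's file_sd watcher will see.)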
126 | console.log( 127 | writtenContent 128 | .split('\n') 129 | .map((line) => `"${line}"`) 130 | .join('\n') 131 | ); 132 | 133 | // Trigger Prometheus configuration reload 134 | try { 135 | // 'http://queryhawk-prometheus:9090/-/reload' 136 | // const response = await fetch('http://prometheus:9090/-/reload', { 137 | const response = await fetch( 138 | 'http://queryhawk-prometheus:9090/-/reload', 139 | { 140 | method: 'POST', 141 | } 142 | ); 143 | if (!response.ok) { 144 | console.warn( 145 | 'Prometheus reload returned non-200 status:', 146 | response.status 147 | ); 148 | } 149 | console.log('Prometheus reload successful'); 150 | } catch (error) { 151 | console.warn('Failed to reload Prometheus config:', error); 152 | // Don't fail the operation if Prometheus reload fails 153 | } 154 | 155 | return { 156 | containerId: container.id, 157 | port: hostPort, 158 | name: containerName, 159 | }; 160 | } catch (err) { 161 | console.error('Error managing Postgres Exporter:', err); 162 | 163 | if (err?.json?.message?.includes('No such image')) { 164 | try { 165 | console.log('Pulling postgres-exporter image...'); 166 | await docker.pull('prometheuscommunity/postgres-exporter:latest'); 167 | // Retry container creation 168 | return setDatabaseUriToPostgresExporter({ userId, uri_string }); 169 | } catch (pullError) { 170 | throw new Error(`Failed to pull image: ${pullError.message}`); 171 | } 172 | } 173 | throw err; 174 | } 175 | }; 176 | 177 | // Helper function to find an available port 178 | async function findAvailablePort(start: number, end: number): Promise { 179 | const containers = await docker.listContainers(); 180 | const usedPorts = new Set( 181 | containers.flatMap((container) => 182 | Object.values(container.Ports) 183 | .filter((port) => port.PublicPort) 184 | .map((port) => port.PublicPort) 185 | ) 186 | ); 187 | 188 | for (let port = start; port <= end; port++) { 189 | if (!usedPorts.has(port)) { 190 | return port; 191 | } 192 | } 193 | throw new Error('No available ports found in range'); 194 | } 195 | 196 | // Cleanup function for when monitoring is stopped 197 | export const cleanupExporter = async (userId: string) => { 198 | const containerName = `postgres-exporter-${userId}`; 199 | try { 200 | // Stop and remove container 201 | const container = docker.getContainer(containerName); 202 | await container.stop(); 203 | await container.remove(); 204 | 205 | // Remove Prometheus target configuration 206 | await fs.unlink(`/var/prometheus/postgres_targets/${userId}.yml`); 207 | // await fs.unlink(`/etc/prometheus/postgres_targets/${userId}.yml`); 208 | 209 | // Trigger Prometheus reload 210 | await fetch('http://queryhawk-prometheus:9090/-/reload', { 211 | method: 'POST', 212 | }); 213 | } catch (error) { 214 | console.error(`Error cleaning up exporter for user ${userId}:`, error); 215 | throw error; 216 | } 217 | }; -------------------------------------------------------------------------------- /src/App.tsx: -------------------------------------------------------------------------------- 1 | import QueryMonitor from './components/QueryMonitor'; 2 | import TestQueryPage from './components/QueryPerformance/TestQueryPage'; 3 | import AuthPage from './components/QueryMonitor/AuthPage'; 4 | import AuthCallback from './components/QueryMonitor/AuthCallback'; 5 | import ProtectedRoute from './components/ProtectedRoute'; 6 | import { Route, Routes } from 'react-router-dom'; 7 | 8 | function App() { 9 | return ( 10 |
11 | 12 | {/* Auth routes */} 13 | } /> 14 | } /> 15 | {/* Protected routes */} 16 | 20 | 21 | 22 | } 23 | /> 24 | 28 | 29 | 30 | } 31 | /> 32 | 33 |
34 | ); 35 | } 36 | 37 | export default App; 38 | -------------------------------------------------------------------------------- /src/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:22 AS builder 2 | 3 | WORKDIR /app 4 | 5 | # Copy package files from project root 6 | COPY package*.json ./ 7 | COPY vite.config.ts ./ 8 | COPY tsconfig.json ./ 9 | 10 | RUN npm install 11 | 12 | # Copy source code into src directory 13 | COPY . . 14 | 15 | EXPOSE 5173 16 | 17 | CMD ["npm", "run", "dev", "--", "--host"] 18 | -------------------------------------------------------------------------------- /src/components/ProtectedRoute.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import { Navigate } from 'react-router-dom'; 3 | 4 | interface ProtectedRouteProps { 5 | children: React.ReactNode; // The children to render if the user is authenticated 6 | } 7 | 8 | const ProtectedRoute: React.FC = ({ children }) => { 9 | const token = localStorage.getItem('authToken'); 10 | 11 | if (!token) { 12 | return ; 13 | } 14 | 15 | return <>{children}; 16 | }; 17 | 18 | export default ProtectedRoute; -------------------------------------------------------------------------------- /src/components/QueryMonitor/AuthCallback.tsx: -------------------------------------------------------------------------------- 1 | import React, { useEffect, useState } from "react"; 2 | import { useNavigate, useLocation } from "react-router-dom"; 3 | import { 4 | Box, 5 | Container, 6 | Paper, 7 | Typography, 8 | Button, 9 | CircularProgress, 10 | CssBaseline, 11 | createTheme, 12 | ThemeProvider, 13 | } from "@mui/material"; 14 | import Alert from "@mui/material/Alert"; 15 | import AlertTitle from "@mui/material/AlertTitle"; 16 | 17 | // Create the dark theme matching the one from QueryMonitor 18 | const darkTheme = createTheme({ 19 | palette: { 20 | mode: "dark", 21 | primary: { 22 | main: "#a594fd", 23 | }, 24 | secondary: { 25 | main: "#ff4081", 26 | }, 27 | background: { 28 | // default: '#000000', 29 | // paper: '#181b1f', 30 | }, 31 | }, 32 | }); 33 | 34 | const AuthCallback: React.FC = () => { 35 | const [error, setError] = useState(""); 36 | const [isLoading, setIsLoading] = useState(true); 37 | const navigate = useNavigate(); 38 | const location = useLocation(); 39 | 40 | useEffect(() => { 41 | const handleCallback = async () => { 42 | try { 43 | const params = new URLSearchParams(location.search); 44 | const code = params.get("code"); 45 | console.log("📟 Received GitHub code:", code); 46 | console.log( 47 | "🕰️ Received GitHub code:", 48 | code, 49 | "at:", 50 | new Date().toISOString() 51 | ); 52 | 53 | if (!code) { 54 | throw new Error("No authorization code received from GitHub"); 55 | } 56 | 57 | // Make request to your backend 58 | const response = await fetch( 59 | "http://localhost:4002/api/auth/github/callback", 60 | { 61 | method: "POST", 62 | headers: { 63 | "Content-Type": "application/json", 64 | }, 65 | body: JSON.stringify({ code, provider: "github" }), 66 | credentials: "include", 67 | } 68 | ); 69 | 70 | if (!response.ok) { 71 | const errorData = await response.text(); 72 | console.error("Server error:", errorData); 73 | throw new Error(`Server error: ${response.status}`); 74 | } 75 | 76 | const data = await response.json(); 77 | console.log("Auth success, received token"); 78 | 79 | if (data.token) { 80 | localStorage.setItem("authToken", data.token); 81 | if (data.user) { 82 | 
--------------------------------------------------------------------------------
/src/components/QueryMonitor/AuthCallback.tsx:
--------------------------------------------------------------------------------
import React, { useEffect, useState } from "react";
import { useNavigate, useLocation } from "react-router-dom";
import {
  Box,
  Container,
  Paper,
  Typography,
  Button,
  CircularProgress,
  CssBaseline,
  createTheme,
  ThemeProvider,
} from "@mui/material";
import Alert from "@mui/material/Alert";
import AlertTitle from "@mui/material/AlertTitle";

// Create the dark theme matching the one from QueryMonitor
const darkTheme = createTheme({
  palette: {
    mode: "dark",
    primary: {
      main: "#a594fd",
    },
    secondary: {
      main: "#ff4081",
    },
    background: {
      // default: '#000000',
      // paper: '#181b1f',
    },
  },
});

const AuthCallback: React.FC = () => {
  const [error, setError] = useState("");
  const [isLoading, setIsLoading] = useState(true);
  const navigate = useNavigate();
  const location = useLocation();

  useEffect(() => {
    const handleCallback = async () => {
      try {
        const params = new URLSearchParams(location.search);
        const code = params.get("code");
        console.log(
          "🕰️ Received GitHub code:",
          code,
          "at:",
          new Date().toISOString()
        );

        if (!code) {
          throw new Error("No authorization code received from GitHub");
        }

        // Exchange the one-time GitHub code for a session token on the backend
        const response = await fetch(
          "http://localhost:4002/api/auth/github/callback",
          {
            method: "POST",
            headers: {
              "Content-Type": "application/json",
            },
            body: JSON.stringify({ code, provider: "github" }),
            credentials: "include",
          }
        );

        if (!response.ok) {
          const errorData = await response.text();
          console.error("Server error:", errorData);
          throw new Error(`Server error: ${response.status}`);
        }

        const data = await response.json();
        console.log("Auth success, received token");

        if (data.token) {
          localStorage.setItem("authToken", data.token);
          if (data.user) {
            localStorage.setItem("user", JSON.stringify(data.user));
          }
          navigate("/", { replace: true });
        } else {
          throw new Error("No authentication token received");
        }
      } catch (err) {
        console.error("Authentication error:", err);
        setError(err instanceof Error ? err.message : "Failed to authenticate");
        setIsLoading(false);
      }
    };

    handleCallback();
  }, [navigate, location]);

  if (error) {
    return (
      <ThemeProvider theme={darkTheme}>
        <CssBaseline />
        {/* Layout props below are reconstructed; the original markup was lost */}
        <Container maxWidth="sm">
          <Paper sx={{ p: 4, mt: 8 }}>
            <Alert severity="error">
              <AlertTitle>Authentication Failed</AlertTitle>
              {error}
            </Alert>
            <Box sx={{ mt: 2, display: "flex", justifyContent: "center" }}>
              <Button variant="contained" onClick={() => navigate("/auth")}>
                Back to Login
              </Button>
            </Box>
          </Paper>
        </Container>
      </ThemeProvider>
    );
  }

  return (
    <ThemeProvider theme={darkTheme}>
      <CssBaseline />
      {/* Layout props below are reconstructed; the original markup was lost */}
      <Container maxWidth="sm">
        <Paper sx={{ p: 4, mt: 8, textAlign: "center" }}>
          <CircularProgress />
          <Typography sx={{ mt: 2 }}>
            Authenticating with GitHub...
          </Typography>
        </Paper>
      </Container>
    </ThemeProvider>
  );
};

export default AuthCallback;
--------------------------------------------------------------------------------
/src/components/QueryMonitor/AuthPage.tsx:
--------------------------------------------------------------------------------
// src/components/QueryMonitor/AuthPage.tsx
import React from 'react';
import { useNavigate } from 'react-router-dom';
import { ThemeProvider, createTheme } from '@mui/material/styles';
import {
  Box,
  Button,
  Container,
  Paper,
  Typography,
} from '@mui/material';
import { GitHub as GitHubIcon } from '@mui/icons-material';
import logo from '../assets/logo_queryhawk.svg';

// Create dark theme
const darkTheme = createTheme({
  palette: {
    mode: 'dark',
    primary: {
      main: '#9d7fff',
    },
    secondary: {
      main: '#FFB4E1',
    },
    background: {
      default: '#0A0A0F',
      paper: '#16121F',
    },
  },
  typography: {
    fontFamily: '"Pacifico", sans-serif',
  },
  components: {
    MuiButton: {
      styleOverrides: {
        root: {
          borderRadius: 12,
          textTransform: 'none',
        },
      },
    },
    MuiPaper: {
      styleOverrides: {
        root: {
          borderRadius: 16,
        },
      },
    },
  },
});

const AuthPage: React.FC = () => {
  const navigate = useNavigate();

  const handleGitHubLogin = () => {
    const githubClientId = import.meta.env.VITE_GITHUB_CLIENT_ID;
    const redirectUri = encodeURIComponent(
      window.location.origin + '/auth/github/callback'
    );
    window.location.href = `https://github.com/login/oauth/authorize?client_id=${githubClientId}&redirect_uri=${redirectUri}&scope=user:email`;
  };

  return (
    <ThemeProvider theme={darkTheme}>
      {/* Layout props below are reconstructed; the original markup was lost */}
      <Box
        sx={{
          minHeight: '100vh',
          display: 'flex',
          alignItems: 'center',
          bgcolor: 'background.default',
        }}
      >
        <Container maxWidth='xs'>
          <Paper sx={{ p: 4 }}>
            {/* Logo and Title */}
            <Box sx={{ textAlign: 'center', mb: 3 }}>
              <Box
                component='img'
                src={logo}
                alt='QueryHawk logo'
                sx={{ width: 96, height: 96 }}
              />
              <Typography variant='h4' component='h1'>
                QueryHawk
              </Typography>
            </Box>

            {/* GitHub OAuth Button */}
            <Button
              fullWidth
              variant='contained'
              startIcon={<GitHubIcon />}
              onClick={handleGitHubLogin}
            >
              Sign in with GitHub
            </Button>
          </Paper>
        </Container>
      </Box>
    </ThemeProvider>
  );
};

export default AuthPage;
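
`handleGitHubLogin` sends users to GitHub's authorize endpoint with only `client_id`, `redirect_uri`, and `scope`. GitHub also accepts an opaque `state` value that it echoes back to the callback, which lets AuthCallback reject responses it never initiated. A hedged sketch of that variant; the `state` round-trip is an addition, not something the current callback implements:

```ts
const handleGitHubLoginWithState = () => {
  const githubClientId = import.meta.env.VITE_GITHUB_CLIENT_ID;
  const redirectUri = encodeURIComponent(
    window.location.origin + '/auth/github/callback'
  );
  // Random nonce, kept locally so the callback page can compare it against
  // the `state` query param GitHub sends back.
  const state = crypto.randomUUID();
  sessionStorage.setItem('oauth_state', state);
  window.location.href =
    'https://github.com/login/oauth/authorize' +
    `?client_id=${githubClientId}&redirect_uri=${redirectUri}` +
    `&scope=user:email&state=${state}`;
};

// In AuthCallback, before exchanging the code:
//   if (params.get('state') !== sessionStorage.getItem('oauth_state')) {
//     throw new Error('OAuth state mismatch');
//   }
```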
--------------------------------------------------------------------------------
/src/components/QueryMonitor/DatabaseHealthMetrics.tsx:
--------------------------------------------------------------------------------
import React, { useState, useEffect } from 'react';
import {
  Grid,
  Typography,
  Card,
  CardHeader,
  CardContent,
  Box,
  Skeleton,
} from '@mui/material';
import { Storage as DatabaseIcon } from '@mui/icons-material';

// A MetricItem describes one health metric to fetch from Prometheus.
// `unit` and `formatter` are optional; `formatter` ensures the raw value is
// formatted correctly before its unit is appended.
interface MetricItem {
  name: string;
  query: string; // a PromQL query string
  unit?: string;
  formatter?: (value: number) => string;
}

interface DatabaseHealthMetricsProps {
  prometheusUrl: string;
  refreshInterval?: number;
}

// Displays database health metrics fetched from a Prometheus server.
// React.FC<DatabaseHealthMetricsProps> declares a function component and the
// props it accepts (prometheusUrl and refreshInterval, both with defaults).
// Three pieces of state: `metrics` stores the fetched values keyed by metric
// name (the Record<string, string | number> annotation says keys are strings
// and values are strings or numbers), while `loading` and `error` track fetch
// status. `metricItems` defines the four metrics to fetch, each with a PromQL
// query, and `fetchMetrics` fetches all four in parallel via Promise.all.
const DatabaseHealthMetrics: React.FC<DatabaseHealthMetricsProps> = ({
  prometheusUrl = 'http://localhost:9090',
  refreshInterval = 30000,
}) => {
  const [metrics, setMetrics] = useState<Record<string, string | number>>({});
  const [loading, setLoading] = useState(true);
  const [error, setError] = useState<string | null>(null);

  // Define the metrics to fetch
  const metricItems: MetricItem[] = [
    {
      name: 'Active Connections',
      query: 'sum(pg_stat_activity_count{datname!~"template.*|postgres"})',
    },
    {
      name: 'Cache Hit Ratio',
      query:
        'sum(pg_stat_database_blks_hit) / (sum(pg_stat_database_blks_hit) + sum(pg_stat_database_blks_read)) * 100',
      unit: '%',
      formatter: (value) => value.toFixed(2),
    },
    {
      name: 'Number of Deadlocks',
      query: 'sum(increase(pg_stat_database_deadlocks[1h]))',
    },
    {
      name: 'Disk I/O Operations',
      query:
        'sum(rate(pg_stat_database_blks_read[5m]) + rate(pg_stat_database_blks_written[5m]))',
      unit: 'ops/s',
      formatter: (value) => value.toFixed(2),
    },
  ];

  const fetchMetrics = async () => {
    setLoading(true);
    setError(null);

    try {
      // Empty object for the results; TypeScript ensures each key is a string
      // and each value is a string or number.
      const results: Record<string, string | number> = {};

      // map() creates an array of promises (one per metric); Promise.all runs
      // the requests in parallel, and await waits until all of them complete.
      await Promise.all(
        metricItems.map(async (metric) => {
          const url = new URL(`${prometheusUrl}/api/v1/query`);
          url.searchParams.append('query', metric.query);

          const response = await fetch(url.toString());

          if (!response.ok) {
            throw new Error(`HTTP error! Status: ${response.status}`);
          }
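          // For reference, a successful instant-query response from the
          // Prometheus HTTP API (/api/v1/query) looks roughly like this
          // (values illustrative):
          //
          //   {
          //     "status": "success",
          //     "data": {
          //       "resultType": "vector",
          //       "result": [
          //         { "metric": { ... }, "value": [1700000000.123, "42"] }
          //       ]
          //     }
          //   }
          //
          // i.e. `value` is a [unixTimestamp, stringValue] pair, which is why
          // the code below reads result[0].value[1] and parseFloat()s it.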
          const data = await response.json();
          // Process and format the result
          if (data.status === 'success' && data.data.result.length > 0) {
            const value = parseFloat(data.data.result[0].value[1]);

            if (metric.formatter) {
              results[metric.name] = metric.formatter(value);
            } else {
              results[metric.name] = Math.round(value);
            }

            if (metric.unit) {
              results[metric.name] = `${results[metric.name]}${metric.unit}`;
            }
          } else {
            results[metric.name] = 'N/A';
          }
        })
      );
      // Update component state with the fetched metrics
      setMetrics(results);
    } catch (err) {
      console.error('Error fetching metrics:', err);
      setError('Failed to fetch metrics from Prometheus');
    } finally {
      setLoading(false);
    }
  };

  useEffect(() => {
    fetchMetrics();

    // Set up polling for refreshing metrics
    const intervalId = setInterval(fetchMetrics, refreshInterval);

    return () => clearInterval(intervalId);
  }, [prometheusUrl, refreshInterval]);

  // Renders a card: a header with a database icon and title, then one row per
  // metric ("name: value"), where the value is the fetched result, a loading
  // placeholder while fetching, or 'N/A' when Prometheus returned no data.
  // Note the ?? fallback, so a legitimate 0 value still renders as 0.
  return (
    <Card variant='outlined'>
      {/* Header and row layout reconstructed; the original tags were lost */}
      <CardHeader
        avatar={<DatabaseIcon color='primary' />}
        title={<Typography variant='h6'>Database Health</Typography>}
      />
      <CardContent>
        {metricItems.map((metric) => (
          <Box
            key={metric.name}
            sx={{ display: 'flex', justifyContent: 'space-between', mb: 1 }}
          >
            <Typography variant='subtitle2'>{metric.name}:</Typography>
            <Typography fontWeight='bold'>
              {loading ? 'Loading...' : metrics[metric.name] ?? 'N/A'}
            </Typography>
          </Box>
        ))}
        {error && (
          <Typography color='error' sx={{ mt: 2 }}>
            {error}
          </Typography>
        )}
      </CardContent>
    </Card>
  );
};

export default DatabaseHealthMetrics;
--------------------------------------------------------------------------------
/src/components/QueryMonitor/GrafanaDashboard.tsx:
--------------------------------------------------------------------------------
import React, { useState, useCallback } from 'react';
import {
  Box, Paper, CardContent, CircularProgress, Alert
} from '@mui/material';

// Props interface for the GrafanaDashboard component
interface GrafanaPanelProps {
  panelId: string; // ID of the Grafana panel we want to display
  title: string; // Title to show above the panel
}

/**
 * A React component that embeds a single Grafana panel in an iframe.
 * Builds a d-solo URL with anonymous auth, auto-refresh, dark theme, and
 * kiosk mode, and shows loading and error states around the embed.
 */
const GrafanaDashboard: React.FC<GrafanaPanelProps> = ({ panelId, title }) => {
  // State management
  const [isLoading, setIsLoading] = useState(true);
  const [error, setError] = useState<string | null>(null);

  // Construct the URL for the embedded Grafana panel
  const constructUrl = useCallback(() => {
    try {
      // Base URL from our Docker setup
      const baseUrl = new URL('http://localhost:3001');
      // Path to the specific dashboard (d-solo renders a single panel)
      baseUrl.pathname = `/d-solo/postgresql-overview/postgresql-overview`;
      // All the query parameters we need
      const params = {
        orgId: '1',
        from: 'now-6h',
        to: 'now',
        theme: 'dark',
        refresh: '5s',
        panelId,
        'auth.anonymous': 'true',
        kiosk: 'true',
        'var-database': 'postgres',
      };
      // Add all params to the URL
      Object.entries(params).forEach(([key, value]) => {
        baseUrl.searchParams.set(key, value);
      });
      return baseUrl.toString();
    } catch {
      setError('Invalid dashboard URL');
      return '';
    }
  }, [panelId]);

  // Component render (container tags reconstructed; originals were lost)
  return (
    <Paper>
      {/* Main content area */}
      <CardContent>
        {isLoading && (
          <Box sx={{ display: 'flex', justifyContent: 'center' }}>
            <CircularProgress />
          </Box>
        )}

        {/* Error message */}
        {error && (
          <Alert severity='error'>
            {error}
          </Alert>
        )}

        {/* Panel container */}
        <Box>
          {/* Grafana iframe */}