├── sql_docker
│   ├── gitignore
│   └── docker-compose.yml
├── sql_monitor
│   ├── stack
│   │   ├── .gitignore
│   │   ├── grafana
│   │   │   ├── dashboard-provider.yml
│   │   │   ├── alerts-and-notifiers.yml
│   │   │   ├── datasources.yml
│   │   │   └── grafana.ini
│   │   ├── README.md
│   │   ├── docker-compose.yml
│   │   └── docs
│   │       ├── general.md
│   │       ├── jobs_monitoring.md
│   │       ├── buffer_index_management.md
│   │       ├── query_perfomance.md
│   │       ├── other_metrics.md
│   │       ├── server_performance.md
│   │       └── database_space_usage.md
│   └── sql-server-metrics
│       ├── README.md
│       ├── .python-version
│       ├── compose.yaml
│       ├── pyproject.toml
│       └── main.py
├── helps
│   ├── comments.sql
│   ├── iterate_clean_tables.sql
│   ├── create_credential_proxy.sql
│   ├── change_language.sql
│   ├── check_cascade.sql
│   ├── spatial_data
│   │   ├── polygons_pt2.sql
│   │   ├── polygons_pt1.sql
│   │   └── route.sql
│   └── median_of_total_points.sql
├── query_store
│   ├── enable.sql
│   ├── last_queries_executed.sql
│   ├── execution_count.sql
│   ├── highest_wait_durations.sql
│   ├── force_plan.sql
│   ├── highest_avg_row_count.sql
│   ├── longest_avg_exec_time.sql
│   ├── queries_that_recently_regressed_performance.sql
│   ├── queries_w_multiple_plans.sql
│   └── historical_regression_performance.sql
├── sql_maintenance
│   ├── playbook.sql
│   └── use_cases
│       ├── dbcc_check_ola_hallengren.sql
│       ├── backup_ola_hallengren.sql
│       └── index_statistics_ola_hallengren.sql
├── sql_performance_tuning
│   ├── queries
│   │   ├── update_statistics.sql
│   │   ├── unused_indexes.sql
│   │   ├── missing_indexes.sql
│   │   └── wait_statistics.sql
│   ├── sqlserver_performance_tuning_workbook.xlsx
│   └── README.md
├── database_size
│   ├── sp_spaceused.sql
│   ├── total_used_storage.sql
│   ├── database_files_detail.sql
│   ├── database_storage_breakdown.sql
│   ├── largest_table.sql
│   ├── memory_settings.sql
│   └── database_storage_alert.sql
├── sql_profiler
│   ├── check_traces.sql
│   └── automatically_profiler.sql
├── server
│   ├── linked_server.sql
│   ├── query_with_other_server.sql
│   ├── server_info.sql
│   └── server_roles.sql
├── lock
│   ├── specific_session.sql
│   ├── sp_who2.sql
│   └── blocking_sessions_report.sql
├── index
│   ├── clustered_index.sql
│   ├── nonclustered_columnstore_index.sql
│   ├── clustered_columnstore_index.sql
│   ├── nonclustered_index.sql
│   ├── nonclustered_index_include.sql
│   ├── reorganize_index.sql
│   ├── rebuild_index.sql
│   ├── check_index_fragmentation.sql
│   ├── ola_hallengren.sql
│   └── statistics.sql
├── custom_alert_emails
│   ├── enable_database_mail.sql
│   └── dbo.usp_send_job_custom_email.sql
├── ssrs
│   ├── list_ssrs_objects.sql
│   ├── users_access_ssrs.sql
│   ├── subscriptions.sql
│   ├── check_ssrs_permissions.sql
│   ├── info_reports.sql
│   └── analyze.sql
├── sql_access
│   ├── grant_view_access.sql
│   ├── deny_view_access.sql
│   ├── drop_user.sql
│   ├── grant_statements_access.sql
│   ├── role_and_login.sql
│   ├── who_is_trying_access.sql
│   └── users_info.sql
├── backup_recovery
│   ├── restore
│   │   ├── restore_script.sql
│   │   └── restore_start_time.sql
│   ├── recovery-model
│   │   ├── check.sql
│   │   └── change_simple_to_full.sql
│   └── backup
│       ├── clean_backup_history.sql
│       ├── backup_history.sql
│       ├── search_backups_folder.sql
│       └── olla_hallengreen_backup.sql
├── sql_agent
│   ├── check_running_and_stop_jobs.sql
│   ├── purge_job_history.sql
│   ├── grant_access.sql
│   ├── sql_agent_sessions.sql
│   └── monitor_currently_agent_jobs.sql
├── ssis
│   ├── ssis_maintenance.sql
│   ├── create_proc_trigger_job.sql
│   └── create_jobs_schedule.sql
├── free_space
│   ├── shrink.sql
│   ├── database_cleanup.sql
│   └── partition_filegroup_allocation.sql
├── audit
│   ├── audit_users_table.sql
│   └── audit_toolkit.sql
├── triggers
│   └── trg_prevent_delete_where.sql
├── README.md
└── functions
    └── dbo.google_data.sql

/sql_docker/gitignore:
-------------------------------------------------------------------------------- 1 | mnt/* -------------------------------------------------------------------------------- /sql_monitor/stack/.gitignore: -------------------------------------------------------------------------------- 1 | .env -------------------------------------------------------------------------------- /sql_monitor/sql-server-metrics/README.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /sql_monitor/sql-server-metrics/.python-version: -------------------------------------------------------------------------------- 1 | 3.12 2 | -------------------------------------------------------------------------------- /helps/comments.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lorenzouriel/coding-sql/HEAD/helps/comments.sql -------------------------------------------------------------------------------- /query_store/enable.sql: -------------------------------------------------------------------------------- 1 | ALTER DATABASE [fin_pulse] 2 | SET QUERY_STORE = ON (OPERATION_MODE = READ_WRITE) -------------------------------------------------------------------------------- /sql_maintenance/playbook.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lorenzouriel/coding-sql/HEAD/sql_maintenance/playbook.sql -------------------------------------------------------------------------------- /sql_monitor/sql-server-metrics/compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | otel: 3 | image: grafana/otel-lgtm 4 | ports: 5 | - 3000:3000 6 | - 4317:4317 -------------------------------------------------------------------------------- /sql_performance_tuning/queries/update_statistics.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lorenzouriel/coding-sql/HEAD/sql_performance_tuning/queries/update_statistics.sql -------------------------------------------------------------------------------- /sql_performance_tuning/sqlserver_performance_tuning_workbook.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lorenzouriel/coding-sql/HEAD/sql_performance_tuning/sqlserver_performance_tuning_workbook.xlsx -------------------------------------------------------------------------------- /database_size/sp_spaceused.sql: -------------------------------------------------------------------------------- 1 | -- ============================================= 2 | -- Procedure: sp_spaceused 3 | -- Description: Returns database size summary and unallocated space 4 | -- ============================================= 5 | USE [DB] 6 | GO 7 | 8 | EXEC sp_spaceused; -------------------------------------------------------------------------------- /sql_profiler/check_traces.sql: -------------------------------------------------------------------------------- 1 | -- ======================================================== 2 | -- Check if any trace is currently running 3 | -- ======================================================== 4 | -- sys.traces lists all active traces on the server, including default traces. 
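-- The status column is 1 while a trace is running and 0 once it has stopped; is_default = 1 marks the built-in default trace.
-- Illustrative variation (a sketch, assuming you only care about the default trace):
-- SELECT id, path, status, is_default FROM sys.traces WHERE is_default = 1;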
5 | SELECT * 6 | FROM sys.traces; -------------------------------------------------------------------------------- /server/linked_server.sql: -------------------------------------------------------------------------------- 1 | EXEC sp_addlinkedserver 2 | @server = N'server.lorenzo.com.br,14330', 3 | @srvproduct = N'SQL Server'; 4 | 5 | EXEC sp_addlinkedsrvlogin 6 | @rmtsrvname = N'server.lorenzo.com.br,14330', 7 | @useself = 'false', 8 | @locallogin = NULL, 9 | @rmtuser = '', 10 | @rmtpassword= ''; -------------------------------------------------------------------------------- /lock/specific_session.sql: -------------------------------------------------------------------------------- 1 | -- ======================================================== 2 | -- Check the last executed command for a specific session 3 | -- ======================================================== 4 | -- Replace 225 with the session_id you want to inspect. 5 | -- DBCC INPUTBUFFER shows the last statement executed by a session. 6 | DBCC INPUTBUFFER(225); -------------------------------------------------------------------------------- /sql_monitor/stack/grafana/dashboard-provider.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | providers: 4 | - name: 'Blackbox Exporter' 5 | orgId: 1 6 | folder: 'Alerts' 7 | type: file 8 | disableDeletion: false 9 | updateIntervalSeconds: 10 10 | allowUiUpdates: true 11 | options: 12 | path: /etc/grafana/provisioning/dashboards/json 13 | foldersFromFilesStructure: true -------------------------------------------------------------------------------- /index/clustered_index.sql: -------------------------------------------------------------------------------- 1 | -- ============================================= 2 | -- CLUSTERED INDEX 3 | -- PURPOSE: Create clustered index on 'id' column in descending order 4 | -- NOTES: 5 | -- - Determines the physical order of data in the table 6 | -- - Only one clustered index per table 7 | -- ============================================= 8 | CREATE CLUSTERED INDEX cix_YourTable_Id 9 | ON YourTable(id DESC); -------------------------------------------------------------------------------- /sql_monitor/stack/grafana/alerts-and-notifiers.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | # NOTIFICATION CONTACTS 4 | contactPoints: 5 | - orgId: 1 6 | name: grafana-default-email 7 | receivers: 8 | - uid: begcbwa87905cf 9 | type: email 10 | settings: 11 | addresses: ${GRAFANA_NOTIFICATION_ADDRESSES} 12 | singleEmail: false 13 | disableResolveMessage: false -------------------------------------------------------------------------------- /sql_monitor/sql-server-metrics/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "sql-server-metrics" 3 | version = "0.1.0" 4 | description = "Add your description here" 5 | readme = "README.md" 6 | requires-python = ">=3.12" 7 | dependencies = [ 8 | "opentelemetry-exporter-otlp-proto-grpc>=1.31.0", 9 | "opentelemetry-exporter-prometheus>=0.52b0", 10 | "opentelemetry-sdk>=1.31.0", 11 | "pymssql>=2.3.2", 12 | ] 13 | -------------------------------------------------------------------------------- /index/nonclustered_columnstore_index.sql: -------------------------------------------------------------------------------- 1 | -- ============================================= 2 | -- NONCLUSTERED COLUMNSTORE INDEX 3 | -- PURPOSE: Create 
columnstore index on selected columns for DW/analytics 4 | -- NOTES: 5 | -- - Can include multiple columns 6 | -- - Optional DROP_EXISTING for replacement 7 | -- ============================================= 8 | CREATE NONCLUSTERED COLUMNSTORE INDEX ncix_YourTable_Name_Date_Columnstore 9 | ON YourTable(name, date); -------------------------------------------------------------------------------- /lock/sp_who2.sql: -------------------------------------------------------------------------------- 1 | ---------------------------------------------------------- 2 | -- Basic Active Sessions Overview 3 | ---------------------------------------------------------- 4 | -- Description: Uses the built-in stored procedure sp_who2 5 | -- to show all current sessions, their status, CPU usage, I/O, 6 | -- and if they are being blocked. 7 | -- Useful for a quick check of active connections. 8 | 9 | USE [master]; 10 | GO 11 | 12 | EXEC sp_who2; 13 | GO -------------------------------------------------------------------------------- /index/clustered_columnstore_index.sql: -------------------------------------------------------------------------------- 1 | -- ============================================= 2 | -- CLUSTERED COLUMNSTORE INDEX 3 | -- PURPOSE: Store data in column-oriented format for analytics 4 | -- NOTES: 5 | -- - Drop existing clustered index if present 6 | -- - Useful for aggregations on large datasets 7 | -- ============================================= 8 | CREATE CLUSTERED COLUMNSTORE INDEX cix_YourTable_Columnstore 9 | ON YourTable 10 | WITH (DROP_EXISTING = ON); -------------------------------------------------------------------------------- /server/query_with_other_server.sql: -------------------------------------------------------------------------------- 1 | -- Allow advanced options 2 | EXEC sp_configure 'show advanced options', 1; 3 | RECONFIGURE; 4 | 5 | -- Enable Ad Hoc Distributed Queries 6 | EXEC sp_configure 'Ad Hoc Distributed Queries', 1; 7 | RECONFIGURE 8 | 9 | SELECT 10 | * 11 | FROM [fact].[earth] AS local 12 | JOIN OPENDATASOURCE( 13 | 'SQLNCLI', 14 | 'Server=server.lorenzo,14330;Uid=;Pwd=;' 15 | ).[turritopsis].[fact].[earth] AS remote 16 | ON local.id = remote.id; -------------------------------------------------------------------------------- /database_size/total_used_storage.sql: -------------------------------------------------------------------------------- 1 | -- ============================================= 2 | -- Query: Total Storage Used 3 | -- Description: Calculates total allocated storage across all databases 4 | -- Data Source: sys.master_files 5 | -- Units: MB and GB 6 | -- ============================================= 7 | SELECT 8 | SUM(CAST(size AS BIGINT)) * 8 / 1024 AS total_size_mb, 9 | SUM(CAST(size AS BIGINT)) * 8 / 1024.0 / 1024.0 AS total_size_gb 10 | FROM sys.master_files; -------------------------------------------------------------------------------- /index/nonclustered_index.sql: -------------------------------------------------------------------------------- 1 | -- ============================================= 2 | -- NONCLUSTERED INDEX (single/multiple columns) 3 | -- PURPOSE: Create nonclustered index on 'name' and 'date' columns 4 | -- NOTES: 5 | -- - Nonclustered indexes point to rows without changing physical order 6 | -- - Can have multiple nonclustered indexes per table 7 | -- ============================================= 8 | CREATE NONCLUSTERED INDEX ncix_YourTable_Name_Date 9 | ON YourTable(name DESC, date DESC); 
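-- Illustrative usage (a sketch; YourTable, name and date are the same placeholders used above):
-- a filter on the leading key column with an ORDER BY that matches the key order can be
-- served by ncix_YourTable_Name_Date without an extra sort step.
-- SELECT name, date
-- FROM YourTable
-- WHERE name = 'some_value'
-- ORDER BY name DESC, date DESC;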
-------------------------------------------------------------------------------- /index/nonclustered_index_include.sql: -------------------------------------------------------------------------------- 1 | -- ============================================= 2 | -- NONCLUSTERED INDEX WITH INCLUDE 3 | -- PURPOSE: Optimize queries using 'name' and 'phone' 4 | -- NOTES: 5 | -- - INCLUDE columns are stored in index for direct query access 6 | -- - Avoids reading main table if query uses indexed and included columns 7 | -- ============================================= 8 | CREATE NONCLUSTERED INDEX ncix_YourTable_Name_Include 9 | ON YourTable(name) 10 | INCLUDE (phone); -------------------------------------------------------------------------------- /sql_monitor/stack/README.md: -------------------------------------------------------------------------------- 1 | ```bash 2 | # docker-compose.yml 3 | GF_SECURITY_ADMIN_USER= 4 | GF_SECURITY_ADMIN_PASSWORD= 5 | 6 | # grafana.ini 7 | GRAFANA_SMTP_HOST= 8 | GRAFANA_SMTP_USER= 9 | GRAFANA_SMTP_PASSWORD= 10 | GRAFANA_SMTP_FROM_ADDRESS= 11 | GRAFANA_SMTP_FROM_NAME= 12 | 13 | # alerts-and-notifications.yml 14 | GRAFANA_NOTIFICATION_ADDRESSES= 15 | 16 | # datasources.yml 17 | DB_HOST= 18 | DB_PORT= 19 | DB_NAME= 20 | DB_USER= 21 | DB_PASSWORD= 22 | PROMETHEUS_URL= 23 | DS_MSSQL= 24 | ``` -------------------------------------------------------------------------------- /index/reorganize_index.sql: -------------------------------------------------------------------------------- 1 | -- ============================================= 2 | -- REORGANIZE INDEX 3 | -- PURPOSE: Light-weight index maintenance 4 | -- NOTES: 5 | -- - Reorganizes index pages without full rebuild 6 | -- - Keeps index online, low resource impact 7 | -- ============================================= 8 | ALTER INDEX cix_YourTable_Id ON [YourTable] REORGANIZE; -- Clustered 9 | ALTER INDEX ncix_YourTable_Name_Date ON [YourTable] REORGANIZE; -- Nonclustered 10 | ALTER INDEX ALL ON [YourTable] REORGANIZE; -- All indexes -------------------------------------------------------------------------------- /index/rebuild_index.sql: -------------------------------------------------------------------------------- 1 | -- ============================================= 2 | -- REBUILD INDEX 3 | -- PURPOSE: Remove fragmentation completely and recreate index 4 | -- NOTES: 5 | -- - Use for high fragmentation or after bulk operations 6 | -- - Resource intensive, may lock table 7 | -- ============================================= 8 | ALTER INDEX cix_YourTable_Id ON [YourTable] REBUILD; -- Clustered 9 | ALTER INDEX ncix_YourTable_Name_Date ON [YourTable] REBUILD; -- Nonclustered 10 | ALTER INDEX ALL ON [YourTable] REBUILD; -- All indexes -------------------------------------------------------------------------------- /sql_monitor/stack/grafana/datasources.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | datasources: 4 | - name: prometheus 5 | type: prometheus 6 | uid: befn4bj036x34e # Specific UID you want to use 7 | access: proxy 8 | url: ${PROMETHEUS_URL} 9 | isDefault: false 10 | version: 1 11 | editable: false 12 | 13 | - name: ${DS_MSSQL} 14 | type: mssql 15 | uid: sqlserver_ds 16 | access: proxy 17 | url: ${DB_HOST} 18 | user: ${DB_USER} 19 | secureJsonData: 20 | password: ${DB_PASSWORD} 21 | database: ${DB_NAME} 22 | isDefault: false 23 | editable: true -------------------------------------------------------------------------------- 
/custom_alert_emails/enable_database_mail.sql: -------------------------------------------------------------------------------- 1 | -- ============================================= 2 | -- PURPOSE: Enable Database Mail to allow SQL Server 3 | -- to send emails programmatically 4 | -- ============================================= 5 | EXEC sp_configure 'show advanced options', 1; -- Allow changing advanced settings 6 | RECONFIGURE; 7 | 8 | EXEC sp_configure 'Database Mail XPs', 1; -- Enable Database Mail extended stored procedures 9 | RECONFIGURE; 10 | 11 | -- ============================================= 12 | -- CHECK YOUR DATABASE MAIL PROFILE 13 | SELECT 14 | * 15 | FROM msdb.dbo.sysmail_profile; -------------------------------------------------------------------------------- /ssrs/list_ssrs_objects.sql: -------------------------------------------------------------------------------- 1 | -- ============================================ 2 | -- List all reports under the '/Reports' folder 3 | -- ============================================ 4 | 5 | SELECT * 6 | FROM [dbo].[Catalog] 7 | WHERE [Path] LIKE '%/Reports%'; 8 | 9 | 10 | -- ============================================ 11 | -- List all data sources used by reports 12 | -- Only consider reports under '/Reports' 13 | -- ============================================ 14 | 15 | SELECT * 16 | FROM [dbo].[DataSource] 17 | WHERE [ItemID] IN ( 18 | SELECT [ItemID] 19 | FROM [dbo].[Catalog] 20 | WHERE [Path] LIKE '%/Reports%' 21 | ); -------------------------------------------------------------------------------- /query_store/last_queries_executed.sql: -------------------------------------------------------------------------------- 1 | -- Last queries executed on the database 2 | -- The last n queries executed on the database within the last hour 3 | 4 | SELECT TOP 10 qt.query_sql_text, 5 | q.query_id, 6 | qt.query_text_id, 7 | p.plan_id, 8 | rs.last_execution_time 9 | FROM sys.query_store_query_text AS qt 10 | INNER JOIN sys.query_store_query AS q 11 | ON qt.query_text_id = q.query_text_id 12 | INNER JOIN sys.query_store_plan AS p 13 | ON q.query_id = p.query_id 14 | INNER JOIN sys.query_store_runtime_stats AS rs 15 | ON p.plan_id = rs.plan_id 16 | WHERE rs.last_execution_time > DATEADD(HOUR, -1, GETUTCDATE()) 17 | ORDER BY rs.last_execution_time DESC; -------------------------------------------------------------------------------- /query_store/execution_count.sql: -------------------------------------------------------------------------------- 1 | -- Execution counts 2 | -- Number of executions for each query within the last hour 3 | 4 | SELECT q.query_id, 5 | qt.query_text_id, 6 | qt.query_sql_text, 7 | SUM(rs.count_executions) AS total_execution_count 8 | FROM sys.query_store_query_text AS qt 9 | INNER JOIN sys.query_store_query AS q 10 | ON qt.query_text_id = q.query_text_id 11 | INNER JOIN sys.query_store_plan AS p 12 | ON q.query_id = p.query_id 13 | INNER JOIN sys.query_store_runtime_stats AS rs 14 | ON p.plan_id = rs.plan_id 15 | WHERE rs.last_execution_time > DATEADD(HOUR, -1, GETUTCDATE()) 16 | GROUP BY q.query_id, 17 | qt.query_text_id, 18 | qt.query_sql_text 19 | ORDER BY total_execution_count DESC; -------------------------------------------------------------------------------- /ssrs/users_access_ssrs.sql: -------------------------------------------------------------------------------- 1 | /************************************************************************************************ 2 | -- Author: Lorenzo Uriel 3 | -- Created 
Date: 2025-08-20 4 | -- Purpose: Retrieve all user accounts from the ReportServer database. 5 | -- Description: This query selects all users from the dbo.Users table, including their 6 | -- unique ID, username, user type, authentication type, and the date when 7 | -- the user record was last modified. ************************************************************************************************/ 9 | SELECT 10 | Users.UserID, 11 | Users.UserName, 12 | Users.UserType, 13 | Users.AuthType, 14 | Users.ModifiedDate 15 | FROM dbo.Users; -------------------------------------------------------------------------------- /database_size/database_files_detail.sql: -------------------------------------------------------------------------------- 1 | -- ============================================= 2 | -- Query: Database Files Detail 3 | -- Description: Lists logical/physical file names, type, and size 4 | -- Data Source: sys.master_files, sys.databases 5 | -- ============================================= 6 | SELECT 7 | db.name AS database_name, 8 | mf.name AS file_logical_name, 9 | mf.type_desc AS file_type, 10 | mf.physical_name, 11 | CONVERT(DECIMAL(18,2), mf.size * 8 / 1024.0) AS size_mb, 12 | CONVERT(DECIMAL(18,2), mf.size * 8 / 1024.0 / 1024.0) AS size_gb 13 | FROM sys.master_files mf 14 | JOIN sys.databases db ON db.database_id = mf.database_id 15 | WHERE db.database_id > 4 -- Ignore system databases 16 | ORDER BY db.name, mf.type_desc; -------------------------------------------------------------------------------- /database_size/database_storage_breakdown.sql: -------------------------------------------------------------------------------- 1 | -- ============================================= 2 | -- Query: Database Storage Breakdown 3 | -- Description: Calculates row, log, total sizes per database 4 | -- Data Source: sys.master_files 5 | -- ============================================= 6 | SELECT 7 | DB_NAME() AS database_name, 8 | CAST(SUM(CASE WHEN type_desc = 'LOG' THEN size END) * 8. / 1024 AS DECIMAL(8,2)) AS log_size_mb, 9 | CAST(SUM(CASE WHEN type_desc = 'ROWS' THEN size END) * 8. / 1024 AS DECIMAL(8,2)) AS row_size_mb, 10 | CONVERT(DECIMAL(18,2), SUM(size) * 8 / 1024.0) AS total_size_mb, 11 | CONVERT(DECIMAL(18,2), SUM(size) * 8 / 1024.0 / 1024.0) AS total_size_gb 12 | FROM sys.master_files 13 | WHERE database_id = DB_ID() 14 | GROUP BY database_id; -------------------------------------------------------------------------------- /sql_access/grant_view_access.sql: -------------------------------------------------------------------------------- 1 | /******************************************************************************************** 2 | Script: Grant Database Access and Change Ownership 3 | Description: Grants a user access to a database and changes the database owner. 4 | ********************************************************************************************/ 5 | 6 | -- Grant the login permission to view databases 7 | USE [master]; 8 | GO 9 | GRANT VIEW ANY DATABASE TO [dev]; 10 | -- Explanation: Gives the login 'dev' the server-level permission to see database metadata, so 'dev_os' becomes visible. 11 | GO 12 | 13 | -- Change the database owner 14 | USE [master]; 15 | GO 16 | ALTER AUTHORIZATION ON DATABASE::[dev_os] TO [dev]; 17 | -- Explanation: Sets 'dev' as the owner of the database 'dev_os'.
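-- Optional check (illustrative): confirm the ownership change took effect
SELECT db.name, SUSER_SNAME(db.owner_sid) AS owner_name
FROM sys.databases AS db
WHERE db.name = 'dev_os';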
18 | GO 19 | -------------------------------------------------------------------------------- /query_store/highest_wait_durations.sql: -------------------------------------------------------------------------------- 1 | -- Highest wait durations 2 | -- This query returns the top 10 queries with the highest wait durations for the last hour: 3 | SELECT TOP 10 qt.query_text_id, 4 | q.query_id, 5 | p.plan_id, 6 | sum(total_query_wait_time_ms) AS sum_total_wait_ms 7 | FROM sys.query_store_wait_stats ws 8 | INNER JOIN sys.query_store_plan p 9 | ON ws.plan_id = p.plan_id 10 | INNER JOIN sys.query_store_query q 11 | ON p.query_id = q.query_id 12 | INNER JOIN sys.query_store_query_text qt 13 | ON q.query_text_id = qt.query_text_id 14 | INNER JOIN sys.query_store_runtime_stats AS rs 15 | ON p.plan_id = rs.plan_id 16 | WHERE rs.last_execution_time > DATEADD(HOUR, -1, GETUTCDATE()) 17 | GROUP BY qt.query_text_id, 18 | q.query_id, 19 | p.plan_id 20 | ORDER BY sum_total_wait_ms DESC; -------------------------------------------------------------------------------- /sql_access/deny_view_access.sql: -------------------------------------------------------------------------------- 1 | /******************************************************************************************** 2 | Script: Database Access Control and Ownership Change 3 | Description: Denies the ability to view all databases and changes the database owner. 4 | ********************************************************************************************/ 5 | 6 | -- Deny permission to view all databases 7 | USE [master]; 8 | GO 9 | DENY VIEW ANY DATABASE TO [lorenzo.dev]; 10 | -- Explanation: Prevents the user 'lorenzo.dev' from seeing databases they do not own. 11 | GO 12 | 13 | -- Change the owner of a specific database 14 | USE [master]; 15 | GO 16 | ALTER AUTHORIZATION ON DATABASE::[database] TO [lorenzo.dev]; 17 | -- Explanation: Sets 'lorenzo.dev' as the owner of the specified database. 18 | -- Replace [database] with the actual database name. 19 | GO -------------------------------------------------------------------------------- /query_store/force_plan.sql: -------------------------------------------------------------------------------- 1 | -- Force a plan for a query (apply forcing policy) 2 | -- SQL Server tries to force the plan in the optimizer. If plan forcing fails, an Extended Event is fired and the optimizer is instructed to optimize in the normal way. 3 | 4 | EXEC sp_query_store_force_plan @query_id = 48, @plan_id = 49; 5 | 6 | -- When you use sp_query_store_force_plan, you can only force plans recorded by Query Store as a plan for that query. 7 | -- In other words, the only plans available for a query are plans that were already used to execute that query while Query Store was active. 8 | 9 | -- Remove plan forcing for a query 10 | -- To rely again on the SQL Server query optimizer to calculate the optimal query plan, use sp_query_store_unforce_plan to unforce the plan that was selected for the query. 11 | 12 | EXEC sp_query_store_unforce_plan @query_id = 48, @plan_id = 49; -------------------------------------------------------------------------------- /backup_recovery/restore/restore_script.sql: -------------------------------------------------------------------------------- 1 | /******************************************************************************************** 2 | Script: Restore SQL Server Database from Full Backup 3 | Purpose: Restore the database from a full backup file. 
4 | ********************************************************************************************/ 5 | 6 | -- =Restore the database from the first full backup 7 | -- Replace the file path with the actual location of your backup. 8 | USE [master]; -- Always use master when performing restores 9 | GO 10 | 11 | RESTORE DATABASE [database_name] -- Replace with your database name 12 | FROM DISK = N'C:\Backup\FULL\database.bak' -- Path to the backup file 13 | WITH 14 | FILE = 1, -- Specifies which backup set to restore from (first in this case) 15 | NOUNLOAD, -- Keeps the media loaded after restore (default for disk backups) 16 | STATS = 5; -- Shows progress in percentage 17 | GO -------------------------------------------------------------------------------- /sql_monitor/stack/grafana/grafana.ini: -------------------------------------------------------------------------------- 1 | [smtp] 2 | enabled = true 3 | host = ${GRAFANA_SMTP_HOST} 4 | user = ${GRAFANA_SMTP_USER} 5 | password = ${GRAFANA_SMTP_PASSWORD} 6 | from_address = ${GRAFANA_SMTP_FROM_ADDRESS} 7 | from_name = ${GRAFANA_SMTP_FROM_NAME} 8 | 9 | startTLS_policy = Opportunistic 10 | skip_verify = false 11 | 12 | [server] 13 | root_url = https://monitor.lorenzo.com.br 14 | 15 | [auth.azuread] 16 | name = Azure AD 17 | enabled = true 18 | allow_sign_up = true 19 | client_id = ${AZURE_CLIENT_ID} 20 | client_secret = ${AZURE_CLIENT_SECRET} 21 | scopes = openid email profile 22 | auth_url = https://login.microsoftonline.com/${AZURE_TENANT_ID}/oauth2/v2.0/authorize 23 | token_url = https://login.microsoftonline.com/${AZURE_TENANT_ID}/oauth2/v2.0/token 24 | api_url = https://graph.microsoft.com/oidc/userinfo 25 | role_attribute_path = contains(roles[*], 'GrafanaAdmin') && 'Admin' || 'Viewer' 26 | allowed_domains = lorenzo.com.br 27 | 28 | [auth.generic_oauth] 29 | enabled = false -------------------------------------------------------------------------------- /sql_access/drop_user.sql: -------------------------------------------------------------------------------- 1 | -- Revoke ability to connect to the database 2 | REVOKE CONNECT TO [user.name]; 3 | 4 | -- Drop the user from the database 5 | DROP USER [user.name]; 6 | 7 | -- Then drop the login at the server level 8 | DROP LOGIN [user.name]; 9 | 10 | -- If you just want to disable the login (instead of deleting it): 11 | ALTER LOGIN [user.name] DISABLE; 12 | 13 | -- ############## 14 | -- ERROR 15 | -- Msg 15138, Level 16, State 1, Line 5 16 | -- The database principal owns a schema in the database, and cannot be dropped. 
17 | -- ############## 18 | -- Check which schema(s) the user owns 19 | SELECT 20 | s.name AS SchemaName, 21 | u.name AS OwnerName 22 | FROM sys.schemas s 23 | JOIN sys.database_principals u ON s.principal_id = u.principal_id 24 | WHERE u.name = 'user.name'; 25 | 26 | -- Change schema ownership to another user (commonly dbo) 27 | ALTER AUTHORIZATION ON SCHEMA::[db_datareader] TO dbo; 28 | 29 | -- Drop the user from the database 30 | DROP USER [user.name]; -------------------------------------------------------------------------------- /query_store/highest_avg_row_count.sql: -------------------------------------------------------------------------------- 1 | -- Highest average physical I/O reads 2 | -- The number of queries that had the biggest average physical I/O reads in last 24 hours, with corresponding average row count and execution count 3 | 4 | SELECT TOP 10 rs.avg_physical_io_reads, 5 | qt.query_sql_text, 6 | q.query_id, 7 | qt.query_text_id, 8 | p.plan_id, 9 | rs.runtime_stats_id, 10 | rsi.start_time, 11 | rsi.end_time, 12 | rs.avg_rowcount, 13 | rs.count_executions 14 | FROM sys.query_store_query_text AS qt 15 | INNER JOIN sys.query_store_query AS q 16 | ON qt.query_text_id = q.query_text_id 17 | INNER JOIN sys.query_store_plan AS p 18 | ON q.query_id = p.query_id 19 | INNER JOIN sys.query_store_runtime_stats AS rs 20 | ON p.plan_id = rs.plan_id 21 | INNER JOIN sys.query_store_runtime_stats_interval AS rsi 22 | ON rsi.runtime_stats_interval_id = rs.runtime_stats_interval_id 23 | WHERE rsi.start_time >= DATEADD(hour, -24, GETUTCDATE()) 24 | ORDER BY rs.avg_physical_io_reads DESC; -------------------------------------------------------------------------------- /query_store/longest_avg_exec_time.sql: -------------------------------------------------------------------------------- 1 | -- Longest average execution time 2 | -- The number of queries with the highest average duration within last hour 3 | 4 | SELECT TOP 10 ROUND(CONVERT(FLOAT, SUM(rs.avg_duration * rs.count_executions)) / 5 | NULLIF(SUM(rs.count_executions), 0), 2) avg_duration, 6 | SUM(rs.count_executions) AS total_execution_count, 7 | qt.query_sql_text, 8 | q.query_id, 9 | qt.query_text_id, 10 | p.plan_id, 11 | GETUTCDATE() AS CurrentUTCTime, 12 | MAX(rs.last_execution_time) AS last_execution_time 13 | FROM sys.query_store_query_text AS qt 14 | INNER JOIN sys.query_store_query AS q 15 | ON qt.query_text_id = q.query_text_id 16 | INNER JOIN sys.query_store_plan AS p 17 | ON q.query_id = p.query_id 18 | INNER JOIN sys.query_store_runtime_stats AS rs 19 | ON p.plan_id = rs.plan_id 20 | WHERE rs.last_execution_time > DATEADD(HOUR, -1, GETUTCDATE()) 21 | GROUP BY qt.query_sql_text, 22 | q.query_id, 23 | qt.query_text_id, 24 | p.plan_id 25 | ORDER BY avg_duration DESC; -------------------------------------------------------------------------------- /sql_access/grant_statements_access.sql: -------------------------------------------------------------------------------- 1 | /******************************************************************************************** 2 | Script: Create Database User and Grant Schema Permissions 3 | Description: Creates a database user if it does not exist and grants standard DML 4 | permissions (SELECT, INSERT, UPDATE, DELETE) on the dbo schema. 
5 | ********************************************************************************************/ 6 | 7 | -- Switch to the target database 8 | USE [dev_os]; 9 | GO 10 | 11 | -- Create the database user if it does not exist 12 | IF NOT EXISTS (SELECT * FROM sys.database_principals WHERE name = 'dev') 13 | BEGIN 14 | CREATE USER [dev] FOR LOGIN [dev]; 15 | -- Explanation: Creates a database-level user 'dev' linked to the server login 'dev'. 16 | END 17 | GO 18 | 19 | -- Grant standard DML permissions on the dbo schema 20 | GRANT SELECT, INSERT, UPDATE, DELETE ON SCHEMA::dbo TO [dev]; 21 | -- Explanation: Allows the user 'dev' to read and modify all objects in the dbo schema. 22 | GO 23 | -------------------------------------------------------------------------------- /database_size/largest_table.sql: -------------------------------------------------------------------------------- 1 | -- ============================================= 2 | -- Query: Largest Tables 3 | -- Description: Shows table size (total, used, data) and row count 4 | -- Data Source: sys.tables, sys.indexes, sys.partitions, sys.allocation_units 5 | -- ============================================= 6 | SELECT 7 | s.name AS schema_name, 8 | t.name AS table_name, 9 | p.rows AS row_count, 10 | CONVERT(DECIMAL(18,2), SUM(a.total_pages) * 8 / 1024.0) AS total_size_mb, 11 | CONVERT(DECIMAL(18,2), SUM(a.used_pages) * 8 / 1024.0) AS used_size_mb, 12 | CONVERT(DECIMAL(18,2), SUM(a.data_pages) * 8 / 1024.0) AS data_size_mb 13 | FROM sys.tables t 14 | JOIN sys.indexes i ON t.object_id = i.object_id 15 | JOIN sys.partitions p ON i.object_id = p.object_id AND i.index_id = p.index_id 16 | JOIN sys.allocation_units a ON p.partition_id = a.container_id 17 | JOIN sys.schemas s ON t.schema_id = s.schema_id 18 | WHERE t.is_ms_shipped = 0 -- Ignore system tables 19 | GROUP BY s.name, t.name, p.rows 20 | ORDER BY total_size_mb DESC; -------------------------------------------------------------------------------- /server/server_info.sql: -------------------------------------------------------------------------------- 1 | -- ============================================= 2 | -- PURPOSE: Retrieve key SQL Server instance properties 3 | -- ============================================= 4 | 5 | -- Step 1: Basic instance information 6 | SELECT 7 | SERVERPROPERTY('InstanceName') AS InstanceName, -- Name of the SQL Server instance 8 | SERVERPROPERTY('Edition') AS Edition, -- SQL Server edition (e.g., Standard, Enterprise) 9 | SERVERPROPERTY('ProductVersion') AS Version, -- Full product version number 10 | SERVERPROPERTY('InstallDate') AS InstallDate, -- Installation date of this instance 11 | SERVERPROPERTY('ServerName') AS ServerName, -- SQL Server network name 12 | SERVERPROPERTY('MachineName') AS MachineName, -- Physical machine name 13 | SERVERPROPERTY('SqlBinRoot') AS SqlBinRoot -- Path to SQL Server binaries 14 | 15 | -- Step 2: Default file locations for new databases 16 | SELECT 17 | SERVERPROPERTY('InstanceDefaultDataPath') AS DefaultDataPath, -- Default path for data files 18 | SERVERPROPERTY('InstanceDefaultLogPath') AS DefaultLogPath; -- Default path for log files 19 | -------------------------------------------------------------------------------- /sql_docker/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | mssql: 3 | image: mcr.microsoft.com/mssql/server:2022-latest 4 | container_name: mssql-prod 5 | restart: unless-stopped 6 | environment: 7 | ACCEPT_EULA: "Y" # Accept SQL 
Server license 8 | MSSQL_PID: "Standard" # SQL Server edition 9 | SA_PASSWORD: "${SA_PASSWORD}" # Password from environment variable 10 | MSSQL_AGENT_ENABLED: "true" # Enable SQL Server Agent 11 | ports: 12 | - "14330:1433" # Map host port 14330 to container 1433 13 | ulimits: 14 | nofile: 15 | soft: 65536 16 | hard: 65536 # Increase file descriptors for performance 17 | volumes: 18 | - ./mnt/mssql-data/data:/var/opt/mssql/data # Persistent database data 19 | - ./mnt/mssql-data/logs:/var/opt/mssql/log # Persistent logs 20 | - ./mnt/mssql-data/backups:/var/opt/mssql/backups # Persistent backups 21 | deploy: 22 | resources: 23 | limits: 24 | memory: 8G # Limit container memory 25 | cpus: '4.0' # Limit container CPU usage -------------------------------------------------------------------------------- /sql_performance_tuning/queries/unused_indexes.sql: -------------------------------------------------------------------------------- 1 | -- Unused Index Script 2 | -- Original Author: Pinal Dave 3 | SELECT TOP 25 4 | o.name AS ObjectName 5 | , i.name AS IndexName 6 | , i.index_id AS IndexID 7 | , dm_ius.user_seeks AS UserSeek 8 | , dm_ius.user_scans AS UserScans 9 | , dm_ius.user_lookups AS UserLookups 10 | , dm_ius.user_updates AS UserUpdates 11 | , p.TableRows 12 | , 'DROP INDEX ' + QUOTENAME(i.name) 13 | + ' ON ' + QUOTENAME(s.name) + '.' 14 | + QUOTENAME(OBJECT_NAME(dm_ius.OBJECT_ID)) AS 'drop statement' 15 | FROM sys.dm_db_index_usage_stats dm_ius 16 | INNER JOIN sys.indexes i ON i.index_id = dm_ius.index_id 17 | AND dm_ius.OBJECT_ID = i.OBJECT_ID 18 | INNER JOIN sys.objects o ON dm_ius.OBJECT_ID = o.OBJECT_ID 19 | INNER JOIN sys.schemas s ON o.schema_id = s.schema_id 20 | INNER JOIN (SELECT SUM(p.rows) TableRows, p.index_id, p.OBJECT_ID 21 | FROM sys.partitions p GROUP BY p.index_id, p.OBJECT_ID) p 22 | ON p.index_id = dm_ius.index_id AND dm_ius.OBJECT_ID = p.OBJECT_ID 23 | WHERE OBJECTPROPERTY(dm_ius.OBJECT_ID,'IsUserTable') = 1 24 | AND dm_ius.database_id = DB_ID() 25 | AND i.type_desc = 'nonclustered' 26 | AND i.is_primary_key = 0 27 | AND i.is_unique_constraint = 0 28 | ORDER BY (dm_ius.user_seeks + dm_ius.user_scans + dm_ius.user_lookups) ASC 29 | GO -------------------------------------------------------------------------------- /helps/iterate_clean_tables.sql: -------------------------------------------------------------------------------- 1 | -- ============================================= 2 | -- PURPOSE: Delete all data from all tables in the database 3 | -- while temporarily disabling constraints to avoid FK violations. 4 | -- 5 | -- NOTES: 6 | -- - Uses sp_msforeachtable to iterate over all tables 7 | -- - Disables constraints before deletion and re-enables afterward 8 | -- - QUOTED_IDENTIFIER is ON to allow quoted identifiers 9 | -- ============================================= 10 | 11 | -- Step 1: Ensure QUOTED_IDENTIFIER is ON 12 | -- This allows identifiers to be delimited by double quotes 13 | SET QUOTED_IDENTIFIER ON; 14 | 15 | -- Step 2: Disable all constraints on all tables 16 | -- NOCHECK prevents SQL Server from checking existing data during the operation 17 | EXEC sp_msforeachtable 'ALTER TABLE ? 
NOCHECK CONSTRAINT ALL'; 18 | 19 | -- Step 3: Delete all data from every table 20 | -- Each table is iterated and all rows are deleted 21 | EXEC sp_msforeachtable ' 22 | SET QUOTED_IDENTIFIER ON; -- Ensure QUOTED_IDENTIFIER is ON for each execution 23 | DELETE FROM ?; -- Delete all rows from the current table 24 | '; 25 | 26 | -- Step 4: Re-enable constraints on all tables 27 | -- WITH CHECK ensures that constraints are validated against the data 28 | EXEC sp_msforeachtable 'ALTER TABLE ? WITH CHECK CHECK CONSTRAINT ALL'; -------------------------------------------------------------------------------- /backup_recovery/recovery-model/check.sql: -------------------------------------------------------------------------------- 1 | /******************************************************************************************** 2 | Script: Check SQL Server Database Recovery Model 3 | Purpose: Verify the recovery model of one or all databases. 4 | Author: Lorenzo Uriel 5 | Date: 2025-08-20 6 | ********************************************************************************************/ 7 | 8 | -- Check the recovery model of a specific database 9 | SELECT 10 | name AS DatabaseName, -- Database name 11 | recovery_model_desc AS RecoveryModel -- Recovery model (FULL, SIMPLE, BULK_LOGGED) 12 | FROM sys.databases 13 | WHERE name = 'YourDatabaseName'; 14 | GO 15 | 16 | -- Check the recovery model of all databases on the server 17 | SELECT 18 | name AS DatabaseName, -- Database name 19 | recovery_model_desc AS RecoveryModel -- Recovery model (FULL, SIMPLE, BULK_LOGGED) 20 | FROM sys.databases; 21 | GO 22 | 23 | /******************************************************************************************** 24 | NOTES: 25 | - RecoveryModel can be: FULL, SIMPLE or BULK_LOGGED. 26 | - Use this query after changing a database to FULL to confirm the change. 27 | - Always verify before setting up transaction log backups. 
28 | ********************************************************************************************/ -------------------------------------------------------------------------------- /sql_agent/check_running_and_stop_jobs.sql: -------------------------------------------------------------------------------- 1 | /* 2 | List only currently running SQL Server Agent jobs 3 | and show their active session (SPID) + SQL text 4 | */ 5 | 6 | SELECT 7 | j.name AS job_name, 8 | a.run_requested_date AS start_time, 9 | a.job_id, 10 | r.session_id AS spid, 11 | r.status, 12 | r.command, 13 | t.text AS running_sql 14 | FROM msdb.dbo.sysjobs j 15 | JOIN msdb.dbo.sysjobactivity a 16 | ON j.job_id = a.job_id 17 | LEFT JOIN sys.dm_exec_requests r 18 | ON a.session_id = r.session_id 19 | OUTER APPLY sys.dm_exec_sql_text(r.sql_handle) t 20 | WHERE a.stop_execution_date IS NULL -- job not marked as stopped 21 | AND a.start_execution_date IS NOT NULL -- job has actually started 22 | AND r.session_id IS NOT NULL; -- ensure there is an active SPID 23 | 24 | -- Check Zombie Jobs 25 | SELECT 26 | j.name, 27 | a.run_requested_date, 28 | a.stop_execution_date, 29 | a.job_id 30 | FROM msdb.dbo.sysjobs j 31 | JOIN msdb.dbo.sysjobactivity a 32 | ON j.job_id = a.job_id 33 | WHERE a.stop_execution_date IS NULL 34 | AND a.start_execution_date IS NOT NULL; 35 | 36 | 37 | -- Stop Jobs 38 | USE msdb; 39 | GO 40 | EXEC sp_stop_job @job_name = N'YourJobName'; 41 | 42 | 43 | EXEC msdb.dbo.sp_stop_job @job_id = '7B9753CF-A4FE-4CC8-AF5C-229AD137E0D1'; -------------------------------------------------------------------------------- /sql_performance_tuning/queries/missing_indexes.sql: -------------------------------------------------------------------------------- 1 | -- Missing Index Script 2 | -- Original Author: Pinal Dave 3 | SELECT TOP 25 4 | dm_mid.database_id AS DatabaseID, 5 | dm_migs.avg_user_impact*(dm_migs.user_seeks+dm_migs.user_scans) Avg_Estimated_Impact, 6 | dm_migs.last_user_seek AS Last_User_Seek, 7 | OBJECT_NAME(dm_mid.OBJECT_ID,dm_mid.database_id) AS [TableName], 8 | 'CREATE INDEX [IX_' + OBJECT_NAME(dm_mid.OBJECT_ID,dm_mid.database_id) + '_' 9 | + REPLACE(REPLACE(REPLACE(ISNULL(dm_mid.equality_columns,''),', ','_'),'[',''),']','') 10 | + CASE 11 | WHEN dm_mid.equality_columns IS NOT NULL 12 | AND dm_mid.inequality_columns IS NOT NULL THEN '_' 13 | ELSE '' 14 | END 15 | + REPLACE(REPLACE(REPLACE(ISNULL(dm_mid.inequality_columns,''),', ','_'),'[',''),']','') 16 | + ']' 17 | + ' ON ' + dm_mid.statement 18 | + ' (' + ISNULL (dm_mid.equality_columns,'') 19 | + CASE WHEN dm_mid.equality_columns IS NOT NULL AND dm_mid.inequality_columns 20 | IS NOT NULL THEN ',' ELSE 21 | '' END 22 | + ISNULL (dm_mid.inequality_columns, '') 23 | + ')' 24 | + ISNULL (' INCLUDE (' + dm_mid.included_columns + ')', '') AS Create_Statement 25 | FROM sys.dm_db_missing_index_groups dm_mig 26 | INNER JOIN sys.dm_db_missing_index_group_stats dm_migs 27 | ON dm_migs.group_handle = dm_mig.index_group_handle 28 | INNER JOIN sys.dm_db_missing_index_details dm_mid 29 | ON dm_mig.index_handle = dm_mid.index_handle 30 | WHERE dm_mid.database_ID = DB_ID() 31 | ORDER BY Avg_Estimated_Impact DESC 32 | GO -------------------------------------------------------------------------------- /sql_access/role_and_login.sql: -------------------------------------------------------------------------------- 1 | /******************************************************************************************** 2 | Script: Create Login, User, Role, and Assign Permissions 3 | 
Description: Creates a SQL Server login, a database user, a role, grants schema-level 4 | permissions, and assigns the user to the role. 5 | ********************************************************************************************/ 6 | 7 | -- Create a SQL Server login 8 | USE master; 9 | GO 10 | CREATE LOGIN Test123 WITH PASSWORD = 'Test'; 11 | -- Explanation: Creates a server-level login named 'Test123' with the specified password. 12 | GO 13 | 14 | -- Create a database user for the login 15 | USE AdventureWorks2019; 16 | GO 17 | CREATE USER Test123 FOR LOGIN Test123; 18 | -- Explanation: Creates a database-level user linked to the server login. 19 | GO 20 | 21 | -- Create a database role 22 | CREATE ROLE RoleTest; 23 | -- Explanation: Creates a new database role called 'RoleTest' to manage permissions. 24 | GO 25 | 26 | -- Grant SELECT permission on a specific schema to the role 27 | GRANT SELECT ON SCHEMA::HumanResources TO RoleTest; 28 | -- Explanation: Allows any member of 'RoleTest' to select data from all objects in the HumanResources schema. 29 | GO 30 | 31 | -- Add the user to the role 32 | ALTER ROLE RoleTest ADD MEMBER Test123; 33 | -- Explanation: Assigns the user 'Test123' to the role 'RoleTest', giving them the granted permissions. 34 | GO -------------------------------------------------------------------------------- /ssrs/subscriptions.sql: -------------------------------------------------------------------------------- 1 | /************************************************************************************************ 2 | -- Author: Lorenzo Uriel 3 | -- Created Date: 2025-08-20 4 | -- Purpose: Retrieve detailed information about report subscriptions. 5 | -- Description: This query joins the Subscriptions table with Users and Catalog tables to 6 | -- provide comprehensive details about each subscription. It includes the 7 | -- subscription description, last status, event type, last run time, parameters, 8 | -- subscription owner, associated report name, the user who last modified the 9 | -- subscription, and the modification date. 
10 | ************************************************************************************************/ 11 | SELECT 12 | Subscriptions.Description, 13 | Subscriptions.LastStatus, 14 | Subscriptions.EventType, 15 | Subscriptions.LastRunTime, 16 | Subscriptions.Parameters, 17 | SUBSCRIPTION_OWNER.UserName AS SubscriptionOwner, 18 | Catalog.Name AS ReportName, 19 | MODIFIED_BY.UserName AS LastModifiedBy, 20 | Subscriptions.ModifiedDate 21 | FROM dbo.Subscriptions 22 | INNER JOIN dbo.Users SUBSCRIPTION_OWNER 23 | ON SUBSCRIPTION_OWNER.UserID = Subscriptions.OwnerID 24 | INNER JOIN dbo.Catalog 25 | ON Catalog.ItemID = Subscriptions.Report_OID 26 | INNER JOIN dbo.Users MODIFIED_BY 27 | ON MODIFIED_BY.UserID = Subscriptions.ModifiedByID; -------------------------------------------------------------------------------- /helps/create_credential_proxy.sql: -------------------------------------------------------------------------------- 1 | -- ============================================= 2 | -- SCRIPT: CREATE CREDENTIAL AND SQL AGENT PROXY 3 | -- PURPOSE: Create a Windows credential and a SQL Server Agent proxy 4 | -- to allow jobs or SSIS packages to run under a specific Windows account 5 | -- NOTES: 6 | -- - CREDENTIAL stores Windows authentication info (username + password) 7 | -- - PROXY allows SQL Server Agent jobs to execute with this credential 8 | -- ============================================= 9 | 10 | -- Step 1: Switch to the master database (credentials are created in master) 11 | USE master; 12 | GO 13 | 14 | -- Step 2: Create a new credential 15 | CREATE CREDENTIAL [ProxyCredentialAdmin] 16 | WITH IDENTITY = 'WIN-COMPUTER\User', -- Windows account to run jobs 17 | SECRET = 'PasswordProxy'; -- Password for the Windows account 18 | GO 19 | 20 | -- Step 3: Switch to msdb database (SQL Agent jobs and proxies are managed here) 21 | USE msdb; 22 | GO 23 | 24 | -- Step 4: Create a new SQL Server Agent proxy 25 | EXEC dbo.sp_add_proxy 26 | @proxy_name = 'AdminProxy', -- Name of the proxy 27 | @credential_name = 'ProxyCredentialAdmin', -- Associate the proxy with the credential created above 28 | @enabled = 1; -- Enable the proxy immediately 29 | GO 30 | 31 | -- After this, you can assign the proxy to job steps, SSIS packages, or other SQL Agent operations -------------------------------------------------------------------------------- /database_size/memory_settings.sql: -------------------------------------------------------------------------------- 1 | -- ============================================= 2 | -- PURPOSE: Check and configure SQL Server memory settings 3 | -- ============================================= 4 | 5 | -- Enable advanced options to allow memory configuration changes 6 | EXEC sp_configure 'show advanced options', 1; 7 | RECONFIGURE; 8 | 9 | -- Display current configuration values for min and max server memory 10 | EXEC sp_configure 'min server memory (MB)'; -- Minimum memory SQL Server can use 11 | EXEC sp_configure 'max server memory (MB)'; -- Maximum memory SQL Server can use 12 | 13 | -- Check actual system memory usage 14 | SELECT 15 | total_physical_memory_kb / 1024 AS total_physical_memory_MB, -- Total RAM available on the server 16 | available_physical_memory_kb / 1024 AS available_physical_memory_MB, -- Free RAM currently available 17 | total_page_file_kb / 1024 AS total_page_file_MB, -- Total page file size 18 | available_page_file_kb / 1024 AS available_page_file_MB, -- Free page file size 19 | system_memory_state_desc -- Overall memory state description 20 | FROM 
sys.dm_os_sys_memory; 21 | 22 | -- Change the SQL Server maximum memory limit to 4096 MB (4 GB) 23 | EXEC sp_configure 'max server memory (MB)', 4096; 24 | RECONFIGURE; 25 | 26 | -- Change the SQL Server minimum memory limit to 1024 MB (1 GB) 27 | EXEC sp_configure 'min server memory (MB)', 1024; 28 | RECONFIGURE; 29 | -------------------------------------------------------------------------------- /lock/blocking_sessions_report.sql: -------------------------------------------------------------------------------- 1 | ---------------------------------------------------------- 2 | -- Detailed Blocking Sessions Report 3 | ---------------------------------------------------------- 4 | -- Description: Queries system DMVs to identify sessions that are 5 | -- being blocked and the sessions that are blocking them. 6 | -- Provides more detail than sp_who2, including executing query text. 7 | 8 | SELECT 9 | r.session_id, -- ID of the session currently waiting 10 | r.blocking_session_id, -- ID of the session causing the block 11 | r.status, -- Status of the blocked session 12 | r.command, -- Command being executed 13 | r.wait_type, -- Type of wait (e.g., locks) 14 | r.wait_time, -- Time the session has been waiting in ms 15 | r.wait_resource, -- Resource being waited on 16 | t.text AS query_text, -- Text of the SQL query being executed 17 | s.host_name, -- Client machine name 18 | s.program_name, -- Application name 19 | s.login_name -- Login executing the session 20 | FROM sys.dm_exec_requests r 21 | JOIN sys.dm_exec_sessions s 22 | ON r.session_id = s.session_id 23 | CROSS APPLY sys.dm_exec_sql_text(r.sql_handle) t 24 | WHERE r.blocking_session_id <> 0 -- Only show sessions that are blocked 25 | ORDER BY r.wait_time DESC; -- Order by longest waiting sessions first 26 | -------------------------------------------------------------------------------- /sql_agent/purge_job_history.sql: -------------------------------------------------------------------------------- 1 | /************************************************************************************************ 2 | -- Author: Lorenzo Uriel 3 | -- Created Date: 2025-08-19 4 | -- Purpose: Purge SQL Server Agent job history older than 30 days. 5 | -- Description: This script: 6 | -- 1. Sets NOCOUNT ON to prevent extra result messages. 7 | -- 2. Calculates the cutoff date (30 days before today). 8 | -- 3. Executes the system stored procedure sp_purge_jobhistory 9 | -- to delete job history records older than the cutoff date. 
10 | ************************************************************************************************/ 11 | 12 | -- Prevent extra result messages from interfering with output 13 | SET NOCOUNT ON; 14 | 15 | -- =============================================== 16 | -- Step 1: Declare variable to store cutoff date 17 | -- =============================================== 18 | DECLARE @CleanupDate DATETIME; 19 | 20 | -- =============================================== 21 | -- Step 2: Calculate date 30 days ago from today 22 | -- =============================================== 23 | SET @CleanupDate = DATEADD(DAY, -30, GETDATE()); 24 | 25 | -- =============================================== 26 | -- Step 3: Execute system stored procedure to purge old job history 27 | -- =============================================== 28 | EXECUTE dbo.sp_purge_jobhistory 29 | @oldest_date = @CleanupDate; -------------------------------------------------------------------------------- /sql_monitor/stack/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | grafana: 3 | image: grafana/grafana:latest 4 | container_name: grafana 5 | ports: 6 | - "3000:3000" 7 | env_file: .env 8 | volumes: 9 | - grafana_data:/var/lib/grafana 10 | - ./grafana/grafana.ini:/etc/grafana/grafana.ini 11 | - ./grafana/alerts-and-notifiers.yml:/etc/grafana/provisioning/alerting/alerts-and-notifiers.yml 12 | - ./grafana/datasources.yml:/etc/grafana/provisioning/datasources/datasources.yml 13 | - ./grafana/dashboard-provider.yml:/etc/grafana/provisioning/dashboards/dashboard-provider.yml 14 | - ./grafana/dashboards:/etc/grafana/provisioning/dashboards/json 15 | environment: 16 | - GF_SECURITY_ADMIN_USER=${GF_SECURITY_ADMIN_USER} 17 | - GF_SECURITY_ADMIN_PASSWORD=${GF_SECURITY_ADMIN_PASSWORD} 18 | - GRAFANA_NOTIFICATION_ADDRESSES=${GRAFANA_NOTIFICATION_ADDRESSES} 19 | - GRAFANA_SMTP_HOST=${GRAFANA_SMTP_HOST} 20 | - GRAFANA_SMTP_USER=${GRAFANA_SMTP_USER} 21 | - GRAFANA_SMTP_PASSWORD=${GRAFANA_SMTP_PASSWORD} 22 | - GRAFANA_SMTP_FROM_ADDRESS=${GRAFANA_SMTP_FROM_ADDRESS} 23 | - GRAFANA_SMTP_FROM_NAME=${GRAFANA_SMTP_FROM_NAME} 24 | depends_on: 25 | - prometheus 26 | restart: always 27 | 28 | prometheus: 29 | image: prom/prometheus:latest 30 | container_name: prometheus 31 | volumes: 32 | - prometheus_data:/prometheus 33 | restart: always 34 | 35 | volumes: 36 | grafana_data: 37 | prometheus_data: -------------------------------------------------------------------------------- /backup_recovery/backup/clean_backup_history.sql: -------------------------------------------------------------------------------- 1 | /******************************************************************************************** 2 | Script: Cleanup Old Backup History 3 | Purpose: Remove old backup and restore history entries from MSDB to keep system tables manageable. 
4 | Author: Lorenzo Uriel 5 | Date: 2025-08-20 6 | ********************************************************************************************/ 7 | 8 | -- STEP 0: Prevent extra result sets from interfering with scripts 9 | SET NOCOUNT ON; 10 | GO 11 | 12 | -- STEP 1: Declare a variable to define the cutoff date 13 | DECLARE 14 | @CleanupDate DATETIME; 15 | 16 | -- STEP 2: Set cutoff date to 30 days ago from today 17 | SET @CleanupDate = DATEADD(DAY, -30, GETDATE()); 18 | 19 | -- STEP 3: Execute system stored procedure to delete old backup history 20 | -- Removes backup and restore history older than @CleanupDate 21 | EXECUTE dbo.sp_delete_backuphistory @oldest_date = @CleanupDate; 22 | GO 23 | 24 | /******************************************************************************************** 25 | NOTES: 26 | - sp_delete_backuphistory removes entries from MSDB tables: backupset, backupfile, backupmediafamily, restorehistory, etc. 27 | - Adjust the number of days (here 30) according to your retention policy. 28 | - Regular cleanup helps maintain performance of MSDB and prevents unnecessary growth. 29 | - This does NOT delete actual backup files on disk. 30 | ********************************************************************************************/ -------------------------------------------------------------------------------- /helps/change_language.sql: -------------------------------------------------------------------------------- 1 | ---------------------------------------------------------- 2 | -- 1. List All SQL Server Languages 3 | ---------------------------------------------------------- 4 | -- Returns all languages installed on the SQL Server instance 5 | SELECT * 6 | FROM sys.syslanguages; 7 | GO 8 | 9 | ---------------------------------------------------------- 10 | -- 2. Check Current Session Language 11 | ---------------------------------------------------------- 12 | -- Shows the language currently in use for this session 13 | SELECT @@LANGUAGE AS CurrentLanguage; 14 | GO 15 | 16 | ---------------------------------------------------------- 17 | -- 3. Check Default Language of SQL Server Logins 18 | ---------------------------------------------------------- 19 | -- Lists logins and their default language settings 20 | SELECT 21 | name, 22 | default_language_name 23 | FROM sys.server_principals 24 | WHERE type IN ('S', 'U'); -- 'S' = SQL login, 'U' = Windows login 25 | GO 26 | 27 | ---------------------------------------------------------- 28 | -- 4. Change Session Language 29 | ---------------------------------------------------------- 30 | -- Temporarily changes the language for the current session 31 | SET LANGUAGE 'us_english'; 32 | GO 33 | 34 | ---------------------------------------------------------- 35 | -- 5. 
Change Default Language for a SQL Server Login 36 | ---------------------------------------------------------- 37 | -- Permanently changes the default language for the specified login 38 | ALTER LOGIN [NT Service\MSSQLSERVER] 39 | WITH DEFAULT_LANGUAGE = us_english; 40 | GO 41 | ---------------------------------------------------------- -------------------------------------------------------------------------------- /ssis/ssis_maintenance.sql: -------------------------------------------------------------------------------- 1 | SELECT 2 | [name], 3 | [recovery_model_desc] 4 | FROM sys.databases 5 | WHERE name = 'SSISDB'; 6 | 7 | -- Clean execution logs 8 | EXEC [SSISDB].[internal].[cleanup_server_retention_window] 9 | 10 | -- TABLES THAT WILL BE CLEANED UP 11 | --[internal].[event_message_context_scaleout] 12 | --[internal].[event_messages_scaleout] 13 | --[internal].[executable_statistics] 14 | --[internal].[execution_component_phases] 15 | --[internal].[execution_data_statistics] 16 | --[internal].[execution_data_taps] 17 | --[internal].[execution_parameter_values] 18 | --[internal].[execution_parameter_values_noncatalog] 19 | --[internal].[execution_property_override_values] 20 | --[internal].[execution_property_override_values_noncatalog] 21 | --[internal].[executions] 22 | --[internal].[executions_noncatalog] 23 | --[internal].[extended_operation_info] 24 | --[internal].[operation_messages] 25 | --[internal].[operation_messages_scaleout] 26 | --[internal].[operation_permissions] 27 | --[internal].[operations] 28 | --[internal].[validations] 29 | 30 | -- Clean completed jobs 31 | EXEC [SSISDB].[internal].[cleanup_completed_jobs_exclusive] 32 | 33 | -- TABLES THAT WILL BE CLEANED UP 34 | --[internal].[job_worker_agents] 35 | --[internal].[jobs] 36 | --[internal].[tasks] 37 | 38 | 39 | -- Check Total Space 40 | USE SSISDB; 41 | GO 42 | EXEC sp_spaceused; 43 | GO 44 | 45 | -- Backup the FULL database 46 | BACKUP DATABASE SSISDB 47 | TO DISK = 'C:\Backup\backup_full.bak'; 48 | 49 | -- Backup the LOG 50 | BACKUP LOG SSISDB 51 | TO DISK = 'C:\Backup\backup_log.trn'; 52 | 53 | -- Reclaim physical disk space 54 | USE SSISDB; 55 | GO 56 | DBCC SHRINKDATABASE (SSISDB, 10); -------------------------------------------------------------------------------- /sql_monitor/stack/docs/general.md: -------------------------------------------------------------------------------- 1 | ### Database Name 2 | ```sql 3 | SELECT DB_NAME() AS DatabaseName; 4 | ``` 5 | 6 | * **Purpose:** Identify the **current database context** you are connected to. 7 | * **Metric:** 8 | * `DatabaseName` → The name of the database where the query is executed. 9 | * **Use case:** 10 | * Useful in scripts that are run across multiple databases or in automated reports to tag results by database. 11 | 12 | ### SQL Server Start Time 13 | ```sql 14 | SELECT 15 | sqlserver_start_time AS 'SQL SERVER START TIME' 16 | FROM sys.dm_os_sys_info; 17 | ``` 18 | 19 | * **Purpose:** Determine **how long the SQL Server instance has been running**. 20 | * **Metric:** 21 | * `SQL SERVER START TIME` → Date and time the SQL Server service started. 22 | * **Use case:** 23 | * Monitor **uptime** for maintenance windows, restarts, and troubleshooting. 24 | * Helps correlate performance issues with recent restarts or patches.
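If you need uptime as a number rather than a raw timestamp, a minimal variation on the query above works (the `Uptime*` aliases here are only illustrative):
```sql
SELECT
    sqlserver_start_time AS StartTime,
    DATEDIFF(HOUR, sqlserver_start_time, GETDATE()) AS UptimeHours,
    DATEDIFF(DAY, sqlserver_start_time, GETDATE()) AS UptimeDays
FROM sys.dm_os_sys_info;
```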
25 | 26 | ### Active User Sessions 27 | ```sql 28 | SELECT 29 | login_name AS LoginName, 30 | COUNT(session_id) AS SessionCount 31 | FROM sys.dm_exec_sessions 32 | WHERE is_user_process = 1 33 | GROUP BY login_name 34 | ORDER BY SessionCount DESC; 35 | ``` 36 | * **Purpose:** Identify the **number of active sessions per user login**. 37 | * **Metrics:** 38 | * `LoginName` → SQL Server login name of active user sessions. 39 | * `SessionCount` → Number of active sessions for each login. 40 | * **Use case:** 41 | * Detect **high activity users** or potential runaway processes. 42 | * Monitor **concurrent connections** and plan for resource usage. 43 | * Useful in performance dashboards or when investigating blocking/locking issues. -------------------------------------------------------------------------------- /index/check_index_fragmentation.sql: -------------------------------------------------------------------------------- 1 | -- ============================================= 2 | -- QUERY: CHECK INDEX FRAGMENTATION 3 | -- PURPOSE: Identify internal and external fragmentation of indexes in user tables 4 | -- NOTES: 5 | -- - Uses sys.dm_db_index_physical_stats to retrieve fragmentation and page count info 6 | -- - Filters out HEAP tables (tables without clustered indexes) 7 | -- - Excludes system objects (names starting with "_") 8 | -- - Orders results by fragmentation descending to highlight the most fragmented indexes 9 | -- ============================================= 10 | 11 | SELECT 12 | OBJECT_NAME(B.object_id) AS TableName, -- Table name associated with the index 13 | B.name AS IndexName, -- Index name 14 | A.index_type_desc AS IndexType, -- Type of index (CLUSTERED, NONCLUSTERED, etc.) 15 | A.avg_fragmentation_in_percent, -- Average fragmentation percentage 16 | A.page_count -- Number of pages used by the index 17 | FROM sys.dm_db_index_physical_stats( 18 | DB_ID(), -- Current database 19 | NULL, -- All tables 20 | NULL, -- All indexes 21 | NULL, -- All partitions 22 | 'LIMITED' -- Limited scan mode (faster, less detailed) 23 | ) A 24 | JOIN sys.indexes B ON B.object_id = A.object_id 25 | AND B.index_id = A.index_id -- Join to get index metadata 26 | WHERE OBJECT_NAME(B.object_id) NOT LIKE '[_]%' -- Exclude system tables 27 | AND A.index_type_desc != 'HEAP' -- Exclude tables without clustered indexes 28 | ORDER BY A.avg_fragmentation_in_percent DESC; -- Show most fragmented indexes first -------------------------------------------------------------------------------- /sql_agent/grant_access.sql: -------------------------------------------------------------------------------- 1 | /************************************************************************************************ 2 | -- Purpose: Grant SQL Server Agent roles to a specific user. 3 | -- Description: This script demonstrates how to add a user to different SQL Server Agent roles: 4 | -- 1. SQLAgentReaderRole - View jobs and their status (read-only). 5 | -- 2. SQLAgentOperatorRole - View, execute jobs, and read logs. 6 | -- 3. SQLAgentUserRole - Create and modify own jobs only. 7 | -- Note: Replace 'username' with the actual SQL Server login. 8 | ************************************************************************************************/ 9 | 10 | -- ============================================================ 11 | -- Grant SQL Server Agent Reader Role 12 | -- Purpose: Allows the user to view all SQL Server Agent jobs and their status. 
13 | -- ============================================================ 14 | USE [msdb]; 15 | GO 16 | EXEC sp_addrolemember N'SQLAgentReaderRole', N'username'; 17 | GO 18 | 19 | -- ============================================================ 20 | -- Grant SQL Server Agent Operator Role 21 | -- Purpose: Allows the user to view and execute jobs, and read job history/logs. 22 | -- ============================================================ 23 | USE [msdb]; 24 | GO 25 | EXEC sp_addrolemember N'SQLAgentOperatorRole', N'username'; 26 | GO 27 | 28 | -- ============================================================ 29 | -- Grant SQL Server Agent User Role 30 | -- Purpose: Allows the user to create and modify their own jobs but not others' jobs. 31 | -- ============================================================ 32 | USE [msdb]; 33 | GO 34 | EXEC sp_addrolemember N'SQLAgentUserRole', N'username'; 35 | GO -------------------------------------------------------------------------------- /sql_access/who_is_trying_access.sql: -------------------------------------------------------------------------------- 1 | -- ============================================= 2 | -- Script: Failed Login Summary 3 | -- Description: Reads SQL Server error logs to find failed login attempts, 4 | -- counts occurrences and shows first/last times. 5 | -- Author: Lorenzo Uriel 6 | -- Date: 2025-08-20 7 | -- ============================================= 8 | 9 | -- Temporary table to store error log info 10 | DECLARE @LogFiles TABLE ( 11 | LogNumber INT, 12 | StartDate DATETIME, 13 | SizeInBytes INT 14 | ); 15 | 16 | -- Temporary table to store failed login entries 17 | DECLARE @Data TABLE ( 18 | [LogDate] DATETIME, 19 | [ProcessInfo] NVARCHAR(12), 20 | [Text] NVARCHAR(3999) 21 | ); 22 | 23 | -- Populate log files table (use 1 for SQL Server error logs) 24 | INSERT INTO @LogFiles 25 | EXEC sys.xp_enumerrorlogs 1; 26 | 27 | -- Variables for looping through logs 28 | DECLARE 29 | @Counter INT = 0, 30 | @Total INT = (SELECT COUNT(*) FROM @LogFiles); 31 | 32 | -- Loop through all error logs to get 'login failed' entries 33 | WHILE (@Counter < @Total) 34 | BEGIN 35 | INSERT INTO @Data 36 | EXEC sys.sp_readerrorlog @Counter, 1, 'login failed'; 37 | 38 | SET @Counter += 1; 39 | END; 40 | 41 | -- Summarize failed login attempts 42 | SELECT 43 | MIN(LogDate) AS FirstOccurrence, 44 | MAX(LogDate) AS LastOccurrence, 45 | -- Extract the message text before any '[' character (usually username info) 46 | SUBSTRING([Text], 1, IIF(CHARINDEX('[', [Text]) = 0, LEN([Text]), CHARINDEX('[', [Text]) - 1)) AS MessageText, 47 | COUNT(DISTINCT [Text]) AS Occurrences 48 | FROM 49 | @Data 50 | GROUP BY 51 | SUBSTRING([Text], 1, IIF(CHARINDEX('[', [Text]) = 0, LEN([Text]), CHARINDEX('[', [Text]) - 1)) 52 | ORDER BY 53 | Occurrences DESC; -------------------------------------------------------------------------------- /backup_recovery/recovery-model/change_simple_to_full.sql: -------------------------------------------------------------------------------- 1 | /******************************************************************************************** 2 | Script: Switching Database to FULL Recovery Model 3 | Purpose: Configure database for point-in-time recovery and prevent uncontrolled log growth. 4 | Author: Lorenzo Uriel 5 | Date: 2025-08-20 6 | ********************************************************************************************/ 7 | 8 | -- STEP 1: Switch to FULL Recovery Model 9 | -- This enables point-in-time recovery. 
However, it won’t be active until a full backup is taken. 10 | ALTER DATABASE [YourDatabaseName] SET RECOVERY FULL; 11 | GO 12 | 13 | -- STEP 2: Take a FULL BACKUP immediately 14 | -- This full backup is required to "activate" the FULL recovery model. 15 | -- Without this backup, transaction log backups will not work. 16 | BACKUP DATABASE [YourDatabaseName] 17 | TO DISK = 'C:\Backups\YourDatabaseName.bak' 18 | WITH INIT, NAME = 'Full Backup - YourDatabaseName'; 19 | GO 20 | 21 | -- STEP 3: Configure Transaction Log Backups 22 | -- Regular log backups are mandatory in FULL recovery model. 23 | -- They prevent uncontrolled transaction log (.ldf) growth and allow point-in-time restores. 24 | -- Recommended frequency: every 5–15 minutes (depending on your RPO). 25 | BACKUP LOG [YourDatabaseName] 26 | TO DISK = 'C:\Backups\YourDatabaseName_Log.trn' 27 | WITH INIT, NAME = 'Transaction Log Backup - YourDatabaseName'; 28 | GO 29 | 30 | /******************************************************************************************** 31 | NOTES: 32 | - Full Recovery Model requires BOTH full database backups and log backups. 33 | - To restore, you need: the last full backup + all subsequent log backups. 34 | - Store backups in a secure location (offsite/cloud if possible). 35 | ********************************************************************************************/ -------------------------------------------------------------------------------- /query_store/queries_that_recently_regressed_performance.sql: -------------------------------------------------------------------------------- 1 | -- Queries that recently regressed in performance 2 | -- The following query example returns all queries for which execution time doubled in the last 48 hours due to a plan choice change. This query compares all runtime stat intervals side by side: 3 | SELECT qt.query_sql_text, 4 | q.query_id, 5 | qt.query_text_id, 6 | rs1.runtime_stats_id AS runtime_stats_id_1, 7 | rsi1.start_time AS interval_1, 8 | p1.plan_id AS plan_1, 9 | rs1.avg_duration AS avg_duration_1, 10 | rs2.avg_duration AS avg_duration_2, 11 | p2.plan_id AS plan_2, 12 | rsi2.start_time AS interval_2, 13 | rs2.runtime_stats_id AS runtime_stats_id_2 14 | FROM sys.query_store_query_text AS qt 15 | INNER JOIN sys.query_store_query AS q 16 | ON qt.query_text_id = q.query_text_id 17 | INNER JOIN sys.query_store_plan AS p1 18 | ON q.query_id = p1.query_id 19 | INNER JOIN sys.query_store_runtime_stats AS rs1 20 | ON p1.plan_id = rs1.plan_id 21 | INNER JOIN sys.query_store_runtime_stats_interval AS rsi1 22 | ON rsi1.runtime_stats_interval_id = rs1.runtime_stats_interval_id 23 | INNER JOIN sys.query_store_plan AS p2 24 | ON q.query_id = p2.query_id 25 | INNER JOIN sys.query_store_runtime_stats AS rs2 26 | ON p2.plan_id = rs2.plan_id 27 | INNER JOIN sys.query_store_runtime_stats_interval AS rsi2 28 | ON rsi2.runtime_stats_interval_id = rs2.runtime_stats_interval_id 29 | WHERE rsi1.start_time > DATEADD(hour, -48, GETUTCDATE()) 30 | AND rsi2.start_time > rsi1.start_time 31 | AND p1.plan_id <> p2.plan_id 32 | AND rs2.avg_duration > 2 * rs1.avg_duration 33 | ORDER BY q.query_id, 34 | rsi1.start_time, 35 | rsi2.start_time; 36 | 37 | -- If you want to see all performance regressions (not only regressions related to plan choice change), remove condition AND p1.plan_id <> p2.plan_id from the previous query. 
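-- As a possible next step (sketch only; the query_id/plan_id values below are illustrative,
-- take the real ones from the result set above): once a regressed query and its previously
-- faster plan are identified, the earlier plan can be pinned with Query Store plan forcing:
-- EXEC sp_query_store_force_plan @query_id = 48, @plan_id = 49;
-- EXEC sp_query_store_unforce_plan @query_id = 48, @plan_id = 49; -- undo the forcing later if needed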
-------------------------------------------------------------------------------- /index/ola_hallengren.sql: -------------------------------------------------------------------------------- 1 | ---------------------------------------------------------- 2 | -- SECTION: Index Optimization Using dbo.IndexOptimize 3 | ---------------------------------------------------------- 4 | -- Description: 5 | -- Executes the IndexOptimize stored procedure (from Ola Hallengren's Maintenance Solution) 6 | -- to reorganize or rebuild indexes and update statistics for the specified database. 7 | -- Helps improve performance by reducing fragmentation and keeping statistics up-to-date. 8 | 9 | USE [master]; 10 | GO 11 | 12 | EXECUTE [dbo].[IndexOptimize] 13 | @Databases = 'database', -- Target database for maintenance 14 | @FragmentationLow = NULL, -- No action on low fragmentation 15 | @FragmentationMedium = 'INDEX_REORGANIZE,INDEX_REBUILD_ONLINE,INDEX_REBUILD_OFFLINE', 16 | -- Actions for medium fragmentation indexes 17 | @FragmentationHigh = 'INDEX_REBUILD_ONLINE,INDEX_REBUILD_OFFLINE', 18 | -- Actions for high fragmentation indexes 19 | @FragmentationLevel1 = 5, -- Threshold (%) for low fragmentation 20 | @FragmentationLevel2 = 30, -- Threshold (%) for medium fragmentation 21 | @PageCountLevel = 1, -- Minimum number of pages to consider an index 22 | @WaitAtLowPriorityMaxDuration = 1, -- Max duration (minutes) to wait when rebuilding at low priority 23 | @WaitAtLowPriorityAbortAfterWait = 'NONE', -- Behavior if wait exceeds max duration (NONE = do not abort) 24 | @UpdateStatistics = 'ALL', -- Update all statistics 25 | @OnlyModifiedStatistics = 'Y', -- Only update statistics if they have changed 26 | @LogToTable = 'Y'; -- Log results into the maintenance table -------------------------------------------------------------------------------- /backup_recovery/backup/backup_history.sql: -------------------------------------------------------------------------------- 1 | /******************************************************************************************** 2 | Script: SQL Server Backup History for a Specific Database 3 | Purpose: Retrieve last backup dates, backup type, and physical backup files. 
4 | Author: Lorenzo Uriel 5 | Date: 2025-08-20 6 | ********************************************************************************************/ 7 | 8 | -- STEP 1: Check backup history for a specific database 9 | -- Replace 'AdventureWorks2019' with your target database 10 | SELECT 11 | bs.database_name, -- Database name 12 | MAX(bs.backup_finish_date) AS last_backup_finish, -- Date & time of the most recent backup 13 | bmf.physical_device_name, -- Physical backup file location 14 | bs.type AS backup_type, -- Backup type code (D=Full, I=Differential, L=Log) 15 | CASE bs.type 16 | WHEN 'D' THEN 'Full' 17 | WHEN 'I' THEN 'Differential' 18 | WHEN 'L' THEN 'Transaction Log' 19 | ELSE bs.type 20 | END AS backup_type_desc -- Friendly description of backup type 21 | FROM msdb.dbo.backupset bs 22 | LEFT JOIN msdb.dbo.backupmediafamily bmf 23 | ON bs.media_set_id = bmf.media_set_id 24 | WHERE [database_name] = 'AdventureWorks2019' -- Target database filter 25 | GROUP BY bs.database_name, bmf.physical_device_name, bs.type 26 | ORDER BY bs.database_name, last_backup_finish DESC; -- Most recent backups first 27 | GO 28 | 29 | /******************************************************************************************** 30 | NOTES: 31 | - bs.type codes: 32 | D = Full backup 33 | I = Differential backup 34 | L = Transaction Log backup 35 | - This query shows last backup times per backup type and physical file. 36 | - Useful for verifying backup schedules and ensuring compliance with RPO/RTO. 37 | ********************************************************************************************/ -------------------------------------------------------------------------------- /ssrs/check_ssrs_permissions.sql: -------------------------------------------------------------------------------- 1 | /************************************************************************************************ 2 | -- Author: Lorenzo Uriel 3 | -- Created Date: 2025-08-20 4 | -- Purpose: Retrieve catalog items along with associated users and roles. 5 | -- Description: This query joins the ReportServer catalog, users, policy-user-role, and roles 6 | -- tables to produce a list of all catalog items with their type, path, and name, 7 | -- along with each user and their assigned role and role description. The item type 8 | -- numeric codes are mapped to descriptive labels. Results are ordered by item type, 9 | -- item name, and username. 
10 | ************************************************************************************************/ 11 | SELECT 12 | CASE 13 | WHEN catalog.Type = 1 THEN 'Folder (1)' 14 | WHEN catalog.Type = 2 THEN 'Report (2)' 15 | WHEN catalog.Type = 3 THEN 'File (3)' 16 | WHEN catalog.Type = 4 THEN 'Linked Report (4)' 17 | WHEN catalog.Type = 5 THEN 'Data Source (5)' 18 | WHEN catalog.Type = 6 THEN 'Report Model (6)' 19 | WHEN catalog.Type = 7 THEN 'Report Part (7)' 20 | WHEN catalog.Type = 8 THEN 'Shared Data Set (8)' 21 | WHEN catalog.Type = 9 THEN 'Report Part (9)' 22 | WHEN catalog.Type = 11 THEN 'KPI (11)' 23 | WHEN catalog.Type = 12 THEN 'Mobile Report Folder (12)' 24 | WHEN catalog.Type = 13 THEN 'PowerBI Desktop Document (13)' 25 | END AS Item_Type, 26 | catalog.Path, 27 | catalog.Name, 28 | users.UserName, 29 | roles.RoleName, 30 | roles.Description 31 | FROM ReportServer.dbo.users 32 | INNER JOIN ReportServer.dbo.policyuserrole 33 | ON users.userid = policyuserrole.userid 34 | INNER JOIN ReportServer.dbo.roles 35 | ON roles.roleid = policyuserrole.roleid 36 | INNER JOIN ReportServer.dbo.catalog 37 | ON catalog.policyid = policyuserrole.policyid 38 | ORDER BY catalog.Type, catalog.Name, users.UserName; -------------------------------------------------------------------------------- /server/server_roles.sql: -------------------------------------------------------------------------------- 1 | /* 2 | ================================================================================ 3 | SQL SERVER FIXED SERVER ROLES DOCUMENTATION 4 | ================================================================================ 5 | Author: Lorenzo Uriel 6 | Date: 2025-08-20 7 | Purpose: List all fixed server roles and their permissions for reference 8 | ================================================================================ 9 | */ 10 | 11 | -- 1. sysadmin 12 | -- Members of this role have **unrestricted access** to all server and database functions. 13 | -- They can perform **any activity** in the SQL Server instance. 14 | EXEC sp_srvrolepermission 'sysadmin'; 15 | 16 | -- 2. serveradmin 17 | -- Members can configure server-wide settings and **shutdown the SQL Server instance**. 18 | -- Permissions include changing configuration options and setting server-wide parameters. 19 | EXEC sp_srvrolepermission 'serveradmin'; 20 | 21 | -- 3. setupadmin 22 | -- Members can manage linked servers and extended stored procedures. 23 | -- Typically used for **installation and setup tasks**. 24 | EXEC sp_srvrolepermission 'setupadmin'; 25 | 26 | -- 4. securityadmin 27 | -- Members manage server logins, their properties, and **permissions**. 28 | -- They can also reset passwords for SQL logins. 29 | EXEC sp_srvrolepermission 'securityadmin'; 30 | 31 | -- 5. processadmin 32 | -- Members can **terminate SQL Server processes** using KILL. 33 | EXEC sp_srvrolepermission 'processadmin'; 34 | 35 | -- 6. dbcreator 36 | -- Members can **create, alter, drop, and restore databases**. 37 | -- They cannot manage logins unless also part of securityadmin. 38 | EXEC sp_srvrolepermission 'dbcreator'; 39 | 40 | -- 7. diskadmin 41 | -- Members can manage **disk files** and allocate disk space for the SQL Server instance. 42 | EXEC sp_srvrolepermission 'diskadmin'; 43 | 44 | -- 8. bulkadmin 45 | -- Members can execute **bulk insert operations**. 46 | -- They can use BULK INSERT to import data from files into tables. 
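-- For illustration only (the table name, file path, and options below are hypothetical):
-- BULK INSERT dbo.staging_orders
-- FROM 'C:\Import\orders.csv'
-- WITH (FIELDTERMINATOR = ',', ROWTERMINATOR = '\n', FIRSTROW = 2);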
47 | EXEC sp_srvrolepermission 'bulkadmin'; -------------------------------------------------------------------------------- /sql_agent/sql_agent_sessions.sql: -------------------------------------------------------------------------------- 1 | /************************************************************************************************ 2 | -- Author: Lorenzo Uriel 3 | -- Created Date: 2025-08-19 4 | -- Purpose: Monitor and optionally terminate SQL Server Agent sessions. 5 | -- Description: This script retrieves information about currently running sessions initiated 6 | -- by SQL Server Agent jobs. It can help identify long-running or stuck jobs. 7 | -- Note: Use the KILL command with caution; terminating a session may cause rollback 8 | -- and affect ongoing processes. 9 | ************************************************************************************************/ 10 | 11 | -- ============================================================ 12 | -- Retrieve running SQL Server Agent sessions 13 | -- ============================================================ 14 | SELECT 15 | r.session_id, -- ID of the request/session 16 | r.start_time, -- When the request started 17 | r.status, -- Current status (running, suspended, etc.) 18 | r.command, -- Type of command being executed 19 | t.text AS sql_text, -- SQL text of the command 20 | s.program_name, -- Program that initiated the session (SQLAgent) 21 | s.login_name, -- Login name of the user executing the session 22 | s.host_name -- Host machine of the session 23 | FROM sys.dm_exec_requests r 24 | JOIN sys.dm_exec_sessions s 25 | ON r.session_id = s.session_id 26 | CROSS APPLY sys.dm_exec_sql_text(r.sql_handle) t 27 | WHERE s.program_name LIKE 'SQLAgent%'; -- Filters sessions started by SQL Server Agent 28 | 29 | -- ============================================================ 30 | -- Optionally terminate a session 31 | -- ============================================================ 32 | -- Replace <session_id> with the actual session_id from the query above. 33 | -- WARNING: KILL will terminate the session, which may cause rollback and impact running jobs. 34 | -- Example: KILL 52; 35 | -- KILL <session_id>; 36 | -------------------------------------------------------------------------------- /ssis/create_proc_trigger_job.sql: -------------------------------------------------------------------------------- 1 | /************************************************************************************************ 2 | -- Author: Lorenzo Uriel 3 | -- Created Date: 2025-07-10 4 | -- Purpose: Trigger the SQL Server Agent job named 'job_name'. 5 | -- Description: This procedure checks if the specified job exists and is enabled, 6 | -- stops it if already running (errors ignored), and then starts the job. 7 | -- All errors during stop/start are safely suppressed.
8 | ************************************************************************************************/ 9 | 10 | CREATE PROCEDURE [dbo].[job_name] 11 | AS 12 | BEGIN 13 | -- =============================================== 14 | -- Step 1: Check if the job exists and is enabled 15 | -- =============================================== 16 | IF EXISTS ( 17 | SELECT 1 18 | FROM msdb.dbo.sysjobs sj 19 | JOIN msdb.dbo.sysjobschedules sjs ON sj.job_id = sjs.job_id 20 | JOIN msdb.dbo.sysjobs_view sjv ON sj.job_id = sjv.job_id 21 | WHERE sj.name = 'job_name' 22 | AND sjv.enabled = 1 -- Only consider enabled jobs 23 | ) 24 | BEGIN 25 | -- =============================================== 26 | -- Step 2: Stop the job if currently running 27 | -- Errors are suppressed to avoid interruption 28 | -- =============================================== 29 | BEGIN TRY 30 | EXEC msdb.dbo.sp_stop_job @job_name = 'job_name'; 31 | END TRY 32 | BEGIN CATCH 33 | -- Ignore any errors during stop attempt 34 | END CATCH 35 | END 36 | 37 | -- =============================================== 38 | -- Step 3: Start the job 39 | -- Errors are suppressed to avoid interruption 40 | -- =============================================== 41 | BEGIN TRY 42 | EXEC msdb.dbo.sp_start_job @job_name = 'job_name'; 43 | END TRY 44 | BEGIN CATCH 45 | -- Ignore any errors during start attempt 46 | END CATCH 47 | END 48 | -------------------------------------------------------------------------------- /ssrs/info_reports.sql: -------------------------------------------------------------------------------- 1 | /************************************************************************************************ 2 | -- Author: Lorenzo Uriel 3 | -- Created Date: 2025-08-20 4 | -- Purpose: Retrieve catalog items with their details and parent information. 5 | -- Description: This query selects all items from the catalog, including reports, folders, 6 | -- files, linked reports, data sources, report models, KPIs, shared datasets, 7 | -- mobile reports, and PowerBI documents. It joins the catalog to itself to 8 | -- include the parent item name. The item type is mapped from its numeric code 9 | -- to a descriptive label. Additional details include description, hidden flag, 10 | -- creation and modification dates, and content size. 
11 | ************************************************************************************************/ 12 | SELECT 13 | CHILD_ITEM.Path AS Item_Path, 14 | CHILD_ITEM.Name AS Item_Name, 15 | CASE 16 | WHEN CHILD_ITEM.Type = 1 THEN 'Folder (1)' 17 | WHEN CHILD_ITEM.Type = 2 THEN 'Report (2)' 18 | WHEN CHILD_ITEM.Type = 3 THEN 'File (3)' 19 | WHEN CHILD_ITEM.Type = 4 THEN 'Linked Report (4)' 20 | WHEN CHILD_ITEM.Type = 5 THEN 'Data Source (5)' 21 | WHEN CHILD_ITEM.Type = 6 THEN 'Report Model (6)' 22 | WHEN CHILD_ITEM.Type = 7 THEN 'Report Part (7)' 23 | WHEN CHILD_ITEM.Type = 8 THEN 'Shared Data Set (8)' 24 | WHEN CHILD_ITEM.Type = 9 THEN 'Report Part (9)' 25 | WHEN CHILD_ITEM.Type = 11 THEN 'KPI (11)' 26 | WHEN CHILD_ITEM.Type = 12 THEN 'Mobile Report Folder (12)' 27 | WHEN CHILD_ITEM.Type = 13 THEN 'PowerBI Desktop Document (13)' 28 | END AS Item_Type, 29 | PARENT_ITEM.Name AS Parent_Item_Name, 30 | CHILD_ITEM.Description AS Item_Description, 31 | CHILD_ITEM.Hidden AS Is_Hidden, 32 | CHILD_ITEM.CreationDate, 33 | CHILD_ITEM.ModifiedDate, 34 | CHILD_ITEM.ContentSize 35 | FROM dbo.Catalog CHILD_ITEM 36 | LEFT JOIN dbo.Catalog PARENT_ITEM 37 | ON PARENT_ITEM.ItemID = CHILD_ITEM.ParentID; 38 | -------------------------------------------------------------------------------- /free_space/shrink.sql: -------------------------------------------------------------------------------- 1 | /************************************************************************************************ 2 | -- Author: Lorenzo Uriel 3 | -- Created Date: 2025-08-19 4 | -- Purpose: Shrink database and log files in SQL Server 5 | -- Description: Demonstrates database-level and file-level shrink operations 6 | -- Notes: Shrinking files frequently is not recommended as it can cause fragmentation. 7 | -- Use carefully, preferably during low activity periods. 8 | ************************************************************************************************/ 9 | 10 | -- ============================================================ 11 | -- 1. Shrink the entire database 12 | -- ============================================================ 13 | /* 14 | Shrinks the data and log files of the specified database. 15 | The second parameter (10) is the target percentage of free space to leave after the shrink. 16 | */ 17 | DBCC SHRINKDATABASE (my_database, 10); 18 | GO 19 | 20 | -- ============================================================ 21 | -- 2. Check database files and their physical paths 22 | -- ============================================================ 23 | /* 24 | Lists logical and physical names of files for a database, including type (ROWS/LOG). 25 | Useful to identify which files to shrink or monitor. 26 | */ 27 | SELECT 28 | name AS [Logical_Name], -- Logical file name used in SQL 29 | physical_name AS [Physical_Name],-- OS-level path of the file 30 | type_desc AS [File_Type] -- ROWS = data file, LOG = transaction log 31 | FROM sys.master_files 32 | WHERE database_id = DB_ID(N'SSISDB'); -- Replace with your database 33 | GO 34 | 35 | -- ============================================================ 36 | -- 3. Shrink a specific log file 37 | -- ============================================================ 38 | /* 39 | Shrinks a specific file (usually a log file), referenced by its logical file name. 40 | The second parameter (10) is the target size for the file, in MB. 41 | Back up the transaction log first so the inactive portion can be truncated and the shrink can actually reclaim space.
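For example (the database name and backup path are illustrative):
    BACKUP LOG my_database TO DISK = 'D:\Backups\my_database_log.trn';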
42 | */ 43 | DBCC SHRINKFILE (log, 10); 44 | GO 45 | -------------------------------------------------------------------------------- /helps/check_cascade.sql: -------------------------------------------------------------------------------- 1 | -- ============================================= 2 | -- PURPOSE: Retrieve detailed information about foreign key constraints 3 | -- including parent and referenced tables and columns, 4 | -- as well as update and delete rules. 5 | -- 6 | -- NOTES: 7 | -- - Uses sys.foreign_keys for FK definitions 8 | -- - Uses sys.foreign_key_columns to join columns correctly 9 | -- - Uses sys.tables and sys.columns for table/column names 10 | -- - Filtered for a specific parent table (replace 'YourTableName') 11 | -- ============================================= 12 | 13 | SELECT 14 | fk.name AS Nome_Constraint, -- Name of the foreign key constraint 15 | tp.name AS Tabela_Pai, -- Parent table (table with the foreign key) 16 | rf.name AS Coluna_Pai, -- Column in the parent table 17 | tr.name AS Tabela_Filha, -- Referenced table (child table) 18 | ff.name AS Coluna_Filha, -- Column in the referenced table 19 | fk.update_referential_action_desc AS Regra_Atualizacao, -- Action on update (CASCADE, NO ACTION, etc.) 20 | fk.delete_referential_action_desc AS Regra_Exclusao -- Action on delete (CASCADE, NO ACTION, etc.) 21 | FROM sys.foreign_keys AS fk 22 | INNER JOIN sys.foreign_key_columns AS fkc 23 | ON fk.object_id = fkc.constraint_object_id -- Join to get the FK column mapping 24 | INNER JOIN sys.tables AS tp 25 | ON fkc.parent_object_id = tp.object_id -- Join to get parent table name 26 | INNER JOIN sys.columns AS rf 27 | ON fkc.parent_object_id = rf.object_id -- Join to get parent column name 28 | AND fkc.parent_column_id = rf.column_id 29 | INNER JOIN sys.tables AS tr 30 | ON fkc.referenced_object_id = tr.object_id -- Join to get referenced table name 31 | INNER JOIN sys.columns AS ff 32 | ON fkc.referenced_object_id = ff.object_id -- Join to get referenced column name 33 | AND fkc.referenced_column_id = ff.column_id 34 | WHERE tp.name = 'YourTableName'; -- Filter by parent table (replace with actual table name) -------------------------------------------------------------------------------- /helps/spatial_data/polygons_pt2.sql: -------------------------------------------------------------------------------- 1 | -- ===================================================== 2 | -- Step 1: Create a point in geography type (WGS84 coordinates) 3 | -- ===================================================== 4 | DECLARE @scb0 GEOGRAPHY = 5 | GEOGRAPHY::STGeomFromText('POINT(-90.851758 44.292658)', 4326); 6 | 7 | -- ===================================================== 8 | -- Step 2: Create a buffer around the point (50 meters) 9 | -- ===================================================== 10 | SET @scb0 = @scb0.STBuffer(50); 11 | -- STBuffer(distance): creates a circular polygon around a point 12 | -- distance is in meters for geography type 13 | 14 | -- ===================================================== 15 | -- Step 3: Convert the buffered geography to geometry type 16 | -- ===================================================== 17 | DECLARE @scb1 GEOMETRY = GEOMETRY::STGeomFromText(@scb0.ToString(), 4326); 18 | 19 | -- ===================================================== 20 | -- Step 4: Create the Minimum Bounding Rectangle (MBR) 21 | -- ===================================================== 22 | SET @scb1 = @scb1.STEnvelope(); 23 | -- STEnvelope() returns the axis-aligned 
rectangle covering the shape 24 | 25 | -- ===================================================== 26 | -- Step 5: Convert the MBR back to geography type 27 | -- ===================================================== 28 | SET @scb0 = GEOGRAPHY::STGeomFromText(@scb1.ToString(), 4326); 29 | 30 | -- ===================================================== 31 | -- Step 6: View the result 32 | -- ===================================================== 33 | SELECT @scb0 AS BufferedAndEnvelopedPoint; 34 | 35 | -- ===================================================== 36 | -- One-liner alternative using BufferWithTolerance 37 | -- ===================================================== 38 | -- Point(latitude=45, longitude=-90), buffer 11,000 meters, tolerance 1,000 meters 39 | SELECT GEOGRAPHY::Point(45, -90, 4326) 40 | .BufferWithTolerance(11000, 1000, 0) AS BufferedPoint; 41 | -- BufferWithTolerance(distance, tolerance, relative) 42 | -- the tolerance controls how closely the circle is approximated (relative = 0 means the tolerance is an absolute distance) -------------------------------------------------------------------------------- /query_store/queries_w_multiple_plans.sql: -------------------------------------------------------------------------------- 1 | -- Queries with multiple plans 2 | -- Queries with more than one plan are especially interesting, because they can be candidates for a regression in performance due to a change in plan choice. 3 | 4 | -- The following query identifies the queries with the highest number of plans within the last hour: 5 | SELECT q.query_id, 6 | object_name(object_id) AS ContainingObject, 7 | COUNT(*) AS QueryPlanCount, 8 | STRING_AGG(p.plan_id, ',') plan_ids, 9 | qt.query_sql_text 10 | FROM sys.query_store_query_text AS qt 11 | INNER JOIN sys.query_store_query AS q 12 | ON qt.query_text_id = q.query_text_id 13 | INNER JOIN sys.query_store_plan AS p 14 | ON p.query_id = q.query_id 15 | INNER JOIN sys.query_store_runtime_stats AS rs 16 | ON p.plan_id = rs.plan_id 17 | WHERE rs.last_execution_time > DATEADD(HOUR, -1, GETUTCDATE()) 18 | GROUP BY OBJECT_NAME(object_id), 19 | q.query_id, 20 | qt.query_sql_text 21 | HAVING COUNT(DISTINCT p.plan_id) > 1 22 | ORDER BY QueryPlanCount DESC; 23 | 24 | 25 | -- The following query identifies these queries along with all plans within the last hour: 26 | WITH Query_MultPlans 27 | AS ( 28 | SELECT COUNT(*) AS QueryPlanCount, 29 | q.query_id 30 | FROM sys.query_store_query_text AS qt 31 | INNER JOIN sys.query_store_query AS q 32 | ON qt.query_text_id = q.query_text_id 33 | INNER JOIN sys.query_store_plan AS p 34 | ON p.query_id = q.query_id 35 | GROUP BY q.query_id 36 | HAVING COUNT(DISTINCT plan_id) > 1 37 | ) 38 | SELECT q.query_id, 39 | object_name(object_id) AS ContainingObject, 40 | query_sql_text, 41 | p.plan_id, 42 | p.query_plan AS plan_xml, 43 | p.last_compile_start_time, 44 | p.last_execution_time 45 | FROM Query_MultPlans AS qm 46 | INNER JOIN sys.query_store_query AS q 47 | ON qm.query_id = q.query_id 48 | INNER JOIN sys.query_store_plan AS p 49 | ON q.query_id = p.query_id 50 | INNER JOIN sys.query_store_query_text qt 51 | ON qt.query_text_id = q.query_text_id 52 | INNER JOIN sys.query_store_runtime_stats AS rs 53 | ON p.plan_id = rs.plan_id 54 | WHERE rs.last_execution_time > DATEADD(HOUR, -1, GETUTCDATE()) 55 | ORDER BY q.query_id, 56 | p.plan_id; -------------------------------------------------------------------------------- /backup_recovery/restore/restore_start_time.sql: -------------------------------------------------------------------------------- 1 |
/******************************************************************************************** 2 | Script: SQL Server Restore History and Backup Performance Check 3 | Purpose: Review restore history, backup durations, sizes, and check error logs. 4 | Author: Lorenzo Uriel 5 | Date: 2025-08-20 6 | ********************************************************************************************/ 7 | 8 | -- STEP 1: List restore history and related backup details 9 | -- Includes: database restored, restore date, backup start & finish, duration, size 10 | SELECT 11 | r.destination_database_name, -- Database that was restored 12 | r.restore_date AS restore_completed_at, -- When restore completed 13 | b.backup_start_date, -- Backup start time 14 | b.backup_finish_date, -- Backup finish time 15 | DATEDIFF(SECOND, b.backup_start_date, b.backup_finish_date) AS backup_duration_seconds, -- Duration in seconds 16 | b.backup_size / 1024 / 1024 AS backup_size_MB -- Backup size in MB 17 | FROM msdb.dbo.restorehistory r 18 | JOIN msdb.dbo.backupset b ON r.backup_set_id = b.backup_set_id 19 | ORDER BY r.restore_date DESC; -- Most recent restores first 20 | GO 21 | 22 | -- STEP 2: Check SQL Server error log for Restore operations 23 | -- This helps review restore performance or errors (MB/sec, issues during restore) 24 | EXEC sp_readerrorlog 0, 1, N'Restore'; 25 | GO 26 | 27 | -- STEP 3: Search SQL Server error log for a specific database 28 | EXEC xp_readerrorlog 0, 1, N'AdventureWorks2019'; 29 | GO 30 | 31 | /******************************************************************************************** 32 | NOTES: 33 | - Use these queries to monitor restore history, backup durations, and performance. 34 | - DATEDIFF gives duration in seconds; backup_size is converted to MB. 35 | - sp_readerrorlog and xp_readerrorlog allow filtering for specific keywords like 'Restore' or a database name. 36 | - Always run with appropriate permissions (sysadmin or db_backupoperator recommended). 37 | ********************************************************************************************/ -------------------------------------------------------------------------------- /audit/audit_users_table.sql: -------------------------------------------------------------------------------- 1 | /************************************************************************************************ 2 | -- Author: Lorenzo Uriel 3 | -- Created Date: 2025-08-19 4 | -- Purpose: Audit changes to the 'users' table for compliance and security monitoring 5 | -- Description: Creates a server audit object, a database audit specification for tracking 6 | -- INSERT, UPDATE, DELETE operations on dbo.users, and enables them. 7 | -- Notes: Ensure the audit folder exists and SQL Server service account has write permission. 8 | ************************************************************************************************/ 9 | 10 | -- ============================================================ 11 | -- 1. CREATE SERVER AUDIT OBJECT 12 | -- ============================================================ 13 | /* 14 | Purpose: 15 | - Central audit object to collect database audit events. 
16 | Parameters: 17 | - FILEPATH: Folder where audit logs are saved (must exist and the SQL Server service account needs write permission) 18 | - MAXSIZE: Maximum file size before rollover 19 | - MAX_ROLLOVER_FILES: UNLIMITED lets a new file be created each time MAXSIZE is reached 20 | */ 21 | CREATE SERVER AUDIT Audit_Users 22 | TO FILE ( 23 | FILEPATH = 'C:\AuditLogs\', 24 | MAXSIZE = 10 MB, 25 | MAX_ROLLOVER_FILES = UNLIMITED 26 | ); 27 | 28 | -- Enable the audit 29 | ALTER SERVER AUDIT Audit_Users 30 | WITH (STATE = ON); 31 | 32 | -- ============================================================ 33 | -- 2. CREATE DATABASE AUDIT SPECIFICATION 34 | -- ============================================================ 35 | /* 36 | Purpose: 37 | - Tracks INSERT, UPDATE, DELETE operations on dbo.users table 38 | Database Context: 39 | - Switch to the target database 40 | Parameters: 41 | - ADD (...): Specifies actions and objects to audit 42 | - BY PUBLIC: Applies to all users 43 | - STATE = ON: Activates the specification immediately 44 | */ 45 | USE [database]; 46 | GO 47 | 48 | CREATE DATABASE AUDIT SPECIFICATION Audit_Users_Changes 49 | FOR SERVER AUDIT Audit_Users 50 | ADD (INSERT, UPDATE, DELETE ON [dbo].[users] BY PUBLIC); 51 | GO 52 | 53 | -- Enable the database audit specification 54 | ALTER DATABASE AUDIT SPECIFICATION Audit_Users_Changes 55 | WITH (STATE = ON); 56 | GO -------------------------------------------------------------------------------- /triggers/trg_prevent_delete_where.sql: -------------------------------------------------------------------------------- 1 | -- ============================================= 2 | -- Trigger: trg_prevent_delete_where 3 | -- Table: Employees 4 | -- Event: INSTEAD OF DELETE 5 | -- Purpose: Prevent deletion of all records in the Employees table 6 | -- without a WHERE clause. 7 | -- Author: Lorenzo Uriel 8 | -- Date: 2025-08-19 9 | -- ============================================= 10 | 11 | CREATE TRIGGER trg_prevent_delete_where 12 | ON [Employees] 13 | INSTEAD OF DELETE 14 | AS 15 | BEGIN 16 | -- ===================================================== 17 | -- 1. Check if the deletion attempt is for all records 18 | -- ===================================================== 19 | -- The 'deleted' pseudo-table contains all rows that would be deleted 20 | -- If the count of 'deleted' rows equals the total rows in Employees, 21 | -- it means someone is trying to delete everything. 22 | IF (SELECT COUNT(*) FROM deleted) = (SELECT COUNT(*) FROM [Employees]) 23 | BEGIN 24 | -- ===================================================== 25 | -- 2. Raise an error using RAISERROR 26 | -- ===================================================== 27 | -- RAISERROR(message, severity, state) 28 | -- message: text displayed to the user 29 | -- severity: 16 = user-defined error 30 | -- state: arbitrary number to identify the error 31 | RAISERROR('You cannot delete all records! Please add a WHERE clause...', 16, 1); 32 | -- Stop trigger execution 33 | RETURN; 34 | END 35 | 36 | -- ===================================================== 37 | -- 3. If not a full delete, allow deletion of selected records 38 | -- ===================================================== 39 | DELETE E 40 | FROM [Employees] E 41 | INNER JOIN deleted D ON E.id = D.id; 42 | END 43 | GO 44 | -- Insert test data (GO above keeps these test statements out of the trigger body) 45 | INSERT INTO Employees (id, name) VALUES (1, 'Alice'), (2, 'Bob'), (3, 'Charlie'); 46 | 47 | -- Attempt to delete all rows (will raise RAISERROR) 48 | DELETE FROM Employees; 49 | -- Expected output: 50 | -- "You cannot delete all records! Please add a WHERE clause..."
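-- If a deliberate full purge is ever needed, one option is to disable the trigger temporarily
-- (a sketch reusing the objects defined above; run only with the proper permissions):
-- DISABLE TRIGGER trg_prevent_delete_where ON Employees;
-- DELETE FROM Employees;
-- ENABLE TRIGGER trg_prevent_delete_where ON Employees;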
51 | 52 | -- Attempt to delete a specific row (will succeed) 53 | DELETE FROM Employees WHERE id = 2; -------------------------------------------------------------------------------- /database_size/database_storage_alert.sql: -------------------------------------------------------------------------------- 1 | -- ============================================= 2 | -- Script: Database Storage Alert 3 | -- Description: Checks total, used, max size and raises warnings/alerts 4 | -- Author: Lorenzo Uriel 5 | -- Date: 2025-08-19 6 | -- ============================================= 7 | DECLARE @db_name SYSNAME = DB_NAME(); 8 | DECLARE @total_size_mb DECIMAL(18,2); 9 | DECLARE @used_size_mb DECIMAL(18,2); 10 | DECLARE @max_size_mb DECIMAL(18,2); 11 | DECLARE @usage_pct DECIMAL(5,2); 12 | DECLARE @usage_pct_str VARCHAR(10); 13 | 14 | -- Get total and used sizes for current DB 15 | SELECT 16 | @total_size_mb = SUM(size) * 8.0 / 1024, 17 | @used_size_mb = SUM(CASE WHEN type_desc = 'ROWS' THEN size END) * 8.0 / 1024 18 | FROM sys.master_files 19 | WHERE database_id = DB_ID(); 20 | 21 | -- Get max size (customize unlimited values) 22 | SELECT @max_size_mb = 23 | MAX(CASE 24 | WHEN max_size = -1 THEN 2000 -- Example for unlimited files 25 | ELSE max_size * 8.0 / 1024 26 | END) 27 | FROM sys.database_files; 28 | 29 | -- Calculate usage percentage 30 | SET @usage_pct = (@total_size_mb / @max_size_mb) * 100; 31 | SET @usage_pct_str = CAST(@usage_pct AS VARCHAR(10)); 32 | 33 | -- Print status 34 | PRINT 'Database: ' + @db_name; 35 | PRINT 'Used: ' + CAST(@used_size_mb AS VARCHAR) + ' MB'; 36 | PRINT 'Total: ' + CAST(@total_size_mb AS VARCHAR) + ' MB'; 37 | PRINT 'Max: ' + CAST(@max_size_mb AS VARCHAR) + ' MB'; 38 | PRINT 'Usage: ' + @usage_pct_str + '%'; 39 | 40 | -- Raise alerts 41 | IF @usage_pct >= 80 AND @usage_pct < 90 42 | RAISERROR('Warning: Database "%s" is %s%% full.', 16, 1, @db_name, @usage_pct_str); 43 | 44 | IF @usage_pct >= 90 AND @usage_pct < 95 45 | RAISERROR('High Warning: Database "%s" is %s%% full.', 16, 1, @db_name, @usage_pct_str); 46 | 47 | IF @usage_pct >= 95 AND @usage_pct < 99 48 | RAISERROR('Critical: Database "%s" is %s%% full. Consider cleanup or expansion.', 16, 1, @db_name, @usage_pct_str); 49 | 50 | IF @usage_pct >= 99 51 | RAISERROR('EMERGENCY: Database "%s" is %s%% full. Immediate action required!', 16, 1, @db_name, @usage_pct_str); 52 | 53 | -- Example custom max size 54 | -- SELECT @max_size_mb = 1800; -- MB -------------------------------------------------------------------------------- /backup_recovery/backup/search_backups_folder.sql: -------------------------------------------------------------------------------- 1 | /******************************************************************************************** 2 | Script: Determine Latest Differential Backup for FTP Transfer 3 | Purpose: Retrieve the most recent differential backup file and construct the remote FTP path. 
4 | Author: Lorenzo Uriel 5 | Date: 2025-08-20 6 | ********************************************************************************************/ 7 | 8 | -- STEP 0: Declare variables 9 | DECLARE 10 | @FTPPath varchar(1024) = '/backup/DIFF/', -- Remote FTP folder path where backups will be stored 11 | @BakName varchar(1024), -- Backup file name (to be extracted) 12 | @RemoteFolderPath varchar(1024); -- Full remote path including file name 13 | 14 | -- STEP 1: Get the most recent differential backup file 15 | -- 'I' type indicates a differential backup (D = Full, L = Log) 16 | SELECT TOP 1 17 | --physical_device_name, -- original full path (commented out) 18 | @BakName = SUBSTRING( 19 | physical_device_name, 20 | LEN(physical_device_name) - CHARINDEX('\', REVERSE(physical_device_name)) + 2, 21 | LEN(physical_device_name) 22 | ) 23 | FROM msdb.dbo.backupmediafamily 24 | WHERE media_set_id = ( 25 | SELECT TOP 1 media_set_id 26 | FROM msdb.dbo.backupset 27 | WHERE type = 'I' -- Differential backup 28 | ORDER BY backup_finish_date DESC 29 | ); 30 | 31 | -- STEP 2: Construct the full remote FTP path 32 | SET @RemoteFolderPath = @FTPPath + @BakName; 33 | 34 | -- STEP 3: Display the results 35 | SELECT 36 | @RemoteFolderPath AS [RemoteFolderPath], -- Full remote path including file name 37 | @BakName AS [BackupName]; -- Backup file name only 38 | GO 39 | 40 | /******************************************************************************************** 41 | NOTES: 42 | - This script retrieves the latest differential backup file for a database. 43 | - The backup type 'I' corresponds to differential backups. 44 | - SUBSTRING with REVERSE extracts the file name from the full local path. 45 | - @RemoteFolderPath can be used in FTP scripts or automation tasks for uploading backups. 46 | ********************************************************************************************/ -------------------------------------------------------------------------------- /sql_agent/monitor_currently_agent_jobs.sql: -------------------------------------------------------------------------------- 1 | /************************************************************************************************ 2 | -- Author: Lorenzo Uriel 3 | -- Created Date: 2025-08-19 4 | -- Purpose: Monitor currently running SQL Server Agent jobs. 5 | -- Description: These queries help identify jobs that are executing, their start times, 6 | -- and current execution status. 
7 | ************************************************************************************************/ 8 | 9 | -- ============================================================ 10 | -- Step 1: List all jobs that are currently running (no stop time) 11 | -- ============================================================ 12 | SELECT 13 | j.name AS job_name, -- Name of the SQL Server Agent job 14 | ja.start_execution_date -- When the job started execution 15 | FROM msdb.dbo.sysjobactivity ja 16 | JOIN msdb.dbo.sysjobs j 17 | ON ja.job_id = j.job_id 18 | WHERE ja.stop_execution_date IS NULL -- Job has not yet stopped 19 | AND ja.start_execution_date IS NOT NULL; -- Job has started 20 | 21 | -- ============================================================ 22 | -- Step 2: Use built-in stored procedure to get executing jobs 23 | -- ============================================================ 24 | -- Execution status: 1 = Executing 25 | EXEC msdb.dbo.sp_help_job 26 | @execution_status = 1; 27 | 28 | -- ============================================================ 29 | -- Step 3: Detailed check for a specific job by Job ID 30 | -- ============================================================ 31 | USE msdb; 32 | GO 33 | 34 | SELECT 35 | j.job_id, -- Job identifier 36 | j.name AS job_name, -- Name of the job 37 | ja.start_execution_date, -- Start date/time 38 | ja.stop_execution_date, -- Stop date/time 39 | CASE 40 | WHEN ja.start_execution_date IS NOT NULL AND ja.stop_execution_date IS NULL THEN 'Running' 41 | ELSE 'Not Running' 42 | END AS job_status -- Current status derived from start/stop dates 43 | FROM msdb.dbo.sysjobactivity ja 44 | INNER JOIN msdb.dbo.sysjobs j 45 | ON ja.job_id = j.job_id 46 | WHERE j.job_id = '6B56A2A5-295C-4D63-A3CE-47DA802560BE'; -- Replace with desired Job ID 47 | -- ============================================================ -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
9 | 10 | # Coding SQL 11 | This repository showcases my SQL learning journey so far and aims to help developers improve their skills. 12 | 13 | > ***Everything that comes from the community should return to it.*** - idk 14 | 15 | If you want to contribute, **feel free to fork this repository**. 16 | 17 | ### Repository Structure 18 | | Folder | Purpose | 19 | |--------|---------| 20 | | [audit](./audit/) | Scripts and queries for auditing SQL Server activities | 21 | | [backup_recovery](./backup_recovery/) | Backup and recovery scripts for databases | 22 | | [custom_alert_emails](./custom_alert_emails/) | Custom alert email implementations | 23 | | [database_size](./database_size/) | Queries to monitor database size | 24 | | [free_space](./free_space/) | Queries to monitor disk and file free space | 25 | | [functions](./functions/) | Custom SQL functions | 26 | | [helps](./helps/) | Helper scripts and templates | 27 | | [index](./index/) | Index management scripts | 28 | | [lock](./lock/) | Scripts for monitoring and resolving locks | 29 | | [server](./server/) | Server-related configuration scripts | 30 | | [sql_access](./sql_access/) | Scripts for SQL Server user and permission management | 31 | | [sql_agent](./sql_agent/) | SQL Server Agent job scripts | 32 | | [sql_profiler](./sql_profiler/) | SQL Profiler scripts and traces | 33 | | [ssis](./ssis/) | SSIS package examples | 34 | | [ssrs](./ssrs/) | SSRS report scripts | 35 | | [triggers](./triggers/) | Database triggers | 36 | 37 | ### Clone or Fork 38 | ```bash 39 | git clone https://github.com/lorenzouriel/coding-sql.git 40 | cd coding-sql 41 | ``` 42 | 43 | ### Give a Star 44 | - If you find this repository useful, please give it a star ⭐. 45 | 46 | If you have questions or want to collaborate, reach me at: 47 | 48 | * 📧 [Email](mailto:lorenzouriel394@gmail.com) 49 | * 🌐 [Social Media & Blog](https://lorenzouriel.com/) -------------------------------------------------------------------------------- /helps/spatial_data/polygons_pt1.sql: -------------------------------------------------------------------------------- 1 | -- ===================================================== 2 | -- Example 1: Create a LINESTRING geometry (open path) 3 | -- ===================================================== 4 | DECLARE @Sq GEOMETRY = 5 | GEOMETRY::STGeomFromText('LINESTRING (10 10, 10 100, 100 100, 100 10, 10 10)', 0); 6 | 7 | SELECT @Sq AS LineStringExample; 8 | -- LINESTRING: sequence of points connected by straight lines 9 | 10 | -- ===================================================== 11 | -- Example 2: Create a CIRCULARSTRING geometry (curved line) 12 | -- ===================================================== 13 | DECLARE @circle GEOMETRY = 14 | GEOMETRY::Parse('CIRCULARSTRING(3 2, 2 3, 1 2, 2 1, 3 2)'); 15 | 16 | SELECT @circle AS CircularStringExample; 17 | -- CIRCULARSTRING: sequence of points forming smooth circular arcs 18 | 19 | -- ===================================================== 20 | -- Example 3: Create a TRIANGLE polygon 21 | -- ===================================================== 22 | DECLARE @Tri GEOMETRY = 23 | GEOMETRY::STGeomFromText('POLYGON((100 100, 200 300, 300 100, 100 100))', 0); 24 | 25 | SELECT @Tri AS TrianglePolygon; 26 | -- POLYGON: closed shape defined by outer ring of points 27 | 28 | -- ===================================================== 29 | -- Example 4: Create a filled SQUARE polygon 30 | -- ===================================================== 31 | DECLARE @Sqfilled GEOMETRY = 32 | 
GEOMETRY::STGeomFromText('POLYGON((10 10, 10 100, 100 100, 100 10, 10 10))', 0); 33 | 34 | SELECT @Sqfilled AS SquarePolygon; 35 | -- POLYGON: closed shape representing an area 36 | 37 | -- ===================================================== 38 | -- Example 5: Persisted geography point column definition 39 | -- ===================================================== 40 | -- [position] AS ([geography]::Point([latitude],[longitude],4326)) PERSISTED NOT NULL 41 | -- Creates a persisted computed column storing a single point in WGS84 (SRID 4326) 42 | 43 | -- ===================================================== 44 | -- Example 6: Combine multiple geometries using UNION ALL 45 | -- ===================================================== 46 | DECLARE @Sq2 GEOMETRY = 47 | GEOMETRY::STGeomFromText('POLYGON((15 15, 15 250, 250 250, 250 15, 15 15))', 0), 48 | @Tri2 GEOMETRY = 49 | GEOMETRY::STGeomFromText('POLYGON((100 100, 200 300, 300 100, 100 100))', 0); 50 | 51 | -- Return both geometries in one result set 52 | SELECT @Sq2 AS GeometryExample 53 | UNION ALL 54 | SELECT @Tri2 AS GeometryExample; -------------------------------------------------------------------------------- /helps/median_of_total_points.sql: -------------------------------------------------------------------------------- 1 | -- ================================================ 2 | -- Table to store median and quartiles 3 | -- ================================================ 4 | CREATE TABLE [dbo].[median] 5 | ( 6 | id INT IDENTITY(1,1) NOT NULL, 7 | [user_id] INT NULL, 8 | median FLOAT NULL, 9 | q1 FLOAT NULL, 10 | q3 FLOAT NULL 11 | ); 12 | GO 13 | 14 | -- ================================================ 15 | -- Inspect all SLA records for a specific user 16 | -- ================================================ 17 | SELECT * 18 | FROM [dbo].[sla] 19 | WHERE [user_id] = 80000; 20 | GO 21 | 22 | -- ================================================ 23 | -- Inspect SLA records for a specific user ordered by points 24 | -- ================================================ 25 | SELECT * 26 | FROM [dbo].[sla] 27 | WHERE [user_id] = 80000 28 | ORDER BY [tot_points]; 29 | GO 30 | 31 | -- ================================================ 32 | -- Inspect stored median records for a specific user 33 | -- ================================================ 34 | SELECT * 35 | FROM [dbo].[median] 36 | WHERE [user_id] = 80000; 37 | GO 38 | 39 | -- ================================================ 40 | -- Calculate median, Q1 (25th percentile), Q3 (75th percentile) 41 | -- ================================================ 42 | SELECT TOP 1 43 | [user_id], 44 | PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY tot_points) OVER () AS median, 45 | PERCENTILE_CONT(0.25) WITHIN GROUP (ORDER BY tot_points) OVER () AS q1, 46 | PERCENTILE_CONT(0.75) WITHIN GROUP (ORDER BY tot_points) OVER () AS q3 47 | FROM [dbo].[sla] 48 | WHERE [user_id] = 80000; 49 | GO 50 | 51 | -- ================================================ 52 | -- Calculate only the median for a specific user 53 | -- ================================================ 54 | SELECT TOP 1 55 | [user_id], 56 | PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY tot_points) OVER () AS median 57 | FROM [dbo].[sla] 58 | WHERE [user_id] = 80000; 59 | GO 60 | 61 | -- ================================================ 62 | -- Optional: Insert calculated median and quartiles into [median] table 63 | -- ================================================ 64 | INSERT INTO [dbo].[median] ([user_id], median, q1, q3) 65 | SELECT TOP 1 66 | 
[user_id],
67 | PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY tot_points) OVER () AS median,
68 | PERCENTILE_CONT(0.25) WITHIN GROUP (ORDER BY tot_points) OVER () AS q1,
69 | PERCENTILE_CONT(0.75) WITHIN GROUP (ORDER BY tot_points) OVER () AS q3
70 | FROM [dbo].[sla]
71 | WHERE [user_id] = 80000;
72 | GO
--------------------------------------------------------------------------------
/functions/dbo.google_data.sql:
--------------------------------------------------------------------------------
1 | -- =============================================
2 | -- FUNCTION: dbo.google_data
3 | -- PURPOSE: Call a geocoding API to retrieve geolocation (latitude, longitude)
4 | -- and address details for a given input address.
5 | -- PARAMETERS:
6 | -- @address VARCHAR(1000) - The address to search
7 | -- @limit INT = 1 - Number of results to return (default 1)
8 | -- RETURNS: Table with columns
9 | -- latitude FLOAT
10 | -- longitude FLOAT
11 | -- address NVARCHAR(1000)
12 | -- NOTES:
13 | -- - Uses sp_OACreate / sp_OAMethod for HTTP requests (requires Ole Automation Procedures enabled)
14 | -- - Parses the JSON response with OPENJSON, expecting Nominatim/OpenStreetMap-style fields (lat, lon, display_name, address.*)
15 | -- - Collation applied to handle special characters in addresses
16 | -- =============================================
17 | CREATE FUNCTION [dbo].[google_data](
18 | @address VARCHAR(1000),
19 | @limit INT = 1
20 | )
21 | RETURNS @table TABLE(
22 | latitude FLOAT,
23 | longitude FLOAT,
24 | address NVARCHAR(1000)
25 | )
26 | WITH EXECUTE AS OWNER
27 | AS
28 | BEGIN
29 | -- Declare variables
30 | DECLARE @url NVARCHAR(MAX);
31 | DECLARE @Object INT;
32 | DECLARE @ResponseText NVARCHAR(MAX);
33 |
34 | -- Build the request URL (the host must be a geocoding endpoint that returns Nominatim-style JSON; addressdetails=1 is required for the address.* fields parsed below)
35 | SET @url = CONCAT(
36 | 'http://google.com.br/?format=json&addressdetails=0&q=',
37 | @address COLLATE SQL_Latin1_General_CP1253_CI_AI,
38 | '&format=json&limit=',
39 | CAST(@limit AS VARCHAR(2))
40 | );
41 |
42 | -- Create XMLHTTP object for HTTP request
43 | EXEC sp_OACreate 'MSXML2.XMLHTTP', @Object OUT;
44 |
45 | -- Open HTTP GET request
46 | EXEC sp_OAMethod @Object, 'open', NULL, 'GET', @url, 'false';
47 |
48 | -- Send HTTP request
49 | EXEC sp_OAMethod @Object, 'send';
50 |
51 | -- Get response text
52 | EXEC sp_OAMethod @Object, 'responseText', @ResponseText OUTPUT;
53 |
54 | -- Destroy the COM object
55 | EXEC sp_OADestroy @Object;
56 |
57 | -- Parse JSON response and insert into return table
58 | INSERT INTO @table (latitude, longitude, address)
59 | SELECT
60 | latitude,
61 | longitude,
62 | display_name
63 | FROM OPENJSON(@ResponseText)
64 | WITH (
65 | latitude FLOAT '$.lat',
66 | longitude FLOAT '$.lon',
67 | display_name NVARCHAR(MAX) '$.display_name',
68 | road NVARCHAR(MAX) '$.address.road',
69 | neighbourhood NVARCHAR(MAX) '$.address.neighbourhood',
70 | suburb NVARCHAR(MAX) '$.address.suburb',
71 | city NVARCHAR(MAX) '$.address.city',
72 | state NVARCHAR(MAX) '$.address.state',
73 | postcode NVARCHAR(MAX) '$.address.postcode',
74 | country NVARCHAR(MAX) '$.address.country'
75 | );
76 |
77 | RETURN;
78 | END;
79 | GO
--------------------------------------------------------------------------------
/free_space/database_cleanup.sql:
--------------------------------------------------------------------------------
1 | /* ============================================================
2 | Database Cleanup & Space Management
3 | Goal: diagnose and reduce excessive database size
4 | ============================================================ */
5 |
6 | -- 1) Row count per table (ordered by number of records)
7 | -- Useful to identify which tables actually consume space.
8 | SELECT
9 | t.[schema_id],
10 | t.NAME AS table_name,
11 | i.rows AS row_count
12 | FROM sys.tables t
13 | INNER JOIN sys.sysindexes i
14 | ON t.object_id = i.id AND i.indid < 2
15 | ORDER BY i.rows DESC;
16 |
17 | ---------------------------------------------------------------
18 |
19 | -- 2) Overall database space usage
20 | -- Shows total database size, reserved space, and unallocated space.
21 | EXEC sp_spaceused;
22 |
23 | ---------------------------------------------------------------
24 |
25 | -- 3) Data and log file details
26 | -- Displays total size (SizeMB), used space (SpaceUsedMB), and free space (FreeMB).
27 | SELECT
28 | file_id,
29 | name, -- logical file name
30 | physical_name, -- physical path (.mdf / .ldf)
31 | type_desc, -- ROWS = data, LOG = log
32 | size * 8 / 1024 AS SizeMB, -- total size in MB
33 | FILEPROPERTY(name, 'SpaceUsed') * 8 / 1024 AS SpaceUsedMB, -- used
34 | (size - FILEPROPERTY(name, 'SpaceUsed')) * 8 / 1024 AS FreeMB -- free
35 | FROM sys.database_files;
36 |
37 | ---------------------------------------------------------------
38 |
39 | -- 4) List database files
40 | -- Confirms logical file names (needed for SHRINKFILE).
41 | EXEC sp_helpfile;
42 |
43 | ---------------------------------------------------------------
44 |
45 | -- 5) Shrink the main data file (MDF)
46 | -- Replace 'myfileMDF' with the actual logical name (from sp_helpfile).
47 | -- The number "100" is the target size in MB.
48 | DBCC SHRINKFILE ('myfileMDF', 100);
49 |
50 | ---------------------------------------------------------------
51 |
52 | -- 6) Shrink the log file (LDF)
53 | -- If the database uses the FULL recovery model, take a log backup before shrinking:
54 | -- BACKUP LOG geotracker TO DISK = 'NUL'; -- CAUTION: backing up to NUL discards log records and breaks the log chain; prefer a real log backup file.
55 | -- The number "50" is the target size in MB.
56 | DBCC SHRINKFILE ('myfileMDF_log', 50);
57 |
58 | ---------------------------------------------------------------
59 |
60 | -- 7) (Optional) Change the recovery model to SIMPLE
61 | -- Only if point-in-time recovery is not required.
62 | ALTER DATABASE geotracker SET RECOVERY SIMPLE;
63 |
64 | ---------------------------------------------------------------
65 |
66 | -- 8) (Optional) Adjust auto-growth settings
67 | -- Sets initial size to 100MB and growth increment to 50MB.
68 | ALTER DATABASE geotracker 69 | MODIFY FILE ( NAME = 'myfileMDF', SIZE = 100MB, FILEGROWTH = 50MB ); 70 | 71 | ALTER DATABASE geotracker 72 | MODIFY FILE ( NAME = 'myfileMDF_log', SIZE = 50MB, FILEGROWTH = 25MB ); -------------------------------------------------------------------------------- /helps/spatial_data/route.sql: -------------------------------------------------------------------------------- 1 | -- ================================================ 2 | -- Drop the points table if it exists 3 | -- ================================================ 4 | DROP TABLE IF EXISTS [points]; 5 | GO 6 | 7 | -- ================================================ 8 | -- Create table to store GPS points 9 | -- geog: geography type 10 | -- Geom: computed geometry type from geography 11 | -- ================================================ 12 | CREATE TABLE [points] 13 | ( 14 | [Id] INT IDENTITY, 15 | [geog] GEOGRAPHY, 16 | [Geom] AS GEOMETRY::STGeomFromWKB(geog.STAsBinary(), geog.STSrid) 17 | ); 18 | GO 19 | 20 | -- ================================================ 21 | -- Insert GPS points for a specific asset and track 22 | -- ================================================ 23 | DECLARE 24 | @id INT = 269559161, -- Track ID 25 | @asset_id INT = 40379; -- Asset ID 26 | 27 | INSERT INTO points (geog) 28 | SELECT 29 | [position] 30 | FROM [datas] 31 | WHERE [asset_id] = @asset_id 32 | AND [utc_timestamp] BETWEEN 33 | (SELECT CAST(DATEADD(hour, 3, [start_timestamp]) AS DATETIME) 34 | FROM [tracking] 35 | WHERE [id] = @id) 36 | AND (SELECT CAST(DATEADD(hour, 3, [end_timestamp]) AS DATETIME) 37 | FROM [tracking] 38 | WHERE [id] = @id) 39 | ORDER BY [utc_timestamp] ASC; 40 | GO 41 | 42 | -- ================================================ 43 | -- Check the inserted points 44 | -- ================================================ 45 | SELECT * FROM [points]; 46 | GO 47 | 48 | -- ================================================ 49 | -- Build a LINESTRING geometry from the points 50 | -- ================================================ 51 | DECLARE 52 | @iterRow INT, 53 | @rowCount INT, 54 | @string VARCHAR(MAX), 55 | @x VARCHAR(20), 56 | @y VARCHAR(20), 57 | @route GEOMETRY; 58 | 59 | -- Initialize variables 60 | SELECT 61 | @iterRow = 1, 62 | @rowCount = COUNT(1) 63 | FROM points; 64 | 65 | -- Concatenate points into a LINESTRING 66 | WHILE (@iterRow <= @rowCount) 67 | BEGIN 68 | SELECT 69 | @x = CAST(Geom.STX AS VARCHAR(20)), 70 | @y = CAST(Geom.STY AS VARCHAR(20)) 71 | FROM points 72 | WHERE Id = @iterRow; 73 | 74 | SET @string = ISNULL(@string + ',', '') + @x + ' ' + @y; 75 | SET @iterRow += 1; 76 | END 77 | 78 | SET @string = 'LINESTRING(' + @string + ')'; 79 | SET @route = GEOMETRY::STLineFromText(@string, 0); 80 | 81 | -- ================================================ 82 | -- Save the route geometry to the [routes] table 83 | -- ================================================ 84 | UPDATE [routes] 85 | SET [route] = @route 86 | WHERE [id] = 264202407; 87 | GO 88 | 89 | -- ================================================ 90 | -- Calculate total distance of the route 91 | -- ================================================ 92 | SELECT [distance] = @route.STLength(); 93 | GO 94 | 95 | -- Optional: drop the temporary points table if no longer needed 96 | -- DROP TABLE [points]; -------------------------------------------------------------------------------- /sql_performance_tuning/queries/wait_statistics.sql: -------------------------------------------------------------------------------- 1 | -- SQL 
Wait Stats and Queries 2 | SELECT wait_type AS Wait_Type, 3 | wait_time_ms / 1000.0 AS Wait_Time_Seconds, 4 | waiting_tasks_count AS Waiting_Tasks_Count, 5 | -- CAST((wait_time_ms / 1000.0)/waiting_tasks_count AS decimal(10,4)) AS AVG_Waiting_Tasks_Count, 6 | wait_time_ms * 100.0 / SUM(wait_time_ms) OVER() AS Percentage_WaitTime 7 | --,waiting_tasks_count * 100.0 / SUM(waiting_tasks_count) OVER() AS Percentage_Count 8 | FROM sys.dm_os_wait_stats 9 | WHERE wait_type NOT IN 10 | (N'BROKER_EVENTHANDLER', 11 | N'BROKER_RECEIVE_WAITFOR', 12 | N'BROKER_TASK_STOP', 13 | N'BROKER_TO_FLUSH', 14 | N'BROKER_TRANSMITTER', 15 | N'CHECKPOINT_QUEUE', 16 | N'CHKPT', 17 | N'CLR_AUTO_EVENT', 18 | N'CLR_MANUAL_EVENT', 19 | N'CLR_SEMAPHORE', 20 | N'DBMIRROR_DBM_EVENT', 21 | N'DBMIRROR_DBM_MUTEX', 22 | N'DBMIRROR_EVENTS_QUEUE', 23 | N'DBMIRROR_WORKER_QUEUE', 24 | N'DBMIRRORING_CMD', 25 | N'DIRTY_PAGE_POLL', 26 | N'DISPATCHER_QUEUE_SEMAPHORE', 27 | N'EXECSYNC', 28 | N'FSAGENT', 29 | N'FT_IFTS_SCHEDULER_IDLE_WAIT', 30 | N'FT_IFTSHC_MUTEX', 31 | N'HADR_CLUSAPI_CALL', 32 | N'HADR_FILESTREAM_IOMGR_IOCOMPLETION', 33 | N'HADR_LOGCAPTURE_WAIT', 34 | N'HADR_NOTIFICATION_DEQUEUE', 35 | N'HADR_TIMER_TASK', 36 | N'HADR_WORK_QUEUE', 37 | N'LAZYWRITER_SLEEP', 38 | N'LOGMGR_QUEUE', 39 | N'MEMORY_ALLOCATION_EXT', 40 | N'ONDEMAND_TASK_QUEUE', 41 | N'PREEMPTIVE_HADR_LEASE_MECHANISM', 42 | N'PREEMPTIVE_OS_AUTHENTICATIONOPS', 43 | N'PREEMPTIVE_OS_AUTHORIZATIONOPS', 44 | N'PREEMPTIVE_OS_COMOPS', 45 | N'PREEMPTIVE_OS_CREATEFILE', 46 | N'PREEMPTIVE_OS_CRYPTOPS', 47 | N'PREEMPTIVE_OS_DEVICEOPS', 48 | N'PREEMPTIVE_OS_FILEOPS', 49 | N'PREEMPTIVE_OS_GENERICOPS', 50 | N'PREEMPTIVE_OS_LIBRARYOPS', 51 | N'PREEMPTIVE_OS_PIPEOPS', 52 | N'PREEMPTIVE_OS_QUERYREGISTRY', 53 | N'PREEMPTIVE_OS_VERIFYTRUST', 54 | N'PREEMPTIVE_OS_WAITFORSINGLEOBJECT', 55 | N'PREEMPTIVE_OS_WRITEFILEGATHER', 56 | N'PREEMPTIVE_SP_SERVER_DIAGNOSTICS', 57 | N'PREEMPTIVE_XE_GETTARGETSTATE', 58 | N'PWAIT_ALL_COMPONENTS_INITIALIZED', 59 | N'PWAIT_DIRECTLOGCONSUMER_GETNEXT', 60 | N'QDS_ASYNC_QUEUE', 61 | N'QDS_CLEANUP_STALE_QUERIES_TASK_MAIN_LOOP_SLEEP', 62 | N'QDS_PERSIST_TASK_MAIN_LOOP_SLEEP', 63 | N'QDS_SHUTDOWN_QUEUE', 64 | N'REDO_THREAD_PENDING_WORK', 65 | N'REQUEST_FOR_DEADLOCK_SEARCH', 66 | N'RESOURCE_QUEUE', 67 | N'SERVER_IDLE_CHECK', 68 | N'SLEEP_BPOOL_FLUSH', 69 | N'SLEEP_DBSTARTUP', 70 | N'SLEEP_DCOMSTARTUP', 71 | N'SLEEP_MASTERDBREADY', 72 | N'SLEEP_MASTERMDREADY', 73 | N'SLEEP_MASTERUPGRADED', 74 | N'SLEEP_MSDBSTARTUP', 75 | N'SLEEP_SYSTEMTASK', 76 | N'SLEEP_TASK', 77 | N'SP_SERVER_DIAGNOSTICS_SLEEP', 78 | N'SQLTRACE_BUFFER_FLUSH', 79 | N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', 80 | N'SQLTRACE_WAIT_ENTRIES', 81 | N'UCS_SESSION_REGISTRATION', 82 | N'WAIT_FOR_RESULTS', 83 | N'WAIT_XTP_CKPT_CLOSE', 84 | N'WAIT_XTP_HOST_WAIT', 85 | N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG', 86 | N'WAIT_XTP_RECOVERY', 87 | N'WAITFOR', 88 | N'WAITFOR_TASKSHUTDOWN', 89 | N'XE_TIMER_EVENT', 90 | N'XE_DISPATCHER_WAIT' 91 | ) AND wait_time_ms >= 1 92 | ORDER BY Wait_Time_Seconds DESC 93 | -- ORDER BY Waiting_Tasks_Count DESC -------------------------------------------------------------------------------- /audit/audit_toolkit.sql: -------------------------------------------------------------------------------- 1 | /************************************************************************************************ 2 | -- Author: Lorenzo Uriel 3 | -- Created Date: 2025-08-19 4 | -- Purpose: Configure SQL Server Auditing for compliance and security monitoring 5 | -- Description: Sets up a server audit, 
database audit specification, and server audit specification 6 | -- Notes: Ensure the FILEPATH exists and SQL Server service account has write permission. 7 | -- Auditing sensitive tables and login failures helps meet compliance requirements. 8 | ************************************************************************************************/ 9 | 10 | -- ============================================================ 11 | -- 1. CREATE SERVER AUDIT OBJECT 12 | -- ============================================================ 13 | /* 14 | Purpose: 15 | - Central audit object to collect audit events. 16 | - Logs are stored in a secure folder with access restrictions. 17 | Parameters: 18 | - FILEPATH: Folder where audit logs are saved. 19 | - MAXSIZE: Maximum file size before rollover. 20 | - MAX_ROLLOVER_FILES: Maximum number of rolled-over files to keep (~90 days retention). 21 | - ON_FAILURE: Action if audit cannot write (CONTINUE avoids blocking SQL operations). 22 | */ 23 | CREATE SERVER AUDIT FinPulseComplianceAudit 24 | TO FILE ( 25 | FILEPATH = 'E:\FinPulse_AuditLogs\', -- Must exist & have restricted access 26 | MAXSIZE = 500 MB, 27 | MAX_ROLLOVER_FILES = 50 -- ~90 days retention before rotation 28 | ) 29 | WITH (ON_FAILURE = CONTINUE); 30 | 31 | -- Enable the audit 32 | ALTER SERVER AUDIT FinPulseComplianceAudit 33 | WITH (STATE = ON); 34 | 35 | -- ============================================================ 36 | -- 2. CREATE DATABASE AUDIT SPECIFICATION 37 | -- ============================================================ 38 | /* 39 | Purpose: 40 | - Track SELECT, UPDATE, DELETE operations on sensitive tables. 41 | Database Context: 42 | - Must switch to the target database (Transactions in this case). 43 | Parameters: 44 | - ADD (...): Specifies which actions and objects to audit. 45 | - BY PUBLIC: Applies to all users. 46 | - STATE = ON: Activates the specification immediately. 47 | */ 48 | USE Transactions; 49 | GO 50 | 51 | CREATE DATABASE AUDIT SPECIFICATION AuditCustomerAccountAccess 52 | FOR SERVER AUDIT FinPulseComplianceAudit 53 | ADD (SELECT, UPDATE, DELETE ON dbo.CustomerAccounts BY PUBLIC) 54 | WITH (STATE = ON); 55 | 56 | -- ============================================================ 57 | -- 3. CREATE SERVER AUDIT SPECIFICATION 58 | -- ============================================================ 59 | /* 60 | Purpose: 61 | - Track server-level events such as failed login attempts. 62 | Parameters: 63 | - FAILED_LOGIN_GROUP: Captures all failed login attempts on the SQL Server instance. 64 | - STATE = ON: Activates the specification immediately. 65 | */ 66 | CREATE SERVER AUDIT SPECIFICATION AuditFailedLogins 67 | FOR SERVER AUDIT FinPulseComplianceAudit 68 | ADD (FAILED_LOGIN_GROUP) 69 | WITH (STATE = ON); 70 | GO -------------------------------------------------------------------------------- /sql_performance_tuning/README.md: -------------------------------------------------------------------------------- 1 | ## **Step 0 – Prep** 2 | * **Inventory**: Run the script `01_Server_Inventory` → this documents your SQL Server version, edition, CPU, RAM. 3 | * **Enable Query Store**: If you’re on SQL Server 2016+ (or Azure Managed Instance), enable it using script `13_Enable_Query_Store`. This gives you query history for tuning. 4 | 5 | *Why?* You need to know what environment you’re working with and have Query Store capturing queries. 6 | 7 | ## **Step 1 – Baseline** 8 | The idea here is: *before changing anything, measure everything*. 9 | 1. 
Run: 10 | * `02_Instance_Config` → snapshot server configuration. 11 | * `03_DB_Files_And_Autogrowth` → check database file sizes and autogrowth (should be fixed MB, not %). 12 | * `04_Top_Waits` → capture waits (what SQL Server is waiting on). 13 | * `05_IO_Latency_by_File` → check disk performance by file. 14 | 2. Collect PerfMon counters for at least 24h (see **PerfMon_Counters** tab). 15 | 16 | *Why?* This becomes your “before” picture. Without it, you can’t prove improvements. 17 | 18 | ## **Step 2 – Workload Analysis** 19 | This is where you find **the worst queries**: 20 | * Run `06_Top_Queries_By_CPU` and `07_Top_Queries_By_Reads`. 21 | * Look at missing indexes (`08_Missing_Indexes`) but don’t just apply blindly—validate they’re not duplicates. 22 | * Check index usage (`09_Index_Usage`) to find unused but costly indexes. 23 | * Measure fragmentation (`10_Index_Fragmentation`) and apply the policy in **Index_Maintenance** tab. 24 | 25 | *Goal:* Reduce query cost with indexing, stats, and query tuning. 26 | 27 | ## **Step 3 – Contention** 28 | When workloads block each other: 29 | * `11_Active_Requests_Blocking` → see who’s blocking who. 30 | * `12_Deadlocks_XE` → set up Extended Events to capture deadlocks. 31 | 32 | *Goal:* Keep concurrency smooth by removing hotspots. 33 | 34 | ## **Step 4 – TempDB** 35 | Run `16_Tempdb_Contention_Check`. 36 | * If you see **PAGELATCH waits**, add more TempDB data files (1 file per logical CPU up to 8 is common). 37 | 38 | ## **Step 5 – Memory** 39 | * Watch PerfMon: `Page life expectancy` and `Memory Grants Pending`. 40 | * If grants > 0 for sustained time → queries are asking for too much memory, or you don’t have enough. 41 | 42 | ## **Step 6 – CPU** 43 | * High `SOS_SCHEDULER_YIELD` waits or sustained CPU > 80%? 44 | * Tune the top CPU queries. 45 | * Adjust **MAXDOP** and **Cost Threshold for Parallelism** (see **Config_Review** tab). 46 | 47 | ## **Step 7 – IO / Log** 48 | * High `PAGEIOLATCH` → slow reads (need indexes or faster storage). 49 | * High `WRITELOG` → transaction log bottleneck. Fix with pre-sizing log files and faster log disks. 50 | 51 | ## **Step 8 – Config Review** 52 | Quarterly, review settings against best practices: 53 | * MAXDOP, cost threshold, memory, ad-hoc optimization, tempdb files, etc. 54 | 55 | ## **Step 9 – Verify** 56 | * Rerun all baseline steps after changes. 57 | * Compare before vs after in **Baseline_Log**. 58 | *Rule*: **One change at a time, measure before & after.** 59 | 60 | So, that’s the structured journey: 61 | **Prep → Baseline → Workload → Contention → TempDB → Memory → CPU → IO → Config → Verify.** -------------------------------------------------------------------------------- /backup_recovery/backup/olla_hallengreen_backup.sql: -------------------------------------------------------------------------------- 1 | /******************************************************************************************** 2 | Script: Database Backup and Azure Blob Upload 3 | Purpose: Perform FULL, Differential, and Transaction Log backups with compression, verification, 4 | automatic cleanup, and upload to Azure Blob Storage. 
5 | Author: Lorenzo Uriel 6 | Date: 2025-08-20 7 | ********************************************************************************************/ 8 | 9 | ------------------------- 10 | -- STEP 1: FULL Backup 11 | ------------------------- 12 | EXECUTE dbo.DatabaseBackup 13 | @Databases = 'database_name', -- Database to backup 14 | @Directory = 'G:\Backup', -- Local backup directory 15 | @BackupType = 'FULL', -- Full backup 16 | @Compress = 'Y', -- Enable compression 17 | @Verify = 'Y', -- Verify backup integrity 18 | @CleanupTime = 720, -- Cleanup backups older than 720 hours (30 days) 19 | @CleanupMode = 'AFTER_BACKUP'; -- Cleanup mode 20 | GO 21 | 22 | -- STEP 1b: Upload FULL backup to Azure Blob Storage 23 | -- Using AzCopy command-line tool 24 | -- Adjust the container URL and SAS token as needed 25 | -- Limit upload bandwidth to 100 Mbps 26 | !c:\tools\azcopy\azcopy.exe copy "G:\Backup\FULL" "https://.blob.core.windows.net/bonicio-database_name-backups?" ^ 27 | --overwrite=false --include-pattern=*.bak --include-path="database_name\FULL" --recursive --cap-mbps 100 28 | 29 | ------------------------- 30 | -- STEP 2: Differential Backup 31 | ------------------------- 32 | EXECUTE dbo.DatabaseBackup 33 | @Databases = 'database_name', 34 | @Directory = 'G:\Backup', 35 | @BackupType = 'DIFF', -- Differential backup 36 | @Compress = 'Y', 37 | @Verify = 'Y', 38 | @CleanupTime = 720, 39 | @CleanupMode = 'AFTER_BACKUP'; 40 | GO 41 | 42 | -- STEP 2b: Upload DIFF backup to Azure Blob Storage 43 | !c:\tools\azcopy\azcopy.exe copy "G:\Backup\DIFF" "https://.blob.core.windows.net/bonicio-database_name-backups?" ^ 44 | --overwrite=false --include-pattern=*.bak --include-path="database_name\DIFF" --recursive --cap-mbps 100 45 | 46 | ------------------------- 47 | -- STEP 3: Transaction Log Backup 48 | ------------------------- 49 | EXECUTE dbo.DatabaseBackup 50 | @Databases = 'database_name', 51 | @Directory = 'G:\Backup', 52 | @BackupType = 'LOG', -- Transaction log backup 53 | @Compress = 'Y', 54 | @Verify = 'Y', 55 | @CleanupTime = 720, 56 | @CleanupMode = 'AFTER_BACKUP'; 57 | GO 58 | 59 | -- STEP 3b: Upload LOG backup to Azure Blob Storage 60 | !c:\tools\azcopy\azcopy.exe copy "G:\Backup\LOG" "https://.blob.core.windows.net/bonicio-database_name-backups?" ^ 61 | --overwrite=false --include-pattern=*.bak --include-path="database_name\LOG" --recursive --cap-mbps 100 62 | 63 | /******************************************************************************************** 64 | NOTES: 65 | - dbo.DatabaseBackup is assumed to be Ola Hallengren’s maintenance solution stored procedure. 66 | - Compression reduces backup size; verify ensures backup integrity. 67 | - CleanupTime = 720 hours = 30 days; older backups will be removed automatically. 68 | - AzCopy is used to copy backups to Azure Blob Storage; adjust SAS token, container URL, and bandwidth cap. 69 | - The --overwrite=false option prevents overwriting existing files on Azure. 
70 | ********************************************************************************************/ -------------------------------------------------------------------------------- /ssis/create_jobs_schedule.sql: -------------------------------------------------------------------------------- 1 | /************************************************************************************************ 2 | -- Author: Lorenzo Uriel 3 | -- Created Date: 2025-08-19 4 | -- Purpose: Create and schedule a SQL Server Agent job named 'daily_jobs' that runs multiple 5 | -- SSIS packages for daily ETL processing. 6 | -- Description: This script: 7 | -- 1. Creates the job. 8 | -- 2. Adds multiple job steps for each SSIS package. 9 | -- 3. Sets a schedule to run every minute. 10 | -- 4. Attaches the job to the schedule. 11 | -- 5. Associates the job with the server. 12 | -- 6. Optionally starts the job. 13 | ************************************************************************************************/ 14 | 15 | -- =============================================== 16 | -- Step 1: Create a new SQL Server Agent job 17 | -- =============================================== 18 | DECLARE @jobId UNIQUEIDENTIFIER 19 | 20 | EXEC msdb.dbo.sp_add_job 21 | @job_name = 'daily_jobs', -- Name of the job 22 | @enabled = 1, -- Job enabled by default 23 | @description = 'SSIS Job to process daily jobs', -- Job description 24 | @job_id = @jobId OUTPUT -- Output Job ID for later use 25 | 26 | -- =============================================== 27 | -- Step 2: Add job steps for each SSIS package 28 | -- =============================================== 29 | -- Note: /ISSERVER command specifies the SSIS package path and execution options 30 | 31 | EXEC msdb.dbo.sp_add_jobstep 32 | @job_id = @jobId, 33 | @step_name = 'Daily Details', 34 | @subsystem = 'SSIS', 35 | @command = '/ISSERVER "\"\SSISDB\Database\ETL\.dtsx\"" /SERVER . /CHECKPOINTING OFF /REPORTING E', 36 | @on_success_action = 3 -- Go to next step 37 | 38 | EXEC msdb.dbo.sp_add_jobstep 39 | @job_id = @jobId, 40 | @step_name = 'Daily Data', 41 | @subsystem = 'SSIS', 42 | @command = '/ISSERVER "\"\SSISDB\Database\ETL\.dtsx\"" /SERVER . 
/CHECKPOINTING OFF /REPORTING E', 43 | @on_success_action = 3 44 | 45 | -- =============================================== 46 | -- Step 3: Create a schedule for the job 47 | -- =============================================== 48 | DECLARE @scheduleId INT 49 | 50 | EXEC msdb.dbo.sp_add_schedule 51 | @schedule_name = 'MinutelySchedule', 52 | @enabled = 1, 53 | @freq_type = 4, -- Daily schedule 54 | @freq_interval = 1, -- Every day 55 | @freq_subday_type = 4, -- Minutes 56 | @freq_subday_interval = 1, -- Every 1 minute 57 | @active_start_time = 000000, -- Start at 00:00:00 AM 58 | @active_end_time = 235959, -- End at 11:59:59 PM 59 | @schedule_id = @scheduleId OUTPUT 60 | 61 | -- =============================================== 62 | -- Step 4: Attach the job to the schedule 63 | -- =============================================== 64 | EXEC msdb.dbo.sp_attach_schedule 65 | @job_id = @jobId, 66 | @schedule_id = @scheduleId 67 | 68 | -- =============================================== 69 | -- Step 5: Associate the job with the server 70 | -- =============================================== 71 | EXEC msdb.dbo.sp_add_jobserver 72 | @job_name = 'daily_jobs', 73 | @server_name = @@SERVERNAME -- Uses current SQL Server instance 74 | 75 | -- =============================================== 76 | -- Step 6: Start the job (optional) 77 | -- Uncomment if you want the job to start immediately 78 | -- =============================================== 79 | -- EXEC msdb.dbo.sp_start_job @job_id = @jobId -------------------------------------------------------------------------------- /sql_monitor/stack/docs/jobs_monitoring.md: -------------------------------------------------------------------------------- 1 | ### Job Execution Count (Last 7 Days) 2 | ```sql 3 | SELECT 4 | job.name AS JobName, 5 | COUNT(run.run_date) AS ExecutionCount 6 | FROM msdb.dbo.sysjobs AS job 7 | INNER JOIN msdb.dbo.sysjobhistory AS run 8 | ON job.job_id = run.job_id 9 | WHERE run.run_date >= CONVERT(VARCHAR, GETDATE() - 7, 112) -- Last 7 days 10 | GROUP BY job.name 11 | ORDER BY ExecutionCount DESC; 12 | ``` 13 | * **Purpose:** To count how many times each SQL Server Agent job executed in the last 7 days. 14 | * **Metric:** Number of job executions per job. 15 | * **Use case:** Monitoring job activity frequency and detecting anomalies in job execution patterns. 16 | * **Reference:** `msdb.dbo.sysjobs`, `msdb.dbo.sysjobhistory`. 17 | 18 | ### Currently Running Jobs 19 | ```sql 20 | SELECT DISTINCT 21 | job.name AS JobName, 22 | activity.run_requested_date AS StartTime, 23 | DATEDIFF(SECOND, activity.run_requested_date, GETDATE()) AS RunDurationSeconds 24 | FROM msdb.dbo.sysjobs AS job 25 | INNER JOIN msdb.dbo.sysjobactivity AS activity 26 | ON job.job_id = activity.job_id 27 | WHERE activity.run_requested_date IS NOT NULL 28 | AND activity.stop_execution_date IS NULL 29 | ORDER BY activity.run_requested_date DESC; 30 | ``` 31 | * **Purpose:** To identify jobs that are currently running. 32 | * **Metric:** Start time and duration of running jobs in seconds. 33 | * **Use case:** Tracking live job execution for monitoring or troubleshooting long-running processes. 34 | * **Reference:** `msdb.dbo.sysjobs`, `msdb.dbo.sysjobactivity`. 
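If one of the jobs listed here is stuck or running far longer than expected, it can be stopped by name; a minimal sketch is shown below (the job name is a placeholder, and stopping jobs requires appropriate permissions in `msdb`).
```sql
-- Sketch: stop a currently running SQL Server Agent job by name (placeholder name).
EXEC msdb.dbo.sp_stop_job @job_name = N'YourJobName';
```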
35 | 36 | ### Scheduled and Running Jobs Overview 37 | ```sql 38 | SELECT 39 | job.name AS JobName, 40 | CASE 41 | WHEN activity.run_requested_date IS NULL THEN 'Scheduled' 42 | ELSE 'Running' 43 | END AS JobStatus, 44 | schedule.next_run_date AS NextRunDate, 45 | schedule.next_run_time AS NextRunTime 46 | FROM msdb.dbo.sysjobs AS job 47 | LEFT JOIN msdb.dbo.sysjobactivity AS activity ON job.job_id = activity.job_id 48 | LEFT JOIN msdb.dbo.sysjobschedules AS schedule ON job.job_id = schedule.job_id 49 | WHERE schedule.next_run_date IS NOT NULL -- Scheduled jobs 50 | ORDER BY schedule.next_run_date, schedule.next_run_time 51 | ``` 52 | * **Purpose:** To provide a snapshot of both running and scheduled jobs. 53 | * **Metric:** Job status (Running or Scheduled) and next scheduled run. 54 | * **Use case:** Monitoring upcoming and in-progress jobs to manage workloads efficiently. 55 | * **Reference:** `msdb.dbo.sysjobs`, `msdb.dbo.sysjobactivity`, `msdb.dbo.sysjobschedules`. 56 | 57 | ### Job Run History 58 | ```sql 59 | SELECT 60 | job.name AS JobName, 61 | run.run_date AS RunDate, 62 | run.run_duration AS RunDuration 63 | FROM msdb.dbo.sysjobs AS job 64 | INNER JOIN msdb.dbo.sysjobhistory AS run 65 | ON job.job_id = run.job_id 66 | WHERE run.step_id = 0 -- 0 indicates job level information 67 | ORDER BY run.run_date DESC; 68 | ``` 69 | * **Purpose:** To view the historical execution of jobs. 70 | * **Metric:** Job run date and duration. 71 | * **Use case:** Reviewing past job executions for auditing, performance tracking, or debugging. 72 | * **Reference:** `msdb.dbo.sysjobs`, `msdb.dbo.sysjobhistory`. 73 | 74 | ### Failed Job Runs 75 | ```sql 76 | SELECT 77 | job.name AS JobName, 78 | run.run_date AS RunDate, 79 | run.run_time AS RunTime, 80 | run.run_duration AS RunDuration, 81 | run.message AS ErrorMessage 82 | FROM msdb.dbo.sysjobs AS job 83 | INNER JOIN msdb.dbo.sysjobhistory AS run 84 | ON job.job_id = run.job_id 85 | WHERE run.run_status = 0 -- 0 indicates failure 86 | ORDER BY run.run_date DESC; 87 | ``` 88 | * **Purpose:** To identify jobs that failed during execution. 89 | * **Metric:** Run date, time, duration, and error message for failed jobs. 90 | * **Use case:** Detecting and troubleshooting job failures quickly. 91 | * **Reference:** `msdb.dbo.sysjobs`, `msdb.dbo.sysjobhistory`. -------------------------------------------------------------------------------- /free_space/partition_filegroup_allocation.sql: -------------------------------------------------------------------------------- 1 | /************************************************************************************************ 2 | -- Author: Lorenzo Uriel 3 | -- Created Date: 2025-08-19 4 | -- Purpose: Examine partition and filegroup allocation, check file free space, 5 | -- rebuild partitions with compression, and shrink files if needed. 6 | -- Description: Multiple scripts for partition and file management: 7 | -- 1. View partition info and filegroup 8 | -- 2. Check free space per data file 9 | -- 3. Rebuild a partition with PAGE compression 10 | -- 4. Shrink a database file 11 | -- 5. Verify compression settings per partition 12 | ************************************************************************************************/ 13 | 14 | -- ============================================================ 15 | -- 1. 
View partitions and associated filegroups for a table 16 | -- ============================================================ 17 | SELECT DISTINCT 18 | OBJECT_NAME(p.object_id) AS Table_Name, -- Table name 19 | f.name AS Filegroup_Name, -- Filegroup storing the partition 20 | p.partition_number, -- Partition number 21 | p.rows -- Number of rows in the partition 22 | FROM sys.system_internals_allocation_units a 23 | JOIN sys.partitions p 24 | ON p.partition_id = a.container_id 25 | JOIN sys.filegroups f 26 | ON a.data_space_id = f.data_space_id -- Correct join for filegroup ID 27 | WHERE p.object_id = OBJECT_ID(N'dim.table') -- Replace with your table name 28 | ORDER BY p.partition_number; 29 | GO 30 | 31 | -- ============================================================ 32 | -- 2. Check free space per data file 33 | -- ============================================================ 34 | SELECT 35 | DB_NAME() AS DbName, 36 | name AS FileName, 37 | size/128.0 AS CurrentSizeMB, -- Size of file in MB 38 | size/128.0 - CAST(FILEPROPERTY(name, 'SpaceUsed') AS INT)/128.0 AS FreeSpaceMB -- Free space in MB 39 | FROM sys.database_files 40 | WHERE type_desc = 'ROWS' -- Only rowstore data files 41 | AND name LIKE 'FGY%'; -- Filter by file name pattern 42 | GO 43 | 44 | -- ============================================================ 45 | -- 3. Rebuild a single partition with PAGE compression 46 | -- ============================================================ 47 | DECLARE 48 | @AlterSQL NVARCHAR(MAX), 49 | @PartitionNumber INT = 7; -- Set the partition number to rebuild 50 | 51 | -- Replace [table] with your actual table name 52 | SET @AlterSQL = N'ALTER TABLE [table] REBUILD PARTITION = ' + CAST(@PartitionNumber AS NVARCHAR(10)) + ' 53 | WITH (DATA_COMPRESSION = PAGE);'; 54 | 55 | PRINT @AlterSQL; -- Optional: review generated SQL 56 | EXEC sp_executesql @AlterSQL; 57 | GO 58 | 59 | -- ============================================================ 60 | -- 4. Shrink a database file 61 | -- ============================================================ 62 | DECLARE 63 | @ShrinkSQL NVARCHAR(MAX), 64 | @FileName SYSNAME = N'FGM11_5099EF98'; -- Replace with your actual file name 65 | 66 | SET @ShrinkSQL = N'DBCC SHRINKFILE (' + QUOTENAME(@FileName) + ');'; 67 | 68 | PRINT @ShrinkSQL; -- Optional: review generated SQL 69 | EXEC sp_executesql @ShrinkSQL; 70 | GO 71 | 72 | -- ============================================================ 73 | -- 5. 
Check compression settings per partition 74 | -- ============================================================ 75 | SELECT 76 | partition_number, 77 | data_compression, 78 | data_compression_desc 79 | FROM sys.partitions 80 | WHERE object_id = OBJECT_ID('[table]'); -- Replace with your table name 81 | GO -------------------------------------------------------------------------------- /sql_maintenance/use_cases/dbcc_check_ola_hallengren.sql: -------------------------------------------------------------------------------- 1 | -- ####################### 2 | -- DBCC CHECKDB USING OLA HALLENGREN SOLUTION 3 | -- ####################### 4 | 5 | -- Complete Integrity Check 6 | EXECUTE dbo.DatabaseIntegrityCheck 7 | @Databases = 'USER_DATABASES', -- All user databases 8 | @CheckCommands = 'CHECKDB,CHECKALLOC,CHECKCATALOG', -- Full check of DB, allocation, catalog 9 | @PhysicalOnly = 'N', -- Include logical checks 10 | @DataPurity = 'Y', -- Check column values for validity 11 | @NoIndex = 'N', -- Include nonclustered indexes 12 | @ExtendedLogicalChecks = 'Y', -- Enable extended logical checks 13 | @NoInformationalMessages = 'Y', -- Suppress informational messages 14 | @TabLock = 'N', -- Use snapshot instead of table locks 15 | @MaxDOP = 0, -- Use default MaxDOP 16 | @AvailabilityGroups = 'ALL_AVAILABILITY_GROUPS', -- Check all AGs 17 | @AvailabilityGroupReplicas = 'ALL', -- Check all replicas 18 | @Updateability = 'ALL', -- Check read/write and read-only DBs 19 | @TimeLimit = 0, -- No time limit 20 | @LockTimeout = 0, -- No lock timeout 21 | @LogToTable = 'Y', -- Log results to dbo.CommandLog 22 | @DatabasesInParallel = 'Y', -- Run databases in parallel 23 | @Execute = 'Y'; -- Execute (not just print) 24 | 25 | 26 | -- A. Check the integrity of all user databases 27 | EXECUTE dbo.DatabaseIntegrityCheck 28 | @Databases = 'USER_DATABASES', 29 | @CheckCommands = 'CHECKDB' 30 | 31 | -- B. Check the physical integrity of all user databases 32 | EXECUTE dbo.DatabaseIntegrityCheck 33 | @Databases = 'USER_DATABASES', 34 | @CheckCommands = 'CHECKDB', 35 | @PhysicalOnly = 'Y' 36 | 37 | -- C. Check the integrity of all user databases, using the option not to check nonclustered indexes 38 | EXECUTE dbo.DatabaseIntegrityCheck 39 | @Databases = 'USER_DATABASES', 40 | @CheckCommands = 'CHECKDB', 41 | @NoIndex = 'Y' 42 | 43 | -- D. Check the integrity of all user databases, using the option to perform extended logical checks 44 | EXECUTE dbo.DatabaseIntegrityCheck 45 | @Databases = 'USER_DATABASES', 46 | @CheckCommands = 'CHECKDB', 47 | @ExtendedLogicalChecks = 'Y' 48 | 49 | -- E. Check the integrity of the filegroup PRIMARY in the database AdventureWorks 50 | EXECUTE dbo.DatabaseIntegrityCheck 51 | @Databases = 'AdventureWorks', 52 | @CheckCommands = 'CHECKFILEGROUP', 53 | @FileGroups = 'AdventureWorks.PRIMARY' 54 | 55 | -- F. Check the integrity of all filegroups except the filegroup PRIMARY in the database AdventureWorks 56 | EXECUTE dbo.DatabaseIntegrityCheck 57 | @Databases = 'USER_DATABASES', 58 | @CheckCommands = 'CHECKFILEGROUP', 59 | @FileGroups = 'ALL_FILEGROUPS, -AdventureWorks.PRIMARY' 60 | 61 | -- G. Check the integrity of the table Production.Product in the database AdventureWorks 62 | EXECUTE dbo.DatabaseIntegrityCheck 63 | @Databases = 'AdventureWorks', 64 | @CheckCommands = 'CHECKTABLE', 65 | @Objects = 'AdventureWorks.Production.Product' 66 | 67 | -- H. 
Check the integrity of all tables except the table Production.Product in the database AdventureWorks
68 | EXECUTE dbo.DatabaseIntegrityCheck
69 | @Databases = 'USER_DATABASES',
70 | @CheckCommands = 'CHECKTABLE',
71 | @Objects = 'ALL_OBJECTS, -AdventureWorks.Production.Product'
72 |
73 | -- I. Check the disk-space allocation structures of all user databases
74 | EXECUTE dbo.DatabaseIntegrityCheck
75 | @Databases = 'USER_DATABASES',
76 | @CheckCommands = 'CHECKALLOC'
77 |
78 | -- J. Check the catalog consistency of all user databases
79 | EXECUTE dbo.DatabaseIntegrityCheck
80 | @Databases = 'USER_DATABASES',
81 | @CheckCommands = 'CHECKCATALOG'
--------------------------------------------------------------------------------
/sql_monitor/sql-server-metrics/main.py:
--------------------------------------------------------------------------------
1 | import pymssql
2 | import time
3 | from opentelemetry.metrics import Observation
4 | from opentelemetry.sdk.metrics import MeterProvider
5 | from opentelemetry.metrics import get_meter_provider, set_meter_provider
6 | from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader
7 | from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import OTLPMetricExporter
8 |
9 | # OTLP exporter configuration
10 | otlp_exporter = OTLPMetricExporter(endpoint="http://localhost:4317", insecure=True)
11 | reader = PeriodicExportingMetricReader(otlp_exporter)
12 | provider = MeterProvider(metric_readers=[reader])
13 | set_meter_provider(provider)
14 | meter = get_meter_provider().get_meter("sqlserver_metrics")
15 |
16 | # SQL Server connection
17 | conn = pymssql.connect(
18 |     server=".",
19 |     user="-",
20 |     password="-",
21 |     database="master"
22 | )
23 | cursor = conn.cursor()
24 | print('Connection to SQL Server established')
25 |
26 | # Metric collection functions
27 | def get_availability(callback_options):
28 |     try:
29 |         cursor.execute("SELECT 1")
30 |         return [Observation(100)]  # Available
31 |     except Exception:
32 |         return [Observation(0)]  # Unavailable
33 |
34 | def get_active_connections(callback_options):
35 |     cursor.execute("SELECT COUNT(*) FROM sys.dm_exec_sessions WHERE status = 'running'")
36 |     return [Observation(cursor.fetchone()[0])]
37 |
38 | def get_idle_connections(callback_options):
39 |     cursor.execute("SELECT COUNT(*) FROM sys.dm_exec_sessions WHERE status = 'sleeping'")
40 |     return [Observation(cursor.fetchone()[0])]
41 |
42 | def get_last_backup_time(callback_options):
43 |     cursor.execute("SELECT MAX(backup_finish_date) FROM msdb.dbo.backupset")
44 |     last_backup = cursor.fetchone()[0]
45 |     return [Observation(int(last_backup.timestamp()))] if last_backup else [Observation(0)]
46 |
47 | # Observable metric definitions
48 | db_availability = meter.create_observable_gauge(
49 |     name="sqlserver_availability",
50 |     description="Percentage of time the database is available",
51 |     unit="%",
52 |     callbacks=[get_availability],
53 | )
54 |
55 | active_connections = meter.create_observable_gauge(
56 |     name="sqlserver_active_connections",
57 |     description="Number of active connections in the database",
58 |     unit="connections",
59 |     callbacks=[get_active_connections],
60 | )
61 |
62 | idle_connections = meter.create_observable_gauge(
63 |     name="sqlserver_idle_connections",
64 |     description="Number of idle connections",
65 |     unit="connections",
66 |     callbacks=[get_idle_connections],
67 | )
68 |
69 | last_backup_time = meter.create_observable_gauge(
70 |     name="sqlserver_last_backup_time",
71 |     description="Last backup performed (timestamp)",
72 |     unit="seconds",
73 |     callbacks=[get_last_backup_time],
74 | )
75 |
76 | # Non-observable metric definitions
77 | query_exec_time = meter.create_histogram(
78 |     name="sqlserver_query_execution_time",
79 |     description="Average query execution time",
80 |     unit="ms",
81 | )
82 |
83 | slow_queries = meter.create_counter(
84 |     name="sqlserver_slow_queries",
85 |     description="Number of queries that exceeded 30s",
86 | )
87 |
88 | def get_query_execution_time():
89 |     cursor.execute("""
90 |         SELECT TOP 10 total_elapsed_time / execution_count AS avg_time
91 |         FROM sys.dm_exec_query_stats
92 |         ORDER BY avg_time DESC
93 |     """)
94 |     rows = cursor.fetchall()
95 |     for row in rows:
96 |         query_exec_time.record(row[0])  # note: sys.dm_exec_query_stats reports elapsed time in microseconds
97 |
98 | def get_slow_queries():
99 |     cursor.execute("""
100 |         SELECT COUNT(*) FROM sys.dm_exec_requests
101 |         WHERE total_elapsed_time > 30000 -- 30 seconds
102 |     """)
103 |     slow_queries.add(cursor.fetchone()[0])
104 |
105 | # Loop for periodic metric updates
106 | while True:
107 |     get_query_execution_time()
108 |     get_slow_queries()
109 |     time.sleep(10)
110 |
--------------------------------------------------------------------------------
/query_store/historical_regression_performance.sql:
--------------------------------------------------------------------------------
1 | -- Queries with historical regression in performance
2 | -- When you want to compare recent execution to historical execution, the following query compares query execution based on the period of execution.
3 | -- In this particular example, the query compares execution in a recent period (last 1 hour) vs. a history period (last day) and identifies those that introduced additional_duration_workload.
4 | -- This metric is calculated as the difference between recent average execution and history average execution multiplied by the number of recent executions.
5 | -- It represents how much extra duration these recent executions introduced, compared to the history: 6 | --- "Recent" workload - last 1 hour 7 | DECLARE @recent_start_time DATETIMEOFFSET; 8 | DECLARE @recent_end_time DATETIMEOFFSET; 9 | 10 | SET @recent_start_time = DATEADD(hour, - 1, SYSUTCDATETIME()); 11 | SET @recent_end_time = SYSUTCDATETIME(); 12 | 13 | --- "History" workload 14 | DECLARE @history_start_time DATETIMEOFFSET; 15 | DECLARE @history_end_time DATETIMEOFFSET; 16 | 17 | SET @history_start_time = DATEADD(hour, - 24, SYSUTCDATETIME()); 18 | SET @history_end_time = SYSUTCDATETIME(); 19 | 20 | WITH hist AS ( 21 | SELECT p.query_id query_id, 22 | ROUND(ROUND(CONVERT(FLOAT, SUM(rs.avg_duration * rs.count_executions)) * 0.001, 2), 2) AS total_duration, 23 | SUM(rs.count_executions) AS count_executions, 24 | COUNT(DISTINCT p.plan_id) AS num_plans 25 | FROM sys.query_store_runtime_stats AS rs 26 | INNER JOIN sys.query_store_plan AS p 27 | ON p.plan_id = rs.plan_id 28 | WHERE ( 29 | rs.first_execution_time >= @history_start_time 30 | AND rs.last_execution_time < @history_end_time 31 | ) 32 | OR ( 33 | rs.first_execution_time <= @history_start_time 34 | AND rs.last_execution_time > @history_start_time 35 | ) 36 | OR ( 37 | rs.first_execution_time <= @history_end_time 38 | AND rs.last_execution_time > @history_end_time 39 | ) 40 | GROUP BY p.query_id 41 | ), 42 | recent AS ( 43 | SELECT p.query_id query_id, 44 | ROUND(ROUND(CONVERT(FLOAT, SUM(rs.avg_duration * rs.count_executions)) * 0.001, 2), 2) AS total_duration, 45 | SUM(rs.count_executions) AS count_executions, 46 | COUNT(DISTINCT p.plan_id) AS num_plans 47 | FROM sys.query_store_runtime_stats AS rs 48 | INNER JOIN sys.query_store_plan AS p 49 | ON p.plan_id = rs.plan_id 50 | WHERE ( 51 | rs.first_execution_time >= @recent_start_time 52 | AND rs.last_execution_time < @recent_end_time 53 | ) 54 | OR ( 55 | rs.first_execution_time <= @recent_start_time 56 | AND rs.last_execution_time > @recent_start_time 57 | ) 58 | OR ( 59 | rs.first_execution_time <= @recent_end_time 60 | AND rs.last_execution_time > @recent_end_time 61 | ) 62 | GROUP BY p.query_id 63 | ) 64 | SELECT results.query_id AS query_id, 65 | results.query_text AS query_text, 66 | results.additional_duration_workload AS additional_duration_workload, 67 | results.total_duration_recent AS total_duration_recent, 68 | results.total_duration_hist AS total_duration_hist, 69 | ISNULL(results.count_executions_recent, 0) AS count_executions_recent, 70 | ISNULL(results.count_executions_hist, 0) AS count_executions_hist 71 | FROM ( 72 | SELECT hist.query_id AS query_id, 73 | qt.query_sql_text AS query_text, 74 | ROUND(CONVERT(FLOAT, recent.total_duration / recent.count_executions - hist.total_duration / hist.count_executions) * (recent.count_executions), 2) AS additional_duration_workload, 75 | ROUND(recent.total_duration, 2) AS total_duration_recent, 76 | ROUND(hist.total_duration, 2) AS total_duration_hist, 77 | recent.count_executions AS count_executions_recent, 78 | hist.count_executions AS count_executions_hist 79 | FROM hist 80 | INNER JOIN recent 81 | ON hist.query_id = recent.query_id 82 | INNER JOIN sys.query_store_query AS q 83 | ON q.query_id = hist.query_id 84 | INNER JOIN sys.query_store_query_text AS qt 85 | ON q.query_text_id = qt.query_text_id 86 | ) AS results 87 | WHERE additional_duration_workload > 0 88 | ORDER BY additional_duration_workload DESC 89 | OPTION (MERGE JOIN); -------------------------------------------------------------------------------- 
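-- Optional follow-up (a sketch): once a regressed query_id is identified by the query above, review the plans
-- Query Store captured for it and, if appropriate, force the plan that performed well historically.
-- The query_id and plan_id values below are placeholders.
SELECT plan_id, query_id, is_forced_plan, last_execution_time
FROM sys.query_store_plan
WHERE query_id = 123; -- placeholder query_id taken from the regression report

EXEC sp_query_store_force_plan @query_id = 123, @plan_id = 456; -- placeholder IDs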
/sql_monitor/stack/docs/buffer_index_management.md: -------------------------------------------------------------------------------- 1 | ### Buffer Pool Usage & Dirty Pages 2 | ```sql 3 | SELECT 4 | CAST(COUNT(*) * 8.0 / 1024 AS DECIMAL(10, 2)) AS 'Buffer Pool Used MB', 5 | CAST(SUM(CASE WHEN is_modified = 1 THEN 1 ELSE 0 END) * 8.0 / 1024 AS DECIMAL(10, 2)) AS 'Dirty Buffer MB', 6 | CAST(COUNT(*) * 8.0 / 1024 AS DECIMAL(10, 2)) AS 'Buffer Pool Total MB', 7 | CAST(SUM(CASE WHEN is_modified = 1 THEN 1 ELSE 0 END) * 8.0 / 1024 AS DECIMAL(10, 2)) AS 'Dirty Buffer Used MB' 8 | FROM sys.dm_os_buffer_descriptors 9 | WHERE database_id > 4; 10 | ``` 11 | * **Purpose:** Monitor **buffer pool usage** and **dirty pages** (pages modified but not yet written to disk). 12 | * **Metrics:** 13 | * `Buffer Pool Used MB` → Total memory used by the buffer pool for user databases. 14 | * `Dirty Buffer MB` → Memory occupied by pages pending flush to disk. 15 | * `Buffer Pool Total MB` → Total buffer pool memory allocated. 16 | * `Dirty Buffer Used MB` → Redundant with dirty buffer, useful for alerting. 17 | * **Use case:** Identify **memory pressure** and potential I/O bottlenecks. 18 | * High dirty pages → heavy write activity. 19 | * Helps identify I/O flush bottlenecks. 20 | * See how much buffer pool memory is used by user databases. 21 | * Helps decide if more memory is needed for SQL Server. 22 | 23 | ### System Memory Status 24 | ```sql 25 | SELECT 26 | (total_physical_memory_kb / 1024) AS 'Total Physical Memory MB', 27 | (available_physical_memory_kb / 1024) AS 'Available Physical Memory MB', 28 | (total_page_file_kb / 1024) AS 'Total Page File MB', 29 | (available_page_file_kb / 1024) AS 'Available Page File MB', 30 | (system_memory_state_desc) AS 'System Memory State Description' 31 | FROM sys.dm_os_sys_memory; 32 | ``` 33 | * **Purpose:** Monitor **physical and virtual memory availability** for SQL Server and the OS. 34 | * **Metrics:** 35 | * `Total Physical Memory MB` → Installed RAM. 36 | * `Available Physical Memory MB` → Free RAM available. 37 | * `Total Page File MB` → Virtual memory size. 38 | * `Available Page File MB` → Free virtual memory. 39 | * `System Memory State Description` → Overall memory pressure indicator. 40 | * **Use case:** Detect **low memory conditions** and prevent **slowdowns or memory throttling**. 41 | * Ensure enough free RAM is available for SQL Server buffer pool, queries, and tempdb. 42 | * Low Available Physical Memory or Available Page File → high memory usage by SQL Server or other processes. 43 | * Compare Total Physical Memory vs. SQL Server memory allocation to check if the server has enough memory for expected workload. 44 | 45 | ### Index Usage Statistics 46 | ```sql 47 | SELECT 48 | OBJECT_NAME(s.object_id) AS TableName, 49 | i.name AS IndexName, 50 | s.user_seeks, 51 | s.user_scans, 52 | s.user_lookups, 53 | s.user_updates 54 | FROM sys.dm_db_index_usage_stats AS s 55 | INNER JOIN sys.indexes AS i ON i.object_id = s.object_id AND i.index_id = s.index_id 56 | WHERE s.database_id = DB_ID(DB_NAME()) 57 | ORDER BY s.user_seeks DESC; 58 | ``` 59 | * **Purpose:** Analyze **how indexes are being used** (reads and writes). 60 | * **Metrics:** 61 | * `user_seeks` → Number of seeks (efficient index usage). 62 | * `user_scans` → Number of table/index scans. 63 | * `user_lookups` → Lookups using included columns. 64 | * `user_updates` → Updates applied to index. 
65 | * **Use case:** Detect **unused indexes** (low `user_seeks`) or **heavily updated indexes**, informing **index maintenance strategy**. 66 | * Indexes with user_seeks = 0, user_scans = 0, and low user_lookups may not be needed. 67 | * High user_seeks → frequently queried, critical for query performance. 68 | * High user_updates → index maintenance may affect insert/update/delete performance. 69 | * Decide which indexes to keep, remove, or consolidate. Helps optimize storage, reduce fragmentation, and improve query performance. 70 | 71 | ### Page Life Expectancy (PLE) 72 | ```sql 73 | SELECT [object_name], [counter_name], [cntr_value] AS PageLifeExpectancy 74 | FROM sys.dm_os_performance_counters 75 | WHERE [object_name] LIKE '%Buffer Manager%' 76 | AND [counter_name] = 'Page life expectancy'; 77 | ``` 78 | * **Purpose:** Measure **how long pages stay in memory** before being flushed to disk. 79 | * **Metric:** `PageLifeExpectancy` → Higher is better (longer caching of pages). 80 | * **Use case:** Low PLE indicates **memory pressure** or **high I/O**. 81 | * **Reference:** Recommended baseline ≥ 300 seconds; < 300 may indicate **memory bottlenecks**. -------------------------------------------------------------------------------- /sql_access/users_info.sql: -------------------------------------------------------------------------------- 1 | /******************************************************************************************** 2 | Script: SQL Server Login Information and Last Login Tracking 3 | Description: 4 | - Retrieves server login information 5 | - Tracks granted permissions 6 | - Builds a historical table of last login times for users 7 | Author: Lorenzo Uriel 8 | Date: 2025-08-20 9 | ********************************************************************************************/ 10 | 11 | ------------------------- 12 | -- STEP 1: Basic information about users/logins 13 | ------------------------- 14 | -- List all logins with basic info 15 | EXEC sp_helplogins; 16 | 17 | -- List SQL logins and creation dates 18 | SELECT name, accdate 19 | FROM sys.syslogins; 20 | 21 | -- SQL logins from sys.sql_logins view 22 | SELECT * 23 | FROM sys.sql_logins; 24 | 25 | ------------------------- 26 | -- STEP 2: Check granted permissions for users 27 | ------------------------- 28 | SELECT 29 | S.name, 30 | S.loginname, 31 | S.password, 32 | L.sid, 33 | L.is_disabled, 34 | S.createdate, 35 | S.denylogin, 36 | S.hasaccess, 37 | S.isntname, 38 | S.isntgroup, 39 | S.isntuser, 40 | S.sysadmin, 41 | S.securityadmin, 42 | S.serveradmin, 43 | S.processadmin, 44 | S.diskadmin, 45 | S.dbcreator, 46 | S.bulkadmin 47 | FROM sys.syslogins S 48 | LEFT JOIN sys.sql_logins L ON S.sid = L.sid; 49 | 50 | ------------------------- 51 | -- STEP 3: Last login for each user 52 | ------------------------- 53 | SELECT 54 | login_name AS [Login], 55 | MAX(login_time) AS [Last Login Time] 56 | FROM sys.dm_exec_sessions 57 | GROUP BY login_name; 58 | 59 | ------------------------- 60 | -- STEP 4: Extract logins from SQL Server error logs 61 | ------------------------- 62 | -- Temporary tables to store log file info and parsed login entries 63 | IF OBJECT_ID('tempdb..#Arquivos_Log') IS NOT NULL DROP TABLE #Arquivos_Log; 64 | CREATE TABLE #Arquivos_Log ( 65 | [idLog] INT, 66 | [dtLog] NVARCHAR(30) COLLATE SQL_Latin1_General_CP1_CI_AI, 67 | [tamanhoLog] INT 68 | ); 69 | 70 | IF OBJECT_ID('tempdb..#Dados') IS NOT NULL DROP TABLE #Dados; 71 | CREATE TABLE #Dados ( 72 | [LogDate] DATETIME, 73 | [ProcessInfo] NVARCHAR(50) 
COLLATE SQL_Latin1_General_CP1_CI_AI, 74 | [Text] NVARCHAR(MAX) COLLATE SQL_Latin1_General_CP1_CI_AI, 75 | [User] AS (SUBSTRING(REPLACE([Text], 'Login succeeded for user ''', ''), 1, CHARINDEX('''', REPLACE([Text], 'Login succeeded for user ''', '')) - 1)) 76 | ); 77 | 78 | -- Populate log files 79 | INSERT INTO #Arquivos_Log 80 | EXEC sys.sp_enumerrorlogs; 81 | 82 | -- Loop through all error logs and extract successful login entries 83 | DECLARE @Contador INT = 0, 84 | @Total INT = (SELECT COUNT(*) FROM #Arquivos_Log); 85 | 86 | WHILE (@Contador < @Total) 87 | BEGIN 88 | INSERT INTO #Dados (LogDate, ProcessInfo, [Text]) 89 | EXEC master.dbo.xp_readerrorlog @Contador, 1, N'Login succeeded for user', NULL, NULL, NULL; 90 | 91 | SET @Contador += 1; 92 | END; 93 | 94 | -- Store the last login date per user 95 | IF OBJECT_ID('tempdb..#UltimoLogin') IS NOT NULL DROP TABLE #UltimoLogin; 96 | CREATE TABLE #UltimoLogin ( 97 | [User] VARCHAR(128) COLLATE SQL_Latin1_General_CP1_CI_AI NOT NULL, 98 | LogDate DATETIME NOT NULL 99 | ); 100 | 101 | INSERT INTO #UltimoLogin 102 | SELECT 103 | [User], 104 | MAX(LogDate) AS LogDate 105 | FROM #Dados 106 | GROUP BY [User]; 107 | 108 | ------------------------- 109 | -- STEP 5: Maintain historical LastLogin table 110 | ------------------------- 111 | -- Create table if it doesn't exist 112 | IF OBJECT_ID('dbo.LastLogin') IS NULL 113 | BEGIN 114 | CREATE TABLE dbo.LastLogin ( 115 | Username VARCHAR(128) COLLATE SQL_Latin1_General_CP1_CI_AI NOT NULL, 116 | CreateDate DATETIME, 117 | LastLogin DATETIME NULL, 118 | DaysSinceLastLogin AS (DATEDIFF(DAY, ISNULL(LastLogin, CreateDate), CONVERT(DATE, GETDATE()))) 119 | ); 120 | END; 121 | 122 | -- Insert new logins into historical table 123 | INSERT INTO dbo.LastLogin (Username, CreateDate) 124 | SELECT 125 | [name], 126 | create_date 127 | FROM sys.server_principals A 128 | LEFT JOIN dbo.LastLogin B ON A.[name] COLLATE SQL_Latin1_General_CP1_CI_AI = B.Username 129 | WHERE 130 | is_fixed_role = 0 131 | AND [name] NOT LIKE 'NT %' 132 | AND [name] NOT LIKE '##%' 133 | AND B.Username IS NULL 134 | AND A.[type] IN ('S', 'U'); 135 | 136 | -- Update last login times in historical table 137 | UPDATE A 138 | SET A.LastLogin = B.LogDate 139 | FROM dbo.LastLogin A 140 | JOIN #UltimoLogin B ON A.Username = B.[User] 141 | WHERE I -------------------------------------------------------------------------------- /index/statistics.sql: -------------------------------------------------------------------------------- 1 | /************************************************************************************************ 2 | -- Author: Lorenzo Uriel 3 | -- Created Date: 2025-08-19 4 | -- Last Modified: 2025-08-19 5 | -- Purpose: Compute table/index usage statistics in SQL Server 6 | -- Description: This query reports operational statistics per table/index using the DMV 7 | -- sys.dm_db_index_operational_stats. The statistics help identify which 8 | -- indexes are heavily scanned, updated, inserted, deleted, or looked up. 
9 | -- 10 | -- Fields include: 11 | -- Table_Name : Name of the user table 12 | -- Index_Name : Name of the index on the table 13 | -- Partition : Partition number of the index (for partitioned tables) 14 | -- Index_ID : ID of the index (1 = clustered, 0 = heap, >1 = nonclustered) 15 | -- Index_Type : Type of index (CLUSTERED, NONCLUSTERED, HEAP) 16 | -- Percent_Update : Ratio of updates to total operations (updates + inserts + deletes + scans + lookups + merges) 17 | -- Percent_Insert : Ratio of inserts to total operations 18 | -- Percent_Delete : Ratio of deletes to total operations 19 | -- Percent_Scan : Ratio of scans to total operations 20 | -- Percent_Lookup : Ratio of singleton lookups to total operations 21 | ************************************************************************************************/ 22 | 23 | -- ============================================================ 24 | -- Query: Compute Index Operational Percentages (U, S, Insert, Delete, Lookup) 25 | -- ============================================================ 26 | 27 | SELECT 28 | o.name AS [Table_Name], -- User table name 29 | x.name AS [Index_Name], -- Index name on the table 30 | i.partition_number AS [Partition], -- Partition number of the index 31 | i.index_id AS [Index_ID], -- Index ID (0 = Heap, 1 = Clustered, >1 = Nonclustered) 32 | x.type_desc AS [Index_Type], -- Type of index 33 | i.leaf_update_count * 100.0 / -- Percent of total operations that are updates 34 | NULLIF((i.range_scan_count 35 | + i.leaf_insert_count 36 | + i.leaf_delete_count 37 | + i.leaf_update_count 38 | + i.leaf_page_merge_count 39 | + i.singleton_lookup_count),0) AS [Percent_Update], 40 | i.leaf_insert_count * 100.0 / -- Percent of total operations that are inserts 41 | NULLIF((i.range_scan_count 42 | + i.leaf_insert_count 43 | + i.leaf_delete_count 44 | + i.leaf_update_count 45 | + i.leaf_page_merge_count 46 | + i.singleton_lookup_count),0) AS [Percent_Insert], 47 | i.leaf_delete_count * 100.0 / -- Percent of total operations that are deletes 48 | NULLIF((i.range_scan_count 49 | + i.leaf_insert_count 50 | + i.leaf_delete_count 51 | + i.leaf_update_count 52 | + i.leaf_page_merge_count 53 | + i.singleton_lookup_count),0) AS [Percent_Delete], 54 | i.range_scan_count * 100.0 / -- Percent of total operations that are range scans 55 | NULLIF((i.range_scan_count 56 | + i.leaf_insert_count 57 | + i.leaf_delete_count 58 | + i.leaf_update_count 59 | + i.leaf_page_merge_count 60 | + i.singleton_lookup_count),0) AS [Percent_Scan], 61 | i.singleton_lookup_count * 100.0 / -- Percent of total operations that are singleton lookups 62 | NULLIF((i.range_scan_count 63 | + i.leaf_insert_count 64 | + i.leaf_delete_count 65 | + i.leaf_update_count 66 | + i.leaf_page_merge_count 67 | + i.singleton_lookup_count),0) AS [Percent_Lookup] 68 | FROM sys.dm_db_index_operational_stats (DB_ID(), NULL, NULL, NULL) i 69 | JOIN sys.objects o 70 | ON o.object_id = i.object_id 71 | JOIN sys.indexes x 72 | ON x.object_id = i.object_id AND x.index_id = i.index_id 73 | WHERE (i.range_scan_count 74 | + i.leaf_insert_count 75 | + i.leaf_delete_count 76 | + i.leaf_update_count 77 | + i.leaf_page_merge_count 78 | + i.singleton_lookup_count) != 0 -- Only include indexes with activity 79 | AND OBJECTPROPERTY(i.object_id,'IsUserTable') = 1 -- Only user tables 80 | ORDER BY [Percent_Update] DESC, [Percent_Scan] DESC; -------------------------------------------------------------------------------- /sql_monitor/stack/docs/query_perfomance.md: 
-------------------------------------------------------------------------------- 1 | ### Top 10 Longest Running Queries 2 | ```sql 3 | SELECT TOP 10 4 | qs.total_elapsed_time / qs.execution_count / 1000.0 AS avg_duration_ms, 5 | qs.execution_count, 6 | SUBSTRING(qt.text, qs.statement_start_offset/2, 7 | (CASE 8 | WHEN qs.statement_end_offset = -1 THEN LEN(CONVERT(NVARCHAR(MAX), qt.text)) * 2 9 | ELSE qs.statement_end_offset 10 | END - qs.statement_start_offset)/2) AS query_text 11 | FROM sys.dm_exec_query_stats qs 12 | CROSS APPLY sys.dm_exec_sql_text(qs.sql_handle) qt 13 | ORDER BY avg_duration_ms DESC; 14 | ``` 15 | 16 | * **Purpose:** Identify queries that take the **longest average execution time**. 17 | * **Metrics:** 18 | * `avg_duration_ms` → Average execution time per execution (ms). 19 | * `execution_count` → How many times the query has run. 20 | * `query_text` → SQL statement text. 21 | * **Use case:** Detect **slow-performing queries** for tuning or indexing. 22 | 23 | ### Ad-Hoc Query Cache Hit Ratio 24 | ```sql 25 | SELECT 26 | (CAST(SUM(CASE WHEN usecounts > 0 THEN 1 ELSE 0 END) AS FLOAT) / COUNT(*)) * 100 AS Cache_Hit_Ratio 27 | FROM sys.dm_exec_cached_plans 28 | WHERE objtype = 'Adhoc' 29 | OR objtype = 'Prepared'; 30 | ``` 31 | * **Purpose:** Measure **how effectively the plan cache is reused**. 32 | * **Metric:** 33 | * `Cache_Hit_Ratio` → Percentage of cached plans that are reused. 34 | * **Use case:** Identify potential **query plan churn**; low hit ratio → consider parameterizing queries. 35 | * **High ratio (90–100%)** → SQL Server is reusing plans efficiently, low plan churn. 36 | * **Low ratio (<50%)** → Many plans are cached but not reused, which may indicate: 37 | * Excessive ad-hoc queries 38 | * Lack of parameterization 39 | * Plan cache bloat 40 | 41 | ### Wait Statistics 42 | ```sql 43 | DBCC SQLPERF (WAITSTATS); 44 | ``` 45 | * **Purpose:** Analyze **where SQL Server is spending time waiting**. 46 | * **Metric:** Wait types and cumulative wait time. 47 | * **Use case:** Detect **bottlenecks** like I/O, locking, or CPU waits for performance tuning. 48 | 49 | ### Active Connections Count 50 | ```sql 51 | SELECT 52 | COUNT(session_id) AS Connections 53 | FROM sys.dm_exec_sessions 54 | WHERE is_user_process = 1; 55 | ``` 56 | * **Purpose:** Monitor **number of active user connections**. 57 | * **Metric:** 58 | * `Connections` → Total active user sessions. 59 | * **Use case:** Capacity planning, detecting **connection storms**, or performance troubleshooting. 60 | 61 | ### Low-Use Cache Plans 62 | ```sql 63 | SELECT 64 | plan_handle, 65 | cacheobjtype AS CacheObjectType, 66 | objtype AS ObjectType, 67 | usecounts AS 'Use Count', 68 | size_in_bytes AS 'Cache Size' 69 | FROM sys.dm_exec_cached_plans 70 | WHERE usecounts < 10; 71 | ``` 72 | * **Purpose:** Identify **cached plans that are rarely reused**. 73 | * **Metrics:** 74 | * `CacheObjectType`, `ObjectType` → Type of cache object. 75 | * `Use Count` → Number of times plan was executed. 76 | * `Cache Size` → Memory consumed. 77 | * **Use case:** Detect **plan cache pollution**, which can reduce efficiency and identify low-use cached plans that may be occupying memory but not being reused. 78 | * These plans contribute to plan cache pollution, where memory is taken up by unnecessary or one-off ad-hoc plans. 
79 | * To evict a specific low-use plan from the cache, pass its plan handle to: `DBCC FREEPROCCACHE (0x0...);`
80 |
81 | ### Top 10 Queries by Logical Reads
82 | ```sql
83 | SELECT TOP 10
84 | creation_time AS CreationTime,
85 | total_logical_reads AS LogicalReads
86 | FROM sys.dm_exec_query_stats
87 | ORDER BY total_logical_reads DESC;
88 | ```
89 | * **Purpose:** Find queries **consuming the most logical reads** (I/O impact).
90 | * **Metrics:**
91 | * `CreationTime` → When query plan was created.
92 | * `LogicalReads` → Number of logical reads performed.
93 | * **Use case:** Identify **high I/O queries** for indexing or tuning.
94 |
95 | ### Top 10 CPU-Intensive Queries
96 | ```sql
97 | SELECT TOP 10
98 | st.text AS QueryText,
99 | qs.total_worker_time AS CPUTime,
100 | qs.total_elapsed_time AS TotalTime,
101 | qs.total_logical_reads AS LogicalReads
102 | FROM sys.dm_exec_query_stats qs
103 | CROSS APPLY sys.dm_exec_sql_text(qs.sql_handle) st
104 | ORDER BY qs.total_worker_time DESC;
105 | ```
106 | * **Purpose:** Identify **queries consuming the most CPU**.
107 | * **Metrics:**
108 | * `CPUTime` → Total CPU time used (microseconds).
109 | * `TotalTime` → Total elapsed time.
110 | * `LogicalReads` → Total logical reads.
111 | * `QueryText` → SQL statement.
112 | * **Use case:** Optimize **CPU-heavy queries** or troubleshoot high CPU spikes.
113 |
-------------------------------------------------------------------------------- /sql_monitor/stack/docs/other_metrics.md: --------------------------------------------------------------------------------
1 | ### Database Availability
2 | ```sql
3 | SELECT
4 | d.name AS [Database],
5 | d.state_desc AS [Status],
6 | CAST(100.0 * (1 - (rs.[cntr_value] / DATEDIFF(SECOND, sqlserver_start_time, GETDATE()))) AS DECIMAL(5,2)) AS [AvailabilityPercent]
7 | FROM sys.databases d
8 | CROSS JOIN sys.dm_os_sys_info si
9 | CROSS APPLY (
10 | SELECT [cntr_value]
11 | FROM sys.dm_os_performance_counters
12 | WHERE counter_name = 'Log Flush Waits/sec'
13 | ) rs
14 | CROSS APPLY (
15 | SELECT sqlserver_start_time FROM sys.dm_os_sys_info
16 | ) st;
17 | ```
18 | **Purpose:** Measure the **approximate percentage of time each database has been online** since the last SQL Server restart.
19 | **Use Case:** Track **uptime and availability trends** to ensure SLAs are met.
20 | **Benefits:** Provides a **quick health indicator** for all databases; helps identify **unexpected restarts or downtime**.
21 | *Note:* For precise SLA tracking, use **SQL Server Agent monitoring** or external tools like **SCOM/Zabbix**.
22 |
23 | ### Database Connections
24 | ```sql
25 | SELECT
26 | DB_NAME(dbid) AS [Database],
27 | COUNT(*) AS [TotalConnections],
28 | SUM(CASE WHEN status = 'sleeping' THEN 1 ELSE 0 END) AS [Idle],
29 | SUM(CASE WHEN status <> 'sleeping' THEN 1 ELSE 0 END) AS [Active]
30 | FROM sys.sysprocesses
31 | WHERE dbid > 0
32 | GROUP BY dbid;
33 | ```
34 | **Purpose:** Count the **number of connections per database**, broken down by idle and active sessions.
35 | **Use Case:** Detect **connection spikes, potential blocking, or over-utilization**.
36 | **Benefits:** Helps **capacity planning**, **load analysis**, and **detecting abandoned sessions** that may impact performance.
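*Note:* `sys.sysprocesses` is a deprecated compatibility view; the same breakdown can be taken from `sys.dm_exec_sessions`. A minimal sketch, assuming SQL Server 2012+ (where `database_id` is exposed on this DMV) and mirroring the idle/active split above:
```sql
SELECT
    DB_NAME(s.database_id) AS [Database],
    COUNT(*) AS [TotalConnections],
    SUM(CASE WHEN s.status = 'sleeping' THEN 1 ELSE 0 END) AS [Idle],
    SUM(CASE WHEN s.status <> 'sleeping' THEN 1 ELSE 0 END) AS [Active]
FROM sys.dm_exec_sessions s
WHERE s.is_user_process = 1      -- user sessions only
GROUP BY DB_NAME(s.database_id);
```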
37 | 38 | ### Query Performance – Average Execution Time 39 | ```sql 40 | SELECT TOP 10 41 | DB_NAME(st.dbid) AS [Database], 42 | qs.execution_count, 43 | qs.total_elapsed_time / qs.execution_count AS [Avg_Elapsed_Time_ms], 44 | qs.total_worker_time / qs.execution_count AS [Avg_CPU_Time_ms], 45 | qs.max_elapsed_time AS [Max_Elapsed_Time_ms], 46 | SUBSTRING(st.text, (qs.statement_start_offset/2)+1, 47 | ((CASE qs.statement_end_offset 48 | WHEN -1 THEN DATALENGTH(st.text) 49 | ELSE qs.statement_end_offset END 50 | - qs.statement_start_offset)/2)+1) AS [QueryText] 51 | FROM sys.dm_exec_query_stats qs 52 | CROSS APPLY sys.dm_exec_sql_text(qs.sql_handle) st 53 | ORDER BY [Avg_Elapsed_Time_ms] DESC; 54 | ``` 55 | **Purpose:** Identify the **top 10 queries by average execution time**. 56 | **Use Case:** Detect **long-running queries** that may be impacting performance. 57 | **Benefits:** Enables **query optimization**, **index tuning**, and **CPU resource management**. 58 | 59 | ### Slow Query Alerts (> 30s) 60 | ```sql 61 | SELECT 62 | DB_NAME(st.dbid) AS [Database], 63 | qs.execution_count, 64 | qs.max_elapsed_time AS [Max_Elapsed_Time_ms], 65 | SUBSTRING(st.text, (qs.statement_start_offset/2)+1, 66 | ((CASE qs.statement_end_offset 67 | WHEN -1 THEN DATALENGTH(st.text) 68 | ELSE qs.statement_end_offset END 69 | - qs.statement_start_offset)/2)+1) AS [QueryText] 70 | FROM sys.dm_exec_query_stats qs 71 | CROSS APPLY sys.dm_exec_sql_text(qs.sql_handle) st 72 | WHERE qs.max_elapsed_time > 30000 -- greater than 30 seconds 73 | ORDER BY qs.max_elapsed_time DESC; 74 | ``` 75 | **Purpose:** Identify queries that **exceed 30 seconds execution time**. 76 | **Use Case:** Alert DBA team to **potential performance issues or blocking queries**. 77 | **Benefits:** Improves **response time**, reduces **user impact**, and guides **query tuning efforts**. 78 | 79 | ### Last Backup Taken 80 | ```sql 81 | SELECT 82 | d.name AS [Database], 83 | MAX(b.backup_finish_date) AS [LastBackup], 84 | b.type AS [BackupType] 85 | FROM msdb.dbo.backupset b 86 | JOIN sys.databases d ON b.database_name = d.name 87 | GROUP BY d.name, b.type 88 | ORDER BY [LastBackup] DESC; 89 | ``` 90 | **Purpose:** Track the **most recent backup for each database**. 91 | **Use Case:** Verify **backup compliance** with RTO/RPO requirements. 92 | **Benefits:** Reduces **risk of data loss** and ensures **recovery readiness**. 93 | 94 | ### Backup Type (FULL, DIFFERENTIAL, LOG) 95 | ```sql 96 | SELECT 97 | d.name AS [Database], 98 | CASE b.type 99 | WHEN 'D' THEN 'FULL' 100 | WHEN 'I' THEN 'DIFFERENTIAL' 101 | WHEN 'L' THEN 'LOG' 102 | ELSE 'OTHER' 103 | END AS [BackupType], 104 | MAX(b.backup_finish_date) AS [LastBackup] 105 | FROM msdb.dbo.backupset b 106 | JOIN sys.databases d ON b.database_name = d.name 107 | GROUP BY d.name, b.type 108 | ORDER BY [LastBackup] DESC; 109 | ``` 110 | **Purpose:** Determine **the type of backup** most recently performed for each database. 111 | **Use Case:** Ensure **backup strategy is consistent** (FULL + DIFF + LOG). 112 | **Benefits:** Supports **audit compliance**, **RPO adherence**, and **disaster recovery planning**. 
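Building on the two backup checks above, a minimal alerting sketch that flags databases with no FULL backup in the last 24 hours; the 24-hour threshold and the exclusion of `tempdb` are assumptions to adapt to your RPO:
```sql
SELECT
    d.name AS [Database],
    MAX(b.backup_finish_date) AS [LastFullBackup]
FROM sys.databases d
LEFT JOIN msdb.dbo.backupset b
    ON b.database_name = d.name
    AND b.type = 'D'                                     -- FULL backups only
WHERE d.name <> 'tempdb'
GROUP BY d.name
HAVING MAX(b.backup_finish_date) IS NULL                 -- never backed up
    OR MAX(b.backup_finish_date) < DATEADD(HOUR, -24, GETDATE());
```
**Purpose:** Surface databases that are out of backup policy so they can be alerted on.
**Benefits:** Catches both **stale** and **missing** FULL backups in a single check.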
-------------------------------------------------------------------------------- /ssrs/analyze.sql: -------------------------------------------------------------------------------- 1 | -- ===================================================== 2 | -- Query: ExecutionLog Analysis by Client and Report 3 | -- Includes Author, Creation Date, and Total Reports 4 | -- Author: Lorenzo Uriel 5 | -- Date: 2025-08-19 6 | -- ===================================================== 7 | 8 | SELECT 9 | [Log].*, 10 | [Empresa], 11 | (SELECT COUNT([name]) FROM [reports]) AS [TotalReports] -- Total number of reports 12 | FROM 13 | ( 14 | -- Inner query: counts report views by client 15 | SELECT 16 | C.[Path] AS [ReportPath], -- Report folder path 17 | C.[Name] AS [ReportName], -- Report name 18 | C.[CreatedBy] AS [Author], -- Report author 19 | C.[CreationDate] AS [CreationDate], -- Report creation date 20 | -- Extract 'customer' parameter from execution log 21 | SUBSTRING( 22 | E.[Parameters], 23 | CHARINDEX('customer=', E.[Parameters]) + LEN('customer='), 24 | CHARINDEX('&', E.[Parameters] + '&', CHARINDEX('customer=', E.[Parameters])) 25 | - CHARINDEX('customer=', E.[Parameters]) - LEN('customer=') 26 | ) AS [customer], 27 | COUNT(*) AS [ViewCount] -- Number of executions for this client and report 28 | FROM ExecutionLog AS E 29 | JOIN Catalog AS C 30 | ON E.ReportID = C.ItemID -- Join to get report metadata 31 | WHERE E.[Parameters] LIKE '%customer=%' -- Only logs that include a 'customer' parameter 32 | GROUP BY 33 | C.[Path], 34 | C.[Name], 35 | C.[CreatedBy], 36 | C.[CreationDate], 37 | SUBSTRING( 38 | E.[Parameters], 39 | CHARINDEX('customer=', E.[Parameters]) + LEN('customer='), 40 | CHARINDEX('&', E.[Parameters] + '&', CHARINDEX('customer=', E.[Parameters])) 41 | - CHARINDEX('customer=', E.[Parameters]) - LEN('customer=') 42 | ) 43 | ) AS [Log] 44 | ORDER BY [Log].[ViewCount] DESC; -- Sort by most viewed reports per client 45 | 46 | /************************************************************************************************ 47 | -- Retrieves all reports from the catalog along with their execution 48 | -- parameters and counts the number of times each report was viewed. 49 | ************************************************************************************************/ 50 | SELECT 51 | C.[Path] AS [ReportPath], 52 | C.[Name] AS [ReportName], 53 | CAST(E.[Parameters] as varchar) AS [Parameters], 54 | COUNT(*) AS [ViewCount] 55 | FROM ExecutionLog AS E 56 | JOIN Catalog AS C ON E.ReportID = C.ItemID 57 | GROUP BY C.Path, C.Name, CAST(E.[Parameters] as varchar) 58 | ORDER BY ViewCount DESC 59 | 60 | /************************************************************************************************ 61 | -- Extracts the value of the 'customer' parameter from the execution 62 | -- logs for each report and counts the number of views per client per report. 63 | -- Only logs containing the 'customer' parameter are considered. 
64 | ************************************************************************************************/ 65 | SELECT * 66 | FROM ( 67 | SELECT 68 | C.[Path] AS [ReportPath], 69 | C.[Name] AS [ReportName], 70 | SUBSTRING(E.[Parameters], 71 | CHARINDEX('customer=', E.[Parameters]) + LEN('customer='), 72 | CHARINDEX('&', E.[Parameters], CHARINDEX('customer=', E.[Parameters])) 73 | - CHARINDEX('customer=', E.[Parameters]) - LEN('customer=')) AS [customer], 74 | COUNT(*) AS [ViewCount] 75 | FROM ExecutionLog AS E 76 | JOIN Catalog AS C ON E.ReportID = C.ItemID 77 | WHERE E.[Parameters] LIKE '%customer=%' 78 | GROUP BY C.[Path], C.[Name], 79 | SUBSTRING(E.[Parameters], 80 | CHARINDEX('customer=', E.[Parameters]) + LEN('customer='), 81 | CHARINDEX('&', E.[Parameters], CHARINDEX('customer=', E.[Parameters])) 82 | - CHARINDEX('customer=', E.[Parameters]) - LEN('customer=')) 83 | ) [Log] 84 | ORDER BY [Log].[ViewCount] DESC 85 | 86 | /************************************************************************************************ 87 | -- Similar to Query 2, selecting reports and their 'customer' values 88 | -- along with view counts, prepared for further extensions or additional filtering. 89 | ************************************************************************************************/ 90 | SELECT 91 | [Log].* 92 | FROM ( 93 | SELECT 94 | C.[Path] AS [ReportPath], 95 | C.[Name] AS [ReportName], 96 | SUBSTRING(E.[Parameters], 97 | CHARINDEX('customer=', E.[Parameters]) + LEN('customer='), 98 | CHARINDEX('&', E.[Parameters], CHARINDEX('customer=', E.[Parameters])) 99 | - CHARINDEX('customer=', E.[Parameters]) - LEN('customer=')) AS [customer], 100 | COUNT(*) AS [ViewCount] 101 | FROM ExecutionLog AS E 102 | JOIN Catalog AS C ON E.ReportID = C.ItemID 103 | WHERE E.[Parameters] LIKE '%customer=%' 104 | GROUP BY C.[Path], C.[Name], 105 | SUBSTRING(E.[Parameters], 106 | CHARINDEX('customer=', E.[Parameters]) + LEN('customer='), 107 | CHARINDEX('&', E.[Parameters], CHARINDEX('customer=', E.[Parameters])) 108 | - CHARINDEX('customer=', E.[Parameters]) - LEN('customer=')) 109 | ) [Log] 110 | ORDER BY [Log].[ViewCount] DESC 111 | -------------------------------------------------------------------------------- /sql_monitor/stack/docs/server_performance.md: -------------------------------------------------------------------------------- 1 | ### Top 10 Longest Running Queries 2 | ```sql 3 | SELECT TOP 10 4 | creation_time AS CreationTime, 5 | total_elapsed_time/1000 AS TotalElapsedTimeMS, 6 | execution_count AS ExecutionCount, 7 | total_worker_time/1000 AS CPUTimeMS 8 | FROM sys.dm_exec_query_stats 9 | ORDER BY total_elapsed_time DESC; 10 | ``` 11 | * **Purpose:** Identify queries consuming the **most total execution time**. 12 | * **Metrics:** 13 | * `TotalElapsedTimeMS` → Total elapsed time (ms). 14 | * `ExecutionCount` → Number of times executed. 15 | * `CPUTimeMS` → CPU time used by the query (ms). 16 | * `CreationTime` → When the plan was compiled. 17 | * **Use case:** Detect high-impact queries for **performance tuning**. 18 | 19 | ### Currently Running Threads 20 | ```sql 21 | SELECT 22 | COUNT(*) AS Running_Threads 23 | FROM sys.dm_exec_requests 24 | WHERE status = 'running'; 25 | ``` 26 | * **Purpose:** Count active threads currently executing queries. 27 | * **Metric:** `Running_Threads` → Number of concurrent executing requests. 28 | * **Use case:** Monitor **concurrent activity**, detect **thread storms** or query blocking. 
29 | * High number of running threads may indicate CPU pressure or inefficient queries. 30 | * Helps see how many queries are concurrently executing. 31 | 32 | ### Pending I/O Requests 33 | ```sql 34 | SELECT 35 | COUNT(*) AS OpenFiles 36 | FROM sys.dm_io_pending_io_requests; 37 | ``` 38 | * **Purpose:** Identify **I/O operations waiting to complete**. 39 | * **Metric:** `OpenFiles` → Number of pending I/O requests. 40 | * **Use case:** Detect potential **disk bottlenecks** affecting performance. 41 | * Pending I/O indicates how many disk operations are waiting, which may reflect slow storage, high concurrency, or large queries. 42 | * A high number of pending I/O requests can point to disk performance issues, I/O saturation, or queries scanning large amounts of data. 43 | 44 | ### Active Locks 45 | ```sql 46 | SELECT 47 | request_session_id AS SessionID, 48 | resource_database_id AS DatabaseID, 49 | resource_associated_entity_id AS EntityID, 50 | request_mode AS LockType, 51 | request_status AS Status 52 | FROM sys.dm_tran_locks; 53 | ``` 54 | * **Purpose:** Track **active locks** held or requested in the system. 55 | * **Metrics:** 56 | * `SessionID` → ID of session holding/requesting the lock. 57 | * `DatabaseID` → Database containing the locked resource. 58 | * `EntityID` → Object or page ID. 59 | * `LockType` → Lock mode (e.g., `X`, `S`). 60 | * `Status` → Granted or waiting. 61 | * **Use case:** Troubleshoot **blocking, deadlocks, or contention**. 62 | * Check which sessions are waiting on locks (Status = 'WAIT'). 63 | * Identify conflicting locks and involved sessions. 64 | * Detect hot spots in tables or indexes where locks are heavily requested. 65 | 66 | ### Session Space Usage 67 | ```sql 68 | SELECT 69 | SUM(user_objects_alloc_page_count) AS 'User Object Pages', 70 | SUM(internal_objects_alloc_page_count) AS 'Internal Object Pages' 71 | FROM sys.dm_db_session_space_usage; 72 | ``` 73 | * **Purpose:** Monitor **memory/pages allocated by current sessions**. 74 | * **Metrics:** 75 | * `User Object Pages` → Pages used by user objects (tables, indexes). 76 | * `Internal Object Pages` → Pages used by internal SQL Server structures. 77 | * **Use case:** Detect **memory pressure** caused by session activity. 78 | * Track how much session memory is allocated for user and internal objects. 79 | * High internal_objects_alloc_page_count → may indicate large sorts, joins, or hash operations consuming tempdb memory. 80 | * Can identify queries that generate heavy tempdb usage via internal objects. 81 | 82 | ### Active Transactions 83 | ```sql 84 | SELECT 85 | transaction_id AS TransactionID, 86 | transaction_begin_time AS BeginTime, 87 | transaction_state AS State, 88 | transaction_type AS Type 89 | FROM sys.dm_tran_active_transactions; 90 | ``` 91 | * **Purpose:** Track **transactions currently open** in SQL Server. 92 | * **Metrics:** 93 | * `TransactionID` → Unique transaction identifier. 94 | * `BeginTime` → When transaction started. 95 | * `State` → Active, committing, or rolling back. 96 | * `Type` → Transaction type (read/write). 97 | * **Use case:** Detect **long-running or uncommitted transactions**. 98 | * Spot transactions that have been open for too long. 99 | * Long-running transactions can hold locks and block other queries. 100 | * Open transactions prevent log truncation, potentially causing log growth. 
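As a follow-up to the points above, one way to surface long-open transactions together with the session that owns them is to join the transaction DMVs to `sys.dm_exec_sessions`. A minimal sketch; the 10-minute cutoff is an assumption:
```sql
SELECT
    tx.transaction_id,
    tx.transaction_begin_time,
    DATEDIFF(MINUTE, tx.transaction_begin_time, GETDATE()) AS OpenMinutes,
    s.session_id,
    s.login_name,
    s.host_name,
    s.program_name
FROM sys.dm_tran_active_transactions tx
JOIN sys.dm_tran_session_transactions st ON st.transaction_id = tx.transaction_id
JOIN sys.dm_exec_sessions s ON s.session_id = st.session_id
WHERE tx.transaction_begin_time < DATEADD(MINUTE, -10, GETDATE())  -- open longer than 10 minutes
ORDER BY tx.transaction_begin_time;
```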
101 | 102 | ### Wait Statistics (excluding idle waits) 103 | ```sql 104 | SELECT 105 | wait_type, 106 | SUM(wait_time_ms) AS WaitTimeMS, 107 | SUM(waiting_tasks_count) AS TaskCount 108 | FROM sys.dm_os_wait_stats 109 | WHERE wait_type NOT IN ('SLEEP_TASK', 'BROKER_TASK_STOP', 'SQLTRACE_BUFFER_FLUSH') 110 | GROUP BY wait_type 111 | ORDER BY WaitTimeMS DESC; 112 | ``` 113 | * **Purpose:** Identify where SQL Server **spends time waiting**, excluding trivial or idle waits. 114 | * **Metrics:** 115 | * `WaitTimeMS` → Total wait time in milliseconds. 116 | * `TaskCount` → Number of waiting tasks. 117 | * `wait_type` → Type of wait (e.g., `PAGEIOLATCH_SH`, `LCK_M_X`). 118 | * **Use case:** Troubleshoot **performance bottlenecks** caused by I/O, locks, or memory. 119 | * Example: PAGEIOLATCH_SH → waiting on data pages from disk. 120 | * Example: LCK_M_X → waiting for exclusive locks. 121 | * Example: SOS_SCHEDULER_YIELD → waiting for CPU. 122 | * High wait time → investigate queries, indexes, or hardware. -------------------------------------------------------------------------------- /custom_alert_emails/dbo.usp_send_job_custom_email.sql: -------------------------------------------------------------------------------- 1 | -- ============================================= 2 | -- PROCEDURE: dbo.usp_send_job_custom_email 3 | -- PURPOSE: Sends a fully formatted HTML email with the latest 4 | -- SQL Server Agent job execution details. 5 | -- PARAMETERS: 6 | -- @job_name SYSNAME - Name of the SQL Agent Job 7 | -- NOTES: 8 | -- - Uses sysjobs, sysjobhistory, sysjobsteps in msdb 9 | -- - Sends HTML email via Database Mail profile 10 | -- Author: Lorenzo Uriel 11 | -- Date: 2025-08-19 12 | -- ============================================= 13 | CREATE PROCEDURE dbo.usp_send_job_custom_email 14 | @job_name SYSNAME -- Input: Name of the job to report 15 | AS 16 | BEGIN 17 | SET NOCOUNT ON; -- Avoid extra result sets 18 | 19 | -- ======================== 20 | -- DECLARE VARIABLES 21 | -- ======================== 22 | DECLARE 23 | @job_id UNIQUEIDENTIFIER, -- Job identifier 24 | @run_datetime DATETIME, -- Job start datetime 25 | @duration VARCHAR(20), -- Job duration as formatted string 26 | @start_time TIME, -- Job start time 27 | @status VARCHAR(100), -- Job status (Succeeded/Failed/Unknown) 28 | @executed_by SYSNAME, -- SQL Server login executing the job 29 | @server_name SYSNAME = CONVERT(SYSNAME, SERVERPROPERTY('MachineName')), -- Server name 30 | @last_step NVARCHAR(128), -- Last step name 31 | @first_step NVARCHAR(128), -- First step name 32 | @html_body NVARCHAR(MAX); -- HTML email content 33 | 34 | -- ======================== 35 | -- GET JOB ID 36 | -- ======================== 37 | SELECT @job_id = job_id 38 | FROM msdb.dbo.sysjobs 39 | WHERE name = @job_name; 40 | 41 | IF @job_id IS NULL 42 | BEGIN 43 | RAISERROR('Job not found.', 16, 1); -- Raise error if job does not exist 44 | RETURN; 45 | END 46 | 47 | -- ======================== 48 | -- GET LATEST JOB RUN INFORMATION 49 | -- ======================== 50 | DECLARE @run_date INT, @run_time INT, @run_duration INT, @message NVARCHAR(MAX); 51 | 52 | SELECT TOP 1 53 | @run_date = run_date, 54 | @run_time = run_time, 55 | @run_duration = run_duration, 56 | @message = message 57 | FROM msdb.dbo.sysjobhistory 58 | WHERE job_id = @job_id AND step_id = 0 -- Step_id = 0 indicates job outcome 59 | ORDER BY instance_id DESC; 60 | 61 | -- ======================== 62 | -- FORMAT DATE, TIME AND DURATION 63 | -- ======================== 64 | SELECT @run_datetime = 
CONVERT(DATETIME,
65 | STUFF(STUFF(CONVERT(CHAR(8), @run_date), 5, 0, '-'), 8, 0, '-') + ' ' +
66 | STUFF(STUFF(RIGHT('000000' + CAST(@run_time AS VARCHAR(6)), 6), 3, 0, ':'), 6, 0, ':')
67 | );
68 |
69 | SELECT @duration =
70 | CAST(@run_duration / 10000 AS VARCHAR) + 'h ' +
71 | CAST((@run_duration % 10000) / 100 AS VARCHAR) + 'm ' +
72 | CAST(@run_duration % 100 AS VARCHAR) + 's';
73 |
74 | -- ========================
75 | -- GET FIRST AND LAST STEP NAMES AND EXECUTOR
76 | -- ========================
77 | SELECT TOP 1
78 | @executed_by = suser_sname(),
79 | @first_step = (SELECT TOP 1 step_name FROM msdb.dbo.sysjobsteps WHERE job_id = @job_id ORDER BY step_id),
80 | @last_step = (SELECT TOP 1 step_name FROM msdb.dbo.sysjobsteps WHERE job_id = @job_id ORDER BY step_id DESC);
81 |
82 | -- ========================
83 | -- DETERMINE JOB STATUS AND STYLIZE
84 | -- ========================
85 | IF @message LIKE '%succeeded%'
86 | SET @status = 'Succeeded';
87 | ELSE IF @message LIKE '%failed%'
88 | SET @status = 'Failed';
89 | ELSE
90 | SET @status = 'Unknown';
91 |
92 | -- ========================
93 | -- BUILD HTML EMAIL BODY (the HTML markup below was garbled in the source; this is a reconstructed minimal layout that keeps the original fields and CONVERT styles)
94 | -- ========================
95 | SET @html_body =
96 | N'<html>
97 | <head>
98 | <style>
99 | body { font-family: Segoe UI, Arial, sans-serif; }
100 | table { border-collapse: collapse; }
101 | td { border: 1px solid #cccccc; padding: 6px 10px; }
102 | </style>
103 | </head>
104 | <body>
105 | <h2>SQL Server Job Notification</h2>
106 | <table>
107 | <tr><td><b>Job Name</b></td><td>' + @job_name + '</td></tr>
108 | <tr><td><b>Status</b></td><td>' + @status + '</td></tr>
109 | <tr><td><b>Run Date</b></td><td>' + CONVERT(VARCHAR, @run_datetime, 107) + '</td></tr>
110 | <tr><td><b>Start Time</b></td><td>' + CONVERT(VARCHAR, @run_datetime, 114) + '</td></tr>
111 | <tr><td><b>Duration</b></td><td>' + @duration + '</td></tr>
112 | <tr><td><b>Executed By</b></td><td>' + @executed_by + '</td></tr>
113 | <tr><td><b>Last Step</b></td><td>' + ISNULL(@last_step, '-') + '</td></tr>
114 | <tr><td><b>Initial Step</b></td><td>' + ISNULL(@first_step, '-') + '</td></tr>
115 | <tr><td><b>Server</b></td><td>' + @server_name + '</td></tr>
116 | </table>
117 | <p><b>Message:</b> ' + @message + '</p>
118 | </body>
119 | </html>
120 | 121 | '; 122 | 123 | DECLARE @subject NVARCHAR(255) = '[SQL Job Completed] ' + @job_name; 124 | 125 | -- ======================== 126 | -- SEND THE EMAIL 127 | -- ======================== 128 | EXEC msdb.dbo.sp_send_dbmail 129 | @profile_name = 'SQL Alerts', -- Database Mail profile 130 | @recipients = 'lorenzouriel@sqlserver.com', -- Recipient(s) 131 | @subject = @subject, 132 | @body_format = 'HTML', 133 | @body = @html_body; 134 | END 135 | 136 | -- Execute the procedure for a specific SQL Agent Job 137 | -- EXEC dbo.usp_send_job_custom_email @job_name = 'job_name'; -------------------------------------------------------------------------------- /sql_profiler/automatically_profiler.sql: -------------------------------------------------------------------------------- 1 | /************************************************************************************************ 2 | -- Author: Lorenzo Uriel 3 | -- Created Date: 2025-08-19 4 | -- Purpose: Automate SQL Profiler tracing, monitor long-running queries, and optionally 5 | -- delete the trace file. Integrated with SQL Server Agent for alerting. 6 | -- Description: This script contains: 7 | -- 1. Procedure creation: AutomaticallyProfiler 8 | -- 2. Execution of the procedure 9 | -- 3. Checking the trace file for queries longer than 30 seconds 10 | -- 4. Deleting the profiler trace file 11 | ************************************************************************************************/ 12 | 13 | -- ======================================================== 14 | -- 1. Create Procedure: AutomaticallyProfiler 15 | -- ======================================================== 16 | USE [your_database]; -- Replace with your target database 17 | GO 18 | 19 | CREATE PROCEDURE [dbo].[AutomaticallyProfiler] 20 | @RunTime INT = 120 -- Duration of the trace in minutes 21 | AS 22 | BEGIN 23 | -- ================================================ 24 | -- Variables for trace creation and control 25 | -- ================================================ 26 | DECLARE @rc INT; -- Return code from trace procedures 27 | DECLARE @TraceID INT; -- Trace ID for the created trace 28 | DECLARE @maxfilesize BIGINT; -- Maximum size of the trace file 29 | DECLARE @Now DATETIME; -- Current datetime 30 | DECLARE @StopTime DATETIME; -- Stop datetime for the trace 31 | DECLARE @FQFileName NVARCHAR(100); -- Full trace file path 32 | DECLARE @FileStamp NVARCHAR(25); -- Timestamp for filename 33 | 34 | -- Set time and file name 35 | SET @Now = GETDATE(); 36 | SET @StopTime = DATEADD(MI, @RunTime, @Now); 37 | SET @FQFileName = 'C:\Profiler\TraceProfiler_'; -- Ensure folder exists 38 | SET @FileStamp = 39 | CAST(DATEPART(YEAR, GETDATE()) AS NVARCHAR) + 40 | RIGHT('0' + CAST(DATEPART(MONTH, GETDATE()) AS NVARCHAR), 2) + 41 | RIGHT('0' + CAST(DATEPART(DAY, GETDATE()) AS NVARCHAR), 2); 42 | SET @FQFileName = @FQFileName + @FileStamp; 43 | SET @maxfilesize = 500; -- Max file size in MB 44 | 45 | -- ================================================ 46 | -- Create the trace 47 | -- ================================================ 48 | EXEC @rc = sp_trace_create @TraceID OUTPUT, 0, @FQFileName, @maxfilesize, @StopTime; 49 | IF (@rc != 0) GOTO error; 50 | 51 | -- ================================================ 52 | -- Configure trace events 53 | -- ================================================ 54 | DECLARE @on BIT = 1; 55 | 56 | -- RPC:Completed (EventClass 10) 57 | EXEC sp_trace_setevent @TraceID, 10, 1, @on; -- TextData 58 | EXEC sp_trace_setevent @TraceID, 10, 12, @on; -- SPID 59 
| EXEC sp_trace_setevent @TraceID, 10, 13, @on; -- Duration 60 | 61 | -- SQL:BatchCompleted (EventClass 12) 62 | EXEC sp_trace_setevent @TraceID, 12, 1, @on; -- TextData 63 | EXEC sp_trace_setevent @TraceID, 12, 12, @on; -- SPID 64 | EXEC sp_trace_setevent @TraceID, 12, 13, @on; -- Duration 65 | 66 | -- ================================================ 67 | -- Optional filters 68 | -- ================================================ 69 | DECLARE @intfilter INT = 50; -- Duration threshold 70 | EXEC sp_trace_setfilter @TraceID, 12, 0, 4, @intfilter; -- Filter: Duration > 50 71 | EXEC sp_trace_setfilter @TraceID, 35, 0, 7, N'Master'; -- Filter: Database = Master 72 | 73 | -- ================================================ 74 | -- Start the trace 75 | -- ================================================ 76 | EXEC sp_trace_setstatus @TraceID, 1; 77 | SELECT TraceID = @TraceID; -- Return Trace ID 78 | GOTO finish; 79 | 80 | error: 81 | SELECT ErrorCode = @rc; -- Return error code if creation fails 82 | 83 | finish: 84 | END; 85 | GO 86 | 87 | -- ======================================================== 88 | -- 2. Execute Procedure 89 | -- ======================================================== 90 | USE [your_database]; 91 | GO 92 | 93 | DECLARE @return_value INT; 94 | 95 | EXEC @return_value = [dbo].[AutomaticallyProfiler]; 96 | 97 | SELECT 'Return Value' = @return_value; 98 | GO 99 | 100 | -- ======================================================== 101 | -- 3. Check Profiler Trace File for Long Queries (>30s) 102 | -- ======================================================== 103 | DECLARE @FQFileName NVARCHAR(100); 104 | DECLARE @FileStamp NVARCHAR(25); 105 | 106 | SET @FQFileName = 'C:\Profiler\TraceProfiler_'; 107 | SET @FileStamp = 108 | CAST(DATEPART(YEAR, GETDATE()) AS NVARCHAR) + 109 | RIGHT('0' + CAST(DATEPART(MONTH, GETDATE()) AS NVARCHAR), 2) + 110 | RIGHT('0' + CAST(DATEPART(DAY, GETDATE()) AS NVARCHAR), 2); 111 | SET @FQFileName = @FQFileName + @FileStamp + '.trc'; 112 | 113 | DECLARE @DurationCount INT; 114 | 115 | -- Count queries taking longer than 30 seconds (30,000,000 microseconds) 116 | SELECT @DurationCount = ISNULL(COUNT(Duration), 0) 117 | FROM fn_trace_gettable(@FQFileName, 1) 118 | WHERE [Duration] > 30000000; 119 | 120 | -- Raise an error if long queries exist 121 | IF @DurationCount > 0 122 | BEGIN 123 | RAISERROR('Some queries took more than 30 seconds', 16, 1); 124 | END; 125 | 126 | -- ======================================================== 127 | -- 4. Delete Profiler Trace File 128 | -- ======================================================== 129 | DECLARE @FilePath NVARCHAR(100) = 'C:\Profiler\'; 130 | DECLARE @FileName NVARCHAR(100) = 'TraceProfiler_'; 131 | 132 | SET @FileName = 133 | @FileName + 134 | CAST(DATEPART(YEAR, GETDATE()) AS NVARCHAR) + 135 | RIGHT('0' + CAST(DATEPART(MONTH, GETDATE()) AS NVARCHAR), 2) + 136 | RIGHT('0' + CAST(DATEPART(DAY, GETDATE()) AS NVARCHAR), 2) + 137 | '.trc'; 138 | 139 | DECLARE @Cmd NVARCHAR(200); 140 | SET @Cmd = 'DEL "' + @FilePath + @FileName + '"'; 141 | 142 | EXEC xp_cmdshell @Cmd; -------------------------------------------------------------------------------- /sql_maintenance/use_cases/backup_ola_hallengren.sql: -------------------------------------------------------------------------------- 1 | -- ####################### 2 | -- BACKUP USING OLA HALLENGREN SOLUTION 3 | -- ####################### 4 | 5 | -- A. 
Back up all user databases, using checksums and compression; verify the backup; and delete old backup files 6 | EXECUTE dbo.DatabaseBackup 7 | @Databases = 'USER_DATABASES', 8 | @Directory = 'C:\Backup', 9 | @BackupType = 'FULL', 10 | @Verify = 'Y', 11 | @Compress = 'Y', 12 | @Checksum = 'Y', 13 | @CleanupTime = 24 14 | 15 | -- B. Back up all user databases to a network share, and verify the backup 16 | EXECUTE dbo.DatabaseBackup 17 | @Databases = 'USER_DATABASES', 18 | @Directory = '\\Server1\Backup', 19 | @BackupType = 'FULL', 20 | @Verify = 'Y' 21 | 22 | -- C. Back up all user databases across four network shares, and verify the backup 23 | EXECUTE dbo.DatabaseBackup 24 | @Databases = 'USER_DATABASES', 25 | @Directory = '\\Server1\Backup, \\Server2\Backup, \\Server3\Backup, \\Server4\Backup', 26 | @BackupType = 'FULL', 27 | @Verify = 'Y', 28 | @NumberOfFiles = 4 29 | 30 | -- D. Back up all user databases to 64 files, using checksums and compression and setting the buffer count and the maximum transfer size 31 | EXECUTE dbo.DatabaseBackup 32 | @Databases = 'USER_DATABASES', 33 | @Directory = 'C:\Backup', 34 | @BackupType = 'FULL', 35 | @Compress = 'Y', 36 | @Checksum = 'Y', 37 | @BufferCount = 50, 38 | @MaxTransferSize = 4194304, 39 | @NumberOfFiles = 64 40 | 41 | -- E. Back up all user databases to Azure Blob Storage, using compression 42 | EXECUTE dbo.DatabaseBackup 43 | @Databases = 'USER_DATABASES', 44 | @URL = 'https://myaccount.blob.core.windows.net/mycontainer', 45 | @Credential = 'MyCredential', 46 | @BackupType = 'FULL', 47 | @Compress = 'Y', 48 | @Verify = 'Y' 49 | 50 | -- F. Back up all user databases to S3 storage, using compression 51 | EXECUTE dbo.DatabaseBackup 52 | @Databases = 'USER_DATABASES', 53 | @URL = 's3://myaccount.s3.us-east-1.amazonaws.com/mybucket', 54 | @BackupType = 'FULL', 55 | @Compress = 'Y', 56 | @Verify = 'Y', 57 | @MaxTransferSize = 20971520, 58 | @BackupOptions = '{"s3": {"region":"us-east-1"}}' 59 | 60 | -- G. Back up the transaction log of all user databases, using the option to change the backup type if a log backup cannot be performed 61 | EXECUTE dbo.DatabaseBackup 62 | @Databases = 'USER_DATABASES', 63 | @Directory = 'C:\Backup', 64 | @BackupType = 'LOG', 65 | @ChangeBackupType = 'Y' 66 | 67 | -- H. Back up all user databases, using compression, encryption, and a server certificate. 68 | EXECUTE dbo.DatabaseBackup @Databases = 'USER_DATABASES', 69 | @Directory = 'C:\Backup', 70 | @BackupType = 'FULL', 71 | @Compress = 'Y', 72 | @Encrypt = 'Y', 73 | @EncryptionAlgorithm = 'AES_256', 74 | @ServerCertificate = 'MyCertificate' 75 | 76 | -- I. Back up all user databases, using compression, encryption, and LiteSpeed, and limiting the CPU usage to 10 percent 77 | EXECUTE dbo.DatabaseBackup 78 | @Databases = 'USER_DATABASES', 79 | @Directory = 'C:\Backup', 80 | @BackupType = 'FULL', 81 | @BackupSoftware = 'LITESPEED', 82 | @Compress = 'Y', 83 | @Encrypt = 'Y', 84 | @EncryptionAlgorithm = 'AES_256', 85 | @EncryptionKey = 'MyPassword', 86 | @Throttle = 10 87 | 88 | -- J. Back up all user databases, using compression, encryption, and Red Gate SQL Backup Pro 89 | EXECUTE dbo.DatabaseBackup 90 | @Databases = 'USER_DATABASES', 91 | @Directory = 'C:\Backup', 92 | @BackupType = 'FULL', 93 | @BackupSoftware = 'SQLBACKUP', 94 | @Compress = 'Y', 95 | @Encrypt = 'Y', 96 | @EncryptionAlgorithm = 'AES_256', 97 | @EncryptionKey = 'MyPassword' 98 | 99 | -- K. 
Back up all user databases, using compression, encryption, and Idera SQL Safe Backup 100 | EXECUTE dbo.DatabaseBackup 101 | @Databases = 'USER_DATABASES', 102 | @Directory = 'C:\Backup', 103 | @BackupType = 'FULL', 104 | @BackupSoftware = 'SQLSAFE', 105 | @Compress = 'Y', 106 | @Encrypt = 'Y', 107 | @EncryptionAlgorithm = 'AES_256', 108 | @EncryptionKey = '8tPyzp4i1uF/ydAN1DqevdXDeVoryWRL' 109 | 110 | -- L. Back up all user databases, using mirrored backups. 111 | EXECUTE dbo.DatabaseBackup 112 | @Databases = 'USER_DATABASES', 113 | @Directory = 'C:\Backup', 114 | @MirrorDirectory = 'D:\Backup', 115 | @BackupType = 'FULL', 116 | @Compress = 'Y', 117 | @Verify = 'Y', 118 | @CleanupTime = 24, 119 | @MirrorCleanupTime = 48 120 | 121 | -- M. Back up all user databases, using Data Domain Boost. 122 | EXECUTE dbo.DatabaseBackup 123 | @Databases = 'USER_DATABASES', 124 | @BackupType = 'FULL', 125 | @Checksum = 'Y', 126 | @BackupSoftware = 'DATA_DOMAIN_BOOST', 127 | @DataDomainBoostHost = 'Host', 128 | @DataDomainBoostUser = 'User', 129 | @DataDomainBoostDevicePath = '/DevicePath', 130 | @DataDomainBoostLockboxPath = 'C:\Program Files\DPSAPPS\common\lockbox', 131 | @CleanupTime = 24 132 | 133 | -- N. Back up all user databases, with the default directory structure and file names. 134 | EXECUTE dbo.DatabaseBackup 135 | @Databases = 'USER_DATABASES', 136 | @Directory = 'C:\Backup', 137 | @BackupType = 'FULL', 138 | @DirectoryStructure = '{ServerName}${InstanceName}{DirectorySeparator}{DatabaseName}{DirectorySeparator}{BackupType}_{Partial}_{CopyOnly}', 139 | @AvailabilityGroupDirectoryStructure = '{ClusterName}${AvailabilityGroupName}{DirectorySeparator}{DatabaseName}{DirectorySeparator}{BackupType}_{Partial}_{CopyOnly}', 140 | @FileName = '{ServerName}${InstanceName}_{DatabaseName}_{BackupType}_{Partial}_{CopyOnly}_{Year}{Month}{Day}_{Hour}{Minute}{Second}_{FileNumber}.{FileExtension}', 141 | @AvailabilityGroupFileName = '{ClusterName}${AvailabilityGroupName}_{DatabaseName}_{BackupType}_{Partial}_{CopyOnly}_{Year}{Month}{Day}_{Hour}{Minute}{Second}_{FileNumber}.{FileExtension}' 142 | 143 | -- O. Back up all user databases, to a directory structure without the server name, instance name, cluster name, and availability group name. 144 | EXECUTE dbo.DatabaseBackup 145 | @Databases = 'USER_DATABASES', 146 | @Directory = 'C:\Backup', 147 | @BackupType = 'FULL', 148 | @DirectoryStructure = '{DatabaseName}{DirectorySeparator}{BackupType}_{Partial}_{CopyOnly}', 149 | @AvailabilityGroupDirectoryStructure = '{DatabaseName}{DirectorySeparator}{BackupType}_{Partial}_{CopyOnly}' 150 | 151 | -- P. Back up all user databases, without creating any sub-directories. 
152 | EXECUTE dbo.DatabaseBackup 153 | @Databases = 'USER_DATABASES', 154 | @Directory = 'C:\Backup', 155 | @BackupType = 'FULL', 156 | @DirectoryStructure = NULL, 157 | @AvailabilityGroupDirectoryStructure = NULL -------------------------------------------------------------------------------- /sql_maintenance/use_cases/index_statistics_ola_hallengren.sql: -------------------------------------------------------------------------------- 1 | -- ####################### 2 | -- INDEX MAINTENANCE USING OLA HALLENGREN SOLUTION 3 | -- ####################### 4 | 5 | -- Complete Index and Statistics Maintenance 6 | EXECUTE dbo.IndexOptimize 7 | @Databases = 'USER_DATABASES', -- All user databases 8 | @FragmentationLow = NULL, -- No maintenance for low fragmentation 9 | @FragmentationMedium = 'INDEX_REORGANIZE,INDEX_REBUILD_ONLINE,INDEX_REBUILD_OFFLINE', 10 | @FragmentationHigh = 'INDEX_REBUILD_ONLINE,INDEX_REBUILD_OFFLINE', 11 | @FragmentationLevel1 = 5, -- Medium fragmentation threshold 12 | @FragmentationLevel2 = 30, -- High fragmentation threshold 13 | @MinNumberOfPages = 1000, -- Skip small indexes 14 | @MaxNumberOfPages = 0, -- No upper limit 15 | @SortInTempdb = 'Y', -- Use tempdb for sort operations 16 | @MaxDOP = 0, -- Use server MaxDOP 17 | @FillFactor = 90, -- Fill factor for rebuilt indexes 18 | @PadIndex = 'Y', -- Apply fill factor to intermediate pages 19 | @LOBCompaction = 'Y', -- Compact LOB pages on reorganize 20 | @UpdateStatistics = 'ALL', -- Update statistics for indexes and columns 21 | @OnlyModifiedStatistics = 'Y', -- Only update modified statistics 22 | @StatisticsModificationLevel = 10, -- Percentage threshold for updating stats 23 | @StatisticsSample = NULL, -- Auto sample 24 | @StatisticsResample = 'N', -- Do not resample 25 | @PartitionLevel = 'Y', -- Maintain partitioned indexes per partition 26 | @MSShippedObjects = 'N', -- Skip system objects 27 | @Indexes = 'ALL_INDEXES', -- All indexes 28 | @TimeLimit = 3600, -- Maximum execution time in seconds 29 | @Delay = 0, -- No delay between index commands 30 | @WaitAtLowPriorityMaxDuration = 30, -- Max wait in minutes for online rebuild 31 | @WaitAtLowPriorityAbortAfterWait = 'SELF', -- Abort if low priority waits exceed max 32 | @Resumable = 'Y', -- Make online rebuilds resumable 33 | @AvailabilityGroups = 'ALL_AVAILABILITY_GROUPS', -- Check all AGs 34 | @LockTimeout = 0, -- No lock timeout 35 | @LockMessageSeverity = 16, -- Error severity on lock timeouts 36 | @DatabasesInParallel = 'Y', -- Run databases in parallel 37 | @ExecuteAsUser = 'dbo', -- Execute as dbo 38 | @LogToTable = 'Y', -- Log results to dbo.CommandLog 39 | @Execute = 'Y'; -- Execute commands 40 | 41 | 42 | -- A. Rebuild or reorganize all indexes with fragmentation on all user databases 43 | EXECUTE dbo.IndexOptimize 44 | @Databases = 'USER_DATABASES', 45 | @FragmentationLow = NULL, 46 | @FragmentationMedium = 'INDEX_REORGANIZE,INDEX_REBUILD_ONLINE,INDEX_REBUILD_OFFLINE', 47 | @FragmentationHigh = 'INDEX_REBUILD_ONLINE,INDEX_REBUILD_OFFLINE', 48 | @FragmentationLevel1 = 5, 49 | @FragmentationLevel2 = 30 50 | 51 | -- B. 
Rebuild or reorganize all indexes with fragmentation and update modified statistics on all user databases 52 | EXECUTE dbo.IndexOptimize 53 | @Databases = 'USER_DATABASES', 54 | @FragmentationLow = NULL, 55 | @FragmentationMedium = 'INDEX_REORGANIZE,INDEX_REBUILD_ONLINE,INDEX_REBUILD_OFFLINE', 56 | @FragmentationHigh = 'INDEX_REBUILD_ONLINE,INDEX_REBUILD_OFFLINE', 57 | @FragmentationLevel1 = 5, 58 | @FragmentationLevel2 = 30, 59 | @UpdateStatistics = 'ALL', 60 | @OnlyModifiedStatistics = 'Y' 61 | 62 | -- C. Update statistics on all user databases 63 | EXECUTE dbo.IndexOptimize 64 | @Databases = 'USER_DATABASES', 65 | @FragmentationLow = NULL, 66 | @FragmentationMedium = NULL, 67 | @FragmentationHigh = NULL, 68 | @UpdateStatistics = 'ALL' 69 | 70 | -- D. Update modified statistics on all user databases 71 | EXECUTE dbo.IndexOptimize 72 | @Databases = 'USER_DATABASES', 73 | @FragmentationLow = NULL, 74 | @FragmentationMedium = NULL, 75 | @FragmentationHigh = NULL, 76 | @UpdateStatistics = 'ALL', 77 | @OnlyModifiedStatistics = 'Y' 78 | 79 | -- E. Rebuild or reorganize all indexes with fragmentation on all user databases, performing sort operations in tempdb and using all available CPUs 80 | EXECUTE dbo.IndexOptimize @Databases = 'USER_DATABASES', 81 | @FragmentationLow = NULL, 82 | @FragmentationMedium = 'INDEX_REORGANIZE,INDEX_REBUILD_ONLINE,INDEX_REBUILD_OFFLINE', 83 | @FragmentationHigh = 'INDEX_REBUILD_ONLINE,INDEX_REBUILD_OFFLINE', 84 | @FragmentationLevel1 = 5, 85 | @FragmentationLevel2 = 30, 86 | @SortInTempdb = 'Y', 87 | @MaxDOP = 0 88 | 89 | -- F. Rebuild or reorganize all indexes with fragmentation on all user databases, using the option to maintain partitioned indexes on the partition level 90 | EXECUTE dbo.IndexOptimize 91 | @Databases = 'USER_DATABASES', 92 | @FragmentationLow = NULL, 93 | @FragmentationMedium = 'INDEX_REORGANIZE,INDEX_REBUILD_ONLINE,INDEX_REBUILD_OFFLINE', 94 | @FragmentationHigh = 'INDEX_REBUILD_ONLINE,INDEX_REBUILD_OFFLINE', 95 | @FragmentationLevel1 = 5, 96 | @FragmentationLevel2 = 30, 97 | @PartitionLevel = 'Y' 98 | 99 | -- G. Rebuild or reorganize all indexes with fragmentation on all user databases, with a time limit so that no commands are executed after 3600 seconds 100 | EXECUTE dbo.IndexOptimize 101 | @Databases = 'USER_DATABASES', 102 | @FragmentationLow = NULL, 103 | @FragmentationMedium = 'INDEX_REORGANIZE,INDEX_REBUILD_ONLINE,INDEX_REBUILD_OFFLINE', 104 | @FragmentationHigh = 'INDEX_REBUILD_ONLINE,INDEX_REBUILD_OFFLINE', 105 | @FragmentationLevel1 = 5, 106 | @FragmentationLevel2 = 30, 107 | @TimeLimit = 3600 108 | 109 | -- H. Rebuild or reorganize all indexes with fragmentation on the table Production.Product in the database AdventureWorks 110 | EXECUTE dbo.IndexOptimize 111 | @Databases = 'AdventureWorks', 112 | @FragmentationLow = NULL, 113 | @FragmentationMedium = 'INDEX_REORGANIZE,INDEX_REBUILD_ONLINE,INDEX_REBUILD_OFFLINE', 114 | @FragmentationHigh = 'INDEX_REBUILD_ONLINE,INDEX_REBUILD_OFFLINE', 115 | @FragmentationLevel1 = 5, 116 | @FragmentationLevel2 = 30, 117 | @Indexes = 'AdventureWorks.Production.Product' 118 | 119 | -- I. 
Rebuild or reorganize all indexes with fragmentation except indexes on the table Production.Product in the database AdventureWorks 120 | EXECUTE dbo.IndexOptimize 121 | @Databases = 'USER_DATABASES', 122 | @FragmentationLow = NULL, 123 | @FragmentationMedium = 'INDEX_REORGANIZE,INDEX_REBUILD_ONLINE,INDEX_REBUILD_OFFLINE', 124 | @FragmentationHigh = 'INDEX_REBUILD_ONLINE,INDEX_REBUILD_OFFLINE', 125 | @FragmentationLevel1 = 5, 126 | @FragmentationLevel2 = 30, 127 | @Indexes = 'ALL_INDEXES, -AdventureWorks.Production.Product' 128 | 129 | -- J. Rebuild or reorganize all indexes with fragmentation on all user databases and log the results to a table 130 | EXECUTE dbo.IndexOptimize 131 | @Databases = 'USER_DATABASES', 132 | @FragmentationLow = NULL, 133 | @FragmentationMedium = 'INDEX_REORGANIZE,INDEX_REBUILD_ONLINE,INDEX_REBUILD_OFFLINE', 134 | @FragmentationHigh = 'INDEX_REBUILD_ONLINE,INDEX_REBUILD_OFFLINE', 135 | @FragmentationLevel1 = 5, 136 | @FragmentationLevel2 = 30, 137 | @LogToTable = 'Y' -------------------------------------------------------------------------------- /sql_monitor/stack/docs/database_space_usage.md: -------------------------------------------------------------------------------- 1 | ### Total Space per Table 2 | ```sql 3 | SELECT 4 | ISNULL(t.NAME, 'Total') AS TableName, 5 | ISNULL(s.NAME, '') AS SchemaName, 6 | CAST(ROUND(((SUM(a.total_pages) * 8) / 1024.0), 2) AS DECIMAL(18, 2)) AS TotalSpaceMB 7 | FROM 8 | sys.tables t 9 | INNER JOIN 10 | sys.indexes i ON t.OBJECT_ID = i.object_id 11 | INNER JOIN 12 | sys.partitions p ON i.object_id = p.OBJECT_ID AND i.index_id = p.index_id 13 | INNER JOIN 14 | sys.allocation_units a ON p.partition_id = a.container_id 15 | INNER JOIN 16 | sys.schemas s ON t.schema_id = s.schema_id 17 | GROUP BY 18 | ROLLUP(t.Name, s.Name) 19 | ORDER BY 20 | CASE WHEN t.Name IS NULL THEN 1 ELSE 0 END, 21 | TotalSpaceMB DESC; 22 | ``` 23 | * **Purpose:** To measure the total space allocated to each table in the database, including indexes and data. 24 | * **Metric:** Total space per table in MB. 25 | * **Use case:** Identifying large tables that may require optimization or monitoring storage usage. 26 | * **Reference:** `sys.tables`, `sys.indexes`, `sys.partitions`, `sys.allocation_units`. 27 | 28 | ### Used Space per Table 29 | ```sql 30 | SELECT 31 | ISNULL(t.NAME, 'Total') AS TableName, 32 | ISNULL(s.NAME, '') AS SchemaName, 33 | CAST(ROUND(((SUM(a.used_pages) * 8) / 1024.0), 2) AS DECIMAL(18, 2)) AS UsedSpaceMB 34 | FROM 35 | sys.tables t 36 | INNER JOIN 37 | sys.indexes i ON t.OBJECT_ID = i.object_id 38 | INNER JOIN 39 | sys.partitions p ON i.object_id = p.OBJECT_ID AND i.index_id = p.index_id 40 | INNER JOIN 41 | sys.allocation_units a ON p.partition_id = a.container_id 42 | INNER JOIN 43 | sys.schemas s ON t.schema_id = s.schema_id 44 | GROUP BY 45 | ROLLUP(t.Name, s.Name) 46 | ORDER BY 47 | CASE WHEN t.Name IS NULL THEN 1 ELSE 0 END, 48 | UsedSpaceMB DESC; 49 | ``` 50 | * **Purpose:** To measure the actual used space of each table. 51 | * **Metric:** Used space per table in MB. 52 | * **Use case:** Tracking how much allocated storage is actually utilized. 53 | * **Reference:** `sys.tables`, `sys.indexes`, `sys.partitions`, `sys.allocation_units`. 
54 |
55 | ### Unused Space per Table
56 | ```sql
57 | SELECT
58 | ISNULL(t.NAME, 'Total') AS TableName,
59 | ISNULL(s.NAME, '') AS SchemaName,
60 | CAST(ROUND(((SUM(a.total_pages - a.used_pages) * 8) / 1024.0), 2) AS DECIMAL(18, 2)) AS UnusedSpaceMB
61 | FROM
62 | sys.tables t
63 | INNER JOIN
64 | sys.indexes i ON t.OBJECT_ID = i.object_id
65 | INNER JOIN
66 | sys.partitions p ON i.object_id = p.OBJECT_ID AND i.index_id = p.index_id
67 | INNER JOIN
68 | sys.allocation_units a ON p.partition_id = a.container_id
69 | INNER JOIN
70 | sys.schemas s ON t.schema_id = s.schema_id
71 | GROUP BY
72 | ROLLUP(t.Name, s.Name)
73 | ORDER BY
74 | CASE WHEN t.Name IS NULL THEN 1 ELSE 0 END,
75 | UnusedSpaceMB DESC;
76 | ```
77 | * **Purpose:** To determine how much allocated space is unused per table.
78 | * **Metric:** Unused space per table in MB.
79 | * **Use case:** Identifying tables with inefficient space usage for maintenance.
80 | * **Reference:** `sys.tables`, `sys.indexes`, `sys.partitions`, `sys.allocation_units`.
81 |
82 | ### Pending Memory Grants
83 | ```sql
84 | SELECT
85 | COUNT(*) AS MemoryGrantsPending
86 | FROM sys.dm_exec_query_memory_grants
87 | WHERE grant_time IS NULL;
88 | ```
89 | * **Purpose:** To track queries waiting for memory allocation.
90 | * **Metric:** Count of pending memory grants.
91 | * **Use case:** Monitoring memory pressure in SQL Server to detect performance bottlenecks.
92 | * **Reference:** `sys.dm_exec_query_memory_grants`.
93 |
94 | ### Transaction Log Usage
95 | ```sql
96 | DBCC SQLPERF (LOGSPACE);
97 | ```
98 | * **Purpose:** To report transaction log space usage per database.
99 | * **Metric:** Log space used and percentage.
100 | * **Use case:** Monitoring log growth and ensuring proper log backups.
101 | * **Reference:** `DBCC SQLPERF`.
102 |
103 | ### Database Files Info
104 | ```sql
105 | SELECT name AS FileName,
106 | size/128 AS FileSizeMB,
107 | physical_name AS PhysicalName
108 | FROM sys.master_files;
109 | ```
110 | * **Purpose:** To list database files and their sizes.
111 | * **Metric:** File size in MB and physical file path.
112 | * **Use case:** Tracking file sizes for storage management.
113 | * **Reference:** `sys.master_files`.
114 |
115 | ### Active Locks
116 | ```sql
117 | SELECT
118 | request_session_id AS 'Session ID',
119 | resource_database_id AS 'Database ID',
120 | resource_associated_entity_id AS 'Entity ID',
121 | request_mode AS LockType,
122 | request_status AS Status
123 | FROM sys.dm_tran_locks;
124 | ```
125 | * **Purpose:** To monitor currently active locks in the database.
126 | * **Metric:** Lock type, session, and status.
127 | * **Use case:** Detecting blocking, deadlocks, or resource contention.
128 | * **Reference:** `sys.dm_tran_locks`.
129 |
130 | ### Backup History
131 | ```sql
132 | SELECT database_name,
133 | backup_start_date,
134 | backup_finish_date,
135 | backup_size / 1024 / 1024 AS BackupSizeMB
136 | FROM msdb.dbo.backupset
137 | ORDER BY backup_start_date DESC;
138 | ```
139 | * **Purpose:** To track database backup history and sizes.
140 | * **Metric:** Backup start/finish date and size in MB.
141 | * **Use case:** Verifying backup schedules and sizes.
142 | * **Reference:** `msdb.dbo.backupset`.
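Complementing the `DBCC SQLPERF (LOGSPACE)` check above, the same per-database log usage can also be read as a regular query from the `Percent Log Used` counter, which is easier to feed into a monitoring stack; a minimal sketch (excluding the `_Total` aggregate is an assumption):
```sql
SELECT
    instance_name AS [Database],
    cntr_value AS [PercentLogUsed]
FROM sys.dm_os_performance_counters
WHERE counter_name = 'Percent Log Used'
    AND object_name LIKE '%Databases%'   -- covers default and named instances
    AND instance_name <> '_Total'
ORDER BY cntr_value DESC;
```
* **Use case:** Same signal as `DBCC SQLPERF (LOGSPACE)`, but returned as a result set that dashboards or alert jobs can consume directly.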
143 | 144 | ### Row Counts per Table 145 | ```sql 146 | SELECT 147 | ISNULL(t.NAME, 'Total') AS TableName, 148 | ISNULL(s.NAME, '') AS SchemaName, 149 | SUM(p.rows) AS RowCounts 150 | FROM 151 | sys.tables t 152 | INNER JOIN 153 | sys.indexes i ON t.OBJECT_ID = i.object_id 154 | INNER JOIN 155 | sys.partitions p ON i.object_id = p.OBJECT_ID AND i.index_id = p.index_id 156 | INNER JOIN 157 | sys.schemas s ON t.schema_id = s.schema_id 158 | GROUP BY 159 | ROLLUP(t.Name, s.Name) 160 | ORDER BY 161 | CASE WHEN t.Name IS NULL THEN 1 ELSE 0 END, 162 | RowCounts DESC; 163 | ``` 164 | * **Purpose:** To count the number of rows per table. 165 | * **Metric:** Row counts per table. 166 | * **Use case:** Monitoring table growth trends and data distribution. 167 | * **Reference:** `sys.tables`, `sys.partitions`. 168 | 169 | ### Memory Usage 170 | ```sql 171 | SELECT 172 | CAST(total_physical_memory_kb / 1024.0 / 1024.0 AS DECIMAL(10,2)) AS Total_Physical_Memory_GB, 173 | CAST(available_physical_memory_kb / 1024.0 / 1024.0 AS DECIMAL(10,2)) AS Available_Physical_Memory_GB, 174 | CAST(total_page_file_kb / 1024.0 / 1024.0 AS DECIMAL(10,2)) AS Total_Page_File_GB, 175 | CAST(available_page_file_kb / 1024.0 / 1024.0 AS DECIMAL(10,2)) AS Available_Page_File_GB 176 | FROM sys.dm_os_sys_memory; 177 | ``` 178 | * **Purpose:** To monitor system memory usage on the server. 179 | * **Metric:** Total and available physical memory and page file in KB or GB. 180 | * **Use case:** Detecting memory pressure affecting SQL Server performance. 181 | * **Reference:** `sys.dm_os_sys_memory`. 182 | 183 | ### Disk I/O Stats per Database File 184 | ```sql 185 | SELECT database_id, 186 | file_id, 187 | io_stall_read_ms AS DiskReadStall, 188 | io_stall_write_ms AS DiskWriteStall, 189 | num_of_reads, 190 | num_of_writes 191 | FROM sys.dm_io_virtual_file_stats (2, NULL); 192 | ``` 193 | * **Purpose:** To monitor disk I/O performance per database file. 194 | * **Metric:** Read/write stalls and counts per file. 195 | * **Use case:** Identifying I/O bottlenecks and slow storage. 196 | * **Reference:** `sys.dm_io_virtual_file_stats`. 197 | 198 | ### Database size per database 199 | ```sql 200 | SELECT 201 | db.name AS DatabaseName, 202 | mf.name AS FileName, 203 | mf.type_desc AS FileType, 204 | CAST(mf.size * 8.0 / 1024 / 1024 AS DECIMAL(10,2)) AS FileSize_GB, 205 | CAST((mf.size - FILEPROPERTY(mf.name, 'SpaceUsed')) * 8.0 / 1024 / 1024 AS DECIMAL(10,2)) AS FreeSpace_GB, 206 | vs.total_bytes / 1024.0 / 1024 / 1024 AS DriveSize_GB, 207 | vs.available_bytes / 1024.0 / 1024 / 1024 AS DriveFreeSpace_GB 208 | FROM sys.master_files mf 209 | INNER JOIN sys.databases db ON db.database_id = mf.database_id 210 | CROSS APPLY sys.dm_os_volume_stats(mf.database_id, mf.file_id) vs 211 | ORDER BY db.name, mf.type_desc; 212 | 213 | -- Sum of values 214 | SELECT 215 | SUM(CAST(mf.size * 8.0 / 1024 / 1024 AS DECIMAL(10,2))) AS FileSize_GB, 216 | SUM(CAST((mf.size - FILEPROPERTY(mf.name, 'SpaceUsed')) * 8.0 / 1024 / 1024 AS DECIMAL(10,2))) AS FreeSpace_GB, 217 | vs.total_bytes / 1024.0 / 1024 / 1024 AS DriveSize_GB, 218 | vs.available_bytes / 1024.0 / 1024 / 1024 AS DriveFreeSpace_GB 219 | FROM sys.master_files mf 220 | INNER JOIN sys.databases db ON db.database_id = mf.database_id 221 | CROSS APPLY sys.dm_os_volume_stats(mf.database_id, mf.file_id) vs 222 | GROUP BY vs.total_bytes, vs.available_bytes 223 | ``` 224 | * **Purpose:** Provide a snapshot of database file sizes and free space on disk for all databases. 
Helps monitor storage utilization and prevent space-related issues. 225 | * **Metric:** 226 | * DatabaseName → Name of the database. 227 | * FileName → Logical file name. 228 | * FileType → Type of file: ROWS (data) or LOG (transaction log). 229 | * FileSize_GB → Total size of the file in GB. 230 | * FreeSpace_GB → Available free space inside the file in GB. 231 | * DriveSize_GB → Total size of the drive hosting the file. 232 | * DriveFreeSpace_GB → Free space available on the drive. 233 | * **Use case:** 234 | * Detect databases approaching their file size limit. 235 | * Monitor log and data file growth. 236 | * Identify low disk space on drives hosting databases to prevent failures. 237 | * Useful for capacity planning and maintenance. 238 | * **Reference:** 239 | * sys.master_files → SQL Server system view for database files. 240 | * FILEPROPERTY() → Returns file space usage info. 241 | * sys.dm_os_volume_stats() → Provides disk-level statistics for database files. --------------------------------------------------------------------------------