├── .gitattributes ├── .gitignore ├── AG - Am I Primary.sql ├── AG - Health_XEvents pre-2016.sql ├── AG - Health_XEvents.sql ├── AG - Monitor.sql ├── AG - PerfMon counters.sql ├── AG - alerts.sql ├── AG - check owner.sql ├── AG - extendedEvents_Create.sql ├── AG - failover (in development).ps1 ├── AG - monitor automatic seeding.sql ├── AG - post-zerto failover.ps1 ├── AG - readonly routing urls.sql ├── AG - worker threads diagnostic.sql ├── AutomatedFileGrowthV2.sql ├── README.md ├── SSHC - Agent-Security-Files-Backups-DBMail.ipynb ├── SSHC - Database.ipynb ├── SSHC-HighAvailability.ipynb ├── SSHC-Windows-SQLConfig-BasePerf.ipynb ├── SSRS rebuild RSExecRole permissions.sql ├── Stable State Health Checks.ipynb ├── add database descriptions.sql ├── add error event alerts.sql ├── agent job history.sql ├── audit setup.sql ├── autogrow change all 1mb growth files.sql ├── autogrow events.sql ├── automated index rebuild old.sql ├── automated index rebuild.sql ├── avg disk secwrite.png ├── azure 15min dtu usage.sql ├── azure 15sec dtu usage.sql ├── azure sql db scale.ps1 ├── backup database master keys.sql ├── backup history oldest latest.sql ├── backup history.sql ├── backup progress.sql ├── backup readonly copy restore.sql ├── backup restore progress.sql ├── backup service master key.sql ├── backup to azure blob tsql script for legacy SQL.sql ├── bad clustered indexes.sql ├── basic diff backup to URL.sql ├── basic full backup to URL system dbs.sql ├── basic full backup to URL.sql ├── basic log backup to URL.sql ├── blocked processes report xevents.sql ├── capture login info xevent.sql ├── change tracking autocleanup diagnostic.sql ├── checksum vs hashbytes.sql ├── collect connections.sql ├── compress indexes.sql ├── configuration changes history.sql ├── cpu utilization.sql ├── database mail diag.sql ├── database mail profiles.sql ├── database ownership.sql ├── database settings info.sql ├── dbatools.Copy-DbaLogin.ps1 ├── deadlocks in xevents.sql ├── defrag columnstore.sql ├── defrag.sql ├── dependencies.sql ├── deprecated usage counter.sql ├── dimdate.sql ├── dm_exec_query_memory_grants.sql ├── dm_exec_session_wait_stats.sql ├── dm_os_performance_counters.sql ├── dm_os_wait_stats.sql ├── dm_os_waiting_tasks.sql ├── easy passwords.sql ├── endpoint owners.sql ├── error log.sql ├── failover cluster force quorum.ps1 ├── filtered index opportunities.sql ├── find duplicate indexes.sql ├── find memory mini dumps.sql ├── firstByte (working).sql ├── fix orphaned sid.sql ├── fk drop recreate.sql ├── fk untrusted or disabled check.sql ├── fulltext index demo.sql ├── fulltext index status.sql ├── gather log events - remoting.ps1 ├── gather log events.ps1 ├── generate error tables.sql ├── get disk block allocation size.ps1 ├── guest permissions.sql ├── hypothetical cleanup.sql ├── index ALLOW_PAGE_LOCKS.sql ├── index_usage_stats.sql ├── indirect checkpoints enable.sql ├── install failover clustering features.ps1 ├── instant_file_initialization.sql ├── job - Backup Failure Notifications.sql ├── job - File Growth Management.sql ├── job - Space in Files Monitoring.sql ├── job - Volume Stats Monitoring for Production.sql ├── job - Volume Stats Monitoring.sql ├── job - automated_index_maint for Azure SQL DB.sql ├── job - automated_index_maint for DBALogging.sql ├── job - job failure notifications.sql ├── job - memory stats Monitoring.sql ├── job - setup database audit.sql ├── job - startup failures.sql ├── job failure notifications.sql ├── job owners.sql ├── job status.sql ├── kill detect blocking sessions 
setup.sql ├── kill detect blocking spids job.sql ├── kill detect blocking spids.sql ├── lab - FORMATMESSAGE in SQL2016.sql ├── lab - TSQL 101.sql ├── lab - TemporalTable demo.sql ├── lab - VLF generation example.sql ├── lab - azure blob storage retention plan.ps1 ├── lab - backup to URL.sql ├── lab - basic error table.sql ├── lab - cdc 101.sql ├── lab - clustered key design.sql ├── lab - concat_ws.sql ├── lab - cte 101.sql ├── lab - cte with dimdate.sql ├── lab - datetime conversion from datetime to datetimeoffsets.sql ├── lab - deadlock part 1.sql ├── lab - deadlock part 2.sql ├── lab - default constraints.sql ├── lab - docker win AG.ps1 ├── lab - docker.ps1 ├── lab - dynamic data masking.sql ├── lab - execute as.sql ├── lab - filetable demo.sql ├── lab - fk untrusted or disabled check.ipynb ├── lab - fk untrusted or disabled check.sql ├── lab - float as pk problems.sql ├── lab - fragmented table create missing index.sql ├── lab - fragmented table int.sql ├── lab - fragmented table newsequentialid.sql ├── lab - fragmented table.sql ├── lab - implicit conversion.sql ├── lab - joins.sql ├── lab - materialized view with columnstore.sql ├── lab - memory optimized table.sql ├── lab - missing index setup demo.sql ├── lab - nested sproc tran rollback commit.sql ├── lab - nonsequentialguid.sql ├── lab - optimize_for_sequential_key testing.sql ├── lab - partitioning 101.sql ├── lab - power of columnstore.sql ├── lab - pre2016 correct time zone conversion from UTC.sql ├── lab - replaceFloatWithDecimal.sql ├── lab - restore wideworldimporters.sql ├── lab - resumeable index maintenance.sql ├── lab - security p1.sql ├── lab - security p2.sql ├── lab - sequence permissions.sql ├── lab - sql agent execution lab.sql ├── lab - string truncation error message improvement in SQL 2019.sql ├── lab - tde encryption workshop 2014.sql ├── lab - testing mem opt.sql ├── lab - time zone.sql ├── lab - translate syntax.sql ├── last known DBCC CHECKDB.sql ├── limit number of error log files.sql ├── lock Pages in Memory LPIM.sql ├── log_reuse_wait.sql ├── memory clerks.sql ├── memory-optimized table memory usage.sql ├── missing indexes.sql ├── modules vs routines.sql ├── move system databases.sql ├── multiserver backup history.sql ├── multiserver space in files.sql ├── my_permissions.sql ├── oledb providers.sql ├── open transactions.sql ├── optimize for ad hoc workloads.sql ├── overdue jobs.sql ├── page life expectancy Azure SQL DB.sql ├── page life expectancy.sql ├── permissions for readonly admin accounts.sql ├── public permissions.sql ├── query store troubleshooting.sql ├── record_dm_io_virtual_file_stats.sql ├── refresh view metadata.sql ├── rename SQL instance.sql ├── reports catalog metadata.sql ├── restore.sql ├── sas credential.sql ├── schema dependencies.sql ├── script ssisdb env.sql ├── security Script databases.sql ├── security Script servers.sql ├── security check msdb role members.sql ├── security check sysadmin members.sql ├── security group members.sql ├── services dmv.sql ├── sessions and requests - Azure SQL DB.sql ├── sessions and requests - SQL2000.sql ├── sessions and requests - dump to table.sql ├── sessions and requests - old temp table.sql ├── sessions and requests blocking chain.jpg ├── sessions and requests.sql ├── sharepoint databases.ps1 ├── sharepoint db recovery models.sql ├── size database files.sql ├── size in memory.sql ├── size.sql ├── sp_GetAGInformation.sql ├── sp_repldone.sql ├── space in files.sql ├── sproc sessions and requests.sql ├── ssis events.sql ├── ssisdb execution query.sql 
├── ssisdb history cleanup.sql ├── ssisdb package history.sql ├── ssrs Subscription catalog inventory.sql ├── ssrs Subscription failure emails.sql ├── ssrs Subscription start jobs.sql ├── ssrs subscription failure logs.sql ├── ssrs subscriptions status.sql ├── stats out of date whileloop.sql ├── stats out of date.sql ├── sys.dm_io_virtual_file_stats.sql ├── sys_dm_os_ring_buffers.sql ├── tables without clustered indexes.sql ├── tde setup.sql ├── tde status.sql ├── tempdb data files.sql ├── tempdb space used diagnostic.sql ├── test user permissions.sql ├── toolbox-stablestate.zip ├── toolbox.zip ├── uncommitted transactions.sql ├── vlfs analysis.sql ├── volume stats.sql ├── worst query plans.sql ├── xevent ringbuffer.sql └── zip toolbox.ps1 /.gitattributes: -------------------------------------------------------------------------------- 1 | *.sql linguist-detectable=true 2 | *.sql linguist-language=sql 3 | *.ps1 linguist-detectable=true 4 | *.ps1 linguist-language=PowerShell -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | zip toolbox.ps1 3 | -------------------------------------------------------------------------------- /AG - Am I Primary.sql: -------------------------------------------------------------------------------- 1 | 2 | --add as step 1 on every AAG-aware job 3 | IF NOT EXISTS ( 4 | SELECT @@SERVERNAME, * 5 | FROM sys.dm_hadr_availability_replica_states rs 6 | inner join sys.availability_databases_cluster dc 7 | on rs.group_id = dc.group_id 8 | WHERE is_local = 1 9 | and role_desc = 'PRIMARY' 10 | and dc.database_name = N'whateverdbname' 11 | ) 12 | BEGIN 13 | 14 | print 'local SQL instance is not primary, skipping'; 15 | throw 50000, 'Do not continue', 1; 16 | 17 | END 18 | -------------------------------------------------------------------------------- /AG - check owner.sql: -------------------------------------------------------------------------------- 1 | --should not be owned by named account! 2 | --Run this on EACH replica SQL instance, it may be different. 
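--Regarding "AG - Am I Primary.sql" above: a minimal sketch of how that check is typically wired in as step 1
--of an AG-aware SQL Agent job, so that non-primary replicas quit the job quietly instead of logging failures.
--The job name, step name, and database name below are hypothetical placeholders.
/*
EXEC msdb.dbo.sp_add_jobstep
     @job_name = N'WhateverAGAwareJob'
    ,@step_id = 1
    ,@step_name = N'Am I Primary check'
    ,@subsystem = N'TSQL'
    ,@command = N'IF NOT EXISTS (SELECT 1 FROM sys.dm_hadr_availability_replica_states rs
        INNER JOIN sys.availability_databases_cluster dc ON rs.group_id = dc.group_id
        WHERE rs.is_local = 1 AND rs.role_desc = ''PRIMARY'' AND dc.database_name = N''whateverdbname'')
    THROW 50000, N''Do not continue'', 1;'
    ,@on_success_action = 3 --go to the next step
    ,@on_fail_action = 1 --quit the job reporting success, so secondaries do not raise job failure alerts
;
*/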
3 | 4 | SELECT ar.replica_server_name 5 | ,ag.name AS ag_name 6 | ,ar.owner_sid 7 | ,sp.name 8 | FROM sys.availability_replicas ar 9 | LEFT JOIN sys.server_principals sp 10 | ON sp.sid = ar.owner_sid 11 | INNER JOIN sys.availability_groups ag 12 | ON ag.group_id = ar.group_id 13 | WHERE ar.replica_server_name = SERVERPROPERTY('ServerName') ; 14 | 15 | /* 16 | 17 | ALTER AUTHORIZATION ON AVAILABILITY GROUP::[drAG1] to [sa] ; 18 | 19 | */ -------------------------------------------------------------------------------- /AG - extendedEvents_Create.sql: -------------------------------------------------------------------------------- 1 | CREATE EVENT SESSION [AlwaysOn_health] ON SERVER 2 | ADD EVENT sqlserver.alwayson_ddl_executed, 3 | ADD EVENT sqlserver.availability_group_lease_expired, 4 | ADD EVENT sqlserver.availability_replica_automatic_failover_validation, 5 | ADD EVENT sqlserver.availability_replica_manager_state_change, 6 | ADD EVENT sqlserver.availability_replica_state, 7 | ADD EVENT sqlserver.availability_replica_state_change, 8 | ADD EVENT sqlserver.error_reported( 9 | WHERE ([error_number]=(9691) 10 | OR [error_number]=(35204) 11 | OR [error_number]=(9693) 12 | OR [error_number]=(26024) 13 | OR [error_number]=(28047) 14 | OR [error_number]=(26023) 15 | OR [error_number]=(9692) 16 | OR [error_number]=(28034) 17 | OR [error_number]=(28036) 18 | OR [error_number]=(28048) 19 | OR [error_number]=(28080) 20 | OR [error_number]=(28091) 21 | OR [error_number]=(26022) 22 | OR [error_number]=(9642) 23 | OR [error_number]=(35201) 24 | OR [error_number]=(35202) 25 | OR [error_number]=(35206) 26 | OR [error_number]=(35207) 27 | OR [error_number]=(26069) 28 | OR [error_number]=(26070) 29 | OR [error_number]>(41047) 30 | AND [error_number]<(41056) 31 | OR [error_number]=(41142) 32 | OR [error_number]=(41144) 33 | OR [error_number]=(1480) 34 | OR [error_number]=(823) 35 | OR [error_number]=(824) 36 | OR [error_number]=(829) 37 | OR [error_number]=(35264) 38 | OR [error_number]=(35265))), 39 | ADD EVENT sqlserver.hadr_db_partner_set_sync_state, 40 | ADD EVENT sqlserver.lock_redo_blocked 41 | ADD TARGET package0.event_file(SET filename=N'AlwaysOn_health.xel',max_file_size=(5),max_rollover_files=(4)) 42 | WITH (MAX_MEMORY=4096 KB,EVENT_RETENTION_MODE=ALLOW_SINGLE_EVENT_LOSS,MAX_DISPATCH_LATENCY=30 SECONDS,MAX_EVENT_SIZE=0 KB,MEMORY_PARTITION_MODE=NONE,TRACK_CAUSALITY=OFF,STARTUP_STATE=OFF) 43 | GO 44 | 45 | 46 | -------------------------------------------------------------------------------- /AG - monitor automatic seeding.sql: -------------------------------------------------------------------------------- 1 | -- Monitor automatic seeding 2 | USE master; 3 | GO 4 | SELECT s.local_database_name, s.role_desc, s.internal_state_desc, s.transfer_rate_bytes_per_second, s.transferred_size_bytes, s.database_size_bytes, s.start_time_utc, s.end_time_utc, s.estimate_time_complete_utc, s.total_disk_io_wait_time_ms, s.total_network_wait_time_ms, s.failure_message, s.failure_time_utc, s.is_compression_enabled 5 | FROM sys.dm_hadr_physical_seeding_stats s 6 | ORDER BY start_time_utc desc 7 | 8 | -- Automatic seeding History 9 | USE master; 10 | GO 11 | 12 | SELECT TOP 10 ag.name, dc.database_name, s.start_time, s.completion_time, s.current_state, s.performed_seeding, s.failure_state_desc, s.error_code, s.number_of_attempts 13 | FROM sys.dm_hadr_automatic_seeding s 14 | INNER JOIN sys.availability_databases_cluster dc ON s.ag_db_id = dc.group_database_id 15 | INNER JOIN sys.availability_groups ag ON s.ag_id = 
ag.group_id 16 | ORDER BY start_time desc; -------------------------------------------------------------------------------- /AG - worker threads diagnostic.sql: -------------------------------------------------------------------------------- 1 | 2 | /* 3 | References: 4 | https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/configure-the-max-worker-threads-server-configuration-option 5 | 6 | */ 7 | EXEC sp_configure 'show advanced options', 1; 8 | GO 9 | RECONFIGURE ; 10 | GO 11 | EXEC sp_configure 'max worker threads' --shows current setting, see below for script to change 12 | --Configuration of 0 is default, means it is automatically calculated by SQL according to formula in this link: https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/configure-the-max-worker-threads-server-configuration-option 13 | --Manually overriding the setting may be needed for replicas of Availability Groups with many databases, but obviously has an upper limit in terms of system stability. 14 | GO 15 | select 16 | max_workers_count = (select max_workers_count FROM sys.dm_os_sys_info) --current running config setting 17 | , active_workers_sum = sum(active_workers_count) --active_workers_sum should maintain < max_workers_count 18 | , work_queue_count_avg = avg(work_queue_count*1.) --Should not be above 1. If it is, probably need to override the worker threads formula (see above) and/or increase server processors. 19 | , current_workers_sum = sum(current_workers_count) --total, informative only 20 | FROM sys.dm_os_schedulers 21 | WHERE status = 'VISIBLE ONLINE' 22 | GO 23 | 24 | 25 | 26 | /* 27 | --sample below, to run on all replicas 28 | 29 | EXEC sp_configure 'show advanced options', 1; 30 | GO 31 | RECONFIGURE ; 32 | GO 33 | EXEC sp_configure 'max worker threads', 960; 34 | GO 35 | RECONFIGURE ; 36 | GO 37 | 38 | */ -------------------------------------------------------------------------------- /AutomatedFileGrowthV2.sql: -------------------------------------------------------------------------------- 1 | DECLARE @TempTable TABLE 2 | (ID INT Identity(1,1) not null, 3 | DatabaseName varchar(128) 4 | ,recovery_model_desc varchar(50) 5 | ,DatabaseFileName varchar(500) 6 | ,FileLocation varchar(500) 7 | ,FileId int 8 | ,FileSizeMB decimal(19,2) 9 | ,SpaceUsedMB decimal(19,2) 10 | ,AvailableMB decimal(19,2) 11 | ,FreePercent decimal(19,2) 12 | ,growTSQL nvarchar(4000) 13 | ) 14 | 15 | DECLARE @Threshold decimal(19,2) 16 | DECLARE @GrowFileTxt nvarchar(4000) 17 | Set @Threshold = 10.0 18 | 19 | INSERT INTO @TempTable 20 | exec sp_MSforeachdb 'use [?]; 21 | select *, 22 | growTSQL = ''ALTER DATABASE [''+DatabaseName_____________ COLLATE SQL_Latin1_General_CP1_CI_AS+''] 23 | MODIFY FILE ( NAME = N''''''+DatabaseFileName_______ COLLATE SQL_Latin1_General_CP1_CI_AS +'''''' 24 | , '' + CASE WHEN FileSizeMB < 100 THEN ''SIZE = ''+STR(FileSizeMB+64) 25 | WHEN FileSizeMB < 1000 THEN ''SIZE = ''+STR(FileSizeMB+256) 26 | WHEN FileSizeMB < 10000 THEN ''SIZE = ''+STR(FileSizeMB+1024) 27 | WHEN FileSizeMB < 40000 THEN ''SIZE = ''+STR(FileSizeMB+4092) 28 | ELSE ''SIZE = ''+STR(FileSizeMB+(FileSizeMB*.05)) END +''MB )'' 29 | FROM ( 30 | SELECT 31 | ''DatabaseName_____________'' = d.name 32 | , Recovery = d.recovery_model_desc 33 | , ''DatabaseFileName_______'' = df.name 34 | , Location = df.physical_name 35 | , File_ID = df.File_ID 36 | , FileSizeMB = CAST(size/128.0 as Decimal(9,2)) 37 | , SpaceUsedMB = CAST(CAST(FILEPROPERTY(df.name, ''SpaceUsed'') AS int)/128.0 as Decimal(9,2)) 38 | , 
AvailableMB = CAST(size/128.0 - CAST(FILEPROPERTY(df.name, ''SpaceUsed'') AS int)/128.0 as Decimal(9,2)) 39 | , FreePercent = CAST((((size/128.0) - (CAST(FILEPROPERTY(df.name, ''SpaceUsed'') AS int)/128.0)) / (size/128.0) ) * 100. as Decimal(9,2)) 40 | FROM sys.database_files df 41 | CROSS APPLY sys.databases d 42 | WHERE d.database_id = DB_ID() 43 | AND d.is_read_only = 0 44 | AND df.size > 0 45 | AND ( d.Replica_id is null or Exists ( 46 | SELECT @@SERVERNAME, * 47 | FROM sys.dm_hadr_availability_replica_states rs 48 | inner join sys.availability_databases_cluster dc 49 | on rs.group_id = dc.group_id 50 | WHERE is_local = 1 51 | and role_desc = ''PRIMARY'' 52 | and dc.database_name = d.name)) 53 | ) x; 54 | ' 55 | 56 | Delete from @TempTable where FreePercent > @Threshold 57 | Or FreePercent is NULL 58 | 59 | DECLARE @FileCounter INT = 1 --start at 1 to match the IDENTITY(1,1) IDs in @TempTable 60 | DECLARE @FileMax INT 61 | Set @FileMax = (Select Max(ID) from @TempTable) 62 | 63 | Print @FileCounter 64 | PRINT @FileMax 65 | 66 | while @FileCounter <= @FileMax --inclusive, so the last file in @TempTable is also grown 67 | begin 68 | Set @GrowFileTxt = (Select growTSQL from @TempTable where ID = @FileCounter) 69 | Exec (@GrowFileTxt) 70 | set @FileCounter = @FileCounter +1 71 | end 72 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # sql-server-toolbox 2 | SQL Server Toolbox 3 | 4 | ## Getting Started 5 | 6 | A loose assembly of files used for day-to-day administration, performance tuning, troubleshooting, and investigation of Microsoft SQL Server databases. 7 | 8 | ### Prerequisites 9 | 10 | Most scripts are developed for SQL Server 2012 or higher; many have alternative versions for backwards compatibility with versions of SQL Server prior to 2012, and many include comments indicating features added in specific versions of SQL Server (a quick version-check snippet follows this README). 11 | 12 | Most scripts are intended to be executed in SSMS or VS Code without SQLCMD mode enabled, unless otherwise noted. 13 | 14 | PowerShell scripts should work on PowerShell 3 and higher unless otherwise noted. 15 | 16 | ## Contributing 17 | 18 | Suggestions and corrections are welcome. Collaboration is limited to the Sparkhound SQL DBA team. 19 | 20 | ## Contributors 21 | 22 | * **William Assaf** - [williamadba](https://github.com/williamadba) 23 | * **Colby Loupe** - [cloupe](https://github.com/cloupe) 24 | * **David Nguyen** - [davidnguyen2889](https://github.com/davidnguyen2889) 25 | * **Bill Carter** - [wfc1973](https://github.com/wfc1973) 26 | 27 | ## Acknowledgments 28 | 29 | * Code snippet sources are documented with a URL where appropriate; if one is lacking, please notify us. 30 | 31 | ## Easy Download 32 | 33 | * A regularly updated .zip file of the toolbox is available here: https://github.com/williamadba/sql-server-toolbox/blob/master/toolbox.zip for quick download of the entire folder. It is not guaranteed to contain all recent changes, but it usually does.
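Following up on the Prerequisites note above, a minimal version check to run before using version-dependent scripts; `SERVERPROPERTY` and `@@VERSION` are standard built-ins, and the version numbers in the comments are illustrative only.

    SELECT @@VERSION AS version_string
        , SERVERPROPERTY('ProductVersion') AS product_version --e.g. 13.0.x = SQL Server 2016, 14.0.x = SQL Server 2017
        , SERVERPROPERTY('Edition') AS edition
        , SERVERPROPERTY('EngineEdition') AS engine_edition; --5 = Azure SQL Database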
-------------------------------------------------------------------------------- /SSRS rebuild RSExecRole permissions.sql: -------------------------------------------------------------------------------- 1 | USE [master] 2 | GO 3 | CREATE ROLE [RSExecRole] AUTHORIZATION dbo 4 | GO 5 | --https://docs.microsoft.com/en-us/sql/reporting-services/security/create-the-rsexecrole 6 | GRANT EXECUTE ON master.dbo.xp_sqlagent_notify TO RSExecRole 7 | GRANT EXECUTE ON master.dbo.xp_sqlagent_is_starting TO RSExecRole 8 | GRANT EXECUTE ON master.dbo.xp_sqlagent_enum_jobs TO RSExecRole 9 | GO 10 | 11 | use msdb 12 | go 13 | CREATE ROLE [RSExecRole] AUTHORIZATION dbo 14 | GO 15 | GRANT EXECUTE ON msdb.dbo.sp_add_category TO RSExecRole 16 | GRANT EXECUTE ON msdb.dbo.sp_add_job TO RSExecRole 17 | GRANT EXECUTE ON msdb.dbo.sp_add_jobschedule TO RSExecRole 18 | GRANT EXECUTE ON msdb.dbo.sp_add_jobserver TO RSExecRole 19 | GRANT EXECUTE ON msdb.dbo.sp_add_jobstep TO RSExecRole 20 | GRANT EXECUTE ON msdb.dbo.sp_delete_job TO RSExecRole 21 | GRANT EXECUTE ON msdb.dbo.sp_help_category TO RSExecRole 22 | GRANT EXECUTE ON msdb.dbo.sp_help_job TO RSExecRole 23 | GRANT EXECUTE ON msdb.dbo.sp_help_jobschedule TO RSExecRole 24 | GRANT EXECUTE ON msdb.dbo.sp_verify_job_identifiers TO RSExecRole 25 | 26 | GRANT SELECT ON msdb.dbo.syscategories TO RSExecRole 27 | GRANT SELECT ON msdb.dbo.sysjobs TO RSExecRole 28 | GO -------------------------------------------------------------------------------- /add database descriptions.sql: -------------------------------------------------------------------------------- 1 | 2 | 3 | exec sp_MSforeachdb ' 4 | if ((select db_id(''?'')) > 4 or ''?'' = ''model'' ) 5 | BEGIN 6 | EXEC [?].sys.sp_addextendedproperty @name=N''Description'', @value=N''Change this value''; 7 | EXEC [?].sys.sp_addextendedproperty @name=N''BusinessOwner'', @value=N''Change this value''; 8 | END' 9 | 10 | select * from sys.databases 11 | exec sp_MSforeachdb 'select ''[?]'', class_desc, name, value from [?].sys.extended_properties ep where class_desc = ''database''' 12 | 13 | -------------------------------------------------------------------------------- /agent job history.sql: -------------------------------------------------------------------------------- 1 | --Determines the retention of SQL Agent job history, stored in msdb 2 | 3 | --Look for jobhistory_Max_Rows and jobhistory_max_rows_per_job. 4 | --By default, 1000 and 100. 5 | exec msdb.dbo.sp_get_sqlagent_properties 6 | GO 7 | 8 | --If Autostart = 0, check if SQL Server Agent service is set to Automatic startup. 9 | select servicename, startup_type_desc from sys.dm_server_services 10 | 11 | 12 | /* 13 | --Sample script to increase job history retention by an order of magnitude each. 14 | --For a job that executes every 15 minutes, how much history do you want for that job? 15 | -- 672 records per week, 2880 records per month! 
Consider increasing to 50000/10000 16 | EXEC msdb.dbo.sp_set_sqlagent_properties @jobhistory_max_rows=10000, 17 | @jobhistory_max_rows_per_job=1000 18 | */ -------------------------------------------------------------------------------- /autogrow change all 1mb growth files.sql: -------------------------------------------------------------------------------- 1 | --Run in Results to Text mode 2 | USE [master] 3 | GO 4 | select 5 | Alter_Autogrowth_Rates = case when mf.type_desc = 'ROWS' 6 | then 'ALTER DATABASE ['+d.name+'] MODIFY FILE ( NAME = N'''+ mf.name+ ''', FILEGROWTH = 256MB ); 7 | GO' 8 | else 'ALTER DATABASE ['+d.name+'] MODIFY FILE ( NAME = N'''+ mf.name+ ''', FILEGROWTH = 256MB ); 9 | GO' 10 | end 11 | , mf.* 12 | FROM sys.databases d 13 | inner join sys.master_files mf 14 | on d.database_id = mf.database_id 15 | where (d.state_desc = 'ONLINE') 16 | and (d.is_read_only = 0) 17 | and ((mf.is_percent_growth = 0 and growth = 128) or (mf.is_percent_growth = 1)) 18 | /* 19 | ALTER DATABASE [model] MODIFY FILE ( NAME = N'modeldev', FILEGROWTH = 512000KB ) 20 | */ 21 | -------------------------------------------------------------------------------- /autogrow events.sql: -------------------------------------------------------------------------------- 1 | 2 | --The trace automatically finds _n files, strip off the _nnn. For example, will read all data from log_14.trc, log_15.trc, log_16.trc, log_17.trc, etc. 3 | --Default trace files are limited to 20mb, and there are at most five of them, so we have 100mb of history. Depends on activity to determine how far back that goes. 4 | 5 | 6 | SELECT 7 | DBName = g.DatabaseName 8 | , DBFileName = mf.physical_name 9 | , FileType = CASE mf.type WHEN 0 THEN 'Row' WHEN 1 THEN 'Log' WHEN 2 THEN 'FILESTREAM' WHEN 4 THEN 'Full-text' END 10 | , EventName = te.name 11 | , EventGrowthMB = convert(decimal(19,2),g.IntegerData*8/1024.) -- Number of 8-kilobyte (KB) pages by which the file increased. 12 | , EventTime = g.StartTime 13 | , EventDurationSec = convert(decimal(19,2),g.Duration/1000./1000.) -- Length of time (in milliseconds) necessary to extend the file. 14 | , CurrentAutoGrowthSet= CASE 15 | WHEN mf.is_percent_growth = 1 16 | THEN CONVERT(char(2), mf.growth) + '%' 17 | ELSE CONVERT(varchar(30), convert(decimal(19,2), mf.growth*8./1024.)) + 'MB' 18 | END 19 | , CurrentFileSizeMB = convert(decimal(19,2),mf.size* 8./1024.) 
20 | , d.Recovery_model_Desc 21 | --, @tracepath 22 | --, MaxFileSizeMB = CASE WHEN mf.max_size = -1 THEN 'Unlimited' ELSE convert(varchar(30), convert(decimal(19,2),mf.max_size*8./1024.)) END 23 | --select count(1) 24 | FROM fn_trace_gettable((select substring((select path from sys.traces where is_default =1), 0, charindex('\log_', (select path from sys.traces where is_default =1),0)+4) + '.trc'), default) g 25 | cross apply sys.trace_events te 26 | inner join sys.master_files mf 27 | on mf.database_id = g.DatabaseID 28 | and g.FileName = mf.name 29 | inner join sys.databases d 30 | on d.database_id = g.DatabaseID 31 | WHERE g.eventclass = te.trace_event_id 32 | and te.name in ('Data File Auto Grow','Log File Auto Grow') 33 | and g.StartTime > dateadd(d, -7, sysdatetime()) 34 | --GROUP BY StartTime,Databaseid, Filename, IntegerData, Duration 35 | order by StartTime desc; 36 | 37 | SELECT servicename, status_desc, last_startup_time FROM sys.dm_server_services; 38 | GO 39 | 40 | 41 | 42 | 43 | -------------------------------------------------------------------------------- /avg disk secwrite.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SparkhoundSQL/sql-server-toolbox/51636a22fe7db848cc12473c8083e2753bb6ab93/avg disk secwrite.png -------------------------------------------------------------------------------- /azure 15min dtu usage.sql: -------------------------------------------------------------------------------- 1 | --https://msdn.microsoft.com/en-us/library/dn800981.aspx 2 | --Run in the master Azure SQL server database 3 | SELECT SYSDATETIMEOFFSET() 4 | 5 | SELECT 6 | rs.Database_Name 7 | , rs.sku --Basic, Standard, Premium 8 | , TierDTU = rs.dtu_limit 9 | , Storage_in_mb = MAX(rs.Storage_in_megabytes) 10 | , 'Average CPU Utilization In %' = AVG(rs.avg_cpu_percent) 11 | , 'Maximum CPU Utilization In %' = MAX(rs.avg_cpu_percent) 12 | , 'Average Data IO In %' = AVG(rs.avg_data_io_percent) 13 | , 'Maximum Data IO In %' = MAX(rs.avg_data_io_percent) 14 | , 'Average Log Write Utilization In %' = AVG(rs.avg_log_write_percent) 15 | , 'Maximum Log Write Utilization In %' = MAX(rs.avg_log_write_percent) 16 | , 'Average Requests In %' = AVG(rs.max_worker_percent) 17 | , 'Maximum Requests In %' = MAX(rs.max_worker_percent) 18 | , 'Average Sessions In %' = AVG(rs.max_session_percent) 19 | , 'Maximum Sessions In %' = MAX(rs.max_session_percent) 20 | FROM master.sys.resource_stats as rs --past 14 days 21 | GROUP BY rs.Database_Name, rs.sku, rs.dtu_limit 22 | ORDER BY rs.Database_Name desc 23 | 24 | SELECT 25 | Timestamp = datetimefromparts(year(rs.end_time), month(rs.end_time), day(rs.end_time), datepart(hh,rs.end_time), datepart(minute, rs.end_time),0,0) 26 | , rs.Database_Name 27 | , rs.sku --Basic, Standard, Premium 28 | , TierDTU = rs.dtu_limit 29 | , Storage_in_mb = MAX(rs.Storage_in_megabytes) 30 | , 'Average CPU Utilization In %' = AVG(rs.avg_cpu_percent) 31 | , 'Maximum CPU Utilization In %' = MAX(rs.avg_cpu_percent) 32 | , 'Average Data IO In %' = AVG(rs.avg_data_io_percent) 33 | , 'Maximum Data IO In %' = MAX(rs.avg_data_io_percent) 34 | , 'Average Log Write Utilization In %' = AVG(rs.avg_log_write_percent) 35 | , 'Maximum Log Write Utilization In %' = MAX(rs.avg_log_write_percent) 36 | , 'Average Worker Requests In %' = AVG(rs.max_worker_percent) 37 | , 'Maximum Worker Requests In %' = MAX(rs.max_worker_percent) 38 | , 'Average Sessions In %' = AVG(rs.max_session_percent) 39 | , 'Maximum Sessions In %' = 
MAX(rs.max_session_percent) 40 | FROM master.sys.resource_stats as rs --past 14 days only 41 | GROUP BY rs.Database_Name, rs.sku, rs.dtu_limit, datetimefromparts(year(rs.end_time), month(rs.end_time), day(rs.end_time), datepart(hh,rs.end_time), datepart(minute, rs.end_time),0,0) 42 | ORDER BY rs.Database_Name desc, TimeStamp DESC 43 | 44 | -------------------------------------------------------------------------------- /azure 15sec dtu usage.sql: -------------------------------------------------------------------------------- 1 | --https://msdn.microsoft.com/en-us/library/dn800981.aspx 2 | --Run in the user Azure database 3 | SELECT SYSDATETIMEOFFSET() 4 | GO 5 | 6 | SELECT 7 | Database_Name = DB_NAME() 8 | , TierDTU = rs.dtu_limit 9 | , 'Average CPU Utilization In %' = AVG(rs.avg_cpu_percent) 10 | , 'Maximum CPU Utilization In %' = MAX(rs.avg_cpu_percent) 11 | , 'Average Data IO In %' = AVG(rs.avg_data_io_percent) 12 | , 'Maximum Data IO In %' = MAX(rs.avg_data_io_percent) 13 | , 'Average Log Write Utilization In %' = AVG(rs.avg_log_write_percent) 14 | , 'Maximum Log Write Utilization In %' = MAX(rs.avg_log_write_percent) 15 | , 'Average Memory Usage In %' = AVG(rs.avg_memory_usage_percent) 16 | , 'Maximum Memory Usage In %' = MAX(rs.avg_memory_usage_percent) 17 | FROM sys.dm_db_resource_stats as rs --past hour only 18 | group by rs.dtu_limit 19 | 20 | GO 21 | select 22 | Database_Name = DB_NAME() 23 | , UTC_time = end_time 24 | , 'CPU Utilization In % of Limit' = rs.avg_cpu_percent 25 | , 'Data IO In % of Limit' = rs.avg_data_io_percent 26 | , 'Log Write Utilization In % of Limit' = rs.avg_log_write_percent 27 | , 'Memory Usage In % of Limit' = rs.avg_memory_usage_percent 28 | , 'In-Memory OLTP Storage in % of Limit' = rs.xtp_storage_percent 29 | , 'Concurrent Worker Threads in % of Limit' = rs.max_worker_percent 30 | , 'Concurrent Sessions in % of Limit' = rs.max_session_percent 31 | from 32 | sys.dm_db_resource_stats as rs --past hour only 33 | order by rs.end_time desc 34 | 35 | 36 | 37 | -------------------------------------------------------------------------------- /azure sql db scale.ps1: -------------------------------------------------------------------------------- 1 | #Install-Module AzureRM.Sql -Force 2 | Login-AzureRmAccount 3 | 4 | get-AzureRmSqlDatabase -ResourceGroupName "AppServices-TeamLeads" -DatabaseName "LunchQueue" -ServerName "sparkhound-appservices" | select-object DatabaseName, currentserviceobjectivename 5 | 6 | set-AzureRmSqlDatabase -ResourceGroupName "AppServices-TeamLeads" -DatabaseName "LunchQueue" -ServerName "sparkhound-appservices" -RequestedServiceObjectiveName "S1" 7 | 8 | get-AzureRmSqlDatabase -ResourceGroupName "AppServices-TeamLeads" -DatabaseName "LunchQueue" -ServerName "sparkhound-appservices" | select-object DatabaseName, currentserviceobjectivename 9 | 10 | -------------------------------------------------------------------------------- /backup database master keys.sql: -------------------------------------------------------------------------------- 1 | --Will generate a script to backup all database keys, including the master key if it exists. 2 | --Use to backup database master keys used for row-level encryption. 
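--Related check: is_master_key_encrypted_by_server (a standard sys.databases column) shows whether each database master key
--is also protected by the service master key, i.e. whether it can be opened without supplying its password.
--select name, is_master_key_encrypted_by_server from sys.databases;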
3 | 4 | --Not so useful for TDE, instead see: toolbox\lab - tde encryption workshop 2014.sql 5 | 6 | --name = ''##MS_DatabaseMasterKey##'' is the database master key 7 | 8 | --TODO: Add password to two places where text = passwordhere but DO NOT SAVE THIS FILE WITH PASSWORD 9 | -- The password must be the current password for the database key. 10 | -- If the password is not known, you must regenerate the password and immediately re-backup the key. Note this will force all encyrypted data to be unencrypted and re-encrypted. It is transparent but could be time-consuming. 11 | -- https://docs.microsoft.com/sql/t-sql/statements/alter-master-key-transact-sql follow directions to REGENERATE key with new password. 12 | -- See also: toolbox\backup service master key.sql 13 | 14 | exec sp_msforeachdb 'use [?]; 15 | if exists(select * from sys.symmetric_keys ) 16 | begin 17 | select ''Database key(s) found in [?]'' 18 | select ''USE [?];'' 19 | select ''OPEN MASTER KEY DECRYPTION BY PASSWORD = ''''passwordhere''''; 20 | BACKUP MASTER KEY TO FILE = ''''c:\temp\?_''+name+''_20200131.snk'''' 21 | ENCRYPTION BY PASSWORD = ''''passwordhere''''; 22 | GO '' 23 | from sys.symmetric_keys; 24 | END'; 25 | 26 | --exec sp_msforeachdb 'use [?]; select ''[?]'',* from sys.symmetric_keys'; 27 | 28 | --THEN: 29 | --Move the file to enterprise security vault, along with its password, associated with the SQL instance. 30 | -------------------------------------------------------------------------------- /backup history oldest latest.sql: -------------------------------------------------------------------------------- 1 | --sql2005 and above 2 | select 3 | backuptype 4 | , recovery_model_desc 5 | , state_desc 6 | , is_read_only 7 | , OldestLatestBackupDate = MIN(BackupDate) 8 | FROM 9 | ( 10 | select 11 | database_name 12 | , backuptype 13 | , d.recovery_model_desc 14 | , BackupDate = MAX(BackupDate) 15 | , d.state_desc 16 | , d.is_read_only 17 | from sys.databases d 18 | inner join 19 | ( 20 | select distinct 21 | database_name 22 | , backuptype = case type WHEN 'D' then 'Database' 23 | WHEN 'I' then 'Differential database' 24 | WHEN 'L' then 'Transaction Log' 25 | WHEN 'F' then 'File or filegroup' 26 | WHEN 'G' then 'Differential file' 27 | WHEN 'P' then 'Partial' 28 | WHEN 'Q' then 'Differential partial' END 29 | , BackupDate = MAX(backup_start_date) 30 | from msdb.dbo.backupset bs 31 | group by database_name, type 32 | UNION 33 | select distinct 34 | db_name(d.database_id) 35 | , backuptype = 'Database' 36 | , null 37 | FROM master.sys.databases d 38 | UNION 39 | select distinct 40 | db_name(d.database_id) 41 | , backuptype = 'Transaction Log' 42 | , null 43 | FROM master.sys.databases d 44 | where d.recovery_model_desc in ('FULL', 'BULK_LOGGED') 45 | 46 | ) a 47 | on db_name(d.database_id) = a.database_name 48 | WHERE backuptype = 'transaction log' 49 | group by database_name, backuptype, d.recovery_model_desc, d.state_desc, d.is_read_only 50 | ) x 51 | group by backuptype, recovery_model_desc, state_desc, is_read_only 52 | order by backuptype, recovery_model_desc 53 | 54 | GO 55 | 56 | select 57 | database_name 58 | , backuptype 59 | , d.recovery_model_desc 60 | , BackupDate = MAX(BackupDate) 61 | , d.state_desc 62 | , d.is_read_only 63 | from sys.databases d 64 | inner join 65 | ( 66 | select distinct 67 | database_name 68 | , backuptype = case type WHEN 'D' then 'Database' 69 | WHEN 'I' then 'Differential database' 70 | WHEN 'L' then 'Transaction Log' 71 | WHEN 'F' then 'File or filegroup' 72 | WHEN 'G' then 
'Differential file' 73 | WHEN 'P' then 'Partial' 74 | WHEN 'Q' then 'Differential partial' END 75 | , BackupDate = MAX(backup_start_date) 76 | from msdb.dbo.backupset bs 77 | group by database_name, type 78 | UNION 79 | select distinct 80 | db_name(d.database_id) 81 | , backuptype = 'Database' 82 | , null 83 | FROM master.sys.databases d 84 | UNION 85 | select distinct 86 | db_name(d.database_id) 87 | , backuptype = 'Transaction Log' 88 | , null 89 | FROM master.sys.databases d 90 | where d.recovery_model_desc in ('FULL', 'BULK_LOGGED') 91 | 92 | ) a 93 | on db_name(d.database_id) = a.database_name 94 | --WHERE backuptype = 'transaction log' 95 | group by database_name, backuptype, d.recovery_model_desc, d.state_desc, d.is_read_only 96 | -------------------------------------------------------------------------------- /backup progress.sql: -------------------------------------------------------------------------------- 1 | SELECT R.session_id, 2 | R.percent_complete, 3 | R.total_elapsed_time/1000 AS elapsed_secs, 4 | R.wait_type, 5 | R.wait_time, 6 | R.last_wait_type, 7 | DATEADD(s,100/((R.percent_complete)/ (R.total_elapsed_time/1000)), R.start_time) estim_completion_time, 8 | ST.text, 9 | SUBSTRING(ST.text, R.statement_start_offset / 2, 10 | ( 11 | CASE WHEN R.statement_end_offset = -1 THEN DATALENGTH(ST.text) 12 | ELSE R.statement_end_offset 13 | END - R.statement_start_offset 14 | ) / 2 15 | ) AS statement_executing 16 | FROM sys.dm_exec_requests R 17 | CROSS APPLY sys.dm_exec_sql_text(R.sql_handle) ST 18 | WHERE R.percent_complete > 0 19 | AND R.session_id <> @@spid -------------------------------------------------------------------------------- /backup readonly copy restore.sql: -------------------------------------------------------------------------------- 1 | --Backup/Copy/Restore user db's 2 | 3 | --Must have ending \ 4 | declare @old_server_path_data nvarchar(4000) = 'E:\Program Files\Microsoft SQL Server\MSSQL14.SQL2K17\MSSQL\DATA\'; 5 | declare @old_server_path_log nvarchar(4000) = 'E:\Program Files\Microsoft SQL Server\MSSQL14.SQL2K17\MSSQL\DATA\'; 6 | declare @new_server_path_data nvarchar(4000) = 'E:\Program Files\Microsoft SQL Server\MSSQL14.SQL2K17\MSSQL\DATA\'; 7 | declare @new_server_path_log nvarchar(4000) = 'E:\Program Files\Microsoft SQL Server\MSSQL14.SQL2K17\MSSQL\DATA\'; 8 | declare @old_server_path_backup nvarchar(4000) = 'E:\Program Files\Microsoft SQL Server\MSSQL14.SQL2K17\MSSQL\Backup\'; 9 | declare @new_server_path_backup nvarchar(4000) = 'E:\Program Files\Microsoft SQL Server\MSSQL14.SQL2K17\MSSQL\Backup\'; 10 | 11 | 12 | with ctefiles_data (database_id, file_id, type_desc, name, physical_name, new_physical_name) 13 | AS ( 14 | select database_id, file_id, type_desc, name, physical_name 15 | , new_physical_name = replace(physical_name, @old_server_path_data, @new_server_path_data) 16 | from sys.master_files where type_desc = 'ROWS') 17 | 18 | , ctefiles_log (database_id, file_id, type_desc, name, physical_name, new_physical_name) 19 | AS ( 20 | select database_id, file_id, type_desc, name, physical_name, new_physical_name = replace(physical_name, 21 | @old_server_path_log, @new_server_path_log) 22 | from sys.master_files where type_desc = 'LOG') 23 | 24 | 25 | select 26 | ReadOnly_On = ' 27 | ALTER DATABASE ['+db_name(d.database_id)+'] SET SINGLE_USER WITH ROLLBACK IMMEDIATE; 28 | ALTER DATABASE ['+db_name(d.database_id)+'] SET READ_ONLY WITH NO_WAIT; 29 | ALTER DATABASE ['+db_name(d.database_id)+'] SET MULTI_USER; 30 | GO', 31 | TakeBackups = ' 32 | BACKUP 
DATABASE ['+db_name(d.database_id)+'] TO DISK = N'''+@old_server_path_backup+db_name(d.database_id)+'_migration_20180418.bak'' 33 | WITH NOFORMAT, NOINIT, NAME = N'''+db_name(d.database_id)+'-Migration 20180418 Full Database Backup'', SKIP, NOREWIND, NOUNLOAD, STATS = 10, CHECKSUM, COMPRESSION 34 | GO 35 | declare @backupSetId as int 36 | select @backupSetId = position from msdb..backupset where database_name=N'''+db_name(d.database_id)+''' and backup_set_id=(select max(backup_set_id) from msdb..backupset where database_name=N'''+db_name(d.database_id)+''' ) 37 | if @backupSetId is null begin raiserror(N''Verify failed. Backup information for database '''''+db_name(d.database_id)+''''' not found.'', 16, 1) end 38 | RESTORE VERIFYONLY FROM DISK = N'''+@old_server_path_backup+db_name(d.database_id)+'_migration_20180418.bak'' WITH FILE = @backupSetId, NOUNLOAD, NOREWIND 39 | GO', 40 | Restores = 'USE [master] 41 | RESTORE DATABASE ['+db_name(d.database_id)+'] 42 | FROM DISK = N'''+@new_server_path_backup+ db_name(d.database_id) +'_migration_20180418.bak'' 43 | WITH FILE = 1 44 | , MOVE N'''+d.name+''' TO N'''+d.new_physical_name+''' 45 | , MOVE N'''+l.name+''' TO N'''+l.new_physical_name+''' 46 | , NOUNLOAD, STATS = 5, NORECOVERY 47 | GO 48 | ', 49 | ReadOnly_Off = ' 50 | ALTER DATABASE ['+db_name(d.database_id)+'] SET READ_WRITE WITH NO_WAIT; 51 | GO', 52 | * 53 | FROM ctefiles_data d 54 | inner join ctefiles_log l 55 | on d.database_id = l.database_id 56 | where d.database_id > 4 57 | 58 | 59 | /* 60 | 61 | 62 | */ -------------------------------------------------------------------------------- /backup restore progress.sql: -------------------------------------------------------------------------------- 1 | SELECT command, 2 | s.text, 3 | start_time, 4 | percent_complete, 5 | CAST(((DATEDIFF(s,start_time,GetDate()))/3600) as varchar) + ' hour(s), ' 6 | + CAST((DATEDIFF(s,start_time,GetDate())%3600)/60 as varchar) + 'min, ' 7 | + CAST((DATEDIFF(s,start_time,GetDate())%60) as varchar) + ' sec' as running_time, 8 | CAST((estimated_completion_time/3600000) as varchar) + ' hour(s), ' 9 | + CAST((estimated_completion_time %3600000)/60000 as varchar) + 'min, ' 10 | + CAST((estimated_completion_time %60000)/1000 as varchar) + ' sec' as est_time_to_go, 11 | dateadd(second,estimated_completion_time/1000, getdate()) as est_completion_time 12 | FROM sys.dm_exec_requests r 13 | CROSS APPLY sys.dm_exec_sql_text(r.sql_handle) s 14 | WHERE r.command like 'RESTORE%' or r.command like 'BACKUP%' -------------------------------------------------------------------------------- /backup service master key.sql: -------------------------------------------------------------------------------- 1 | --TODO: Change Master key name to include instance name. 2 | --TODO: Change password to complex, unique password for this key. 3 | --You may also want to check for database master keys that need to be backed up: toolbox\backup database master keys.sql 4 | 5 | BACKUP SERVICE MASTER KEY --not actually important for TDE, but important overall and should be backed up regardless. 6 | TO FILE = 'E:\Program Files\Microsoft SQL Server\MSSQL14.SQL2K17\MSSQL\data\InstanceNameHere_SQLServiceMasterKey_20120314.snk' 7 | ENCRYPTION BY PASSWORD = 'complexpasswordhere' 8 | 9 | --THEN, TODO: 10 | --Move the file to enterprise security vault, along with its password, associated with the SQL instance. 
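--Optional verification query (standard catalog view; ##MS_ServiceMasterKey## is the system name for the service master key):
--select name, algorithm_desc, create_date, modify_date from master.sys.symmetric_keys where name = '##MS_ServiceMasterKey##';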
11 | 12 | 13 | /* 14 | --To restore, in the event of a restoring a master database to a new install, for example: 15 | 16 | RESTORE SERVICE MASTER KEY FROM FILE = 'path_to_file' 17 | DECRYPTION BY PASSWORD = 'password' FORCE 18 | 19 | */ -------------------------------------------------------------------------------- /backup to azure blob tsql script for legacy SQL.sql: -------------------------------------------------------------------------------- 1 | --https://docs.microsoft.com/en-us/sql/relational-databases/backup-restore/sql-server-backup-to-url?view=sql-server-2017 2 | 3 | DECLARE @DB_name VARCHAR(256) -- database name 4 | DECLARE @BackupLoc VARCHAR(512) -- path for backup files 5 | DECLARE @BackupfileName VARCHAR(512) -- filename for backup 6 | DECLARE @fileDate VARCHAR(20) -- used for file name 7 | DECLARE @process VARCHAR(2000) -- used for documentation 8 | -- specify database backup container location 9 | SET @BackupLoc = 'https://whatever.blob.core.windows.net/prodsqlbak/sh-sp2013-sql/' 10 | 11 | DECLARE db_cursor CURSOR FOR 12 | SELECT name FROM master.sys.databases WHERE database_id > 4 and state=0 and is_read_only = 0 13 | OPEN db_cursor 14 | FETCH NEXT FROM db_cursor INTO @DB_name 15 | WHILE @@FETCH_STATUS = 0 16 | BEGIN 17 | set @fileDate= replace(replace(convert(nvarchar(50),getdate()),' ','_'),':','_') 18 | SET @BackupfileName = @BackupLoc + @DB_name + '_' + @fileDate + '.DIF' 19 | SET @process = 'BACKUP DATABASE '+@DB_name+ ' TO URL ='''+ @BackupfileName +''' WITH CREDENTIAL = ''https://whatever.blob.core.windows.net/prodsqlbak'' 20 | , COMPRESSION, CHECKSUM, FORMAT;' 21 | --print @process 22 | BEGIN TRY 23 | BACKUP DATABASE @DB_name TO URL = @BackupfileName 24 | WITH CREDENTIAL = N'https://whatever.blob.core.windows.net/prodsqlbak' 25 | , COMPRESSION, CHECKSUM, FORMAT; 26 | END TRY 27 | BEGIN CATCH 28 | --Only captures the 3013, not the preceding and actual error message for any backup failure. :( 29 | --INSERT INTO DBALogging.dbo.errortable ([ErrorNumber], [ErrorSeverity], [ErrorState], [ErrorProcedure], [ErrorLine], [ErrorMessage], [Process]) 30 | --SELECT ERROR_NUMBER() AS ErrorNumber, ERROR_SEVERITY() AS ErrorSeverity, ERROR_STATE() as ErrorState, ERROR_PROCEDURE() as ErrorProcedure, ERROR_LINE() as ErrorLine, ERROR_MESSAGE() as ErrorMessage 31 | -- , Process = 'Testing'; --need the semicolon 32 | 33 | THROW --optional, to actually cause a failure. 34 | 35 | END CATCH 36 | 37 | FETCH NEXT FROM db_cursor INTO @DB_name 38 | 39 | END 40 | CLOSE db_cursor 41 | DEALLOCATE db_cursor 42 | -------------------------------------------------------------------------------- /bad clustered indexes.sql: -------------------------------------------------------------------------------- 1 | --Any rows returned in here probably need to be re-designed. Bad designs. 
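--Why these are flagged: a float clustered key has approximate-value semantics that make equality and range lookups unreliable,
--and a random uniqueidentifier (GUID) key fragments heavily on insert; either way the clustered key is carried into every
--nonclustered index, so a poor choice is paid for everywhere.
--A minimal, hypothetical example of a table this query would flag (names are placeholders):
/*
CREATE TABLE dbo.ExampleBadClustering
(   RowGuid uniqueidentifier NOT NULL CONSTRAINT DF_ExampleBadClustering_RowGuid DEFAULT NEWID()
        CONSTRAINT PK_ExampleBadClustering PRIMARY KEY CLUSTERED
,   Payload nvarchar(100) NULL
);
*/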
2 | 3 | select 4 | Database_name = DB_NAME() 5 | , Table_Name = '[' + s.name + '].[' + o.name + ']' 6 | , Index_Name = i.name 7 | , Column_Name = c.name 8 | , Data_Type = t.name 9 | from sys.columns c 10 | inner join sys.types t on c.user_type_id = t.user_type_id 11 | inner join sys.objects o on o.object_id = c.object_id 12 | inner join sys.schemas s on o.schema_id = s.schema_id 13 | inner join sys.indexes i on i.object_id = o.object_id 14 | inner join sys.index_columns ic on ic.index_id = i.index_id and ic.column_id = c.column_id and ic.object_id = o.object_id 15 | where 16 | (t.name = 'float' or t.name = 'uniqueidentifier' or t.max_length = -1) --GUID or (n)varchar(max) data types or float 17 | and i.index_id = 1 --the clustered index 18 | and o.is_ms_shipped = 0 19 | 20 | -------------------------------------------------------------------------------- /basic diff backup to URL.sql: -------------------------------------------------------------------------------- 1 | --Basic job script to backup to URL, SQL 2016+. 2 | --Performs diff backups on up all user databases 3 | 4 | --This script assumes that an SAS Credential is in place for the storage container. 5 | --Verify: select * from sys.credentials where credential_identity = 'Shared Access Signature' 6 | --If you need to create an SAS Credential, see toolbox\sas credential.sql 7 | --SAS Credentials only work for SQL 2016+. 8 | --More info: https://techcommunity.microsoft.com/t5/DataCAT/SQL-Server-Backup-to-URL-a-cheat-sheet/ba-p/346358?advanced=false&collapse_discussion=true&q=MAXTRANSFERSIZE&search_type=thread 9 | --Note that WITH CERTIFICATE to use a storage account credential is not needed in the script, this was for behavior <2016+ and is still needed for those older versions. 10 | --See: https://docs.microsoft.com/en-us/sql/relational-databases/backup-restore/sql-server-backup-to-url?view=sql-server-2017#credential 11 | 12 | DECLARE @BackupLoc VARCHAR(512) -- path for backup files 13 | -- specify database backup container location 14 | SET @BackupLoc = 'https://storageaccountwhatever.blob.core.windows.net/containerwhatever/servername/'; 15 | 16 | DECLARE @DB_name VARCHAR(255) -- database name 17 | DECLARE @BackupfileName VARCHAR(1024) -- filename for backup 18 | DECLARE @fileDate VARCHAR(20) -- used for file name 19 | DECLARE @backupSetId int 20 | DECLARE @errormessage nvarchar(2000) 21 | 22 | DECLARE db_cursor CURSOR FOR 23 | SELECT name FROM master.sys.databases 24 | WHERE database_id > 4 --USER databases only. Should be another job for system databases. 
25 | and state_desc='ONLINE' and is_read_only = 0 26 | and is_in_standby = 0 27 | 28 | OPEN db_cursor 29 | FETCH NEXT FROM db_cursor INTO @DB_name 30 | WHILE @@FETCH_STATUS = 0 31 | BEGIN 32 | SET @BackupfileName = @BackupLoc + @DB_name + '_diff_' + 33 | + convert(varchar, datepart(year, sysdatetime())) + Right(Replicate('0',2) 34 | + convert(varchar(2), datepart(month, sysdatetime())),2) + Right(Replicate('0',2) 35 | + convert(varchar(2), datepart(day, sysdatetime())),2) + Right(Replicate('0',2) 36 | + convert(varchar(2), datepart(hour, sysdatetime())),2) + Right(Replicate('0',2) 37 | + convert(varchar(2), datepart(minute, sysdatetime())),2) + 38 | + '.dif' 39 | 40 | select @BackupfileName 41 | BEGIN TRY 42 | BACKUP DATABASE @DB_name TO URL = @BackupfileName 43 | WITH DIFFERENTIAL, 44 | COMPRESSION, CHECKSUM, FORMAT, MAXTRANSFERSIZE = 4194304, BLOCKSIZE = 65536; 45 | 46 | --verify the backup 47 | select @backupSetId = position from msdb..backupset where database_name= @DB_name and backup_set_id=(select max(backup_set_id) from msdb..backupset where database_name= @DB_name ) 48 | select @errormessage = N'Verify failed. Backup information for differential backup for database '+@DB_name+' not found.' 49 | if @backupSetId is null begin raiserror(@errormessage, 16, 1) end 50 | RESTORE VERIFYONLY FROM URL = @BackupfileName WITH 51 | FILE = @backupSetId, NOUNLOAD, NOREWIND 52 | 53 | END TRY 54 | BEGIN CATCH 55 | THROW --to actually cause a failure. Reports both error codes 4208 and 3013, SQL Agent job handles the capture. 56 | 57 | END CATCH 58 | 59 | FETCH NEXT FROM db_cursor INTO @DB_name 60 | 61 | END 62 | CLOSE db_cursor 63 | DEALLOCATE db_cursor 64 | -------------------------------------------------------------------------------- /basic full backup to URL system dbs.sql: -------------------------------------------------------------------------------- 1 | --Basic job script to backup to URL, SQL 2016+. 2 | --Performs full backups on system databases 3 | 4 | --This script assumes that an SAS Credential is in place for the storage container. 5 | --Verify: select * from sys.credentials where credential_identity = 'Shared Access Signature' 6 | --If you need to create an SAS Credential, see toolbox\sas credential.sql 7 | --SAS Credentials only work for SQL 2016+. 8 | --More info: https://techcommunity.microsoft.com/t5/DataCAT/SQL-Server-Backup-to-URL-a-cheat-sheet/ba-p/346358?advanced=false&collapse_discussion=true&q=MAXTRANSFERSIZE&search_type=thread 9 | --Note that WITH CERTIFICATE to use a storage account credential is not needed in the script, this was for behavior <2016+ and is still needed for those older versions. 10 | --See: https://docs.microsoft.com/en-us/sql/relational-databases/backup-restore/sql-server-backup-to-url?view=sql-server-2017#credential 11 | 12 | DECLARE @BackupLoc VARCHAR(512) -- path for backup files 13 | -- specify database backup container location 14 | SET @BackupLoc = 'https://storageaccountwhatever.blob.core.windows.net/containerwhatever/servername/'; 15 | 16 | DECLARE @DB_name VARCHAR(255) -- database name 17 | DECLARE @BackupfileName VARCHAR(1024) -- filename for backup 18 | DECLARE @fileDate VARCHAR(20) -- used for file name 19 | DECLARE @backupSetId int 20 | DECLARE @errormessage nvarchar(2000) 21 | 22 | DECLARE db_cursor CURSOR FOR 23 | SELECT name FROM master.sys.databases 24 | WHERE database_id <= 4 --SYSTEM databases only. Should be another job for USER databases. 
25 | and name <> 'tempdb' 26 | 27 | OPEN db_cursor 28 | FETCH NEXT FROM db_cursor INTO @DB_name 29 | WHILE @@FETCH_STATUS = 0 30 | BEGIN 31 | SET @BackupfileName = @BackupLoc + @DB_name + '_full_' + 32 | + convert(varchar, datepart(year, sysdatetime())) + Right(Replicate('0',2) 33 | + convert(varchar(2), datepart(month, sysdatetime())),2) + Right(Replicate('0',2) 34 | + convert(varchar(2), datepart(day, sysdatetime())),2) + Right(Replicate('0',2) 35 | + convert(varchar(2), datepart(hour, sysdatetime())),2) + Right(Replicate('0',2) 36 | + convert(varchar(2), datepart(minute, sysdatetime())),2) + 37 | + '.bak' 38 | 39 | select @BackupfileName 40 | BEGIN TRY 41 | BACKUP DATABASE @DB_name TO URL = @BackupfileName 42 | WITH 43 | COMPRESSION, CHECKSUM, FORMAT, MAXTRANSFERSIZE = 4194304, BLOCKSIZE = 65536; 44 | 45 | --verify the backup 46 | select @backupSetId = position from msdb..backupset where database_name= @DB_name and backup_set_id=(select max(backup_set_id) from msdb..backupset where database_name= @DB_name ) 47 | select @errormessage = N'Verify failed. Backup information for database '+@DB_name+' not found.' 48 | if @backupSetId is null begin raiserror(@errormessage, 16, 1) end 49 | RESTORE VERIFYONLY FROM URL = @BackupfileName WITH 50 | FILE = @backupSetId, NOUNLOAD, NOREWIND 51 | 52 | END TRY 53 | BEGIN CATCH 54 | THROW --to actually cause a failure. Reports both error codes 4208 and 3013, SQL Agent job handles the capture. 55 | 56 | END CATCH 57 | 58 | FETCH NEXT FROM db_cursor INTO @DB_name 59 | 60 | END 61 | CLOSE db_cursor 62 | DEALLOCATE db_cursor 63 | -------------------------------------------------------------------------------- /basic full backup to URL.sql: -------------------------------------------------------------------------------- 1 | --Basic job script to backup to URL, SQL 2016+. 2 | --Performs full backups on user databases 3 | 4 | --This script assumes that an SAS Credential is in place for the storage container. 5 | --Verify: select * from sys.credentials where credential_identity = 'Shared Access Signature' 6 | --If you need to create an SAS Credential, see toolbox\sas credential.sql 7 | --SAS Credentials only work for SQL 2016+. 8 | --More info: https://techcommunity.microsoft.com/t5/DataCAT/SQL-Server-Backup-to-URL-a-cheat-sheet/ba-p/346358?advanced=false&collapse_discussion=true&q=MAXTRANSFERSIZE&search_type=thread 9 | --Note that WITH CERTIFICATE to use a storage account credential is not needed in the script, this was for behavior <2016+ and is still needed for those older versions. 10 | --See: https://docs.microsoft.com/en-us/sql/relational-databases/backup-restore/sql-server-backup-to-url?view=sql-server-2017#credential 11 | 12 | DECLARE @BackupLoc VARCHAR(512) -- path for backup files 13 | -- specify database backup container location 14 | SET @BackupLoc = 'https://storageaccountwhatever.blob.core.windows.net/containerwhatever/servername/'; 15 | 16 | DECLARE @DB_name VARCHAR(255) -- database name 17 | DECLARE @BackupfileName VARCHAR(1024) -- filename for backup 18 | DECLARE @fileDate VARCHAR(20) -- used for file name 19 | DECLARE @backupSetId int 20 | DECLARE @errormessage nvarchar(2000) 21 | 22 | DECLARE db_cursor CURSOR FOR 23 | SELECT name FROM master.sys.databases 24 | WHERE database_id > 4 --USER databases only. Should be another job for system databases. 
25 | and state_desc='ONLINE' and is_read_only = 0 26 | and is_in_standby = 0 27 | 28 | OPEN db_cursor 29 | FETCH NEXT FROM db_cursor INTO @DB_name 30 | WHILE @@FETCH_STATUS = 0 31 | BEGIN 32 | SET @BackupfileName = @BackupLoc + @DB_name + '_full_' + 33 | + convert(varchar, datepart(year, sysdatetime())) + Right(Replicate('0',2) 34 | + convert(varchar(2), datepart(month, sysdatetime())),2) + Right(Replicate('0',2) 35 | + convert(varchar(2), datepart(day, sysdatetime())),2) + Right(Replicate('0',2) 36 | + convert(varchar(2), datepart(hour, sysdatetime())),2) + Right(Replicate('0',2) 37 | + convert(varchar(2), datepart(minute, sysdatetime())),2) + 38 | + '.bak' 39 | 40 | select @BackupfileName 41 | BEGIN TRY 42 | BACKUP DATABASE @DB_name TO URL = @BackupfileName 43 | WITH 44 | COMPRESSION, CHECKSUM, FORMAT, MAXTRANSFERSIZE = 4194304, BLOCKSIZE = 65536; 45 | 46 | --verify the backup 47 | select @backupSetId = position from msdb..backupset where database_name= @DB_name and backup_set_id=(select max(backup_set_id) from msdb..backupset where database_name= @DB_name ) 48 | select @errormessage = N'Verify failed. Backup information for database '+@DB_name+' not found.' 49 | if @backupSetId is null begin raiserror(@errormessage, 16, 1) end 50 | RESTORE VERIFYONLY FROM URL = @BackupfileName WITH 51 | FILE = @backupSetId, NOUNLOAD, NOREWIND 52 | 53 | END TRY 54 | BEGIN CATCH 55 | THROW --to actually cause a failure. Reports both error codes 4208 and 3013, SQL Agent job handles the capture. 56 | 57 | END CATCH 58 | 59 | FETCH NEXT FROM db_cursor INTO @DB_name 60 | 61 | END 62 | CLOSE db_cursor 63 | DEALLOCATE db_cursor 64 | -------------------------------------------------------------------------------- /basic log backup to URL.sql: -------------------------------------------------------------------------------- 1 | --Basic job script to backup to URL, SQL 2016+. 2 | --Performs log backups on all databases not in SIMPLE mode. 3 | 4 | --This script assumes that an SAS Credential is in place for the storage container. 5 | --Verify: select * from sys.credentials where credential_identity = 'Shared Access Signature' 6 | --If you need to create an SAS Credential, see toolbox\sas credential.sql 7 | --SAS Credentials only work for SQL 2016+. 8 | --More info: https://techcommunity.microsoft.com/t5/DataCAT/SQL-Server-Backup-to-URL-a-cheat-sheet/ba-p/346358?advanced=false&collapse_discussion=true&q=MAXTRANSFERSIZE&search_type=thread 9 | --Note that WITH CERTIFICATE to use a storage account credential is not needed in the script, this was for behavior <2016+ and is still needed for those older versions. 
10 | --See: https://docs.microsoft.com/en-us/sql/relational-databases/backup-restore/sql-server-backup-to-url?view=sql-server-2017#credential 11 | 12 | DECLARE @BackupLoc VARCHAR(512) -- path for backup files 13 | -- specify database backup container location 14 | SET @BackupLoc = 'https://storageaccountwhatever.blob.core.windows.net/containerwhatever/servername/'; 15 | 16 | DECLARE @DB_name VARCHAR(255) -- database name 17 | DECLARE @BackupfileName VARCHAR(1024) -- filename for backup 18 | DECLARE @fileDate VARCHAR(20) -- used for file name 19 | DECLARE @backupSetId int 20 | DECLARE @errormessage nvarchar(2000) 21 | 22 | DECLARE db_cursor CURSOR FOR 23 | SELECT name FROM master.sys.databases 24 | WHERE state_desc='ONLINE' and is_read_only = 0 25 | and is_in_standby = 0 26 | and recovery_model_desc <> 'SIMPLE'; 27 | 28 | OPEN db_cursor 29 | FETCH NEXT FROM db_cursor INTO @DB_name 30 | WHILE @@FETCH_STATUS = 0 31 | BEGIN 32 | SET @BackupfileName = @BackupLoc + @DB_name + '_log_' + 33 | + convert(varchar, datepart(year, sysdatetime())) + Right(Replicate('0',2) 34 | + convert(varchar(2), datepart(month, sysdatetime())),2) + Right(Replicate('0',2) 35 | + convert(varchar(2), datepart(day, sysdatetime())),2) + Right(Replicate('0',2) 36 | + convert(varchar(2), datepart(hour, sysdatetime())),2) + Right(Replicate('0',2) 37 | + convert(varchar(2), datepart(minute, sysdatetime())),2) + 38 | + '.trn' 39 | 40 | select @BackupfileName 41 | BEGIN TRY 42 | BACKUP LOG @DB_name TO URL = @BackupfileName 43 | WITH 44 | COMPRESSION, CHECKSUM, FORMAT, MAXTRANSFERSIZE = 4194304, BLOCKSIZE = 65536; 45 | 46 | --verify the backup 47 | select @backupSetId = position from msdb..backupset 48 | where database_name= @DB_name 49 | and backup_set_id=(select max(backup_set_id) from msdb..backupset 50 | where database_name= @DB_name ) 51 | select @errormessage = N'Verify failed. Backup information for log file backup for database '+@DB_name+' not found.' 52 | if @backupSetId is null begin raiserror(@errormessage, 16, 1) end 53 | RESTORE VERIFYONLY FROM URL = @BackupfileName WITH 54 | FILE = @backupSetId, NOUNLOAD, NOREWIND 55 | 56 | END TRY 57 | BEGIN CATCH 58 | THROW --to actually cause a failure. Reports both error codes 4208 and 3013, SQL Agent job handles the capture. 
59 | 60 | END CATCH 61 | 62 | FETCH NEXT FROM db_cursor INTO @DB_name 63 | 64 | END 65 | CLOSE db_cursor 66 | DEALLOCATE db_cursor 67 | -------------------------------------------------------------------------------- /capture login info xevent.sql: -------------------------------------------------------------------------------- 1 | CREATE EVENT SESSION [CollectLogonData] ON SERVER 2 | ADD EVENT sqlserver.login( 3 | ACTION(sqlserver.client_app_name,sqlserver.client_connection_id,sqlserver.client_hostname,sqlserver.client_pid,sqlserver.database_id,sqlserver.database_name,sqlserver.nt_username,sqlserver.server_instance_name,sqlserver.server_principal_name,sqlserver.server_principal_sid,sqlserver.session_id,sqlserver.session_nt_username,sqlserver.username)) 4 | ADD TARGET package0.event_file(SET filename=N'c:\[RenameMe]\CollectLogindata.xel') 5 | WITH (MAX_MEMORY=4096 KB,EVENT_RETENTION_MODE=ALLOW_SINGLE_EVENT_LOSS,MAX_DISPATCH_LATENCY=30 SECONDS,MAX_EVENT_SIZE=0 KB,MEMORY_PARTITION_MODE=NONE,TRACK_CAUSALITY=OFF,STARTUP_STATE=ON) 6 | GO 7 | 8 | ALTER EVENT SESSION [CollectLogonData] on server STATE = START 9 | --ALTER EVENT SESSION [CollectLogonData] on server STATE = STOP -------------------------------------------------------------------------------- /change tracking autocleanup diagnostic.sql: -------------------------------------------------------------------------------- 1 | --In an AG, run on primary replica. 2 | --Run in the user database with Change Tracking enabled. 3 | 4 | SELECT 5 | number_commits = count(1) 6 | , min_commit_time = MIN(commit_time) 7 | , max_commit_time = MAX(commit_time) 8 | , now = getdate() 9 | FROM sys.dm_tran_commit_table; 10 | 11 | --The minimum_commit_time should progress forward periodically. 12 | --As default configured with 2 days retention, the minimum_commit_time should be slightly more than 2 days ago if CT is keeping up. 13 | 14 | --CT may get behind in the autocleanup. This is not a severe problem but will surface with: 15 | --Error: 22123, Severity: 16, State: 1. 16 | --Change Tracking autocleanup is blocked on side table of "tablename". If the failure persists, check whether the table "tablename" is blocked by any process, like in toolbox\uncommitted transaction.sql. 
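--Optional companion sketch (an addition, not part of the original script): a per-table view of
--Change Tracking cleanup progress using sys.change_tracking_tables and CHANGE_TRACKING_CURRENT_VERSION().
--Run in the same Change Tracking-enabled user database. If a table's min_valid_version stops advancing
--while the current version keeps climbing, autocleanup is likely stuck on that table's side table.
SELECT
      table_name = OBJECT_SCHEMA_NAME(ctt.object_id) + '.' + OBJECT_NAME(ctt.object_id)
    , ctt.min_valid_version --minimum change version still available for this table
    , ctt.cleanup_version --version up to which cleanup has removed change tracking information
    , current_version = CHANGE_TRACKING_CURRENT_VERSION()
FROM sys.change_tracking_tables AS ctt
ORDER BY table_name;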
-------------------------------------------------------------------------------- /checksum vs hashbytes.sql: -------------------------------------------------------------------------------- 1 | --same 2 | Select CHECKSUM('sha2_512','Sparkhound') 3 | Select CHECKSUM('sha2_512','AAAAAAAAAAAAAAAASparkhound') 4 | 5 | --same 6 | Select BINARY_CHECKSUM('sha2_512','Sparkhound') 7 | Select BINARY_CHECKSUM('sha2_512','AAAAAAAAAAAAAAAASparkhound') 8 | 9 | --same 10 | select CHECKSUM('aaaa') 11 | select CHECKSUM('aaaaaaaaaaaaaaaaaaaa') 12 | 13 | --not the same 14 | select HASHBYTES('SHA2_512', N'aaaa') 15 | select HASHBYTES('SHA2_512', N'aaaaaaaaaaaaaaaaaaaa') 16 | 17 | --not the same 18 | Select HASHBYTES('sha2_512','Sparkhound') 19 | Select HASHBYTES('sha2_512','AAAAAAAAAAAAAAAASparkhound') 20 | 21 | -------------------------------------------------------------------------------- /collect connections.sql: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | USE [DBALogging] 4 | GO 5 | DROP TABLE [dbo].[ExecRequests_connections] 6 | GO 7 | CREATE TABLE [dbo].[ExecRequests_connections]( 8 | id int not null IDENTITY(1,1), 9 | [login_name] [nvarchar](128) NOT NULL, 10 | [client_interface_name] [nvarchar](32) NULL, 11 | [host_name] [nvarchar](128) NULL, 12 | [nt_domain] [nvarchar](128) NULL, 13 | [nt_user_name] [nvarchar](128) NULL, 14 | [endpoint_name] [sysname] NULL, 15 | [program_name] [nvarchar](128) NULL, 16 | [observed_count] bigint NOT NULL CONSTRAINT DF_ExecRequests_connections_observed_count DEFAULT(0), 17 | CONSTRAINT pk_execrequests_connections_id PRIMARY KEY (ID) 18 | ) ON [PRIMARY] 19 | CREATE INDEX idx_execrequests_connections ON execrequests_connections (login_name, client_interface_name, [host_name], nt_domain, nt_user_name, endpoint_name, [program_name]) 20 | GO 21 | 22 | 23 | */ 24 | 25 | insert into dbo.ExecRequests_connections ( 26 | login_name, client_interface_name, [host_name], nt_domain, nt_user_name, [program_name], endpoint_name 27 | ) 28 | select 29 | LEFT(s.login_name, 128), LEFT(s.client_interface_name, 128), LEFT(s.[host_name], 128), LEFT(s.nt_domain, 128), LEFT(s.nt_user_name, 128), LEFT(s.[program_name], 128), e.name 30 | from sys.dm_exec_sessions s 31 | left outer join sys.endpoints E ON E.endpoint_id = s.endpoint_id 32 | left outer join dbo.ExecRequests_connections erc 33 | on 34 | erc.login_name = LEFT(s.login_name, 128) 35 | and erc.client_interface_name = LEFT(s.client_interface_name, 128) 36 | and erc.[host_name] = LEFT(s.[host_name], 128) 37 | and erc.nt_domain = LEFT(s.nt_domain, 128) 38 | and erc.nt_user_name = LEFT(s.nt_user_name, 128) 39 | and erc.[program_name] = LEFT(s.[program_name], 128) 40 | and erc.endpoint_name = e.name 41 | where 42 | s.session_id >= 50 --retrieve only user spids 43 | and s.session_id <> @@SPID --ignore myself 44 | and erc.id is null 45 | GROUP BY LEFT(s.login_name, 128), LEFT(s.client_interface_name, 128), LEFT(s.[host_name], 128), LEFT(s.nt_domain, 128), LEFT(s.nt_user_name, 128), LEFT(s.[program_name], 128), e.name 46 | 47 | 48 | GO 49 | 50 | UPDATE erc 51 | SET observed_count = observed_count + s.session_id_count 52 | FROM dbo.ExecRequests_connections erc 53 | inner join (select s.login_name, s.client_interface_name, s.[host_name], s.nt_domain, s.nt_user_name, s.[program_name], s.endpoint_id, session_id_count = count(session_id) from sys.dm_exec_sessions s 54 | where 55 | s.session_id >= 50 --retrieve only user spids 56 | and s.session_id <> @@SPID --ignore myself 57 | GROUP BY 
s.login_name, s.client_interface_name, s.[host_name], s.nt_domain, s.nt_user_name, s.[program_name], s.endpoint_id 58 | ) s 59 | on 60 | erc.login_name = LEFT(s.login_name, 128) 61 | and erc.client_interface_name = LEFT(s.client_interface_name, 128) 62 | and erc.[host_name] = LEFT(s.[host_name], 128) 63 | and erc.nt_domain = LEFT(s.nt_domain, 128) 64 | and erc.nt_user_name = LEFT(s.nt_user_name, 128) 65 | and erc.[program_name] = LEFT(s.[program_name], 128) 66 | left outer join sys.endpoints e 67 | ON E.endpoint_id = s.endpoint_id 68 | and erc.endpoint_name = e.name 69 | 70 | --select * from dbo.ExecRequests_connections erc -------------------------------------------------------------------------------- /compress indexes.sql: -------------------------------------------------------------------------------- 1 | --Index/partitions in current database 2 | select SizeMb= (p.in_row_reserved_page_count*8.)/1024. 3 | , indexname = i.name 4 | , tablename = '[' + s.name + '].[' + o.name + ']' 5 | , pr.data_compression_desc 6 | , p.partition_number 7 | , rebuildcompress = 8 | CASE WHEN pr.data_compression_desc = 'columnstore' THEN NULL ELSE 9 | 'ALTER INDEX [' + i.name + '] ON [' + s.name + '].[' + o.name + '] REBUILD ' + 10 | CASE WHEN MAX(p.partition_number) OVER (PARTITION by i.name) > 1 THEN 11 | 'PARTITION = ' + cast(p.partition_number as varchar(5)) ELSE '' END + 12 | ' WITH (SORT_IN_TEMPDB = ON 13 | , DATA_COMPRESSION = PAGE) ' + CHAR(10) + CHAR(13) 14 | END 15 | , * 16 | from sys.dm_db_partition_stats p 17 | inner join sys.partitions pr on p.partition_id = pr.partition_id 18 | inner join sys.objects o on p.object_id = o.object_id 19 | inner join sys.schemas s on s.schema_id = o.schema_id 20 | left outer join sys.indexes i on i.object_id = o.object_id and i.index_id = p.index_id 21 | WHERE o.is_ms_shipped = 0 22 | 23 | order by SizeMb desc 24 | 25 | /* --Estimate size savings with compression using 26 | --Example: 27 | use [database] 28 | go 29 | exec sp_estimate_data_compression_savings 30 | @schema_name = 'dbo' 31 | , @object_name = 'whatevertable' 32 | , @index_id = null --null for all indexes on table, or try a specific index. The compression savings will vary. 33 | , @partition_number = null --specify partitions if applicable 34 | , @data_compression = 'PAGE'; --or ROW, or for columnstore, can use COLUMNSTORE 35 | 36 | */ -------------------------------------------------------------------------------- /cpu utilization.sql: -------------------------------------------------------------------------------- 1 | --This is simple use of the ring_buffer for historical CPU, goes back a little over 4 hours. 
2 | -- for more CPU and Memory, look at toolbox/sys_dm_os_ring_buffers.sql 3 | 4 | use TempDB; 5 | GO 6 | 7 | 8 | declare @numa_nodes int; 9 | select @numa_nodes = count(memory_node_id) from sys.dm_os_memory_nodes --get number of numa nodes for the SQL instance 10 | where memory_node_id <> 64 -- exclude the internal node for the DAC 11 | 12 | SELECT *, SQL_numa_node_count = @numa_nodes from 13 | (SELECT 'InstanceName' = @@SERVERNAME 14 | , logical_cpu_count = cpu_count, hyperthread_ratio , physical_cpu_count = cpu_count/hyperthread_ratio FROM sys.dm_os_sys_info ) as os 15 | --this below line SQL 2016 SP1+, 2012 SP4+ 16 | cross apply (select socket_count, cores_per_socket, Windows_numa_node_count = numa_node_count FROM sys.dm_os_sys_info ) as si 17 | 18 | select 19 | Avg_SystemIdle_Pct = AVG( record.value('(./Record/SchedulerMonitorEvent/SystemHealth/SystemIdle)[1]', 'int') ) 20 | , Avg_SQLProcessUtilization_Pct = AVG( record.value('(./Record/SchedulerMonitorEvent/SystemHealth/ProcessUtilization)[1]', 'int') ) / @numa_nodes 21 | , Max_SQLProcessUtilization_Pct = MAX( record.value('(./Record/SchedulerMonitorEvent/SystemHealth/ProcessUtilization)[1]', 'int') ) / @numa_nodes 22 | from ( 23 | select timestamp, convert(xml, record) as record 24 | from sys.dm_os_ring_buffers 25 | where ring_buffer_type = N'RING_BUFFER_SCHEDULER_MONITOR' 26 | and record like '%%') as x 27 | 28 | declare @ts_now bigint 29 | --select @ts_now = cpu_ticks / convert(float, cpu_ticks_in_ms) from sys.dm_os_sys_info 30 | select @ts_now = cpu_ticks / (cpu_ticks/ms_ticks) from sys.dm_os_sys_info; 31 | select record_id 32 | , EventTime = dateadd(ms, -1 * (@ts_now - [timestamp]), GetDate()) 33 | , SQLProcessUtilization 34 | , SystemIdle 35 | , OtherProcessUtilization = 100 - SystemIdle - SQLProcessUtilization 36 | from ( 37 | select 38 | record_id = record.value('(./Record/@id)[1]', 'int') 39 | , SystemIdle = record.value('(./Record/SchedulerMonitorEvent/SystemHealth/SystemIdle)[1]', 'int') 40 | , SQLProcessUtilization = record.value('(./Record/SchedulerMonitorEvent/SystemHealth/ProcessUtilization)[1]', 'int') / @numa_nodes 41 | , timestamp 42 | from ( 43 | select timestamp, convert(xml, record) as record 44 | from sys.dm_os_ring_buffers 45 | where ring_buffer_type = N'RING_BUFFER_SCHEDULER_MONITOR' 46 | and record like '%%') as x 47 | ) as y 48 | order by record_id desc 49 | 50 | 51 | --Inspired by: http://sqlblog.com/blogs/ben_nevarez/archive/2009/07/26/getting-cpu-utilization-data-from-sql-server.aspx -------------------------------------------------------------------------------- /database mail diag.sql: -------------------------------------------------------------------------------- 1 | SELECT is_broker_enabled FROM sys.databases WHERE name = 'msdb' ; -- should be 1 2 | EXECUTE msdb.dbo.sysmail_help_status_sp ; --should say STARTED 3 | --EXECUTE msdb.dbo.sysmail_start_sp --start the database mail queues; 4 | GO 5 | 6 | --Find recent unsent emails, hopefully there are none 7 | SELECT m.send_request_date, m.recipients, m.copy_recipients, m.blind_copy_recipients 8 | , m.[subject], sent_account = a.name, m.send_request_user, m.sent_status 9 | , Error_Description = l.description 10 | FROM msdb.dbo.sysmail_allitems m 11 | LEFT OUTER JOIN msdb.dbo.sysmail_account a 12 | ON m.sent_account_id = a.account_id 13 | LEFT OUTER JOIN msdb.dbo.sysmail_event_log AS l 14 | ON m.mailitem_id = l.mailitem_id 15 | WHERE 1=1 16 | AND m.send_request_date > dateadd(day, -45, sysdatetime()) -- Only show recent day(s) 17 | AND m.sent_status <> 
'sent' -- Possible values are sent (successful), unsent (in process), retrying (failed but retrying), failed (no longer retrying) 18 | ORDER BY m.send_request_date DESC; 19 | GO 20 | 21 | --Send mail test 22 | --exec msdb.dbo.sp_send_dbmail @profile_name ='hotmail', @recipients ='williamdassaf@hotmail.com', @subject ='test', @body = 'test' 23 | 24 | --ALTER DATABASE msdb SET ENABLE_BROKER; -------------------------------------------------------------------------------- /database ownership.sql: -------------------------------------------------------------------------------- 1 | --Find database owners that are not desired 2 | 3 | declare @Desired_DB_owner varchar(255) = 'sa' --'sa' is just an example, change to desired service account, example: domain\accountname 4 | 5 | select 6 | database_name = d.name 7 | , principal_name = SUSER_SNAME (d.owner_sid) 8 | , set_to_desired = 'alter authorization on database::[' + d.name + '] to [' + @Desired_DB_owner + ']' 9 | , set_to_current = case when SUSER_SNAME (d.owner_sid) <> @Desired_DB_owner THEN 'alter authorization on database::[' + d.name + '] to [' + SUSER_SNAME (d.owner_sid) + ']' ELSE NULL END 10 | , * 11 | from sys.databases d 12 | where SUSER_SNAME (d.owner_sid) <> @Desired_DB_owner 13 | 14 | 15 | -------------------------------------------------------------------------------- /dbatools.Copy-DbaLogin.ps1: -------------------------------------------------------------------------------- 1 | <# 2 | 3 | - MIGRATION OF LOGINS UTILIZING DBATOOLS.IO 4 | - https://docs.dbatools.io/#Copy-DbaLogin 5 | - This is a PS script to migrate logins including: 6 | 1. SIDs 7 | 2. Passwords 8 | 3. Defaultdb 9 | 4. Server roles & securables 10 | 5. Database permissions & securables 11 | 6. Login attributes 12 | - Permissions to run the script: 13 | - Requires PowerShell Remoting enabled on host 14 | - Local admin should be sufficient 15 | - Requires being a sysadmin on both Source and Destination SQL Servers 16 | 17 | NOTE: Preferably run after databases are migrated; No backwards compatibility; DO NOT run the whole script at once, follow the steps. 18 | 19 | #> 20 | 21 | 22 | #1. Installs the dbatools module from the PowerShell Gallery 23 | Install-Module dbatools 24 | 25 | 26 | #2. Change SQLSourceInstance to the Source SQL Server name that logins will be migrated from and SQLDestinationInstance to the Destination SQL Server name that logins will be migrated to. 27 | $Source = "SQLSourceInstance" 28 | $Destination = "SQLDestinationInstance" 29 | 30 | 31 | #3. Copies Source logins to Destination. 32 | <# 33 | See the syntax below if applicable. 34 | 1. -Force = If a login found on Destination matches Source, drops and recreates it. If active connections are found (including being an owner of a job), copy of the login will fail. 35 | 2. -KillActiveConnection = If any active connections are found for the login, they will be killed. 36 | 3. -SyncOnly = Syncs SQL Server login permissions, roles, etc. 37 | 4. 
-ExcludeSystemLogins = excludes system logins 38 | 39 | An Example with the syntax is: 40 | Copy-DbaLogin -Source SQLA -Destination SQLB -ExcludeSystemLogins -Force -KillActiveConnection 41 | #> 42 | Copy-DbaLogin -Source $Source -Destination $Destination -ExcludeSystemLogins -------------------------------------------------------------------------------- /defrag columnstore.sql: -------------------------------------------------------------------------------- 1 | --This script only works for SQL 2016+, when nonclustered columnstore indexes are writeable 2 | --https://docs.microsoft.com/en-us/sql/relational-databases/indexes/columnstore-indexes-defragmentation 3 | 4 | SELECT 5 | TableName = SCHEMA_NAME (o.schema_id) + '.' + o.name 6 | , IndexName = i.name 7 | , RowGroup_count = count(gps.row_group_id) 8 | , RowGroup_State = gps.state_desc 9 | , gps.partition_number 10 | , gps.Number_of_partitions 11 | , RowGroup_rows = sum(gps.total_rows) 12 | , Size_GB = SUM(gps.size_in_bytes/1024./1024.) 13 | , Rebuild_TSQL_if_needed = CASE WHEN state_desc <> 'COMPRESSED' and sum(total_rows)>0 THEN 14 | 'ALTER INDEX '+i.name+' ON '+object_name(gps.object_id)+' REORGANIZE '+CASE WHEN gps.Number_of_partitions > 1 THEN 'PARTITION = '+ cast(gps.partition_number as varchar(10)) ELSE '' END +' WITH (COMPRESS_ALL_ROW_GROUPS = ON); 15 | ALTER INDEX '+i.name+' ON '+object_name(gps.object_id)+' REORGANIZE ' +CASE WHEN gps.Number_of_partitions > 1 THEN 'PARTITION = '+ cast(gps.partition_number as varchar(10)) ELSE '' END +' ; 16 | ALTER INDEX '+i.name+' ON '+object_name(gps.object_id)+' REORGANIZE ' +CASE WHEN gps.Number_of_partitions > 1 THEN 'PARTITION = '+ cast(gps.partition_number as varchar(10)) ELSE '' END +' ; 17 | --Consolidate the Open rowgroups with COMPRESS_ALL_ROW_GROUPS, 18 | --then again to compress the COMPRESSED rowgroups, 19 | --then a third time to remove the TOMBSTONE rowgroups' 20 | ELSE '' END 21 | , Rebuild_TSQL = 22 | 'ALTER INDEX '+i.name+' ON '+object_name(gps.object_id)+' REORGANIZE '+CASE WHEN gps.Number_of_partitions > 1 THEN 'PARTITION = '+ cast(gps.partition_number as varchar(10)) ELSE '' END +' WITH (COMPRESS_ALL_ROW_GROUPS = ON); 23 | ALTER INDEX '+i.name+' ON '+object_name(gps.object_id)+' REORGANIZE '+CASE WHEN gps.Number_of_partitions > 1 THEN 'PARTITION = '+ cast(gps.partition_number as varchar(10)) ELSE '' END +' ; 24 | ALTER INDEX '+i.name+' ON '+object_name(gps.object_id)+' REORGANIZE '+CASE WHEN gps.Number_of_partitions > 1 THEN 'PARTITION = '+ cast(gps.partition_number as varchar(10)) ELSE '' END +' ; 25 | --Reorganize to consolidate the Open rowgroups with COMPRESS_ALL_ROW_GROUPS, 26 | --then Reorganize again to compress the COMPRESSED rowgroups, 27 | --then Reorganize potentially a third time to remove any remaining TOMBSTONE rowgroups' 28 | FROM (SELECT *, Number_of_partitions = MAX(partition_number) OVER (PARTITION BY index_id) 29 | FROM sys.dm_db_column_store_row_group_physical_stats) as gps 30 | INNER JOIN sys.indexes i on gps.object_id = i.object_id and gps.index_id = i.index_id 31 | INNER JOIN sys.objects o on i.object_id = o.object_id 32 | --WHERE object_name(gps.object_id)= 'DimShipTo' 33 | GROUP BY o.name, gps.state_desc, gps.object_id, o.schema_id, i.name, gps.size_in_bytes, gps.partition_number, gps.Number_of_partitions 34 | ORDER BY o.name, i.name; 35 | GO 36 | -------------------------------------------------------------------------------- /dependencies.sql: -------------------------------------------------------------------------------- 1 | 2 | USE 
[AdventureWorks2012] 3 | GO 4 | --dependencies by text 5 | select s.name +'.' + o.name, o.type_desc, m.definition, LEN(m.definition) 6 | from sys.sql_modules m 7 | inner join sys.objects o on m.object_id = o.object_id 8 | inner join sys.schemas s on s.schema_id = o.schema_id 9 | where definition like '%JobCode%' 10 | order by o.name 11 | 12 | 13 | --dependencies by dependency 14 | select 15 | ReferencingObjectName = rs.name + '.' + ro.name 16 | , ReferencingObjectType = ro.type_desc 17 | , ReferencedObjectName = s.name + '.' + o.name 18 | , ReferencedObjectType = o.type_desc 19 | 20 | from sys.sql_expression_dependencies d 21 | inner join sys.objects o on d.referenced_id = o.object_id or d.referenced_minor_id = o.object_id 22 | inner join sys.schemas s on o.schema_id = s.schema_id 23 | inner join sys.objects ro on d.referencing_id = ro.object_id 24 | inner join sys.schemas rs on ro.schema_id = rs.schema_id 25 | where 26 | -- ro.is_ms_shipped = 0 27 | --and o.is_ms_shipped = 0 28 | --and op.type_desc = 'SQL_STORED_PROCEDURE' 29 | --and s.name = 'Loading' 30 | o.name like '%JobCode%' 31 | group by rs.name , ro.name, s.name , o.name , ro.type_desc, o.type_desc 32 | order by ro.name, o.name 33 | 34 | 35 | -------------------------------------------------------------------------------- /deprecated usage counter.sql: -------------------------------------------------------------------------------- 1 | 2 | --Look up deprecated counts, SQL 2008 and above 3 | --http://msdn.microsoft.com/en-us/library/bb510662.aspx 4 | -- 5 | SELECT object_name, instance_name, cntr_value 6 | 7 | FROM sys.dm_os_performance_counters 8 | 9 | WHERE object_name like '%Deprecated Features%' 10 | -------------------------------------------------------------------------------- /dm_exec_query_memory_grants.sql: -------------------------------------------------------------------------------- 1 | 2 | --memory grant waits in progress 3 | 4 | select 5 | mg.session_id 6 | , mg.group_id 7 | , mg.request_time 8 | , mg.grant_time 9 | , requested_memory_gb = mg.requested_memory_kb/1024./1024. 10 | , granted_memory_gb = mg.granted_memory_kb/1024./1024. 11 | , mg.required_memory_kb 12 | , used_memory_gb = mg.used_memory_kb/1024./1024. 13 | , mg.max_used_memory_kb 14 | , ideal_memory_gb = mg.ideal_memory_kb/1024./1024. 
15 | , mg.query_cost 16 | , r.granted_query_memory 17 | , r.status 18 | , [db_name] = db_name(r.database_id) 19 | , r.wait_time 20 | , r.wait_type 21 | , r.cpu_time 22 | , r.total_elapsed_time 23 | , r.reads 24 | , r.writes 25 | , r.logical_reads 26 | , est.objectid 27 | , est.text 28 | , offsettext = CASE WHEN r.statement_start_offset = 0 and r.statement_end_offset= 0 THEN NULL 29 | ELSE 30 | SUBSTRING (est.[text], r.statement_start_offset/2 + 1, 31 | CASE WHEN r.statement_end_offset = -1 THEN LEN (CONVERT(nvarchar(max), est.[text])) 32 | ELSE r.statement_end_offset/2 - r.statement_start_offset/2 + 1 33 | END) 34 | END 35 | from sys.dm_exec_query_memory_grants mg 36 | inner join sys.dm_exec_requests r on mg.session_id = r.session_id 37 | outer apply sys.dm_exec_sql_text (r.sql_handle) est 38 | order by query_cost desc 39 | GO -------------------------------------------------------------------------------- /dm_exec_session_wait_stats.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SparkhoundSQL/sql-server-toolbox/51636a22fe7db848cc12473c8083e2753bb6ab93/dm_exec_session_wait_stats.sql -------------------------------------------------------------------------------- /dm_os_performance_counters.sql: -------------------------------------------------------------------------------- 1 | --select distinct object_name, counter_name from sys.dm_os_performance_counters 2 | 3 | --Buffer Cache Hit Ratio, as an example 4 | SELECT [BufferCacheHitRatio] = (bchr * 1.0 / bchrb) * 100.0 5 | FROM 6 | (SELECT bchr = cntr_value FROM 7 | sys.dm_os_performance_counters 8 | WHERE counter_name = 'Buffer cache hit ratio' 9 | AND object_name like '%Buffer Manager%') AS r 10 | CROSS APPLY 11 | (SELECT bchrb= cntr_value FROM 12 | sys.dm_os_performance_counters 13 | WHERE counter_name = 'Buffer cache hit ratio base' 14 | and object_name like '%Buffer Manager%') AS rb 15 | 16 | --Target, Total memory 17 | select counter_name, [cntr_value (MB)] = cntr_value/1024. 18 | from sys.dm_os_performance_counters 19 | where OBJECT_NAME like '%Memory Manager%' 20 | and counter_name in ('Target Server Memory (KB)','Total Server Memory (KB)') 21 | 22 | --Page Lookups/Batch Requests Ratio 23 | SELECT 24 | [Page Lookups/s] = a.cntr_value 25 | , [Batch Requests/s] = b.cntr_value 26 | , [Ratio (ideally <100)] = (a.cntr_value * 1. 
/ b.cntr_value) --should be <100 27 | FROM ( 28 | select * FROM sys.dm_os_performance_counters 29 | where OBJECT_NAME like '%Buffer Manager%' 30 | and counter_name = 'Page lookups/sec') a 31 | CROSS APPLY 32 | (select * FROM sys.dm_os_performance_counters 33 | where OBJECT_NAME like '%SQL Statistics%' 34 | and counter_name = 'Batch Requests/sec') b 35 | 36 | 37 | 38 | -------------------------------------------------------------------------------- /dm_os_waiting_tasks.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SparkhoundSQL/sql-server-toolbox/51636a22fe7db848cc12473c8083e2753bb6ab93/dm_os_waiting_tasks.sql -------------------------------------------------------------------------------- /endpoint owners.sql: -------------------------------------------------------------------------------- 1 | 2 | SELECT SUSER_NAME(principal_id) as endpoint_owner, * 3 | from sys.endpoints 4 | WHERE SUSER_NAME(principal_id) <> 'sa' 5 | AND SUSER_NAME(principal_id) <> 'whatever\sqlservices' 6 | 7 | --alter authorization on endpoint::[mirroring] to [whatver\sqlservices] -------------------------------------------------------------------------------- /failover cluster force quorum.ps1: -------------------------------------------------------------------------------- 1 | #Forced failover 2 | 3 | import-module failoverclusters 4 | Stop-ClusterNode -Name "servername" #Intended server to failover to 5 | Start-ClusterNode -Name "servername" -FixQuorum 6 | 7 | (Get-ClusterNode -Name "servername").NodeWeight=1 8 | 9 | $Nodes = Get-ClusterNode -Cluster "servername" 10 | $Nodes 11 | -------------------------------------------------------------------------------- /filtered index opportunities.sql: -------------------------------------------------------------------------------- 1 | 2 | select 3 | [Database Name] = db_name() 4 | , [Table Name] = s.name + '.' + o.name 5 | , [Column Name] = c.name 6 | , [Total_rows] = sum(ps.row_count) 7 | --Review the distribution of the data in the table 8 | , [TSQL_Testing_Row_Distribution] = 'select [' + c.name + '], count(1) from ['+ s.name + '].[' + o.name+'] group by [' + c.name + ']' 9 | from 10 | sys.objects o 11 | inner join 12 | sys.schemas s 13 | on o.schema_id = s.schema_id 14 | inner join 15 | sys.dm_db_partition_stats ps 16 | on ps.object_id = o.object_id 17 | and index_id <= 1 --heap or cluster index, ignore NC indexes 18 | 19 | left outer join 20 | sys.columns c on c.object_id = o.object_id 21 | left outer join 22 | sys.types t on c.user_type_id = t.user_type_id 23 | WHERE 24 | o.name <> 'dtproperties' 25 | and is_ms_shipped = 0 26 | and o.type = 'u' 27 | and ( c.name like 'is%' 28 | or c.name like '%active%' 29 | or c.name like '%ignore%' 30 | or c.name like 'has%' 31 | or c.name like '%current%' 32 | or c.name like '%archived%' 33 | or c.name like '%flag%' 34 | or c.name like '%bit%' 35 | or t.name = 'bit' 36 | --Add any more known naming conventions here 37 | ) 38 | group by c.name, s.name, o.name 39 | having sum(ps.row_count) > 100000 40 | order by [Total_rows] desc 41 | go 42 | 43 | --Existing filtered indexes 44 | SELECT 45 | [Database Name] = db_name() 46 | , [Table Name] = s.name + '.' 
+ o.name 47 | , [Index Name] = i.name 48 | from 49 | sys.objects o 50 | inner join 51 | sys.schemas s 52 | on o.schema_id = s.schema_id 53 | inner join 54 | sys.indexes i 55 | on i.object_id = o.object_id 56 | inner join 57 | sys.dm_db_partition_stats ps 58 | on ps.object_id = o.object_id and ps.index_id = i.index_id 59 | WHERE 60 | i.has_filter = 1 61 | ORDER BY 62 | s.name, o.name, i.name 63 | 64 | 65 | /* 66 | --Potential Filtered index opportunities 67 | select iscurrentphase, count(1) from RepairOrderRepairPhases group by IsCurrentPhase 68 | select isEnabled, count(1) from dbo.Users group by IsEnabled 69 | select [ISPUBLIC], count(1) from [dbo].[TNOTE] group by [ISPUBLIC] 70 | 71 | --Potential filtered index 72 | CREATE INDEX IDX_NC_F_Testing on dbo.testtable (Whatever1, whatever2) INCLUDE (whatever3) 73 | WHERE IsActive = 1; 74 | 75 | */ -------------------------------------------------------------------------------- /find memory mini dumps.sql: -------------------------------------------------------------------------------- 1 | -- Get information on location, time and size of any memory dumps from SQL Server 2 | -- Only SQL 2008R2+ 3 | SELECT [filename], creation_time, size_in_bytes/1048576.0 AS [Size (MB)] 4 | FROM sys.dm_server_memory_dumps 5 | ORDER BY creation_time DESC OPTION (RECOMPILE); -------------------------------------------------------------------------------- /fix orphaned sid.sql: -------------------------------------------------------------------------------- 1 | --Only works for SQL 2005 SP2 or later! 2 | /** GENERATE TSQL TO FIX ORPHANS **/ 3 | Select 4 | DBUser_Name = dp.name 5 | , DBUser_SID = dp.sid 6 | , Login_Name = sp.name 7 | , Login_SID = sp.sid 8 | , SQLtext = 'ALTER USER [' + dp.name + '] WITH LOGIN = [' + ISNULL(sp.name, '???') + ']' 9 | from sys.database_principals dp 10 | left outer join sys.server_principals sp 11 | on dp.name = sp.name 12 | where 13 | dp.is_fixed_role = 0 14 | and sp.sid <> dp.sid 15 | and dp.principal_id > 1 16 | and dp.sid is not null 17 | and dp.sid <> 0x0 18 | order by dp.name 19 | go 20 | 21 | 22 | 23 | 24 | /* multi-database 25 | 26 | exec sp_MSforeachdb N'use [?]; 27 | Select [?]= ''use [?];'', ''ALTER USER ['' + dp.name + ''] WITH LOGIN = ['' + dp.name + '']'', * 28 | from sys.database_principals dp 29 | inner join sys.server_principals sp 30 | on dp.name COLLATE SQL_Latin1_General_CP1_CI_AS = sp.name COLLATE SQL_Latin1_General_CP1_CI_AS 31 | where 32 | dp.is_fixed_role = 0 33 | and (dp.sid is not null and dp.sid <> 0x0) 34 | and sp.sid <> dp.sid 35 | and dp.principal_id > 1 36 | order by dp.name 37 | ' 38 | */ 39 | 40 | 41 | /***** OLD ********/ 42 | /* 43 | select * from sysusers 44 | where issqluser = 1 and (sid is not null and sid <> 0x0) and suser_sname(sid) is null 45 | order by name 46 | 47 | --Only works for SQL 2005 SP2 or later! 
48 | GO 49 | DECLARE @SQL varchar(100) 50 | DECLARE curSQL CURSOR FOR 51 | Select 'ALTER USER [' + name + '] WITH LOGIN = [' + name + ']' 52 | from sysusers 53 | where issqluser = 1 and (sid is not null and sid <> 0x0) and suser_sname(sid) is null 54 | order by name 55 | OPEN curSQL 56 | FETCH curSQL into @SQL 57 | WHILE @@FETCH_STATUS = 0 58 | BEGIN 59 | print @SQL 60 | EXEC (@SQL) 61 | FETCH curSQL into @SQL 62 | END 63 | CLOSE curSQL 64 | DEALLOCATE curSQL 65 | 66 | GO 67 | */ 68 | 69 | 70 | 71 | /* 72 | 73 | --Lab: Create orphaned SID 74 | 75 | USE [master] 76 | GO 77 | CREATE LOGIN [test] WITH PASSWORD=N'test', DEFAULT_DATABASE=[master], CHECK_EXPIRATION=OFF, CHECK_POLICY=OFF 78 | GO 79 | 80 | USE w 81 | GO 82 | CREATE USER test 83 | GO 84 | 85 | 86 | test 0x1E1EEF7790E11745B42B9A33083DFF55 NULL NULL ALTER USER [test] WITH LOGIN = [test] 87 | 88 | ALTER USER [test] WITH LOGIN = [test] 89 | 90 | */ 91 | -------------------------------------------------------------------------------- /fk untrusted or disabled check.sql: -------------------------------------------------------------------------------- 1 | --Check for untrusted or disabled FK's 2 | --Could be a silent performance drag if FK's exist but aren't trusted. 3 | --See also: "lab - fk untrusted or disabled check.ipnynb" or "lab - fk untrusted or disabled check.sql" 4 | --Careful! Trusting or enabling a FK could cause application errors if invalid child table entries are expected 5 | 6 | SELECT 7 | Table_Name = s.name + '.' +o.name 8 | , FK_Name = fk.name 9 | , fk.is_not_trusted 10 | , fk.is_disabled 11 | , 'ALTER TABLE [' + s.name + '].[' +o.name +'] 12 | WITH CHECK 13 | CHECK CONSTRAINT ['+fk.name+'];' --trusts and enables the FK 14 | FROM sys.foreign_keys as fk 15 | INNER JOIN sys.objects as o ON fk.parent_object_id = o.object_id 16 | INNER JOIN sys.schemas as s ON o.schema_id = s.schema_id 17 | where fk.is_not_trusted = 1 18 | or fk.is_disabled = 1 19 | 20 | /* 21 | 22 | --Check all databases: 23 | --Some not trusted FK's are common in the SSRS ReportServer and ReportServerTempDB databases 24 | exec sp_msforeachdb 'use [?]; 25 | SELECT 26 | DB_Name = ''?'' 27 | , Table_Name = s.name + ''.'' +o.name 28 | , FK_Name = fk.name 29 | , fk.is_not_trusted 30 | , fk.is_disabled 31 | , ''ALTER TABLE ['' + s.name + ''].['' +o.name +''] 32 | WITH CHECK 33 | CHECK CONSTRAINT [''+fk.name+''];'' --trusts and enables the FK 34 | FROM sys.foreign_keys as fk 35 | INNER JOIN sys.objects as o ON fk.parent_object_id = o.object_id 36 | INNER JOIN sys.schemas as s ON o.schema_id = s.schema_id 37 | where fk.is_not_trusted = 1 38 | or fk.is_disabled = 1 '; 39 | 40 | */ 41 | 42 | /* --Sample: 43 | 44 | ALTER TABLE [dbo].[table2] 45 | WITH CHECK 46 | CHECK CONSTRAINT [FK_table2_table1]; 47 | 48 | */ -------------------------------------------------------------------------------- /fulltext index demo.sql: -------------------------------------------------------------------------------- 1 | drop table if exists dbo.fttest 2 | go 3 | create table dbo.fttest 4 | (id int identity(1,1) not null constraint pk_fftest primary key 5 | , text1 varchar(2000) 6 | , dateinserted datetimeoffset(2) not null constraint df_fttest_dateinserted default (sysdatetimeoffset()) 7 | ) 8 | insert into dbo.fttest (Text1) values ( REPLICATE (CHAR((rand()*64)+64), FLOOR(RAND()*2000))) 9 | go 10 | insert into dbo.fttest (Text1) 11 | select ( REPLICATE (CHAR((rand()*64)+64), FLOOR(RAND()*2000))) from fttest 12 | go 14 13 | 14 | select count(1) from fttest 15 | GO 16 | 17 | IF EXISTS ( 
SELECT * FROM sys.fulltext_catalogs WHERE name = N'ft_cat') 18 | DROP FULLTEXT CATALOG ft_cat 19 | GO 20 | 21 | CREATE FULLTEXT CATALOG ft_cat 22 | GO 23 | CREATE FULLTEXT INDEX ON dbo.fttest (text1) 24 | KEY INDEX pk_fftest 25 | ON ft_cat 26 | WITH (CHANGE_TRACKING = AUTO, STOPLIST = SYSTEM) 27 | GO 28 | 29 | 30 | --use fulltext index status.sql to observe. Wait for it to get caught up. 31 | 32 | INSERT INTO dbo.fttest (Text1) 33 | SELECT ( REPLICATE (CHAR((rand()*64)+64), FLOOR(RAND()*2000))) from fttest 34 | GO --insert a ton of rows and get the fulltext catalog "behind" 35 | DELETE FROM fttest where text1 = 'whatever'; 36 | GO 37 | INSERT INTO dbo.fttest (Text1) OUTPUT inserted.dateinserted select 'whatever' --insert needle in haystack 38 | GO 39 | SELECT sysdatetimeoffset(), * from dbo.fttest t where text1 = 'whatever' 40 | SELECT sysdatetimeoffset(), * from dbo.fttest t where CONTAINS (Text1, '"whatever"'); 41 | GO 42 | WHILE NOT EXISTS (select text1 from dbo.fttest t where CONTAINS (Text1, '"whatever"')) 43 | BEGIN 44 | WAITFOR DELAY '00:00:01' --1s 45 | print 'waiting 1s'; 46 | IF EXISTS (select text1 from dbo.fttest t where CONTAINS (Text1, '"whatever"')) 47 | BEGIN 48 | --Wait for haystack to show up in the FT index. Might be a while!! 49 | select Found = sysdatetimeoffset(), * from dbo.fttest t where CONTAINS (Text1, '"whatever"'); 50 | BREAK; 51 | END 52 | ELSE 53 | CONTINUE; 54 | END 55 | GO -------------------------------------------------------------------------------- /fulltext index status.sql: -------------------------------------------------------------------------------- 1 | --Identify if fulltext catalog feature is installed 2 | --Skip this step in Azure SQL DB 3 | IF (SELECT FullText_Indexing_Is_Installed = fulltextserviceproperty('IsFullTextInstalled')) <> 1 4 | BEGIN 5 | THROW 51000, 'Full text indexing is not installed.',0; 6 | END; 7 | GO 8 | 9 | --Identify databases with a fulltext catalog present 10 | --Skip this step in Azure SQL DB 11 | EXEC sp_MSforeachdb 'use[?]; select Database_name = DB_Name(), fc.name from sys.fulltext_catalogs fc' 12 | GO 13 | 14 | SELECT 15 | Fulltext_Catalog = c.name 16 | , Table_Name = o.name 17 | , Key_Index = i.name 18 | , Catalog_Populate_Status = FULLTEXTCATALOGPROPERTY(c.name,'PopulateStatus') 19 | , Catalog_Populate_Status_Desc = 20 | (SELECT CASE FULLTEXTCATALOGPROPERTY(c.name,'PopulateStatus') 21 | WHEN 0 THEN 'Idle' --caught up and keeping up 22 | WHEN 1 THEN 'Full Population In Progress' --initial status upon creation 23 | WHEN 2 THEN 'Paused'--PROBLEM? 24 | WHEN 3 THEN 'Throttled' 25 | WHEN 4 THEN 'Recovering'--PROBLEM 26 | WHEN 5 THEN 'Shutdown'--PROBLEM 27 | WHEN 6 THEN 'Incremental Population In Progress' 28 | WHEN 7 THEN 'Building Index' 29 | WHEN 8 THEN 'Disk Full. Paused.' --PROBLEM 30 | WHEN 9 THEN 'Change Tracking' --expected when it is catching up and not up to date yet 31 | END) --https://docs.microsoft.com/en-us/sql/t-sql/functions/fulltextcatalogproperty-transact-sql 32 | , LastCrawlStart = fi.crawl_start_date 33 | , LastCrawlEnd = fi.crawl_end_date --null when currently crawling 34 | , fi.is_enabled 35 | , c.is_default 36 | , fi.crawl_type_desc 37 | , fi.change_tracking_state_desc 38 | , fi.has_crawl_completed 39 | , c.is_importing -- Indicates whether the full-text catalog is being imported: 1 = The catalog is being imported. 2 = The catalog is not being imported. 
40 | 41 | FROM sys.fulltext_catalogs c 42 | LEFT OUTER JOIN sys.fulltext_indexes fi ON fi.fulltext_catalog_id = c.fulltext_catalog_id 43 | LEFT OUTER JOIN sys.objects o ON o.[object_id] = fi.[object_id] 44 | LEFT OUTER JOIN sys.indexes i ON fi.unique_index_id = i.index_id AND fi.[object_id] = i.[object_id] 45 | /* 46 | WHERE (fi.crawl_end_date is null --is currently crawling 47 | or fi.crawl_end_date < dateadd(day, -1, getdate())) --look for any ft index that hasn't updated recently 48 | */ 49 | ORDER BY c.name, o.name, i.name, fi.crawl_start_date; -------------------------------------------------------------------------------- /gather log events - remoting.ps1: -------------------------------------------------------------------------------- 1 | #Pulls logs from a remote machine via Windows Authentication via PowerShell remoting. 2 | #If needed for the local instance without PowerShell remoting, consider "gather log events.ps1" 3 | 4 | #Must launch PowerShell as an Administrator to read from the Security log 5 | 6 | #TODO: change the remote server name in TWO places. 7 | 8 | ##Run this block first to enter the remote session 9 | ##Execute this block with F8 not F5 10 | Set-ExecutionPolicy -ExecutionPolicy Unrestricted -Scope Process 11 | $target = "CA-SQL2017" #TODO CHANGE 12 | Enter-PSSession -ComputerName $target 13 | 14 | #-------------------------------------------------------------------------------------- 15 | ##Because you lose scope when you enter the remote session, this must be executed separately. The entire remoting can't be in a single script execution. 16 | #-------------------------------------------------------------------------------------- 17 | 18 | ##Execute this block with F8 not F5 19 | ##This one takes a while 20 | $target = "CA-SQL2017" #TODO CHANGE 21 | $loglocalfile = "C:\temp\"+$target+" log export.csv" 22 | $NumDays = -90 23 | $EventLog_Application = Get-EventLog -LogName "Application" -After (Get-Date).AddDays($NumDays) | 24 | ? { $_.entryType -Match "Error" -and "Critical" -and "Warning" } | Group-Object -Property EventID | 25 | ForEach-Object { $_.Group[0] | Add-Member -PassThru -NotePropertyName Count -NotePropertyValue $_.Count | Add-Member -PassThru -NotePropertyName LogSource -NotePropertyValue "Application" } | 26 | Sort-Object Count -Descending -Unique | 27 | Select-Object LogSource, Count, @{name="Latest";expression={$_.TimeGenerated}}, EventID, Source, Message ; 28 | $EventLog_System = Get-EventLog -LogName "System" -After (Get-Date).AddDays($NumDays) | 29 | ? { $_.entryType -Match "Error" -and "Critical" -and "Warning" } | Group-Object -Property EventID | 30 | ForEach-Object { $_.Group[0] | Add-Member -PassThru -NotePropertyName Count -NotePropertyValue $_.Count | Add-Member -PassThru -NotePropertyName LogSource -NotePropertyValue "System" } | 31 | Sort-Object Count -Descending -Unique | 32 | Select-Object LogSource, Count, @{name="Latest";expression={$_.TimeGenerated}}, EventID, Source, Message ; 33 | $EventLog_Security = Get-EventLog -LogName "Security" -After (Get-Date).AddDays($NumDays) | 34 | ? 
{ $_.entryType -Match "Error" -and "Critical" } | Group-Object -Property EventID | 35 | ForEach-Object { $_.Group[0] | Add-Member -PassThru -NotePropertyName Count -NotePropertyValue $_.Count | Add-Member -PassThru -NotePropertyName LogSource -NotePropertyValue "Security" } | 36 | Sort-Object Count -Descending -Unique | 37 | Select-Object LogSource, Count, @{name="Latest";expression={$_.TimeGenerated}}, EventID, Source, Message ; 38 | @( $EventLog_System; $EventLog_Application; $EventLog_Security) | Export-Csv -Path $loglocalfile -Encoding ascii -NoTypeInformation; 39 | Exit-PSSession 40 | 41 | #-------------------------------------------------------------------------------------- 42 | ##Run this block outside of the remote session 43 | #-------------------------------------------------------------------------------------- 44 | 45 | ##Execute this block with F8 not F5 46 | $timestamp = (Get-Date).ToString('yyyyMMddTHHmmss') 47 | $logtargetfile = "\\"+$target+"\C$\temp\"+$target+" log export.csv" 48 | $loglocalfile = "C:\temp\"+$target+" log export "+$timestamp+".csv" 49 | copy-item $logtargetfile $loglocalfile 50 | -------------------------------------------------------------------------------- /generate error tables.sql: -------------------------------------------------------------------------------- 1 | SET NOCOUNT ON 2 | 3 | DECLARE BuildErrorTables CURSOR FAST_FORWARD 4 | FOR 5 | select 6 | o.object_id 7 | from sys.objects o 8 | inner join sys.schemas s 9 | on o.schema_id = s.schema_id 10 | where s.name = 'WH' 11 | and type_desc = 'USER_TABLE' 12 | and o.name <> 'DimDate' 13 | 14 | OPEN BuildErrorTables 15 | 16 | declare @object_id int 17 | 18 | FETCH NEXT FROM BuildErrorTables 19 | INTO @object_id 20 | 21 | DECLARE @BuildErrorTable TABLE 22 | (tsql varchar(500) null) 23 | 24 | WHILE @@FETCH_STATUS = 0 25 | BEGIN 26 | 27 | insert into @BuildErrorTable (tsql) 28 | select 'IF EXISTS (SELECT * FROM sys.objects o inner join sys.schemas s on o.schema_id = s.schema_id where o.name = '''+ object_name(@object_id) + ''' and s.name = ''ERROR'')' 29 | 30 | insert into @BuildErrorTable (tsql) 31 | select 'DROP table ERROR.[' + convert(sysname, object_name(@object_id)) + ']' 32 | 33 | insert into @BuildErrorTable (tsql) 34 | select 'GO' 35 | 36 | insert into @BuildErrorTable (tsql) 37 | select 'create table ERROR.[' + convert(sysname, object_name(@object_id)) + '] (' 38 | 39 | insert into @BuildErrorTable (tsql) 40 | select ' ID bigint not null IDENTITY(1,1) PRIMARY KEY ' 41 | 42 | insert into @BuildErrorTable (tsql) 43 | select 44 | column_name = ', ' + c.name + ' ' + case when t.name in ('binary', 'sysname', 'smallint', 'int', 'bigint', 'decimal', 'float', 'real', 'date', 'time', 'datetime', 'datetime2', 'datetimeoffset' 45 | , 'timestamp', 'numeric', 'money', 'smallmoney') 46 | THEN 'varchar (100)' 47 | when t.name in ('bit', 'tinyint') THEN 'varchar (10)' 48 | when t.name in ('char','varchar') and c.max_length <= 4000 THEN 'varchar (' + cast(c.max_length * 2 as varchar(5)) + ')' 49 | when t.name in ('char','varchar') and c.max_length > 4000 THEN 'varchar (8000)' 50 | when t.name in ('nchar','nvarchar') and c.max_length <= 2000 THEN 'nvarchar (' + cast(c.max_length * 2 as varchar(5)) + ')' 51 | when t.name in ('nchar','nvarchar') and c.max_length > 2000 THEN 'nvarchar (4000)' 52 | when t.name in ('text') THEN 'varchar (8000)' 53 | when t.name in ('ntext') THEN 'nvarchar (4000)' 54 | else 'varchar(8000)' 55 | END + ' NULL' 56 | from 57 | sys.objects o 58 | inner join sys.schemas s 59 | on 
o.schema_id = s.schema_id 60 | inner join sys.columns c 61 | on c.object_id = o.object_id 62 | inner join sys.types t 63 | on c.user_type_id = t.user_type_id 64 | where 65 | o.object_id = @object_id 66 | 67 | insert into @BuildErrorTable (tsql) 68 | select ', ErrorDate datetime2(0) not null constraint DF_' + replace(convert(sysname, object_name(@object_id)), ' ','_') + '_ErrorDate DEFAULT (getdate())' 69 | insert into @BuildErrorTable (tsql) 70 | select ', ErrorText varchar(100) null' 71 | insert into @BuildErrorTable (tsql) 72 | select ', ErrorCode varchar(100) null' 73 | insert into @BuildErrorTable (tsql) 74 | select ', AuditID int null' 75 | 76 | insert into @BuildErrorTable (tsql) 77 | select ');' 78 | 79 | insert into @BuildErrorTable (tsql) 80 | select 'go' 81 | 82 | FETCH NEXT FROM BuildErrorTables 83 | INTO @object_id 84 | 85 | END 86 | 87 | 88 | select * from @BuildErrorTable -------------------------------------------------------------------------------- /get disk block allocation size.ps1: -------------------------------------------------------------------------------- 1 | ##Get Block size per 2 | ##Look for Bytes Per Cluster. By default 4096, should be 65536 for SQL data, logs, and tempdb volumes. 3 | Fsutil fsinfo ntfsinfo d: 4 | Fsutil fsinfo ntfsinfo e: 5 | ##Run once per volume, etc. 6 | 7 | ##Get Starting Offset 8 | ##wmic partition get BlockSize, StartingOffset, Name, Index 9 | 10 | ## May be inaccurate: 11 | ##$WMIQuery = "SELECT Label, Blocksize, Name FROM Win32_Volume WHERE FileSystem='NTFS'" 12 | ##Get-WmiObject -Query $WMIQuery | Select-Object Label, @{Name="Blocksize_KB";Expression={$_.Blocksize}} , Name 13 | -------------------------------------------------------------------------------- /guest permissions.sql: -------------------------------------------------------------------------------- 1 | --Check all DB's for Guest permissions. 2 | --Guest user should NOT be disabled on system databases https://support.microsoft.com/en-us/kb/2539091 3 | 4 | exec sp_MSforeachdb ' 5 | SELECT Db_name = ''?'' 6 | , prins.name AS grantee_name 7 | , perms.permission_name 8 | , state_desc 9 | , Revoke_TSQL = CASE WHEN state_desc = ''GRANT'' and db_ID(''?'') > 4 THEN ''use [?]; REVOKE CONNECT TO GUEST;'' END 10 | , * 11 | FROM [?].sys.database_principals AS prins 12 | INNER JOIN [?].sys.database_permissions AS perms 13 | ON perms.grantee_principal_id = prins.principal_id 14 | WHERE prins.name = ''guest'' 15 | AND state_desc = ''GRANT'' 16 | 17 | ' 18 | -------------------------------------------------------------------------------- /hypothetical cleanup.sql: -------------------------------------------------------------------------------- 1 | --Finds objects to drop that the Database Tuning Advisor (DTA) leaves behind when it inevitably crashes. 
2 | 3 | SELECT 'drop index [' + i.name+ '] on [' + schema_name(o.schema_id) + '].[' + object_name(i.[object_id]) + ']' 4 | FROM sys.indexes i 5 | INNER JOIN sys.objects o 6 | ON i.object_id = o.object_id 7 | WHERE 1=1 8 | and o.is_ms_shipped = 0 9 | and o.type = 'u' 10 | and i.name is not null 11 | and i.is_hypothetical = 1 12 | 13 | select 'drop statistics [' + schema_name(o.schema_id) + '].[' + object_name(i.[object_id]) + '].['+ i.[name] + ']' 14 | FROM sys.stats i 15 | inner join sys.objects o 16 | on i.object_id = o.object_id 17 | WHERE 1=1 18 | and o.is_ms_shipped = 0 19 | and o.type = 'u' 20 | and i.[name] LIKE '_dta%' 21 | 22 | -------------------------------------------------------------------------------- /index ALLOW_PAGE_LOCKS.sql: -------------------------------------------------------------------------------- 1 | select 'alter INDEX [' + i.name + '] ON [' + s.name + '].[' + o.name + '] SET (ALLOW_PAGE_LOCKS = ON) --this is default' 2 | , * from sys.indexes i 3 | inner join sys.objects o on i.object_id = o.object_id 4 | inner join sys.schemas s on s.schema_id = o.schema_id 5 | where allow_page_locks = 0 6 | and o.is_ms_shipped = 0 7 | 8 | 9 | -------------------------------------------------------------------------------- /index_usage_stats.sql: -------------------------------------------------------------------------------- 1 | --Discover indexes that aren't helping reads but still hurting writes 2 | --Does not show tables that have never been written to 3 | 4 | --Cleared when SQL Server restarts. This DMV returns the service start time for both SQL Server and Azure SQL DB. 5 | SELECT sqlserver_start_time FROM sys.dm_os_sys_info; 6 | GO 7 | 8 | SELECT DatabaseName = d.name 9 | , s.object_id 10 | , TableName = ' [' + sc.name + '].[' + o.name + ']' 11 | , IndexName = i.name 12 | , s.user_seeks 13 | , s.user_scans 14 | , s.user_lookups 15 | , s.user_updates 16 | , ps.row_count 17 | , SizeMb = cast((ps.in_row_reserved_page_count*8.)/1024. as decimal(19,2)) 18 | , s.last_user_lookup 19 | , s.last_user_scan 20 | , s.last_user_seek 21 | , s.last_user_update 22 | , Partition_Schema_Name = psch.[name] 23 | , Partition_Number = pr.partition_number 24 | , [tSQL] = '--caution! DROP INDEX [' + i.name + '] ON [' + sc.name + '].[' + o.name + ']' --caution!! 25 | --select object_name(object_id), * 26 | FROM sys.dm_db_index_usage_stats s 27 | INNER JOIN sys.objects o 28 | ON o.object_id=s.object_id 29 | inner join sys.schemas sc 30 | on sc.schema_id = o.schema_id 31 | INNER JOIN sys.indexes i 32 | ON i.object_id = s.object_id 33 | AND i.index_id = s.index_id 34 | left outer join sys.partitions pr 35 | on pr.object_id = i.object_id 36 | and pr.index_id = i.index_id 37 | left outer join sys.dm_db_partition_stats ps 38 | on ps.object_id = i.object_id 39 | and ps.partition_id = pr.partition_id 40 | left outer join sys.partition_schemes psch 41 | on psch.data_space_id = i.data_space_id 42 | inner join sys.databases d 43 | on s.database_id = d.database_id 44 | and db_name() = d.name 45 | WHERE 1=1 46 | --Strongly recommended filters 47 | and o.is_ms_shipped = 0 48 | and o.type_desc = 'USER_TABLE' 49 | and i.type_desc = 'NONCLUSTERED' 50 | and is_unique = 0 51 | and is_primary_key = 0 52 | and is_unique_constraint = 0 53 | 54 | --Optional filters 55 | --and user_updates / 50. 
> (user_seeks + user_scans + user_lookups ) --arbitrary 56 | --and o.name in ('ContactBase') 57 | --and o.name not like '%cascade%' 58 | --and (ps.in_row_reserved_page_count) > 1280 --10mb 59 | 60 | order by user_seeks + user_scans + user_lookups asc, s.user_updates desc; --most useless indexes show up first 61 | 62 | GO -------------------------------------------------------------------------------- /indirect checkpoints enable.sql: -------------------------------------------------------------------------------- 1 | --For SQL 2016+ and above, where 60 (indirect checkpoints) is now the default 2 | use master 3 | go 4 | select 'ALTER DATABASE ['+d.name+'] SET TARGET_RECOVERY_TIME = 60 SECONDS WITH NO_WAIT' from sys.databases d where target_recovery_time_in_seconds = 0 5 | 6 | -------------------------------------------------------------------------------- /install failover clustering features.ps1: -------------------------------------------------------------------------------- 1 | Invoke-Command -script {Install-WindowsFeature -Name "Failover-Clustering" } -ComputerName SQLDEV11, SQLDEV12, SQLQA11, SQLQA12 2 | Invoke-Command -script {Install-WindowsFeature -Name "RSAT-Clustering-Mgmt" } -ComputerName SQLDEV11, SQLDEV12, SQLQA11, SQLQA12 3 | Invoke-Command -script {Install-WindowsFeature -Name "RSAT-Clustering-PowerShell" } -ComputerName SQLDEV11, SQLDEV12, SQLQA11, SQLQA12 4 | -------------------------------------------------------------------------------- /instant_file_initialization.sql: -------------------------------------------------------------------------------- 1 | --Check to see if the SQL Server Database Engine service has instant_file_initialization_enabled. 2 | --Works on SQL 2016 SP1, 2012 SP4+ 3 | Use TempDB 4 | 5 | select servicename, instant_file_initialization_enabled, @@VERSION 6 | from sys.dm_server_services 7 | where filename like '%sqlservr.exe%' -------------------------------------------------------------------------------- /job - job failure notifications.sql: -------------------------------------------------------------------------------- 1 | --CREATES A SQL AGENT JOB 2 | 3 | --TODO: Change the operator name sql.alerts@sparkhound.com 4 | 5 | USE [msdb] 6 | GO 7 | BEGIN TRANSACTION 8 | DECLARE @ReturnCode INT 9 | SELECT @ReturnCode = 0 10 | IF NOT EXISTS (SELECT name FROM msdb.dbo.syscategories WHERE name=N'[Uncategorized (Local)]' AND category_class=1) 11 | BEGIN 12 | EXEC @ReturnCode = msdb.dbo.sp_add_category @class=N'JOB', @type=N'LOCAL', @name=N'[Uncategorized (Local)]' 13 | IF (@@ERROR <> 0 OR @ReturnCode <> 0) GOTO QuitWithRollback 14 | END 15 | DECLARE @jobId BINARY(16) 16 | EXEC @ReturnCode = msdb.dbo.sp_add_job @job_name=N'Add Failure Notifications', 17 | @enabled=1, 18 | @notify_level_eventlog=2, 19 | @notify_level_email=2, 20 | @notify_level_netsend=0, 21 | @notify_level_page=0, 22 | @delete_level=0, 23 | @description=N'Adds failure notification emails to any jobs that are created', 24 | @category_name=N'[Uncategorized (Local)]', 25 | @owner_login_name=N'sa', 26 | @notify_email_operator_name=N'sql.alerts@sparkhound.com', @job_id = @jobId OUTPUT --TODO: CHANGE THIS OPERATOR NAME 27 | IF (@@ERROR <> 0 OR @ReturnCode <> 0) GOTO QuitWithRollback 28 | EXEC @ReturnCode = msdb.dbo.sp_add_jobstep @job_id=@jobId, @step_name=N'tsql', 29 | @step_id=1, 30 | @cmdexec_success_code=0, 31 | @on_success_action=1, 32 | @on_success_step_id=0, 33 | @on_fail_action=2, 34 | @on_fail_step_id=0, 35 | @retry_attempts=0, 36 | @retry_interval=0, 37 | @os_run_priority=0, 
@subsystem=N'TSQL', 38 | @command=N'USE [msdb] 39 | GO 40 | DECLARE AddFailureNotifications CURSOR FAST_FORWARD 41 | FOR 42 | select convert(nvarchar(4000), '' 43 | EXEC msdb.dbo.sp_update_job @job_id=N''''''+convert(varchar(64), job_id)+'''''', 44 | @notify_level_email=2, 45 | @notify_level_netsend=2, 46 | @notify_level_page=2, 47 | @notify_email_operator_name=N''''sql.alerts@sparkhound.com'''''') --TODO: CHANGE THIS OPERATOR NAME 48 | from msdb.dbo.sysjobs 49 | where notify_email_operator_id = 0 50 | declare @tsql nvarchar(4000) = null 51 | OPEN AddFailureNotifications 52 | FETCH NEXT FROM AddFailureNotifications 53 | INTO @tsql 54 | WHILE @@FETCH_STATUS = 0 55 | BEGIN 56 | EXEC (@TSQL) 57 | FETCH NEXT FROM AddFailureNotifications 58 | INTO @tsql 59 | END', 60 | @database_name=N'msdb', 61 | @flags=4 62 | IF (@@ERROR <> 0 OR @ReturnCode <> 0) GOTO QuitWithRollback 63 | EXEC @ReturnCode = msdb.dbo.sp_update_job @job_id = @jobId, @start_step_id = 1 64 | IF (@@ERROR <> 0 OR @ReturnCode <> 0) GOTO QuitWithRollback 65 | EXEC @ReturnCode = msdb.dbo.sp_add_jobschedule @job_id=@jobId, @name=N'nightly', 66 | @enabled=1, 67 | @freq_type=4, 68 | @freq_interval=1, 69 | @freq_subday_type=1, 70 | @freq_subday_interval=0, 71 | @freq_relative_interval=0, 72 | @freq_recurrence_factor=0, 73 | @active_start_date=20150216, 74 | @active_end_date=99991231, 75 | @active_start_time=200000, 76 | @active_end_time=235959, 77 | @schedule_uid=N'fd1a2b03-1e0b-487d-ac6f-9eac60fc4f6a' 78 | IF (@@ERROR <> 0 OR @ReturnCode <> 0) GOTO QuitWithRollback 79 | EXEC @ReturnCode = msdb.dbo.sp_add_jobserver @job_id = @jobId, @server_name = N'(local)' 80 | IF (@@ERROR <> 0 OR @ReturnCode <> 0) GOTO QuitWithRollback 81 | COMMIT TRANSACTION 82 | GOTO EndSave 83 | QuitWithRollback: 84 | IF (@@TRANCOUNT > 0) ROLLBACK TRANSACTION 85 | EndSave: 86 | GO 87 | */ 88 | -------------------------------------------------------------------------------- /job failure notifications.sql: -------------------------------------------------------------------------------- 1 | --TODO: Change the operator name sql.alerts@sparkhound.com 2 | 3 | declare @Desired_operator varchar(255) = 'sql.alerts@sparkhound.com' --is just an example, change to desired operator listed in msdb.dbo.sysoperators 4 | 5 | --Finds any jobs not sending a failure notification to someone 6 | SELECT 7 | JobName = j.name 8 | , j.description 9 | , j.enabled 10 | , OwnerName = suser_sname(j.owner_sid) 11 | , date_created 12 | , date_modified 13 | , TSQL_Add_Failure_Notification = convert(nvarchar(4000), 'EXEC msdb.dbo.sp_update_job @job_id=N'''+convert(varchar(64), job_id)+''', /*'+j.name+'*/ 14 | @notify_level_email=2, 15 | @notify_email_operator_name=N'''+@Desired_operator+'''') 16 | from msdb.dbo.sysjobs j 17 | where j.notify_email_operator_id = 0 18 | and j.enabled = 1 19 | 20 | -------------------------------------------------------------------------------- /job owners.sql: -------------------------------------------------------------------------------- 1 | use msdb 2 | go 3 | --TODO Change @owner_login_name to desired SQL agent service account to own the job 4 | 5 | declare @Desired_job_owner varchar(255) = 'SPARKHOUND\svcaccount' --'sa' is just an example, change to desired service account, example: domain\accountname 6 | 7 | --sql 2005 and above 8 | select owner = SUSER_SNAME (j.owner_sid), jobname = j.name, j.job_id 9 | , change_tsql = N'EXEC msdb.dbo.sp_update_job @job_id=N'''+convert(nvarchar(100), j.job_id)+N''', @owner_login_name=N'''+@Desired_job_owner+'''' 10 | , 
revert_tsql = N'EXEC msdb.dbo.sp_update_job @job_id=N'''+convert(nvarchar(100), j.job_id)+N''', @owner_login_name=N'''+SUSER_SNAME (j.owner_sid)+'''' 11 | from sysjobs j 12 | left outer join sys.server_principals sp on j.owner_sid = sp.sid 13 | where 14 | (sp.name not in ('sa','distributor_admin','NT SERVICE\ReportServer') 15 | and sp.name <> @Desired_job_owner 16 | and sp.name not like '##%') 17 | or sp.name is null 18 | 19 | /* 20 | --sql 2000 21 | select sp.name, j.name, j.job_id from msdb.dbo.sysjobs j 22 | left outer join master.dbo.syslogins sp on j.owner_sid = sp.sid 23 | where sp.name not in ('sa','distributor_admin') or sp.name is null 24 | --EXEC msdb.dbo.sp_update_job @job_id=N'8eab379e-958e-4576-92ae-b5999aeec01c', @owner_login_name=N'distributor_admin' 25 | */ 26 | 27 | /* 28 | --Sample usage 29 | 30 | EXEC msdb.dbo.sp_update_job @job_id=N'BDAFAC9B-1705-4E47-9C26-6C4B813CB165', @owner_login_name=N'sa' 31 | EXEC msdb.dbo.sp_update_job @job_id=N'987AF666-A516-4847-8BA3-73DE337CFF94', @owner_login_name=N'sa' 32 | EXEC msdb.dbo.sp_update_job @job_id=N'AEBF4C5C-EC6D-4635-96B6-797BF4AEEC62', @owner_login_name=N'sa' 33 | 34 | */ 35 | 36 | -------------------------------------------------------------------------------- /job status.sql: -------------------------------------------------------------------------------- 1 | --jobs still running 2 | declare @xp_sqlagent_enum_jobs table ( 3 | id int not null IDENTITY(1,1) PRIMARY KEY, 4 | Job_ID uniqueidentifier not null, 5 | Last_Run_Date int not null, 6 | Last_Run_Time int not null, 7 | Next_Run_Date int not null, 8 | Next_Run_Time int not null, 9 | Next_Run_Schedule_ID int not null, 10 | Requested_To_Run int not null, 11 | Request_Source int not null, 12 | Request_Source_ID varchar(100) null, 13 | Running int not null, 14 | Current_Step int not null, 15 | Current_Retry_Attempt int not null, 16 | [State] int not null); 17 | 18 | INSERT INTO @xp_sqlagent_enum_jobs 19 | EXEC master.dbo.xp_sqlagent_enum_jobs 1,''; 20 | 21 | SELECT j.name 22 | , state_desc = CASE ej.state 23 | WHEN 0 THEN 'not idle or suspended' 24 | WHEN 1 THEN 'Executing' 25 | WHEN 2 THEN 'Waiting for thread' 26 | WHEN 3 THEN 'Between retries' 27 | WHEN 4 THEN 'Idle' 28 | WHEN 5 THEN 'Suspended' 29 | WHEN 7 THEN 'Performing completion actions' 30 | --https://docs.microsoft.com/en-us/sql/relational-databases/system-stored-procedures/sp-help-job-transact-sql 31 | END 32 | , * 33 | FROM msdb.dbo.sysjobs j 34 | LEFT OUTER JOIN @xp_sqlagent_enum_jobs ej 35 | ON j.job_id = ej.Job_ID 36 | ORDER BY j.name; -------------------------------------------------------------------------------- /kill detect blocking sessions setup.sql: -------------------------------------------------------------------------------- 1 | use dbalogging 2 | go 3 | --setup kill detect blocking sessions 4 | --drop table dbo.ExecRequestsLog 5 | go 6 | create table dbo.ExecRequestsLog ( 7 | id int IDENTITY(1,1) not null PRIMARY KEY 8 | , timecaptured datetime 9 | , session_id smallint not null 10 | , request_id int null 11 | , blocking_session_id int null 12 | , blocking_these varchar(1000) NULL 13 | , request_start_time datetime null 14 | , login_time datetime not null 15 | , login_name nvarchar(256) null 16 | , client_interface_name nvarchar(64) 17 | , session_status nvarchar(60) null 18 | , request_status nvarchar(60) null 19 | , command nvarchar(32) null 20 | , sql_handle varbinary(64) null 21 | , statement_start_offset int null 22 | , statement_end_offset int null 23 | , plan_handle varbinary (64) null 24 | , 
database_id smallint null 25 | , user_id int null 26 | , wait_type nvarchar (120) null 27 | , wait_time_s int null 28 | , wait_resource nvarchar(120) null 29 | , last_wait_type nvarchar(120) null 30 | , cpu_time_s int null 31 | , tot_time_s int null 32 | , reads bigint null 33 | , writes bigint null 34 | , logical_reads bigint null 35 | , [host_name] nvarchar(256) null 36 | , [program_name] nvarchar(256) null 37 | , percent_complete int null 38 | , session_transaction_isolation_level varchar(20) null 39 | , request_transaction_isolation_level varchar(20) null 40 | , offsettext nvarchar(4000) null 41 | , kill_text nvarchar(100) null 42 | ) 43 | -------------------------------------------------------------------------------- /kill detect blocking spids job.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SparkhoundSQL/sql-server-toolbox/51636a22fe7db848cc12473c8083e2753bb6ab93/kill detect blocking spids job.sql -------------------------------------------------------------------------------- /lab - FORMATMESSAGE in SQL2016.sql: -------------------------------------------------------------------------------- 1 | 2 | DECLARE @firststring varchar(10) = 'first', @secondstring varchar(10) = 'second', @thirdstring varchar(10) = 'third'; 3 | SELECT Formatted = FORMATMESSAGE('This is the %s and this is the %s and finally the %s.', @firststring, @secondstring, @thirdstring); 4 | 5 | SELECT FORMATMESSAGE('Signed int %i, %d %i, %d, %+i, %+d, %+i, %+d', 5, -5, 50, -50, -11, -11, 11, 11); 6 | SELECT FORMATMESSAGE('Signed int with leading zero %020i', 5); 7 | SELECT FORMATMESSAGE('Signed int with leading zero 0 %020i', -55); 8 | SELECT FORMATMESSAGE('Unsigned int %u, %u', 50, -50); 9 | SELECT FORMATMESSAGE('Unsigned octal %o, %o', 50, -50); 10 | SELECT FORMATMESSAGE('Unsigned hexadecimal %x, %X, %X, %X, %x', 11, 11, -11, 50, -50); 11 | SELECT FORMATMESSAGE('Unsigned octal with prefix: %#o, %#o', 50, -50); 12 | SELECT FORMATMESSAGE('Unsigned hexadecimal with prefix: %#x, %#X, %#X, %X, %x', 11, 11, -11, 50, -50); 13 | SELECT FORMATMESSAGE('Hello %s!', 'TEST'); 14 | SELECT FORMATMESSAGE('Hello %20s!', 'TEST'); 15 | SELECT FORMATMESSAGE('Hello %-20s!', 'TEST'); 16 | -------------------------------------------------------------------------------- /lab - TSQL 101.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE IF EXISTS dbo.SomeTable 2 | GO 3 | CREATE TABLE dbo.SomeTable 4 | ( ID int IDENTITY(1,1) CONSTRAINT PK_SomeTable PRIMARY KEY 5 | , SomeNumber decimal(9,2) not null 6 | , SomeWords varchar(50) not null 7 | ) 8 | GO 9 | INSERT INTO dbo.SomeTable (SomeNumber, SomeWords) VALUES (1, 'abc') 10 | INSERT INTO dbo.SomeTable (SomeNumber, SomeWords) VALUES (2, 'abc') 11 | INSERT INTO dbo.SomeTable (SomeNumber, SomeWords) VALUES (3, 'abc') 12 | INSERT INTO dbo.SomeTable (SomeNumber, SomeWords) VALUES (4, 'abc') 13 | INSERT INTO dbo.SomeTable (SomeNumber, SomeWords) VALUES (5, 'abc') 14 | INSERT INTO dbo.SomeTable (SomeNumber, SomeWords) VALUES (1, 'def') 15 | INSERT INTO dbo.SomeTable (SomeNumber, SomeWords) VALUES (2, 'def') 16 | INSERT INTO dbo.SomeTable (SomeNumber, SomeWords) VALUES (3, 'def') 17 | INSERT INTO dbo.SomeTable (SomeNumber, SomeWords) VALUES (4, 'def') 18 | INSERT INTO dbo.SomeTable (SomeNumber, SomeWords) VALUES (5, 'def') 19 | INSERT INTO dbo.SomeTable (SomeNumber, SomeWords) VALUES (1, 'ghi') 20 | INSERT INTO dbo.SomeTable (SomeNumber, SomeWords) VALUES (2, 'ghi') 21 | INSERT 
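#(Added note, not part of the original script) New-AzureStorageContext, Get-AzureStorageBlob, and Remove-AzureStorageBlob used in this
#script come from the legacy Azure/AzureRM PowerShell module. On the current Az module the equivalents are New-AzStorageContext,
#Get-AzStorageBlob, and Remove-AzStorageBlob, which accept the same -StorageAccountName/-StorageAccountKey, -Container, and -Context
#parameters shown here; verify against the installed Az module before swapping them in.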
INTO dbo.SomeTable (SomeNumber, SomeWords) VALUES (3, 'ghi') 22 | INSERT INTO dbo.SomeTable (SomeNumber, SomeWords) VALUES (4, 'ghi') 23 | INSERT INTO dbo.SomeTable (SomeNumber, SomeWords) VALUES (5, 'ghi') 24 | go 25 | SELECT * FROM dbo.SomeTable 26 | GO 27 | DROP TABLE IF EXISTS dbo.SomeTable2 28 | GO 29 | CREATE TABLE dbo.SomeTable2 30 | ( ID int IDENTITY(1,1) CONSTRAINT PK_SomeTable2 PRIMARY KEY 31 | , SomeNumber decimal(9,2) not null 32 | , SomeDate date not null 33 | ) 34 | GO 35 | INSERT INTO dbo.SomeTable2 (SomeNumber, SomeDate) VALUES (3, '2018-03-01') 36 | INSERT INTO dbo.SomeTable2 (SomeNumber, SomeDate) VALUES (4, '2018-04-01') 37 | INSERT INTO dbo.SomeTable2 (SomeNumber, SomeDate) VALUES (5, '2018-05-01') 38 | INSERT INTO dbo.SomeTable2 (SomeNumber, SomeDate) VALUES (6, '2018-06-01') 39 | INSERT INTO dbo.SomeTable2 (SomeNumber, SomeDate) VALUES (7, '2018-07-01') 40 | 41 | go 42 | SELECT * FROM dbo.SomeTable2 43 | GO 44 | --Works 45 | SELECT * --Not best practice 46 | FROM SomeTable --Every table has a schema 47 | WHERE SomeNumber > 2 --semicolon please 48 | 49 | --Better 50 | SELECT ID, SomeNumber, SomeWords 51 | FROM dbo.SomeTable 52 | WHERE SomeNumber > 2 53 | 54 | SELECT SomeWords 55 | FROM dbo.SomeTable 56 | WHERE SomeWords like 'a%' 57 | --WHERE LEFT(SomeWords, 1) = 'a' 58 | 59 | --Works 60 | SELECT SomeTable.SomeNumber, SomeWords, SomeDate 61 | FROM dbo.SomeTable --No tables aliases, poor readability of code 62 | INNER JOIN dbo.SomeTable2 63 | ON SomeTable.SomeNumber = SomeTable2.SomeNumber 64 | ORDER BY SomeTable.SomeNumber; --Order by may not make business sense 65 | 66 | --Better 67 | SELECT t1.SomeNumber, t1.SomeWords, t2.SomeDate 68 | FROM dbo.SomeTable AS t1 69 | INNER JOIN dbo.SomeTable2 AS t2 70 | ON t1.SomeNumber = t2.SomeNumber 71 | ORDER BY t1.SomeNumber, t1.SomeWords, t2.SomeDate; 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | -------------------------------------------------------------------------------- /lab - TemporalTable demo.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SparkhoundSQL/sql-server-toolbox/51636a22fe7db848cc12473c8083e2753bb6ab93/lab - TemporalTable demo.sql -------------------------------------------------------------------------------- /lab - VLF generation example.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SparkhoundSQL/sql-server-toolbox/51636a22fe7db848cc12473c8083e2753bb6ab93/lab - VLF generation example.sql -------------------------------------------------------------------------------- /lab - azure blob storage retention plan.ps1: -------------------------------------------------------------------------------- 1 | #Retention plan for BACKUP TO URL backups in Azure blob storage 2 | #Breaking lease if necessary: https://docs.microsoft.com/en-us/sql/relational-databases/backup-restore/deleting-backup-blob-files-with-active-leases 3 | 4 | Clear-Host 5 | 6 | #TODO 7 | $context = New-AzureStorageContext -StorageAccountName "ACCOUNT NAME HERE!" -StorageAccountKey "STORAGE KEY HERE!" 8 | $container = "CONTAINER NAME HERE (subfolder path)!" 
9 | $RetentionWeeks = 8 10 | 11 | [DateTime]$today = (Get-Date) 12 | [Int]$dateofweek = ($today.DayOfWeek) #get the day of the week (0 = Sunday) so that we're always deleting a whole week at a time, in the case of a weekly full schedule 13 | #Delete whole weeks only 14 | $RetentionDays = ($RetentionWeeks * -7) -1 -$dateofweek 15 | $BackupFileExtension = '*.bak' 16 | 17 | write-host (Get-Date) 18 | 19 | Get-AzureStorageBlob -Container $container -Context $context | ` 20 | where-object { $_.PSIsContainer -ne $true -and $_.LastModified -lt (get-date).AddDays($RetentionDays) -and $_.Name -Like $BackupFileExtension ` 21 | } | Remove-AzureStorageBlob -Verbose #-whatif 22 | 23 | $RetentionDays = $RetentionWeeks * -7 -$dateofweek 24 | $BackupFileExtension = '*.dif' 25 | 26 | Get-AzureStorageBlob -Container $container -Context $context | ` 27 | where-object { $_.PSIsContainer -ne $true -and $_.LastModified -lt (get-date).adddays($RetentionDays) -and $_.Name -Like $BackupFileExtension ` 28 | } | Remove-AzureStorageBlob -Verbose #-whatif 29 | 30 | 31 | $RetentionDays = $RetentionWeeks * -7 -$dateofweek 32 | $BackupFileExtension = '*.trn' 33 | 34 | Get-AzureStorageBlob -Container $container -Context $context | ` 35 | where-object { $_.PSIsContainer -ne $true -and $_.LastModified -lt (get-date).adddays($RetentionDays) -and $_.Name -Like $BackupFileExtension ` 36 | } | Remove-AzureStorageBlob -Verbose #-whatif 37 | 38 | write-host (Get-Date) -------------------------------------------------------------------------------- /lab - backup to URL.sql: -------------------------------------------------------------------------------- 1 | 2 | USE master 3 | /* 4 | --SQL 2016+ 5 | CREATE CREDENTIAL [https://sphsqlbackup.blob.core.windows.net/prodsqlbak] -- this name must match the container path, start with https and must not contain a forward slash. 6 | WITH IDENTITY='SHARED ACCESS SIGNATURE' -- this is a mandatory string and do not change it. 7 | , SECRET = N'KThhZXL2l4Kyu1GPvLf9wlhuu6A/K/PQqpNsfxahM3QAm71mBLDcr3CwaQv7RxDCCARJ2pWURxsQKTlM2ATVNA==' -- this is the shared access signature key that you obtained in Lesson 1. 8 | GO 9 | BACKUP DATABASE [DBALogging] TO URL = N'https://sphsqlbackup.blob.core.windows.net/prodsqlbak/DBALogging_backup_2017_10_24.bak' 10 | WITH NAME = N'DBALogging-Full Database Backup', NOREWIND, NOUNLOAD, COMPRESSION, STATS = 10, CHECKSUM 11 | GO 12 | */ 13 | 14 | --Legacy method without SAS 15 | CREATE CREDENTIAL [https://sphsqlbackup.blob.core.windows.net/prodsqlbak] -- this name must match the container path, start with https and must not contain a forward slash. 16 | WITH IDENTITY='sphsqlbackup' -- this is a mandatory string and do not change it. 17 | , SECRET = N'KThhZXL2l4Kyu1GPvLf9wlhuu6A/K/PQqpNsfxahM3QAm71mBLDcr3CwaQv7RxDCCARJ2pWURxsQKTlM2ATVNA==' -- this is the shared access signature key that you obtained in Lesson 1. 
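-- (Added clarification) For this legacy method the IDENTITY is the storage account name and the SECRET is the storage account
-- access key, not a shared access signature; the comment above appears to have been carried over from the SAS example.
-- The SAS-based credential shown commented out above is the preferred approach on SQL 2016+ and writes block blobs rather than page blobs.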
18 | GO 19 | 20 | BACKUP DATABASE [DBALogging] TO URL = N'https://sphsqlbackup.blob.core.windows.net/prodsqlbak/test/DBALogging_backup_2017_10_24_151352.bak' 21 | WITH CREDENTIAL = 'https://sphsqlbackup.blob.core.windows.net/prodsqlbak', 22 | FORMAT, NAME = N'DBALogging-Full Database Backup', NOREWIND, NOUNLOAD, COMPRESSION, STATS = 10, CHECKSUM 23 | GO 24 | 25 | 26 | -------------------------------------------------------------------------------- /lab - basic error table.sql: -------------------------------------------------------------------------------- 1 | USE DBALogging 2 | GO 3 | 4 | DECLARE @errormessagecomplete varchar(max) 5 | begin try 6 | 7 | --Use a database that is in SIMPLE recovery mode 8 | BACKUP LOG DBALogging 9 | 10 | end try 11 | begin catch 12 | --Only captures the 3013, not the preceding and actual error message, 4208. :( 13 | --INSERT INTO DBALogging.dbo.errortable ([ErrorNumber], [ErrorSeverity], [ErrorState], [ErrorProcedure], [ErrorLine], [ErrorMessage], [Process]) 14 | --SELECT ERROR_NUMBER() AS ErrorNumber, ERROR_SEVERITY() AS ErrorSeverity, ERROR_STATE() as ErrorState, ERROR_PROCEDURE() as ErrorProcedure, ERROR_LINE() as ErrorLine, ERROR_MESSAGE() as ErrorMessage 15 | -- , Process = 'Testing'; --need the semicolon 16 | 17 | THROW --optional, to actually cause a failure. Reports both 4208 and 3013, job handles the capture. 18 | 19 | end catch 20 | GO 21 | 22 | --select * from DBALogging.dbo.errortable 23 | GO 24 | /* 25 | DROP TABLE [dbo].[errortable] 26 | GO 27 | CREATE TABLE [dbo].[errortable]( 28 | id bigint not null IDENTITY(1,1) CONSTRAINT PK_errortable PRIMARY KEY, 29 | [ErrorNumber] [int] NULL, 30 | [ErrorSeverity] [int] NULL, 31 | [ErrorState] [int] NULL, 32 | [ErrorProcedure] [nvarchar](128) NULL, 33 | [ErrorLine] [int] NULL, 34 | [ErrorMessage] [nvarchar](4000) NULL, 35 | [Process] varchar(8000) NOT NULL, 36 | [WhenObserved] datetimeoffset(0) NOT NULL CONSTRAINT DF_ErrorTable_WhenObserved DEFAULT (sysdatetimeoffset()) 37 | ) ON [PRIMARY] 38 | GO 39 | */ -------------------------------------------------------------------------------- /lab - concat_ws.sql: -------------------------------------------------------------------------------- 1 | 2 | --Concatenate WITH SEPARATOR! 3 | --Place a comma between values (but not at the end) 4 | SELECT csv = CONCAT_WS(',', name, current_utc_offset, is_currently_dst) 5 | FROM sys.time_zone_info 6 | 7 | --Wrap in " for text qualification on csv import 8 | SELECT csv = CONCAT_WS('","', '"'+name, current_utc_offset, is_currently_dst)+'"' 9 | FROM sys.time_zone_info 10 | 11 | --Aggregate concatenation 12 | SELECT STRING_AGG(name, ', ') FROM sys.time_zone_info WHERE NAME LIKE '%central%'; 13 | 14 | SELECT STRING_AGG( 15 | cast( 16 | CONCAT_WS('","', '"'+name, current_utc_offset, is_currently_dst)+'"' 17 | as NVARCHAR(MAX)) 18 | , ',') FROM sys.time_zone_info 19 | -------------------------------------------------------------------------------- /lab - cte 101.sql: -------------------------------------------------------------------------------- 1 | /* 2 | --Use Cases for CTE's 3 | 1. Replace Temp Tables/Table Vars, change multistep processes to single-query 4 | 2. Recursion (org charts) 5 | 3. 
Pre-build row-by-row conversions 6 | */ 7 | 8 | with cteSimple as ( 9 | select * from sys.databases) 10 | select * from cteSimple--; 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | with cteSimple (database_name, db_id) as ( 30 | select name, database_id from sys.databases) 31 | select * from cteSimple 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | with cteSimple (database_name, database_id) as ( 44 | select name, database_id from sys.databases) 45 | select * from cteSimple c 46 | inner join sys.master_files mf on c.database_id = mf.database_id 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | --compare: 59 | 60 | --Temp Table 61 | 62 | SELECT name, database_id into #TempSimple 63 | from sys.databases; 64 | 65 | select * from #TempSimple c 66 | inner join sys.master_files mf on c.database_id = mf.database_id; 67 | 68 | DROP TABLE IF EXISTS #TempSimple; 69 | 70 | --vs 71 | 72 | --CTE 73 | with cteSimple (database_name, database_id) as ( 74 | select name, database_id from sys.databases) 75 | select * from cteSimple c 76 | inner join sys.master_files mf on c.database_id = mf.database_id; 77 | 78 | 79 | -------------------------------------------------------------------------------- /lab - datetime conversion from datetime to datetimeoffsets.sql: -------------------------------------------------------------------------------- 1 | use w 2 | go 3 | 4 | create table whateverdates 5 | (id int not null IDENTITY(1,1) primary key 6 | , cdt datetime --we only assume this data is in CT, the only data we have initially. 7 | , cdt_offset datetimeoffset(0) 8 | , edt datetime 9 | , edt_offset datetimeoffset(0) 10 | ) 11 | GO 12 | insert into dbo.whateverdates (cdt) values (getdate()) --now 13 | insert into dbo.whateverdates (cdt) values ('10/28/2000 14:00') --historical data in DST 14 | insert into dbo.whateverdates (cdt) values ('10/29/2000 14:00') --historical data after DST ended at 10/29/2000 at 1am 15 | GO 16 | update dbo.whateverdates 17 | set cdt_offset = cdt AT TIME ZONE 'Central Standard Time' --assigns a time zone to datetime, which has no offset 18 | GO 19 | update dbo.whateverdates 20 | set edt = cdt AT TIME ZONE 'Central Standard Time' AT TIME ZONE 'Eastern Standard Time' --asigns a time zone correctly (and historically), first to data without offset, then performing timezone math on data that has an offset 21 | , edt_offset = cdt_offset AT TIME ZONE 'Eastern Standard Time' --asigns a time zone correctly (and historically) to data that has an offset 22 | GO 23 | select * from whateverdates 24 | go 25 | 26 | select * from sys.time_zone_info 27 | drop table whateverdates -------------------------------------------------------------------------------- /lab - deadlock part 1.sql: -------------------------------------------------------------------------------- 1 | --To generate a deadlock: 2 | --Run the first half of this script first, then run toolbox\lab - deadlock part 2.sql, then the second half of this script, then the second part of this script. 3 | --Then use toolbox\deadlocks in xevents.sql to view the deadlock. 4 | use w 5 | go 6 | DROP TABLE dbo.dead 7 | DROP TABLE dbo.lock 8 | go 9 | CREATE TABLE dbo.dead (col1 INT) 10 | INSERT INTO dbo.dead SELECT 1 11 | CREATE TABLE dbo.lock (col1 INT) 12 | INSERT INTO dbo.lock SELECT 1 13 | 14 | BEGIN TRAN t1 15 | UPDATE dbo.dead WITH (TABLOCK) SET col1 = 2 16 | 17 | -- Part two, run the below after script 2. 
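--(Added explanation) At this point session 1 (this script) holds an exclusive table lock on dbo.dead, while the part-2 script
--holds an exclusive table lock on dbo.lock and is blocked waiting for dbo.dead. The UPDATE below makes this session wait for
--dbo.lock in turn, so the two sessions wait on each other; the lock monitor detects the cycle (typically within about five
--seconds), rolls back one of the transactions as the deadlock victim, and raises error 1205 in that session.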
18 | 19 | UPDATE dbo.lock WITH (TABLOCK) SET col1 = 4 20 | COMMIT TRAN t1 21 | GO 22 | select SYSDATETIME(); 23 | 24 | 25 | 26 | 27 | 28 | -------------------------------------------------------------------------------- /lab - deadlock part 2.sql: -------------------------------------------------------------------------------- 1 | --To generate a deadlock: 2 | --Run the first part of toolbox\lab - deadlock part 1.sql script first, then this script, then the second part of toolbox\lab - deadlock part 1.sql. 3 | --Then use toolbox\deadlocks in xevents.sql to view the deadlock. 4 | 5 | USE w 6 | go 7 | BEGIN TRAN t2 8 | UPDATE dbo.lock WITH (TABLOCK) SET col1 = 3 9 | UPDATE dbo.dead WITH (TABLOCK) SET col1 = 3 10 | commit tran t2 11 | GO 12 | select SYSDATETIME(); -------------------------------------------------------------------------------- /lab - default constraints.sql: -------------------------------------------------------------------------------- 1 | drop table if exists test 2 | create table test 3 | (id int not null Identity(1,1) constraint pk_test primary key 4 | , null1 varchar(10) NULL Constraint DF_test_null1 DEFAULT ('') 5 | , null2 varchar(10) NULL Constraint DF_test_null2 DEFAULT ('') 6 | ) 7 | GO 8 | insert into test (null1) values ('1') 9 | insert into test (null1) values (null) --despite a default constraint, the NULL WILL INSERT. 10 | GO 11 | 12 | select * from test -------------------------------------------------------------------------------- /lab - dynamic data masking.sql: -------------------------------------------------------------------------------- 1 | use w 2 | go 3 | drop table if exists dbo.Corporate 4 | go 5 | create table dbo.Corporate 6 | ( 7 | id int not null identity(1,1) PRIMARY KEY 8 | , sensitive_email_default varchar(35) not null 9 | , sensitive_email_email varchar(35) not null 10 | , sensitive_email_custom varchar(35) not null 11 | , UserEmail varchar(35) not null 12 | , SSN char(11) not null 13 | , CorporateID int not null 14 | ) 15 | go 16 | insert into dbo.Corporate (sensitive_email_default, sensitive_email_email, sensitive_email_custom, UserEmail, SSN, CorporateID) 17 | values ('testing@domain.com','testing@domain.com','testing@domain.com', 'testing@domain.com','123-45-6789','1000022') 18 | , ('abc@abc.com','abc@abc.com','abc@abc.com','abc@abc.com','234-56-7891','2000033') 19 | go 20 | --Functions: https://docs.microsoft.com/en-us/sql/relational-databases/security/dynamic-data-masking 21 | 22 | alter table dbo.Corporate 23 | alter column sensitive_email_default 24 | add MASKED WITH (FUNCTION = 'default()') 25 | GO 26 | alter table dbo.Corporate 27 | alter column sensitive_email_email 28 | add MASKED WITH (FUNCTION = 'email()') 29 | GO 30 | alter table dbo.Corporate 31 | alter column sensitive_email_custom 32 | add MASKED WITH (FUNCTION = 'partial(1,"XXX@XXXX",4)') 33 | GO 34 | alter table dbo.Corporate 35 | alter column UserEmail 36 | add MASKED WITH (FUNCTION = 'email()') 37 | GO 38 | alter table dbo.Corporate 39 | alter column SSN 40 | add MASKED WITH (FUNCTION = 'partial(1,"XX-XX-XX",2)') 41 | GO 42 | alter table dbo.Corporate 43 | alter column CorporateID 44 | add MASKED WITH (FUNCTION = 'default()') 45 | GO 46 | 47 | GRANT SELECT ON dbo.Corporate to [regularuser]; 48 | 49 | GO 50 | --can see the data, since we're a member of the sysadmin role 51 | select * from dbo.Corporate 52 | GO 53 | --execute as a low-privedged user with only regular permissions 54 | EXECUTE AS LOGIN = 'regularuser'; 55 | select * from dbo.Corporate 56 | REVERT; 57 | 
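/*
--Optional follow-up (a sketch, not part of the original demo): confirm which columns are masked, and control who can see
--through the masks. UNMASK is a database-level permission here; [regularuser] is the principal created above.
SELECT TableName = OBJECT_NAME(object_id), ColumnName = name, masking_function
FROM sys.masked_columns;

GRANT UNMASK TO [regularuser];    --regularuser would now see the real values
REVOKE UNMASK FROM [regularuser]; --back to masked output for regularuser
*/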
-------------------------------------------------------------------------------- /lab - execute as.sql: -------------------------------------------------------------------------------- 1 | USE Master; 2 | CREATE LOGIN [domain\username] FROM WINDOWS; 3 | GO 4 | GRANT CONTROL SERVER TO [domain\username] ; 5 | DENY VIEW SERVER STATE TO [domain\username]; 6 | GO 7 | EXECUTE AS LOGIN = 'domain\username'; 8 | SELECT * FROM sys.dm_exec_cached_plans; --Fails 9 | GO 10 | REVERT; --Reverts the EXECUTE AS 11 | GO 12 | 13 | --CONTROL SERVER is needed to access sys.dm_exec_cached_plans, a server-level DMV for reviewing plans in cache. 14 | 15 | SELECT ORIGINAL_LOGIN(), CURRENT_USER; 16 | /* 17 | ORIGINAL_LOGIN() = The name of the login with which you actually connected. This will not change even after you use EXECUTE AS USER or EXECUTE AS LOGIN. 18 | CURRENT_USER = The name of the user you have assumed. 19 | */ -------------------------------------------------------------------------------- /lab - filetable demo.sql: -------------------------------------------------------------------------------- 1 | USE [master] 2 | GO 3 | EXEC sp_configure filestream_access_level, 2 4 | RECONFIGURE 5 | Go 6 | USE [master] 7 | GO 8 | ALTER DATABASE [w] ADD FILEGROUP [BLOBFiles] CONTAINS FILESTREAM 9 | GO 10 | ALTER DATABASE [w] ADD FILE ( NAME = N'BLOBFiles', FILENAME = N'E:\BLOBFiles\' ) TO FILEGROUP [BLOBFiles] 11 | GO 12 | ALTER DATABASE w 13 | SET FILESTREAM (NON_TRANSACTED_ACCESS = FULL, DIRECTORY_NAME = N'BLOBFiles') 14 | GO 15 | USE w 16 | GO 17 | CREATE TABLE e_BLOBFiles_filetable AS FILETABLE 18 | WITH 19 | ( 20 | FILETABLE_DIRECTORY = 'BLOBFiles' 21 | ) 22 | GO 23 | select * from e_BLOBFiles_filetable 24 | 25 | /* 26 | USE [w] 27 | GO 28 | drop table e_BLOBFiles_filetable 29 | GO 30 | ALTER DATABASE [w] REMOVE FILE [BLOBFiles] 31 | GO 32 | ALTER DATABASE [w] REMOVE FILEGROUP [BLOBFiles] 33 | GO 34 | 35 | */ 36 | 37 | -------------------------------------------------------------------------------- /lab - float as pk problems.sql: -------------------------------------------------------------------------------- 1 | use w 2 | go 3 | drop table if exists dbo.floatinghorror; 4 | drop table if exists dbo.migrated_table; 5 | go 6 | create table dbo.floatinghorror 7 | (invoiceid float not null 8 | constraint PK_floatinghorror primary key 9 | , text1 varchar(10) not null); 10 | go 11 | --1 thru 8 12 | insert into dbo.floatinghorror (invoiceid, text1) values (2019000000100.001, char(79)); 13 | insert into dbo.floatinghorror (invoiceid, text1) values (2019000000100.002, char(77)); 14 | insert into dbo.floatinghorror (invoiceid, text1) values (2019000000100.003, char(71)); 15 | insert into dbo.floatinghorror (invoiceid, text1) values (2019000000100.004, char(33)); 16 | insert into dbo.floatinghorror (invoiceid, text1) values (2019000000100.005, char(87)); 17 | insert into dbo.floatinghorror (invoiceid, text1) values (2019000000100.006, char(84)); 18 | insert into dbo.floatinghorror (invoiceid, text1) values (2019000000100.007, char(70)); 19 | insert into dbo.floatinghorror (invoiceid, text1) values (2019000000100.008, char(33)); 20 | 21 | GO 22 | --weird, shows duplicate values despite a group by?! 
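--(Added explanation) The eight invoiceid values really are distinct float values, so GROUP BY correctly returns eight rows;
--float simply stores them with limited precision and the default display can render them with the same visible digits, which
--is why the output below looks like duplicates until the values are converted to decimal.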
23 | select invoiceid from dbo.floatinghorror group by invoiceid; 24 | 25 | --to actually display the data, gotta convert 26 | select invoice = cast(invoiceid as decimal(19,3)) 27 | from dbo.floatinghorror group by invoiceid; 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | --Say we have to copy the data to another system, 38 | --like a data warehouse or a competitor's software, 39 | --to a database that uses grown-up data types 40 | --and they have higher precision requirements! 41 | create table dbo.migrated_table 42 | (invoiceid decimal(19,4) not null constraint PK_migrated_table primary key 43 | , text1 varchar(10) not null 44 | ); 45 | --the data inserts fine 46 | insert into dbo.migrated_table (invoiceid, text1) 47 | select invoiceid, text1 from dbo.floatinghorror; 48 | GO 49 | --but oh no! 50 | select * from dbo.migrated_table; 51 | --cannot trust float/real data types! -------------------------------------------------------------------------------- /lab - fragmented table create missing index.sql: -------------------------------------------------------------------------------- 1 | --Use to generate Missing Index Suggestions, together with the 3 "lab - fragmented table... .sql" scripts. 2 | select fragtext from fragmented_table_nsi where fragtext2 = 'bbb' 3 | select fragtext from fragmented_table_int where fragtext2 = 'bbb' 4 | select fragtext from fragmented_table where fragtext2 = 'bbb' -------------------------------------------------------------------------------- /lab - fragmented table int.sql: -------------------------------------------------------------------------------- 1 | 2 | 3 | --RUN ENTIRE SCRIPT 4 | DROP TABLE IF EXISTS dbo.fragmented_table_int 5 | go 6 | CREATE TABLE dbo.fragmented_table_int 7 | ( 8 | fragid int NOT NULL IDENTITY(1,1), 9 | fragtext varchar(100) NOT NULL, 10 | fragtext2 varchar(100) NOT NULL 11 | ) 12 | GO 13 | ALTER TABLE dbo.fragmented_table_int ADD CONSTRAINT 14 | PK_fragmented_table_int PRIMARY KEY CLUSTERED 15 | ( 16 | fragid 17 | ) 18 | WITH (OPTIMIZE_FOR_SEQUENTIAL_KEY = ON --SQL 2019 only! 19 | ) 20 | 21 | go 22 | CREATE NONCLUSTERED INDEX IDX_NC_fragmented_table_int 23 | ON dbo.fragmented_table_int (FRAGTEXT) 24 | WITH (OPTIMIZE_FOR_SEQUENTIAL_KEY = ON --SQL 2019 only! 
25 | ) 26 | 27 | GO 28 | 29 | 30 | --Insert roughly 131072k records 31 | 32 | insert into dbo.fragmented_table_int (fragtext, fragtext2) 33 | select replicate(char(round(rand()*100,0)),round(rand()*100,0)), replicate(char(round(rand()*100,0)),round(rand()*100,0)) 34 | go 35 | declare @x integer 36 | set @x = 1 37 | while @x < 19 38 | begin 39 | insert into dbo.fragmented_table_int (fragtext, fragtext2) 40 | select replicate(char(round(rand()*100,0)),round(rand()*100,0)), replicate(char(round(rand()*100,0)),round(rand()*100,0)) 41 | from fragmented_table_int 42 | set @x = @x + 1 43 | end 44 | go 45 | 46 | insert into fragmented_table_int (fragtext, fragtext2) values ('aaa','bbb') 47 | 48 | select count(1) from dbo.fragmented_table_int 49 | -------------------------------------------------------------------------------- /lab - fragmented table newsequentialid.sql: -------------------------------------------------------------------------------- 1 | --RUN ENTIRE SCRIPT 2 | DROP TABLE IF EXISTS dbo.fragmented_table_nsi 3 | go 4 | CREATE TABLE dbo.fragmented_table_nsi 5 | ( 6 | fragid uniqueidentifier NOT NULL DEFAULT newsequentialID(), 7 | fragtext varchar(100) NOT NULL, 8 | fragtext2 varchar(100) NOT NULL 9 | 10 | ) 11 | GO 12 | ALTER TABLE dbo.fragmented_table_nsi ADD CONSTRAINT 13 | PK_fragmented_table_nsi PRIMARY KEY CLUSTERED 14 | ( 15 | fragid 16 | ) 17 | WITH (OPTIMIZE_FOR_SEQUENTIAL_KEY = ON --SQL 2019 only! 18 | ) 19 | go 20 | CREATE NONCLUSTERED INDEX IDX_NC_fragmented_table_nsi 21 | ON dbo.fragmented_table_nsi (FRAGTEXT) 22 | WITH (OPTIMIZE_FOR_SEQUENTIAL_KEY = ON --SQL 2019 only! 23 | ) 24 | 25 | GO 26 | 27 | 28 | --Insert roughly 131072k records 29 | 30 | insert into dbo.fragmented_table_nsi (fragtext, fragtext2) 31 | select replicate(char(round(rand()*100,0)),round(rand()*100,0)), replicate(char(round(rand()*100,0)),round(rand()*100,0)) 32 | go 33 | declare @x integer 34 | set @x = 1 35 | while @x < 19 36 | begin 37 | insert into dbo.fragmented_table_nsi (fragtext, fragtext2) 38 | select replicate(char(round(rand()*100,0)),round(rand()*100,0)), replicate(char(round(rand()*100,0)),round(rand()*100,0)) 39 | from fragmented_table_nsi 40 | set @x = @x + 1 41 | end 42 | go 43 | 44 | --Add needle to haystack 45 | insert into fragmented_table_nsi (fragtext, fragtext2) values ('aaa','bbb') 46 | 47 | select count(1) from dbo.fragmented_table_nsi 48 | 49 | -------------------------------------------------------------------------------- /lab - fragmented table.sql: -------------------------------------------------------------------------------- 1 | --RUN ENTIRE SCRIPT 2 | DROP TABLE IF EXISTS dbo.fragmented_table --new syntax in SQL 2016! 
3 | go 4 | CREATE TABLE dbo.fragmented_table 5 | ( 6 | fragid uniqueidentifier NOT NULL DEFAULT (newid()), 7 | fragtext varchar(100) NOT NULL, 8 | fragtext2 varchar(100) NOT NULL 9 | 10 | ) 11 | GO 12 | ALTER TABLE dbo.fragmented_table ADD CONSTRAINT 13 | PK_fragmented_table PRIMARY KEY CLUSTERED 14 | ( 15 | fragid 16 | ) 17 | go 18 | CREATE NONCLUSTERED INDEX IDX_NC_fragmented_table 19 | ON dbo.fragmented_table (fragtext) 20 | WITH (SORT_IN_TEMPDB = ON ) 21 | GO 22 | 23 | 24 | --Insert roughly 131072k records 25 | 26 | insert into dbo.fragmented_table ( fragtext, fragtext2) 27 | select replicate(char(round(rand()*100,0)),round(rand()*100,0)), replicate(char(round(rand()*100,0)),round(rand()*100,0)) 28 | go 29 | declare @x integer 30 | set @x = 1 31 | while @x < 19 32 | begin 33 | insert into dbo.fragmented_table ( fragtext, fragtext2) 34 | select replicate(char(round(rand()*100,0)),round(rand()*100,0)), replicate(char(round(rand()*100,0)),round(rand()*100,0)) 35 | from fragmented_table 36 | set @x = @x + 1 37 | end 38 | go 39 | 40 | --Add needle to haystack 41 | insert into fragmented_table ( fragtext, fragtext2) values ('aaa','bbb') 42 | 43 | select count(1) from dbo.fragmented_table 44 | 45 | 46 | 47 | 48 | -------------------------------------------------------------------------------- /lab - implicit conversion.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SparkhoundSQL/sql-server-toolbox/51636a22fe7db848cc12473c8083e2753bb6ab93/lab - implicit conversion.sql -------------------------------------------------------------------------------- /lab - joins.sql: -------------------------------------------------------------------------------- 1 | declare @t1 table (id int not null primary key 2 | , test1 varchar(10) null) 3 | 4 | declare @t2 table (id int not null primary key 5 | , test1 varchar(10) null) 6 | 7 | insert into @t1 values (1,'a'),(2,'b') 8 | insert into @t2 values (2,'b'),(3,'c') 9 | 10 | select * from @t1 t1 inner join @t2 t2 on t1.id = t2.id 11 | select * from @t1 t1 join @t2 t2 on t1.id = t2.id --same as inner join, but confusing 12 | select * from @t1 t1 left outer join @t2 t2 on t1.id = t2.id 13 | select * from @t1 t1 right outer join @t2 t2 on t1.id = t2.id 14 | select * from @t1 t1 cross join @t2 t2 15 | select * from @t1 t1 cross apply @t2 t2 --apply is intended for use with functions, not tables, but behaves the same as cross join here 16 | select * from @t1 t1 outer apply @t2 t2 --apply is intended for use with functions, not tables, but behaves similarly as cross join here, but in different order 17 | -------------------------------------------------------------------------------- /lab - memory optimized table.sql: -------------------------------------------------------------------------------- 1 | --Lab - memory optimized table performance 2 | 3 | --select * from sys.master_files 4 | --1. 
first, you must add a filegroup for memory_optimzed_data 5 | use master 6 | go 7 | ALTER DATABASE w ADD FILEGROUP [Optimized_FG] CONTAINS MEMORY_OPTIMIZED_DATA 8 | GO 9 | ALTER DATABASE w ADD FILE ( NAME = N'Optimized_Data', FILENAME = N'F:\Data\Optimized_Data.ndf') TO FILEGROUP [Optimized_FG] 10 | GO 11 | 12 | use w 13 | go 14 | 15 | CREATE TABLE dbo.mem_table 16 | ( 17 | fragid int NOT NULL IDENTITY(1,1) , 18 | fragtext varchar(4000) NOT NULL, 19 | CONSTRAINT [PK_mem_table] PRIMARY KEY NONCLUSTERED HASH (fragid) WITH (BUCKET_COUNT = 131072 ) --bucket_count should be 1-2x the number of unique key values that are expected. 20 | ) 21 | WITH (MEMORY_OPTIMIZED = ON, DURABILITY = SCHEMA_AND_DATA) 22 | -------------------------------------------------------------------------------- /lab - missing index setup demo.sql: -------------------------------------------------------------------------------- 1 | 2 | USE [WideWorldImporters] 3 | GO 4 | 5 | ---------------- 6 | 7 | SELECT 8 | c.CustomerName, c.PhoneNumber, cc.CustomerCategoryName 9 | FROM 10 | [Sales].[Customers] c 11 | inner join sales.CustomerCategories cc 12 | on cc.CustomerCategoryID = c.CustomerCategoryID 13 | where c.CreditLimit > 1000 14 | 15 | 16 | 17 | 18 | 19 | 20 | /* 21 | 22 | CREATE NONCLUSTERED INDEX IDX_NC_Customers_CreditLimit ON [WideWorldImporters].[Sales].[Customers] ([CreditLimit]) INCLUDE ([CustomerName], [CustomerCategoryID], [DeliveryCityID], [PhoneNumber]) 23 | 24 | */ 25 | 26 | /* 27 | 28 | DROP INDEX IDX_NC_Customers_CreditLimit ON [WideWorldImporters].[Sales].[Customers] 29 | */ -------------------------------------------------------------------------------- /lab - nested sproc tran rollback commit.sql: -------------------------------------------------------------------------------- 1 | --Lab for testing nested sproc transaction rollback/commit. 2 | 3 | create table testingonly 4 | (id int not null identity(1,1) primary key 5 | , testvarchar varchar(100) ) 6 | go 7 | select * from testingonly 8 | go 9 | 10 | Create or ALTER procedure sproc2 11 | as 12 | begin 13 | set xact_abort on 14 | begin tran t2; 15 | begin try 16 | 17 | select 'sproc2' 18 | insert into testingonly (testvarchar) values ('inserted into sproc2') 19 | select 1/0; 20 | 21 | 22 | commit tran t2 23 | end try 24 | begin catch 25 | select 'in sproc2 catch' + str(@@TRANCOUNT) 26 | IF @@TRANCOUNT > 0 27 | rollback tran; 28 | select 'rollback in sproc2', error_message() 29 | throw 30 | end catch 31 | end 32 | go 33 | 34 | create or ALTER procedure sproc1 35 | as 36 | begin 37 | set xact_abort on 38 | begin tran t1; 39 | begin try 40 | select 'sproc1' 41 | 42 | insert into testingonly (testvarchar) values ('inserted into sproc1 1') 43 | exec sproc2 44 | insert into testingonly (testvarchar) values ('inserted into sproc1 2') 45 | commit tran t1; 46 | end try 47 | begin catch 48 | select 'in sproc1 catch' + str(@@TRANCOUNT) 49 | IF @@TRANCOUNT > 0 50 | rollback tran; 51 | select 'rollback in sproc1', error_message() 52 | --throw --comment out if you want the parent not to throw an error. 
53 | end catch 54 | END 55 | GO 56 | 57 | 58 | exec sproc1 59 | go 60 | select * from testingonly 61 | 62 | go 63 | drop table if exists testingonly 64 | drop procedure if exists sproc1 65 | drop procedure if exists sproc2 66 | go 67 | 68 | -------------------------------------------------------------------------------- /lab - nonsequentialguid.sql: -------------------------------------------------------------------------------- 1 | --Conditions that trigger a nonsequential "sequential" guid 2 | --- Failover to another machine may trigger in FC or AG 3 | --- Upgrade or Migrate the database to new hardware, new platform 4 | --- Change tier of Azure SQL DB 5 | --- Re-image an Azure VM with ephemeral OS 6 | --- Docker run without explicit MAC 7 | --- Since the C++ function UuidCreateSequential is based on "the MAC address of a machine's Ethernet card", changing or replacing the NIC may trigger, 8 | --- or anything that changes the NIC MAC may trigger. 9 | ------ You cannot specify the MAC address of a new Azure network interface to prevent this on a new machine! 10 | ------ more info: https://docs.microsoft.com/en-us/windows/win32/api/rpcdce/nf-rpcdce-uuidcreatesequential?redirectedfrom=MSDN 11 | ------ "Therefore you should never use this UUID to identify an object that is not strictly local to your computer." 12 | 13 | 14 | use master 15 | go 16 | CREATE DATABASE [make_ints_not_guids] 17 | CONTAINMENT = NONE 18 | ON PRIMARY 19 | ( NAME = N'make_ints_not_guids', FILENAME = N'f:\DATA\make_ints_not_guids.mdf' , SIZE = 800192KB , FILEGROWTH = 65536KB ) 20 | LOG ON 21 | ( NAME = N'make_ints_not_guids_log', FILENAME = N'f:\DATA\make_ints_not_guids_log.ldf' , SIZE = 8192KB , FILEGROWTH = 65536KB ) 22 | GO 23 | ALTER DATABASE [make_ints_not_guids] SET AUTO_UPDATE_STATISTICS ON 24 | GO 25 | use [make_ints_not_guids] 26 | go 27 | --drop table if exists dbo.nonsequentialguid 28 | go 29 | CREATE TABLE dbo.nonsequentialguid 30 | (id uniqueidentifier NOT NULL CONSTRAINT DF_nonseq_id DEFAULT newsequentialid() 31 | , whenobserved datetimeoffset(2) NOT NULL CONSTRAINT DF_nonseq_when DEFAULT sysdatetimeoffset() 32 | , constraint pk_nonseq primary key (id) WITH (OPTIMIZE_FOR_SEQUENTIAL_KEY = ON) --SQL 2019+ only 33 | ) 34 | go 35 | 36 | 37 | --Run the below command after reboots, failovers, etc. You will eventually start writing nonsequential "sequential" guids. 38 | insert into dbo.nonsequentialguid (whatever) values ('a') 39 | 40 | /* 41 | Try to trigger the nonsequential sequential guid. It's not enough to reboot, it has to be a chance to the NIC MAC. See list above. 42 | */ 43 | 44 | --Check for nonsequential "sequential" GUIDs. All three of the below will show results when a nonsequential sequential guid is inserted. 45 | select n1.id, n1.whenobserved, n2.id, n2.whenobserved from nonsequentialguid n1 46 | , nonsequentialguid n2 47 | where n1.id > n2.id and n1.whenobserved < n2.whenobserved -- when an id is greater than another record, but its when is less than the other record's when. It's happened. 48 | order by n1.id, n2.id 49 | 50 | select * from ( 51 | select id, whenobserved, rank_when = rank () over (order by whenobserved), rank_id = rank() over (order by id) from nonsequentialguid 52 | ) x where rank_when <> rank_id -- when rank of id <> the rank of when, they're out of order. 
It's happened.S 53 | order by rank_id, rank_when 54 | 55 | select id, whenobserved 56 | from nonsequentialguid n1 57 | order by id asc -- when first records in this sort order will have been inserted after later records, it's happened. 58 | 59 | 60 | 61 | -------------------------------------------------------------------------------- /lab - optimize_for_sequential_key testing.sql: -------------------------------------------------------------------------------- 1 | --Demonstrate wait type reduction with new OPTIMIZE_FOR_SEQUENTIAL_KEY in SQL 2019 2 | -- First run without OPTIMIZE_FOR_SEQUENTIAL_KEY, then run with OPTIMIZE_FOR_SEQUENTIAL_KEY 3 | 4 | use w 5 | go 6 | dbcc freeproccache 7 | dbcc dropcleanbuffers 8 | go 9 | --RUN ENTIRE SCRIPT 10 | DROP TABLE IF EXISTS dbo.fragmented_table_int 11 | go 12 | CREATE TABLE dbo.fragmented_table_int 13 | ( 14 | fragid int NOT NULL IDENTITY(1,1), 15 | fragtext varchar(100) NOT NULL 16 | ) 17 | GO 18 | ALTER TABLE dbo.fragmented_table_int ADD CONSTRAINT 19 | PK_fragmented_table_int PRIMARY KEY CLUSTERED 20 | ( 21 | fragid 22 | ) 23 | WITH (OPTIMIZE_FOR_SEQUENTIAL_KEY = ON --SQL 2019 only! 24 | ) 25 | 26 | go 27 | CREATE NONCLUSTERED INDEX IDX_NC_fragmented_table_int 28 | ON dbo.fragmented_table_int (FRAGTEXT) 29 | -- WITH (OPTIMIZE_FOR_SEQUENTIAL_KEY = ON --SQL 2019 only! 30 | -- ) 31 | 32 | GO 33 | 34 | create table #tempcounter (id int not null identity(1,1) primary key, counter_name sysname, cntr_value bigint, whenobserved datetimeoffset(2) not null constraint df_cntr_temp_when default (sysdatetimeoffset())) 35 | insert into #tempcounter (counter_name, cntr_value) 36 | select counter_name, cntr_value FROM sys.dm_os_performance_counters 37 | where counter_name = 'Page Splits/sec' 38 | 39 | create table #waitcounter (id int not null identity(1,1) primary key, wait nvarchar(60), val bigint, whenobserved datetimeoffset(2) not null constraint df_wait_temp_when default (sysdatetimeoffset())) 40 | insert into #waitcounter (wait, val) 41 | select wait_type, wait_time_ms 42 | from sys.dm_exec_session_wait_stats 43 | where wait_type like '%PAGELATCH%' and session_id = @@SPID 44 | 45 | go 46 | --Run this block from multiple query connections simultaneously. 
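--(Added note) The GO 20000 after the INSERT below is an SSMS/sqlcmd batch-separator feature: it repeats the preceding
--single-row INSERT batch 20,000 times on each connection that runs it. To drive several connections without juggling query
--windows, a load tool such as ostress.exe from the RML Utilities can run the same statement concurrently, for example roughly:
--  ostress.exe -S. -E -dw -n8 -r20000 -Q"insert into dbo.fragmented_table_int (fragtext) select replicate(char(round(rand()*100,0)),round(rand()*100,0))"
--(the ostress switches above are illustrative from memory; verify against the installed RML Utilities documentation)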
47 | insert into dbo.fragmented_table_int (fragtext) 48 | select replicate(char(round(rand()*100,0)),round(rand()*100,0)) 49 | go 20000 50 | select count(1) from dbo.fragmented_table_int 51 | GO 52 | insert into #waitcounter (wait, val) 53 | select wait_type, wait_time_ms 54 | from sys.dm_exec_session_wait_stats 55 | where wait_type like '%PAGELATCH%' and session_id = @@SPID 56 | GO 57 | insert into #tempcounter (counter_name, cntr_value) 58 | select counter_name, cntr_value FROM sys.dm_os_performance_counters 59 | where counter_name = 'Page Splits/sec' 60 | GO 61 | 62 | select a1.counter_name, a2.cntr_value - a1.cntr_value 63 | from #tempcounter a1 64 | inner join #tempcounter a2 on a1.counter_name = a2.counter_name 65 | WHERE a1.id = 1 and a2.id = 2; 66 | 67 | select a1.wait, wait_ms = a2.val - a1.val, sum(a2.val - a1.val) OVER () 68 | --select * 69 | from #waitcounter a1 70 | inner join #waitcounter a2 on a1.wait = a2.wait and a2.id > a1.id 71 | order by a1.wait, wait_ms desc 72 | GO 73 | 74 | drop table #tempcounter; 75 | drop table #waitcounter; 76 | -------------------------------------------------------------------------------- /lab - restore wideworldimporters.sql: -------------------------------------------------------------------------------- 1 | --AdventureWorks latest download 2 | --https://docs.microsoft.com/sql/samples/wide-world-importers-oltp-install-configure 3 | 4 | USE [master] 5 | GO 6 | RESTORE VERIFYONLY FROM DISK = N'E:\Program Files\Microsoft SQL Server\MSSQL15.SQL2K19\MSSQL\Backup\WideWorldImporters-Full.bak' 7 | GO 8 | alter database [WideWorldImporters] set single_user with rollback immediate 9 | GO 10 | USE [master] 11 | RESTORE DATABASE [WideWorldImporters] FROM DISK = N'E:\Program Files\Microsoft SQL Server\MSSQL15.SQL2K19\MSSQL\Backup\WideWorldImporters-Full.bak' 12 | WITH FILE = 1, REPLACE, 13 | MOVE N'WWI_Primary' TO N'E:\Program Files\Microsoft SQL Server\MSSQL15.SQL2K19\MSSQL\DATA\WideWorldImporters.mdf', 14 | MOVE N'WWI_UserData' TO N'E:\Program Files\Microsoft SQL Server\MSSQL15.SQL2K19\MSSQL\DATA\WideWorldImporters_UserData.ndf', 15 | MOVE N'WWI_Log' TO N'E:\Program Files\Microsoft SQL Server\MSSQL15.SQL2K19\MSSQL\DATA\WideWorldImporters.ldf', 16 | MOVE N'WWI_InMemory_Data_1' TO N'E:\Program Files\Microsoft SQL Server\MSSQL15.SQL2K19\MSSQL\DATA\WideWorldImporters_InMemory_Data_1', NOUNLOAD, STATS = 5 17 | GO 18 | 19 | 20 | alter database [WideWorldImporters] set multi_user with rollback immediate 21 | GO 22 | alter database [WideWorldImporters] set compatibility_level = 150 -------------------------------------------------------------------------------- /lab - resumeable index maintenance.sql: -------------------------------------------------------------------------------- 1 | --SQL 2017+ 2 | --Must perform RESUMABLE = ON with ONLINE ON 3 | --See example build of this large, fragmented table in toolbox\lab - fragmented table.sql 4 | 5 | use w 6 | GO 7 | ALTER INDEX PK_fragmented_table on dbo.fragmented_table REBUILD WITH (ONLINE = ON, RESUMABLE = ON ) 8 | 9 | --FROM A DIFFERENT CONNECTION, run the below. 10 | /* 11 | use w 12 | go 13 | alter index PK_fragmented_table on dbo.fragmented_table PAUSE 14 | */ 15 | 16 | --To resume the index maintenance operation, two options: 17 | 18 | --1.Reissue the same index maintenance operation, which will warn you it'll just resume instead. 
19 | ALTER INDEX PK_fragmented_table on dbo.fragmented_table REBUILD WITH (ONLINE = ON, RESUMABLE = ON ) 20 | --Warning: An existing resumable operation with the same options was identified for the same index on 'fragmented_table'. The existing operation will be resumed instead. 21 | 22 | --2.Issue a RESUME to the same index. 23 | alter index PK_fragmented_table on dbo.fragmented_table RESUME 24 | -------------------------------------------------------------------------------- /lab - security p1.sql: -------------------------------------------------------------------------------- 1 | USE [master] 2 | GO 3 | /* 4 | 5 | if exists (select * from sys.server_principals where name = 'DenyPrincipal') 6 | DROP LOGIN [DenyPrincipal] 7 | go 8 | use securitydemo 9 | go 10 | if exists (select * from sys.database_principals where name = 'DenyPrincipal') 11 | DROP USER [DenyPrincipal] 12 | drop view if exists dbo.DenyTableview 13 | drop table if exists dbo.DenyTable 14 | drop proc if exists dbo.DenyTablesproc 15 | drop proc if exists dbo.DenyTablesproc_adhoc 16 | drop function if exists dbo.DenyFunc 17 | 18 | 19 | */ 20 | USE [master] 21 | GO 22 | CREATE LOGIN [DenyPrincipal] WITH PASSWORD=N'deny', DEFAULT_DATABASE=[master], CHECK_EXPIRATION=OFF, CHECK_POLICY=OFF 23 | GO 24 | GRANT CONNECT SQL TO [DenyPrincipal] 25 | ALTER LOGIN [DenyPrincipal] ENABLE 26 | GO 27 | USE securitydemo 28 | GO 29 | CREATE USER [DenyPrincipal] FOR LOGIN [DenyPrincipal] 30 | GO 31 | 32 | 33 | 34 | CREATE TABLE dbo.DenyTable ( 35 | id int IDENTITY(1,1) NOT NULL PRIMARY KEY, 36 | text1 VARCHAR(100) 37 | ) 38 | GO 39 | INSERT INTO DenyTable (text1) VALUES ('test') 40 | GO 3 41 | GO 42 | 43 | create view dbo.DenyTableview with schemabinding as 44 | select selectview = text1 from dbo.DenyTable 45 | go 46 | 47 | 48 | grant select on dbo.DenyTableview to [DenyPrincipal] 49 | go 50 | 51 | deny select on dbo.DenyTable to [DenyPrincipal] 52 | go 53 | 54 | create proc dbo.DenyTablesproc as 55 | begin 56 | select execsproc = text1 57 | from dbo.DenyTable 58 | end 59 | GO 60 | 61 | grant execute on dbo.DenyTablesproc to [DenyPrincipal] 62 | GO 63 | 64 | deny select to [DenyPrincipal] --on the entire database! 
65 | go 66 | 67 | revoke select to [DenyPrincipal] 68 | go 69 | 70 | 71 | create proc dbo.DenyTablesproc_adhoc 72 | as 73 | begin 74 | declare @sql nvarchar(1000) 75 | select @sql = 'select execsproc_adhoc = text1 from dbo.DenyTable' 76 | exec sp_executesql @SQL 77 | end 78 | go 79 | grant execute on dbo.DenyTablesproc_adhoc to [DenyPrincipal] 80 | GO 81 | 82 | 83 | CREATE FUNCTION dbo.DenyFunc () 84 | RETURNS TABLE 85 | AS RETURN 86 | SELECT EXECFUNC = TEXT1 87 | FROM dbo.DenyTable; 88 | GO 89 | GRANT SELECT ON dbo.DenyFunc TO [DenyPrincipal]; 90 | -------------------------------------------------------------------------------- /lab - security p2.sql: -------------------------------------------------------------------------------- 1 | 2 | --change to text mode 3 | --Log in with denyprincipal 4 | 5 | use securitydemo 6 | go 7 | select * from dbo.DenyTable 8 | go 9 | select * from dbo.DenyTableview 10 | go 11 | exec dbo.DenyTablesproc 12 | go 13 | exec dbo.DenyTablesproc_adhoc 14 | go 15 | select * from dbo.DenyFunc() 16 | 17 | /* 18 | 19 | SELECT ORIGINAL_LOGIN(), CURRENT_USER; 20 | 21 | */ -------------------------------------------------------------------------------- /lab - sequence permissions.sql: -------------------------------------------------------------------------------- 1 | --Demonstrate sequence permissions 2 | use w 3 | go 4 | 5 | 6 | USE [master] 7 | GO 8 | CREATE LOGIN [testseq] WITH PASSWORD=N'test', DEFAULT_DATABASE=[master], CHECK_EXPIRATION=OFF, CHECK_POLICY=OFF 9 | GO 10 | Use w 11 | create user testseq for login testseq 12 | GO 13 | 14 | create schema test authorization dbo 15 | go 16 | CREATE SEQUENCE Test.CountBy1 17 | START WITH 1 18 | INCREMENT BY 1 ; 19 | GO 20 | 21 | EXECUTE AS USER = 'testseq'; 22 | SELECT NEXT VALUE FOR Test.CountBy1; --FAILS 23 | REVERT 24 | GO 25 | GRANT UPDATE ON OBJECT::Test.CountBy1 to testSeq 26 | GO 27 | EXECUTE AS USER = 'testseq'; 28 | SELECT NEXT VALUE FOR Test.CountBy1; --SUCCEEDS 29 | REVERT 30 | GO 31 | --cleanup 32 | REVOKE UPDATE ON Test.CountBy1 to testSeq 33 | DROP SEQUENCE Test.CountBy1 34 | -------------------------------------------------------------------------------- /lab - time zone.sql: -------------------------------------------------------------------------------- 1 | --Strategies for converting time zones on past/future datasets for SQL2016+ 2 | --For = '1/1/2009' 17 | AND DimDate.CalendarDate < '1/1/2020' 18 | ORDER BY DT 19 | 20 | --Get Server's local time zone from the regionalization settings (does not work in Azure SQL DB) 21 | DECLARE @TimeZone VARCHAR(50) 22 | EXEC MASTER.dbo.xp_regread 'HKEY_LOCAL_MACHINE', 23 | 'SYSTEM\CurrentControlSet\Control\TimeZoneInformation', 24 | 'TimeZoneKeyName',@TimeZone OUT 25 | SELECT @TimeZone, DATENAME(TZ , SYSDATETIMEOFFSET()) 26 | 27 | SELECT * FROM sys.time_zone_info WHERE name = @TimeZone 28 | 29 | --Pretend that audit_created below is a UTC date that needs to be converted to the local timezone for display. 30 | --Between Nov-March, 'Bad Strategy' below is wrong for historical dates between March-Nov. 31 | --Between March-Nov, 'Bad Strategy' below is wrong for historical dates between Nov-March. 32 | SELECT 33 | UTCDate = audit_created 34 | , BadStrategy = DATEADD(second, DATEDIFF(second, GETUTCDATE(), GETDATE()), audit_created ) --Don't use! 
35 | , CorrectStrategy = audit_created AT TIME ZONE 'UTC' AT TIME ZONE @TimeZone --SQL 2016+ only 36 | , WRONG = CASE WHEN convert(varchar(19), DATEADD(second, DATEDIFF(second, GETUTCDATE(), GETDATE()), audit_created )) = convert(varchar(19), audit_created AT TIME ZONE 'UTC' AT TIME ZONE @TimeZone) THEN 0 ELSE 1 END 37 | FROM #audit_created 38 | GO -------------------------------------------------------------------------------- /lab - translate syntax.sql: -------------------------------------------------------------------------------- 1 | 2 | 3 | declare @replacement varchar(30) 4 | --The second and third arguments must contain an equal number of characters. 5 | --So unlike replace, you can't replace ' ' with '' 6 | --The characters can be in any order. 7 | 8 | 9 | select @replacement = 'abcdef' 10 | select @replacement 11 | , [T] = TRANSLATE(@replacement, 'abcdef', '123456') 12 | , [R] = REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(@replacement,'a', '1'),'b', '2'),'c', '3'),'d', '4'),'e', '5'),'f', '6') 13 | 14 | select @replacement = '@@data"$%' 15 | select @replacement 16 | , [T] = TRANSLATE (@replacement, '/"_@$%', '_____!') 17 | , [R] = REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(@replacement,'/', '_'),'"', '_'), '@', '_'), '$', '_'), '%', '!'); 18 | 19 | select @replacement = '(OlympicsAreGreat)' 20 | select @replacement 21 | , [T] = TRANSLATE (@replacement, 'Olympic()','DadJoke[!') 22 | , [R] = REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(@replacement,'O', 'D'),'l', 'a'), 'y', 'd'), 'm', 'J'), 'p', 'o'),'i', 'k'),'c', 'e'), '(', '['), ')', '!') 23 | 24 | -------------------------------------------------------------------------------- /last known DBCC CHECKDB.sql: -------------------------------------------------------------------------------- 1 | EXEC sp_MSforeachdb ' 2 | --Table variable to capture the DBCC DBINFO output, look for the field we want in each database output 3 | DECLARE @DBCC_DBINFO TABLE (ParentObject VARCHAR(255) NOT NULL, [Object] VARCHAR(255) NOT NULL, [Field] VARCHAR(255) NOT NULL 4 | INDEX idx_dbinfo_field CLUSTERED --just this line is SQL 2014+ only 5 | , [Value] VARCHAR(255)); 6 | INSERT INTO @DBCC_DBINFO EXECUTE ("DBCC DBINFO ([?]) WITH TABLERESULTS"); 7 | SELECT DISTINCT ''?'', [Value] FROM @DBCC_DBINFO WHERE Field = ''dbi_dbccLastKnownGood'';'; 8 | -------------------------------------------------------------------------------- /limit number of error log files.sql: -------------------------------------------------------------------------------- 1 | --Equivalent viewing the number of Error logs retained in SSMS, right-click on SQL Server Logs, configure 2 | --"Limit the number of error logs files before they are recycled" 3 | --By default, 6. Max value of 99. 4 | --The registry key NumErrorLogs does not exist until the default value of 6 is overridden, so NULL = default. 
5 | --Actual path of key something like: Computer\HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Microsoft SQL Server\MSSQL15.SQL2K19\MSSQLServer, but can be searched 6 | 7 | declare @numlogs int 8 | EXEC master.dbo.xp_instance_regread @rootkey= N'HKEY_LOCAL_MACHINE' 9 | , @key = N'Software\Microsoft\MSSQLServer\MSSQLServer' 10 | , @value_name = N'NumErrorLogs' 11 | , @value = @numlogs OUTPUT 12 | select @numlogs 13 | 14 | 15 | 16 | --Configure SQL Server Error log to keep 50 logs, as opposed to the default 6 17 | --EXEC master.dbo.xp_instance_regwrite N'HKEY_LOCAL_MACHINE', N'Software\Microsoft\MSSQLServer\MSSQLServer', N'NumErrorLogs', REG_DWORD, 50 18 | GO 19 | -------------------------------------------------------------------------------- /lock Pages in Memory LPIM.sql: -------------------------------------------------------------------------------- 1 | --SQL 2016 SP1 or above only!!! 2 | use TempDB; 3 | GO 4 | select sql_memory_model_Desc 5 | --Conventional = Lock Pages in Memory privilege is not granted 6 | --LOCK_PAGES = Lock Pages in Memory privilege is granted 7 | --LARGE_PAGES = Lock Pages in Memory privilege is granted in Enterprise mode with Trace Flag 834 ON 8 | from sys.dm_os_sys_info; 9 | 10 | 11 | /* 12 | If LPIM is enabled, MAX SERVER MEMORY MUST BE CONFIGURED conservatively. It WILL cause problems with Windows if memory is exhausted. 13 | FWIW LPIM not used in Azure, because they do not have issues with working set trim problems. 14 | */ -------------------------------------------------------------------------------- /log_reuse_wait.sql: -------------------------------------------------------------------------------- 1 | 2 | --Reference: https://docs.microsoft.com/en-us/sql/relational-databases/system-catalog-views/sys-databases-transact-sql?view=sql-server-2017 3 | select name, log_reuse_wait ,log_reuse_wait_desc 4 | from sys.databases 5 | GO 6 | 7 | /* If REPLICATION, consider the following but be aware of the consequences: 8 | --Safe for snapshot-only replication, not that safe for other replication if you're not synced 9 | 10 | USE [database] 11 | GO 12 | EXEC sp_repldone @xactid = NULL, @xact_segno = NULL, @numtrans = 0, @time = 0, @reset = 1 13 | GO 14 | CHECKPOINT --must follow with checkpoint! 15 | GO 16 | 17 | 18 | -------------------------------------------------------------------------------- /memory clerks.sql: -------------------------------------------------------------------------------- 1 | --SQL and Azure SQL DB 2 | --Typically not a lot of actionable items to find here. 
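/*
--(Optional sketch, not part of the original script) A single snapshot of clerk sizes is hard to interpret; they are easier to
--judge against a baseline. Assuming the DBALogging database used elsewhere in this toolbox, and a history table created for
--the purpose (hypothetical here), a scheduled job could capture trends with something like:
INSERT INTO DBALogging.dbo.memory_clerks_history (clerk_name, clerk_type, memory_in_use_kb, whenobserved)
SELECT name, type, SUM(pages_kb + virtual_memory_committed_kb + awe_allocated_kb), SYSDATETIMEOFFSET()
FROM sys.dm_os_memory_clerks
GROUP BY name, type;
*/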
3 | 4 | use tempdb; 5 | 6 | SELECT 7 | name 8 | , type 9 | , Memory_in_use = SUM(pages_kb + virtual_memory_committed_kb + awe_allocated_kb) --Only really applicable SQL 2012+, pre-2012 use single_pages_kb 10 | FROM sys.dm_os_memory_clerks 11 | GROUP BY name, type 12 | ORDER BY memory_in_use desc 13 | 14 | /* 15 | Some example types: 16 | MEMORYCLERK_SQLQERESERVATIONS - Memory Grant allocations, see toolbox\dm_exec_query_memory_grants.sql 17 | MEMORYCLERK_SQLBUFFERPOOL - the buffer pool across the instance 18 | OBJECTSTORE_LOCK_MANAGER - the lock manager (concurrency), fixed 19 | XTP - hekaton 20 | CACHESTORE_OBJCP - the procedure cache for procs, functions, triggers 21 | CACHESTORE_PHDR - cached algebrizer trees for views, constraints and defaults 22 | CACHESTORE_SQLCP - other batches not in the above, including ad hoc statements 23 | USERSTORE_SCHEMAMGR - schema management especially temporary objects, note a memory leak in unpatched SQL2012/2014 KB3032476 24 | MEMORYCLERK_SQLLOGPOOL - used tlog activities including AG change-capturing activities on the primary replicas, redo manager activities on the secondary availability replicas 25 | MEMORYCLERK_XE - XEvent session management 26 | 27 | */ 28 | 29 | --https://docs.microsoft.com/en-us/sql/relational-databases/system-dynamic-management-views/sys-dm-os-memory-clerks-transact-sql?view=sql-server-ver15 -------------------------------------------------------------------------------- /memory-optimized table memory usage.sql: -------------------------------------------------------------------------------- 1 | SELECT 2 | OBJECT_NAME(ms.object_id) 3 | , memory_allocated_for_table_and_indexes_GB = ((memory_allocated_for_table_kb + memory_allocated_for_indexes_kb)/1024./1024.) 4 | , memory_allocated_for_table_GB = memory_allocated_for_table_kb/1024./1024. 5 | , memory_allocated_for_indexes_GB = memory_allocated_for_indexes_kb/1024./1024. 6 | , Row_Count = p.rows 7 | FROM sys.dm_db_xtp_table_memory_stats ms 8 | INNER JOIN sys.partitions p 9 | ON p.object_id = ms.object_id 10 | WHERE 11 | p.index_id <= 1 12 | --and ms.object_id = object_id('dbo.memopt1') 13 | -------------------------------------------------------------------------------- /modules vs routines.sql: -------------------------------------------------------------------------------- 1 | use [AdventureWorks2012] 2 | go 3 | 4 | select s.name +'.' + o.name, o.type_desc, m.definition, LEN(m.definition) 5 | from sys.sql_modules m 6 | inner join sys.objects o on m.object_id = o.object_id 7 | inner join sys.schemas s on s.schema_id = o.schema_id 8 | where definition like '%GroupName%' 9 | order by o.name 10 | 11 | /* 12 | 13 | --Don't use INFORMATION_SCHEMA regardless, but the below is an example of how it could return incomplete results compared to the superior sys.sql_modules. 14 | 15 | select r.SPECIFIC_SCHEMA + '.' 
+ r.SPECIFIC_NAME, r.routine_type, r.ROUTINE_DEFINITION, LEN(routine_definition) 16 | from INFORMATION_SCHEMA.routines r 17 | where ROUTINE_DEFINITION like '%GroupName%' 18 | order by ROUTINE_NAME 19 | 20 | */ -------------------------------------------------------------------------------- /move system databases.sql: -------------------------------------------------------------------------------- 1 | 2 | 3 | USE master; 4 | GO 5 | alter database msdb modify file ( NAME = MSDBData , FILENAME = 'F:\Program Files\Microsoft SQL Server\MSSQL13.SQL2K16\MSSQL\data2\MSDBData.mdf') 6 | go 7 | alter database msdb modify file ( NAME = MSDBlog , FILENAME = 'F:\Program Files\Microsoft SQL Server\MSSQL13.SQL2K16\MSSQL\data2\MSDBLog.ldf') 8 | go 9 | alter database model modify file ( NAME = modeldev, FILENAME = 'F:\Program Files\Microsoft SQL Server\MSSQL13.SQL2K16\MSSQL\data2\model.mdf') 10 | go 11 | alter database model modify file ( NAME = modellog, FILENAME = 'F:\Program Files\Microsoft SQL Server\MSSQL13.SQL2K16\MSSQL\data2\modellog.ldf') 12 | go 13 | ALTER DATABASE tempdb MODIFY FILE (NAME = tempdev, FILENAME = 'F:\Program Files\Microsoft SQL Server\MSSQL13.SQL2K16\MSSQL\data2\tempdb.mdf'); 14 | go 15 | ALTER DATABASE tempdb MODIFY FILE (NAME = templog, FILENAME = 'F:\Program Files\Microsoft SQL Server\MSSQL13.SQL2K16\MSSQL\data2\templog.ldf'); 16 | go 17 | alter database reportserver MODIFY file ( NAME = ReportServer , FILENAME = 'F:\Program Files\Microsoft SQL Server\MSSQL13.SQL2K16\MSSQL\data2\ReportServer.mdf') 18 | go 19 | alter database reportserver MODIFY file ( NAME = ReportServer_log , FILENAME = 'F:\Program Files\Microsoft SQL Server\MSSQL13.SQL2K16\MSSQL\data2\ReportServer_Log.ldf') 20 | go 21 | alter database ReportServerTempDB MODIFY file ( NAME = ReportServerTempDB , FILENAME = 'F:\Program Files\Microsoft SQL Server\MSSQL13.SQL2K16\MSSQL\data2\ReportServerTempDB.mdf') 22 | go 23 | alter database ReportServerTempDB MODIFY file ( NAME = ReportServerTempDB_log , FILENAME = 'F:\Program Files\Microsoft SQL Server\MSSQL13.SQL2K16\MSSQL\data2\ReportServerTempDB_log.LDF') 24 | go 25 | alter database SSISDB MODIFY file ( NAME = data , FILENAME = 'F:\Program Files\Microsoft SQL Server\MSSQL13.SQL2K16\MSSQL\data2\SSISDB.mdf') 26 | go 27 | alter database SSISDB MODIFY file ( NAME = log , FILENAME = 'F:\Program Files\Microsoft SQL Server\MSSQL13.SQL2K16\MSSQL\data2\SSISDB.LDF') 28 | 29 | /* 30 | master startup parameters 31 | 32 | --old 33 | -dC:\Program Files\Microsoft SQL Server\MSSQL10_50.MSSQLSERVER\MSSQL\DATA\master.mdf;-eC:\Program Files\Microsoft SQL Server\MSSQL10_50.MSSQLSERVER\MSSQL\Log\ERRORLOG;-lC:\Program Files\Microsoft SQL Server\MSSQL10_50.MSSQLSERVER\MSSQL\DATA\mastlog.ldf 34 | 35 | --new 36 | -dF:\Program Files\Microsoft SQL Server\MSSQL13.SQL2K16\MSSQL\data2\master.mdf;-eC:\Program Files\Microsoft SQL Server\MSSQL10_50.MSSQLSERVER\MSSQL\Log\ERRORLOG;-lF:\Program Files\Microsoft SQL Server\MSSQL13.SQL2K16\MSSQL\data2\mastlog.ldf 37 | */ 38 | 39 | select name, physical_name, state_desc from sys.master_files -------------------------------------------------------------------------------- /multiserver space in files.sql: -------------------------------------------------------------------------------- 1 | use master 2 | go 3 | exec sp_MSforeachdb 'use [?]; 4 | select * from ( 5 | SELECT 6 | ''DatabaseName_____________'' = d.name 7 | , Recovery = d.recovery_model_desc 8 | , ''DatabaseFileName_______'' = df.name 9 | , 
''Location_______________________________________________________________________'' = df.physical_name 10 | , df.File_ID 11 | , FileSizeMB = CAST(size/128.0 as Decimal(9,2)) 12 | , SpaceUsedMB = CAST(CAST(FILEPROPERTY(df.name, ''SpaceUsed'') AS int)/128.0 as Decimal(9,2)) 13 | , AvailableMB = CAST(size/128.0 - CAST(FILEPROPERTY(df.name, ''SpaceUsed'') AS int)/128.0 as Decimal(9,2)) 14 | , ''Free%'' = CAST((((size/128.0) - (CAST(FILEPROPERTY(df.name, ''SpaceUsed'') AS int)/128.0)) / NULLIF(size/128.0, 0) ) * 100. as Decimal(9,2)) 15 | FROM sys.database_files df 16 | cross apply sys.databases d 17 | where d.database_id = DB_ID() 18 | and size > 0 19 | and d.name not like ''%test%'' 20 | ) x 21 | where [Free%] <= 10. 22 | and filesizemb > 500. 23 | and availableMB < 500. 24 | 25 | ' -------------------------------------------------------------------------------- /my_permissions.sql: -------------------------------------------------------------------------------- 1 | --Script to test permissions 2 | 3 | select suser_sname(); --you 4 | execute as login = 'domain\test.user' --login name you want to test 5 | 6 | use master 7 | select suser_sname(), * from sys.fn_my_permissions (null, 'DATABASE') --https://msdn.microsoft.com/en-us/library/ms176097.aspx 8 | 9 | REVERT; --undo the execute as 10 | select suser_sname(); --you 11 | 12 | 13 | 14 | --See also script to check security group membership - toolbox\security group members.sql -------------------------------------------------------------------------------- /oledb providers.sql: -------------------------------------------------------------------------------- 1 | --OLE DB Providers registered to this server 2 | EXEC master.dbo.sp_MSset_oledb_prop 3 | 4 | --DB2OLEDB = Microsoft-provided as400 OLE DB driver for AS400 as found in SQL Server version-specific feature pack 5 | --Microsoft.ACE.OLEDB.12.0 = MS Access 6 | 7 | --Default list 8 | 9 | --SQLOLEDB 10 | --SQLNCLI11 11 | --Microsoft.ACE.OLEDB.12.0 12 | --ADsDSOObject 13 | --SSISOLEDB 14 | --Search.CollatorDSO 15 | --MSDASQL 16 | --MSOLAP 17 | --MSDAOSP -------------------------------------------------------------------------------- /open transactions.sql: -------------------------------------------------------------------------------- 1 | --http://www.sqlskills.com/blogs/paul/script-open-transactions-with-text-and-plans/ 2 | 3 | 4 | SELECT 5 | [s_tst].[session_id], 6 | [s_es].[login_name] AS [Login Name], 7 | DB_NAME (s_tdt.database_id) AS [Database], 8 | [s_tdt].[database_transaction_begin_time] AS [Begin Time], 9 | [s_tdt].[database_transaction_log_bytes_used] AS [Log Bytes], 10 | [s_tdt].[database_transaction_log_bytes_reserved] AS [Log Rsvd], 11 | [s_est].text AS [Last T-SQL Text], 12 | [s_eqp].[query_plan] AS [Last Plan] 13 | FROM 14 | sys.dm_tran_database_transactions [s_tdt] 15 | JOIN 16 | sys.dm_tran_session_transactions [s_tst] 17 | ON 18 | [s_tst].[transaction_id] = [s_tdt].[transaction_id] 19 | JOIN 20 | sys.[dm_exec_sessions] [s_es] 21 | ON 22 | [s_es].[session_id] = [s_tst].[session_id] 23 | JOIN 24 | sys.dm_exec_connections [s_ec] 25 | ON 26 | [s_ec].[session_id] = [s_tst].[session_id] 27 | LEFT OUTER JOIN 28 | sys.dm_exec_requests [s_er] 29 | ON 30 | [s_er].[session_id] = [s_tst].[session_id] 31 | CROSS APPLY 32 | sys.dm_exec_sql_text ([s_ec].[most_recent_sql_handle]) AS [s_est] 33 | OUTER APPLY 34 | sys.dm_exec_query_plan ([s_er].[plan_handle]) AS [s_eqp] 35 | ORDER BY 36 | [Begin Time] ASC; 37 | GO 38 | 
-------------------------------------------------------------------------------- /optimize for ad hoc workloads.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SparkhoundSQL/sql-server-toolbox/51636a22fe7db848cc12473c8083e2753bb6ab93/optimize for ad hoc workloads.sql -------------------------------------------------------------------------------- /page life expectancy Azure SQL DB.sql: -------------------------------------------------------------------------------- 1 | --For Azure SQL DB only 2 | 3 | select 4 | p.InstanceName 5 | , c.Version 6 | , Min_Server_Mem_MB = c.[Min_Server_Mem_MB] 7 | , Max_Server_Mem_MB = c.[Max_Server_Mem_MB] --2147483647 means unlimited, just like it shows in SSMS 8 | , p.PLE_s --300s is only an arbitrary rule for smaller memory servers (<16gb), for larger, it should be baselined and measured. 9 | , 'Churn (MB/s)' = cast((p.Total_Server_Mem_GB*1024.)/NULLIF(p.PLE_s,0) as decimal(19,2)) 10 | , p.Total_Server_Mem_GB --May be more or less than memory_in_use 11 | , p.Target_Server_Mem_GB 12 | , Target_vs_Total = CASE WHEN p.Total_Server_Mem_GB < p.Target_Server_Mem_GB 13 | THEN 'Target >= Total. SQL wants more memory than it has, or is building up to that point.' 14 | ELSE 'Total >= Target. SQL has enough memory to do what it wants.' END 15 | from 16 | ( 17 | select 18 | InstanceName = @@SERVERNAME 19 | , Target_Server_Mem_GB = case counter_name when 'Target Server Memory (KB)' then convert(decimal(19,3), cntr_value/1024./1024.) end 20 | , Total_Server_Mem_GB = case counter_name when 'Total Server Memory (KB)' then convert(decimal(19,3), cntr_value/1024./1024.) end 21 | , PLE_s = case when object_name like '%Buffer Manager%' and counter_name = 'Page life expectancy' then cntr_value end --This only looks at the overall buffer pool, not individual NUMA nodes. https://www.sqlskills.com/blogs/paul/page-life-expectancy-isnt-what-you-think/ 22 | ,* 23 | from sys.dm_os_performance_counters 24 | WHERE counter_name in ('Target Server Memory (KB)','Total Server Memory (KB)') OR (object_name like '%Buffer Manager%' and Counter_name = 'Page life expectancy') 25 | --This only looks at one NUMA node. https://www.sqlskills.com/blogs/paul/page-life-expectancy-isnt-what-you-think/ 26 | ) as p 27 | inner join (select 'InstanceName' = @@SERVERNAME, Version = @@VERSION, 28 | min_Server_Mem_MB = max(case when name = 'min server memory (MB)' then convert(bigint, value_in_use) end) , 29 | max_Server_Mem_MB = max(case when name = 'max server memory (MB)' then convert(bigint, value_in_use) end) 30 | from sys.configurations) as c on p.InstanceName = c.InstanceName 31 | -------------------------------------------------------------------------------- /page life expectancy.sql: -------------------------------------------------------------------------------- 1 | use TempDB; 2 | GO 3 | select 4 | [InstanceName] = @@SERVERNAME 5 | , [SQL_Version] = @@VERSION 6 | , si.* 7 | , i.[Min_Server_Mem_MB] 8 | , i.[Max_Server_Mem_MB] --2147483647 means unlimited, just like it shows in SSMS 9 | , p.PLE_s --300s is only an arbitrary rule for smaller memory servers (<16gb), for larger, it should be baselined and measured. 10 | , 'Churn (MB/s)' = cast((si.Total_Server_Memory_GB*1024.)/NULLIF(p.PLE_s,0) as decimal(19,2)) 11 | , OS_Available_physical_mem_GB = (SELECT cast(available_physical_memory_kb / 1024. / 1024. 
as decimal(19,2)) from sys.dm_os_sys_memory) 12 | , SQL_Physical_memory_in_use_GB = (SELECT cast(physical_memory_in_use_kb / 1024. / 1024. as decimal(19,2)) from sys.dm_os_process_memory) 13 | , si.Total_Server_Memory_GB , si.Target_Server_Memory_GB 14 | , Target_vs_Total = CASE WHEN si.Total_Server_Memory_GB < si.Target_Server_Memory_GB 15 | THEN 'Target >= Total. SQL wants more memory than it has, or is building up to that point.' 16 | ELSE 'Total >= Target. SQL has enough memory to do what it wants.' END 17 | FROM (SELECT InstanceName = @@SERVERNAME 18 | , PLE_s = case when object_name like '%Buffer Manager%' and counter_name = 'Page life expectancy' then cntr_value end --This only looks at the overall buffer pool, not individual NUMA nodes. https://www.sqlskills.com/blogs/paul/page-life-expectancy-isnt-what-you-think/ 19 | FROM sys.dm_os_performance_counters 20 | WHERE object_name like '%Buffer Manager%' and counter_name = 'Page life expectancy' 21 | ) as p 22 | cross apply (SELECT Min_Server_Mem_MB = max(case when name = 'min server memory (MB)' then convert(bigint, value_in_use) end) 23 | , Max_Server_Mem_MB = max(case when name = 'max server memory (MB)' then convert(bigint, value_in_use) end) 24 | FROM sys.configurations 25 | ) as i 26 | cross apply (SELECT sqlserver_start_time 27 | , OS_Physical_Mem_MB = convert(bigint, physical_memory_kb /1024.) 28 | , Total_Server_Memory_GB = convert(decimal(19,3), committed_kb / 1024. / 1024.) 29 | , Target_Server_Memory_GB = convert(decimal(19,3), committed_target_kb / 1024. / 1024.) 30 | FROM sys.dm_os_sys_info 31 | ) as si; 32 | 33 | --For LPIM check toolbox\lock Pages in Memory LPIM.sql 34 | --For CPU infor check toolbox\cpu utilization.sql -------------------------------------------------------------------------------- /permissions for readonly admin accounts.sql: -------------------------------------------------------------------------------- 1 | USE master 2 | --create login [Sparkhound\DB Administrators-Readonly] FROM WINDOWS 3 | GO 4 | GRANT ALTER TRACE TO [Sparkhound\DB Administrators-Readonly] 5 | GRANT ALTER ANY EVENT SESSION TO [Sparkhound\DB Administrators-Readonly] 6 | GRANT VIEW ANY DEFINITION TO [Sparkhound\DB Administrators-Readonly] 7 | GRANT SHOWPLAN TO [Sparkhound\DB Administrators-Readonly] 8 | 9 | --For DMV's 10 | GRANT VIEW SERVER STATE TO [Sparkhound\DB Administrators-Readonly] 11 | exec sp_msforeachdb 'use [?]; GRANT VIEW DATABASE STATE TO [Sparkhound\DB Administrators-Readonly]' 12 | 13 | 14 | --For the Error Log 15 | --However, securityadmin is virtually the same as sysadmin, since you can make yourself a sysadmin. 16 | --https://social.msdn.microsoft.com/Forums/en-US/11efe32b-1af5-44da-bbf7-e183e5341f2c/grant-access-to-view-sql-server-logs-from-sql-server-management-studio?forum=sqlsecurity' 17 | --There is no permission short of securityadmin/sysadmin and xp_readerrorlog that allows SSMS to view logs. 
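--NOTE: weigh that tradeoff before running the next two statements; securityadmin membership effectively gives this group a path to sysadmin.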
18 | ALTER SERVER ROLE [securityadmin] ADD MEMBER [Sparkhound\DB Administrators-Readonly] 19 | GRANT EXECUTE ON xp_readerrorlog TO [Sparkhound\DB Administrators-Readonly]; 20 | GO 21 | 22 | --For SQL Agent jobs 23 | USE msdb 24 | GO 25 | CREATE USER [Sparkhound\DB Administrators-Readonly] FOR LOGIN [Sparkhound\DB Administrators-Readonly] 26 | ALTER ROLE [SQLAgentReaderRole] ADD MEMBER [Sparkhound\DB Administrators-Readonly] 27 | ALTER ROLE [SQLAgentOperatorRole] ADD MEMBER [Sparkhound\DB Administrators-Readonly] 28 | GRANT SELECT TO [Sparkhound\DB Administrators-Readonly] 29 | GRANT EXECUTE ON sysmail_help_profile_sp TO [Sparkhound\DB Administrators-Readonly] -------------------------------------------------------------------------------- /query store troubleshooting.sql: -------------------------------------------------------------------------------- 1 | --IF Query Store is found to be in an ERROR STATE 2 | --This is uncommon 3 | SELECT actual_state_desc, desired_state_desc, current_storage_size_mb, 4 | [max_storage_size_mb], readonly_reason, [interval_length_minutes], 5 | stale_query_threshold_days, size_based_cleanup_mode_desc, 6 | query_capture_mode_desc 7 | FROM sys.database_query_store_options; 8 | 9 | 10 | /* 11 | --First, try manually setting Query_Store to Read_Write then waiting for the flush to pass. It may show READ/WRITE again only until it fails again. 12 | 13 | ALTER DATABASE [WhateverDB] 14 | SET QUERY_STORE (OPERATION_MODE = READ_WRITE); 15 | 16 | SELECT actual_state_desc, desired_state_desc, current_storage_size_mb, 17 | [max_storage_size_mb], readonly_reason, [interval_length_minutes], 18 | stale_query_threshold_days, size_based_cleanup_mode_desc, 19 | query_capture_mode_desc 20 | FROM sys.database_query_store_options; 21 | 22 | 23 | 24 | --If that doesn't cause READ/WRITE mode to stick, then we'll have to wipe the Query Store. 25 | 26 | USE WhateverDB 27 | --Unfortunately clearing the Query Store is necessary. 28 | ALTER DATABASE [WhateverDB] SET QUERY_STORE CLEAR; 29 | --Disable the query store 30 | ALTER DATABASE [WhateverDB] SET QUERY_STORE = OFF; 31 | 32 | 33 | exec dbo.sp_query_store_consistency_check; 34 | 35 | ALTER DATABASE [WhateverDB] 36 | SET QUERY_STORE (OPERATION_MODE = READ_WRITE); 37 | 38 | SELECT actual_state_desc, desired_state_desc, current_storage_size_mb, 39 | [max_storage_size_mb], readonly_reason, [interval_length_minutes], 40 | stale_query_threshold_days, size_based_cleanup_mode_desc, 41 | query_capture_mode_desc 42 | FROM sys.database_query_store_options; 43 | 44 | 45 | 46 | */ 47 | 48 | --FROM https://docs.microsoft.com/en-us/sql/relational-databases/performance/best-practice-with-the-query-store?view=sql-server-ver15 49 | 50 | -------------------------------------------------------------------------------- /refresh view metadata.sql: -------------------------------------------------------------------------------- 1 | 2 | 3 | PRINT N'Refreshing views.'; 4 | 5 | DECLARE @TSQL nvarchar(4000) 6 | DECLARE RefreshViewMetadata CURSOR FAST_FORWARD 7 | FOR 8 | select TSQL = 'exec sp_refreshview N''' +s.name + '.' 
+ o.name + '''' 9 | from sys.views o 10 | inner join sys.schemas s on o.schema_id = s.schema_id 11 | inner join sys.sql_modules m on m.object_id = o.object_id 12 | where o.type_desc = 'view' 13 | and o.is_ms_shipped = 0 14 | and m.definition not like '%schemabinding%' 15 | order by s.name, o.name 16 | OPEN RefreshViewMetadata 17 | FETCH NEXT FROM RefreshViewMetadata INTO @TSQL 18 | WHILE @@FETCH_STATUS = 0 19 | BEGIN 20 | --print @TSQL; 21 | BEGIN TRY 22 | BEGIN TRAN vwRefresh 23 | exec sp_executesql @TSQL; 24 | COMMIT TRAN vwRefresh 25 | END TRY 26 | BEGIN CATCH 27 | print @TSQL 28 | print ERROR_MESSAGE() 29 | ROLLBACK TRAN vwRefresh 30 | END CATCH 31 | FETCH NEXT FROM RefreshViewMetadata INTO @TSQL 32 | END 33 | CLOSE RefreshViewMetadata; 34 | DEALLOCATE RefreshViewMetadata; 35 | -------------------------------------------------------------------------------- /rename SQL instance.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Rename SQL Server Instance 3 | 4 | When the physical server is renamed, this script must be executed to rename the SQL Server Instance. 5 | 6 | Change the Variables to the correct information, and then execute the script. 7 | The results will show the old setting and the changed setting so you can verify the 8 | 9 | */ 10 | 11 | SELECT SERVERPROPERTY('MachineName') [Machine Name],@@SERVERNAME [SQL Instance] 12 | 13 | ----------------------------------------------------------------------------- 14 | --- PLEASE CHANGE THE FOLLOWING VARIABLES BEFORE EXECUTION 15 | DECLARE @Current_SQLServer_Name sysname = '' --- Old Physical name or Current SQL Instance Name 16 | DECLARE @New_SQLServer_Name sysname = '' -- New Physical name or New SQL Instance Name 17 | 18 | 19 | ----------------------------------------------------------------------------- 20 | --EXEC sp_dropserver @server=@Current_SQLServer_Name 21 | --EXEC sp_addserver @server=@New_SQLServer_Name, @Local='local' 22 | --GO 23 | 24 | 25 | SELECT SERVERPROPERTY('MachineName') [Machine Name],@@SERVERNAME [SQL Instance] 26 | -------------------------------------------------------------------------------- /reports catalog metadata.sql: -------------------------------------------------------------------------------- 1 | 2 | ;WITH cteCatalog ([Path], Name, [Description], CreationDate, ModifiedDate, AverageRowCount, AverageDuration_sec, XML, row) 3 | as ( 4 | SELECT 5 | [Path], Name, [Description], CreationDate, ModifiedDate, AverageRowCount = AVG(e.[RowCount]) OVER (Partition By [Path], Name) 6 | , AverageDuration_sec = (AVG(e.timedataretrieval + e.timeprocessing + e.TimeRendering) OVER (Partition By [Path], Name))/1000.0 7 | , XML = CONVERT(XML,C.Parameter) 8 | , Row = ROW_NUMBER () OVER (Partition BY Path, Name order by path, name) 9 | from Catalog c 10 | inner join ExecutionLog2 e 11 | on e.ReportPath = c.Path 12 | ) 13 | SELECT distinct 14 | [Path], Name, [Description], CreationDate, ModifiedDate, AverageRowCount 15 | , AverageDuration_sec = round(AverageDuration_sec,2) 16 | , Param_Name = ParamXML.value('Name[1]', 'VARCHAR(250)') 17 | , Param_DataType = ParamXML.value('Type[1]', 'VARCHAR(250)') 18 | , Param_Nullable = ParamXML.value('Nullable[1]', 'VARCHAR(250)') 19 | , Param_AllowBlank = ParamXML.value('AllowBlank[1]', 'VARCHAR(250)') 20 | , Param_MultiValue = ParamXML.value('MultiValue[1]', 'VARCHAR(250)') 21 | , Param_UsedInQuery = ParamXML.value('UsedInQuery[1]', 'VARCHAR(250)') 22 | , Param_Prompt = ParamXML.value('Prompt[1]', 'VARCHAR(250)') 23 | , 
Param_DynamicPrompt = ParamXML.value('DynamicPrompt[1]', 'VARCHAR(250)') 24 | , Param_PromptUser = ParamXML.value('PromptUser[1]', 'VARCHAR(250)') 25 | , Param_State = ParamXML.value('State[1]', 'VARCHAR(250)') 26 | FROM cteCatalog c 27 | CROSS APPLY c.XML.nodes('//Parameters/Parameter') p ( ParamXML ) 28 | WHERE c.row = 1 29 | order by AverageDuration_sec desc -------------------------------------------------------------------------------- /restore.sql: -------------------------------------------------------------------------------- 1 | --Basic Restore template 2 | 3 | USE [master] 4 | GO 5 | 6 | --RESTORE FILELISTONLY FROM DISK = N'C:\BACKUPS\whatever.bak' 7 | go 8 | ALTER DATABASE whatever SET SINGLE_USER WITH ROLLBACK IMMEDIATE 9 | GO 10 | RESTORE DATABASE [whatever] FROM DISK = N'C:\backups\whatever.bak' 11 | WITH FILE = 1, NOUNLOAD, REPLACE, STATS = 5, 12 | MOVE 'whatever' to 'e:\SQLDATA\whatever.mdf', 13 | MOVE 'whatever_log' to 'e:\SQLDATA\whatever.ldf' 14 | GO 15 | ALTER DATABASE whatever SET MULTI_USER 16 | GO 17 | 18 | /* 19 | Next: 20 | Check compatibility mode and other database options? 21 | Enable Query Store? 22 | fix orpahned sids.sql 23 | */ 24 | 25 | 26 | /* 27 | --Example: Restore WWI sample DB 28 | USE [master] 29 | --https://github.com/Microsoft/sql-server-samples/tree/master/samples/databases/wide-world-importers 30 | ALTER DATABASE [WideWorldImporters] SET SINGLE_USER WITH ROLLBACK IMMEDIATE 31 | GO 32 | RESTORE DATABASE [WideWorldImporters] 33 | FROM DISK = N'E:\Program Files\Microsoft SQL Server\MSSQL15.SQL2K19\MSSQL\Backup\WideWorldImporters-Full.bak' 34 | WITH FILE = 1 35 | , MOVE N'WWI_Primary' TO N'E:\Program Files\Microsoft SQL Server\MSSQL15.SQL2K19\MSSQL\DATA\WideWorldImporters.mdf' 36 | , MOVE N'WWI_UserData' TO N'E:\Program Files\Microsoft SQL Server\MSSQL15.SQL2K19\MSSQL\DATA\WideWorldImporters_UserData.ndf' 37 | , MOVE N'WWI_Log' TO N'E:\Program Files\Microsoft SQL Server\MSSQL15.SQL2K19\MSSQL\DATA\WideWorldImporters.ldf' 38 | , MOVE N'WWI_InMemory_Data_1' TO N'E:\Program Files\Microsoft SQL Server\MSSQL15.SQL2K19\MSSQL\DATA\WideWorldImporters_InMemory_Data_1', NOUNLOAD, STATS = 5 39 | GO 40 | ALTER DATABASE [WideWorldImporters] SET MULTI_USER 41 | GO 42 | ALTER DATABASE [WideWorldImporters] SET COMPATIBILITY_LEVEL = 150 43 | GO 44 | 45 | --Azure VM 46 | Example: Restore WWI sample DB 47 | USE [master] 48 | --https://github.com/Microsoft/sql-server-samples/tree/master/samples/databases/wide-world-importers 49 | ALTER DATABASE [WideWorldImporters] SET SINGLE_USER WITH ROLLBACK IMMEDIATE 50 | GO 51 | RESTORE DATABASE [WideWorldImporters] 52 | FROM DISK = N'C:\Program Files\Microsoft SQL Server\MSSQL15.MSSQLSERVER\MSSQL\Backup\WideWorldImporters-Full.bak' 53 | WITH FILE = 1 54 | , MOVE N'WWI_Primary' TO N'f:\DATA\WideWorldImporters.mdf' 55 | , MOVE N'WWI_UserData' TO N'f:\DAta\WideWorldImporters_UserData.ndf' 56 | , MOVE N'WWI_Log' TO N'f:\Data\WideWorldImporters.ldf' 57 | , MOVE N'WWI_InMemory_Data_1' TO N'f:\DATA\WideWorldImporters_InMemory_Data_1', NOUNLOAD, STATS = 5 58 | GO 59 | ALTER DATABASE [WideWorldImporters] SET MULTI_USER 60 | GO 61 | ALTER DATABASE [WideWorldImporters] SET COMPATIBILITY_LEVEL = 150 62 | GO 63 | 64 | 65 | */ -------------------------------------------------------------------------------- /sas credential.sql: -------------------------------------------------------------------------------- 1 | --Create an SAS credential 2 | 3 | --A PS script to create the storage account and SAS keys is here: 
https://docs.microsoft.com/en-us/sql/relational-databases/backup-restore/sql-server-backup-to-url?view=sql-server-2017#Examples 4 | 5 | --But if the storage account already exists, how to get key using Azure Storage Explorer? 6 | --Click on "Get Shared Access Signature..." key for the container. Specify permissions and a far-off expiration date. 7 | --The Secret is the Query String minus the leading ? 8 | --From the Azure portal, use the Shared Access Signature page of the storage account, "Generate SAS and Connection string", then use the SAS Token minus the leading ?. 9 | 10 | --drop credential [https://container.blob.core.windows.net/folder] 11 | GO 12 | 13 | CREATE CREDENTIAL [https://container.blob.core.windows.net/folder] --No trailing /, folder name should be included, folder name must not include a hyphen. 14 | WITH IDENTITY='Shared Access Signature' 15 | , SECRET='sv=2018-03-28&ss=bfqt&srt=sco&sp=rwdlacup&se=2099-08-19T23:56:04Z&st=2019-08-19T15:56:04Z&spr=https&sig=ZWHPwhateverD'; --this is a sample only 16 | 17 | --IMPORTANT: Backup up this CREDENTIAL creation script here once created!! -------------------------------------------------------------------------------- /schema dependencies.sql: -------------------------------------------------------------------------------- 1 | 2 | SELECT DISTINCT 3 | referenced_database_name = db_name() 4 | , referencing_entity_name = s_ing.name + '.' + OBJECT_NAME(referencing_id) 5 | , referencing_type_desc = o_ing.type_desc + CASE WHEN cc.is_persisted = 1 THEN ' PERSISTED' ELSE '' END + CASE WHEN cc.column_id is not null THEN ' COMPUTED COLUMN' ELSE '' END 6 | , referencing_minor_object = case WHEN REFERENCING_CLASS_DESC = 'OBJECT_OR_COLUMN' THEN COALESCE(COL_NAME(referencing_id, referencing_minor_id), '(n/a)') 7 | WHEN REFERENCING_CLASS_DESC = 'INDEX' THEN i.name 8 | ELSE '(n/a)' 9 | END 10 | , referencing_class_desc = CASE WHEN referencing_class_desc = 'INDEX' and i.has_filter = 1 THEN 'FILTERED INDEX' ELSE referenced_class_desc END 11 | , referenced_class_desc 12 | , referenced_server_name = isnull(referenced_server_name, @@SERVERNAME) 13 | , referenced_database_name = isnull(referenced_database_name, db_name()) 14 | --, referenced_schema_name = isnull(referenced_schema_name, 'dbo') 15 | , referenced_entity_name = ISNULL(s_ed.name + '.','') + referenced_entity_name 16 | , referenced_type_desc = ISNULL(o_ed.type_desc, CASE WHEN sed.is_ambiguous = 1 THEN 'reference is ambiguous, resolved at runtime' ELSE 'remote object type not available' END) 17 | , referenced_column_name = COALESCE(COL_NAME(referenced_id, referenced_minor_id), '(n/a)') 18 | --, is_caller_dependent, is_ambiguous 19 | ,* 20 | FROM sys.sql_expression_dependencies AS sed 21 | INNER JOIN sys.objects AS o_ing ON sed.referencing_id = o_ing.object_id 22 | LEFT OUTER JOIN sys.objects AS o_ed ON sed.referenced_id = o_ed.object_id 23 | LEFT OUTER JOIN Sys.computed_columns AS cc on cc.object_id = o_ing.object_id and cc.column_id = sed.referencing_minor_id 24 | INNER JOIN sys.schemas s_ing on s_ing.schema_id = o_ing.schema_id 25 | LEFT OUTER JOIN sys.schemas s_ed on s_ed.schema_id = o_ed.schema_id 26 | LEFT OUTER JOIN sys.indexes i on i.index_id = sed.referencing_minor_id and sed.referencing_class_desc = 'INDEX' and i.object_id = o_ing.object_id 27 | --where s_ing.name + '.' 
+ OBJECT_NAME(referencing_id) = 'dbo.SH_MSRS_REPORTTITLE' 28 | order by sed.referenced_entity_name, referencing_entity_name 29 | GO 30 | --reference: https://docs.microsoft.com/en-us/sql/relational-databases/system-catalog-views/sys-sql-expression-dependencies-transact-sql?view=sql-server-ver15 31 | --inspired by https://docs.microsoft.com/en-us/sql/relational-databases/stored-procedures/view-the-dependencies-of-a-stored-procedure?view=sql-server-ver15 32 | 33 | -------------------------------------------------------------------------------- /security check msdb role members.sql: -------------------------------------------------------------------------------- 1 | use msdb 2 | go 3 | select user_name = dpu.name, role_name = dpr.name 4 | from msdb.sys.database_role_members drm 5 | left outer join msdb.sys.database_principals dpu on dpu.principal_id = drm.member_principal_id 6 | left outer join msdb.sys.database_principals dpr on dpr.principal_id = drm.role_principal_id 7 | where dpr.name = 'SQLAgentOperatorRole' -------------------------------------------------------------------------------- /security check sysadmin members.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Individual users, not groups, have been added to the sysadmin server role. 3 | It is more desirable to have AD security groups, not individual accounts (even adm accounts) have access to the SQL Server via this role. 4 | Suggest creating a SQL DBA or DB Admins group for SQL Server admins in the organization instead. 5 | */ 6 | 7 | select sp.name, sr.name, * from sys.server_principals sp 8 | inner join sys.server_role_members srm on sp.principal_id = srm.member_principal_id 9 | inner join sys.server_principals sr on srm.role_principal_id = sr.principal_id 10 | where (sp.name = 'BUILTIN\Administrators' --This should not be there after SQL 2005 11 | or sp.type_desc = 'WINDOWS_LOGIN' or sp.type_desc = 'SQL_LOGIN') --ignores Security Groups, only Windows or SQL individual accounts 12 | and sr.name in ('sysadmin','securityadmin') --securityadmin should be guarded just as much as sysadmin 13 | and sp.name not like 'NT SERVICE\%' 14 | and sp.name not like 'NT AUTHORITY\%' 15 | and sp.principal_id > 1 --ignore the sa account 16 | --check for common naming conventions around service accounts 17 | and sp.name not like '%svc%' -------------------------------------------------------------------------------- /security group members.sql: -------------------------------------------------------------------------------- 1 | --Get members of a windows security group from within SQL 2 | --Returns members but NOT subgroups! There isn't a way in SQL to see subgroups. 3 | EXEC master..xp_logininfo 4 | @acctname = 'domain\groupname', 5 | @option = 'members' 6 | 7 | --Instead, use PowerShell: 8 | --Get-ADGroupMember -identity 'Development' -recursive | select name 9 | 10 | --or the windows plugin "Active Directory Users and Computers" -------------------------------------------------------------------------------- /services dmv.sql: -------------------------------------------------------------------------------- 1 | -- This works for SQL 2008 R2 SP1 and above only 2 | SELECT servicename -- Ex: SQL Server (SQL2K8R2) 3 | , startup_type_desc -- Manual, Automatic 4 | , status_desc -- Running, Stopped, etc. 
5 | , process_id 6 | , last_startup_time -- datetime 7 | , service_account 8 | , filename 9 | , is_clustered -- Y/N 10 | , cluster_nodename 11 | FROM sys.dm_server_services 12 | 13 | --The Browser is NOT listed 14 | 15 | 16 | /* 17 | -- This works prior to SQL 2008 R2 SP1 18 | DECLARE @DBEngineLogin VARCHAR(100) 19 | DECLARE @AgentLogin VARCHAR(100) 20 | EXECUTE master.dbo.xp_instance_regread 21 | @rootkey = N'HKEY_LOCAL_MACHINE', 22 | @key = N'SYSTEM\CurrentControlSet\Services\MSSQLServer', 23 | @value_name = N'ObjectName', 24 | @value = @DBEngineLogin OUTPUT 25 | EXECUTE master.dbo.xp_instance_regread 26 | @rootkey = N'HKEY_LOCAL_MACHINE', 27 | @key = N'SYSTEM\CurrentControlSet\Services\SQLServerAgent', 28 | @value_name = N'ObjectName', 29 | @value = @AgentLogin OUTPUT 30 | SELECT [DBEngineLogin] = @DBEngineLogin, [AgentLogin] = @AgentLogin 31 | GO 32 |   33 | */ 34 | 35 | -------------------------------------------------------------------------------- /sessions and requests - SQL2000.sql: -------------------------------------------------------------------------------- 1 | use master 2 | go 3 | declare @sql_handle binary(20) 4 | select *, DBName = db_name(dbid) from sys.sysprocesses sysprc 5 | where spid <> @@SPID 6 | --and db_name(dbid) = 'LPMS_BE' 7 | order by blocked desc, spid asc 8 | 9 | declare cursyssysprocesses cursor fast_forward for 10 | select sql_handle from sys.sysprocesses 11 | where spid >= 50 12 | and sql_handle <> convert(binary(20), 0x0000000000000000000000000000000000000000)--status='runnable'; 13 | and spid <> @@SPID 14 | order by spid 15 | 16 | open cursyssysprocesses; 17 | fetch next from cursyssysprocesses into @sql_handle; 18 | while (@@FETCH_STATUS =0) 19 | BEGIN 20 | select 21 | spid 22 | , sql_handle 23 | , b.hostname 24 | , c.name 25 | , b.program_name 26 | , b.loginame 27 | , b.spid 28 | , getsql.text 29 | , getsql.objectid 30 | , DatabaseName = db_name(getsql.dbid) 31 | , getsql.dbid 32 | from sys.fn_get_sql(@sql_handle) getsql 33 | cross join sysprocesses b 34 | inner join sys.sysdatabases c on c.dbid=b.dbid 35 | where b.sql_handle =@sql_handle 36 | fetch next from cursyssysprocesses into @sql_handle; 37 | END 38 | close cursyssysprocesses 39 | deallocate cursyssysprocesses 40 | -------------------------------------------------------------------------------- /sessions and requests - dump to table.sql: -------------------------------------------------------------------------------- 1 | -- See also toolbox\blocked processes report xevents.sql to capture blocking chains using the blocked process report. 
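-- This script only (re)creates the dbo.SessionsAndRequestsLog table in the DBALogging database; a separate collection query (for example, a scheduled run of sessions and requests.sql) is assumed to populate it.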
2 | 3 | USE DBALogging 4 | GO 5 | DROP TABLE 6 | --this syntax is 2016 only 7 | IF EXISTS 8 | [dbo].[SessionsAndRequestsLog] 9 | go 10 | 11 | CREATE TABLE [dbo].[SessionsAndRequestsLog]( 12 | [timestamp] [datetimeoffset](7) NOT NULL, 13 | [session_id] [smallint] NOT NULL, 14 | [host_name] [nvarchar](256) NULL, 15 | [program_name] [nvarchar](256) NULL, 16 | [session_status] [nvarchar](60) NULL, 17 | [request_status] [nvarchar](60) NULL, 18 | [request_id] [int] NULL, 19 | [blocking_these] [varchar](1000) NULL, 20 | [blocked_by] [smallint] NULL, 21 | [wait_type] [nvarchar](120) NULL, 22 | [wait_resource] [nvarchar](120) NULL, 23 | [last_wait_type] [nvarchar](120) NULL, 24 | [DBName] [nvarchar](128) NULL, 25 | [objectid] [int] NULL, 26 | [command] [nvarchar](32) NULL, 27 | [login_time] [datetime] NOT NULL, 28 | [login_name] [nvarchar](256) NULL, 29 | [client_interface_name] [nvarchar](64) NULL, 30 | [request_start_time] [datetime] NULL, 31 | [tot_time_s] [decimal](19, 2) NULL, 32 | [wait_time_s] [decimal](19, 2) NULL, 33 | [cpu_time_s] [decimal](19, 2) NULL, 34 | [reads] [bigint] NULL, 35 | [writes] [bigint] NULL, 36 | [logical_reads] [bigint] NULL, 37 | [percent_complete] [decimal](9, 4) NULL, 38 | [estimated_remaining_time_HHMMSS] [varchar](31) NULL, 39 | [offsettext] [nvarchar](max) NULL, 40 | [Input_Buffer_Text_Event_Info] [nvarchar](max) NULL, 41 | [Input_Buffer_Event_Type] [nvarchar](256) NULL, 42 | [cacheobjtype] [nvarchar](35) NULL, 43 | [QueryPlan] [xml] NULL, 44 | [request_transaction_isolation_level] [varchar](15) NULL, 45 | [session_transaction_isolation_level] [varchar](15) NULL, 46 | [plan_handle] [varbinary](64) NULL, 47 | [plan_execution_count] [bigint] NULL, 48 | [plan_total_worker_time_s] [numeric](30, 11) NULL, 49 | [plan_last_worker_time_s] [numeric](30, 11) NULL, 50 | [plan_total_elapsed_time_s] [numeric](30, 11) NULL, 51 | [plan_last_elapsed_time_s] [numeric](30, 11) NULL, 52 | [plan_total_physical_reads] [bigint] NULL, 53 | [plan_total_logical_writes] [bigint] NULL, 54 | [plan_total_logical_reads] [bigint] NULL, 55 | [Governor_Group_Name] [sysname] NULL, 56 | [Governor_Group_ID] [int] NULL, 57 | [Governor_Pool_Name] [sysname] NULL, 58 | [Governor_Pool_ID] [int] NULL, 59 | [EndPointName] [sysname] NULL, 60 | [Protocol] [nvarchar](120) NULL, 61 | [Outstanding_TempDB_Session_Internal_Alloc_pages] [bigint] NULL, 62 | [Outstanding_TempDB_Session_User_Alloc_pages] [bigint] NULL, 63 | [Outstanding_TempDB_Task_Internal_Alloc_pages] [bigint] NULL, 64 | [Outstanding_TempDB_Task_User_Alloc_pages] [bigint] NULL, 65 | [total_rows] [bigint] NULL, 66 | [last_rows] [bigint] NULL 67 | ) ON [PRIMARY] 68 | GO 69 | 70 | -------------------------------------------------------------------------------- /sessions and requests blocking chain.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SparkhoundSQL/sql-server-toolbox/51636a22fe7db848cc12473c8083e2753bb6ab93/sessions and requests blocking chain.jpg -------------------------------------------------------------------------------- /sharepoint databases.ps1: -------------------------------------------------------------------------------- 1 | #Must run this via RDP into the App or Web SharePoint server in the farm, maybe not the same as the SQL Server 2 | #Launch the "SharePoint 201x Management Shell" as Administrator 3 | #This script overwrites C:\Content_Inventory.csv 4 | 5 | Get-SPDatabase | Sort-Object Name | Select-Object Name, Type, @{Label ="Size in MB"; Expression = 
{$_.disksizerequired/1024/1024}} | Export-CSV -Path C:\Content_Inventory.csv -NoTypeInformation 6 | 7 | #Below only works with PS Remoting is enabled. 8 | $SharePoint_App_or_WFE_servername = "sh-sp2013-app1.sparkhound.com" 9 | Invoke-Command -script { 10 | Get-SPDatabase | Sort-Object Name | Select-Object Name, Type, @{Label ="Size in MB"; Expression = {$_.disksizerequired/1024/1024}} | Export-CSV -Path C:\Content_Inventory.csv -NoTypeInformation 11 | } -ComputerName $SharePoint_App_or_WFE_servername 12 | 13 | 14 | <# 15 | #Use new sharepoint modules 16 | #This section just a stub 17 | 18 | #Get latest gallery 19 | Install-Module -Name PowerShellGet -Force 20 | #Get get SharePoint modules 21 | Install-Module -Name SharePointDSC -Force -AllowClobber 22 | Import-Module -Name SharePointDSC -Force 23 | Get-DscResource -Module SharePointDsc 24 | #> 25 | 26 | 27 | -------------------------------------------------------------------------------- /size database files.sql: -------------------------------------------------------------------------------- 1 | -- File names and paths for TempDB and all user databases in instance (Query 21) (Database Filenames and Paths) 2 | SELECT DB_NAME([database_id]) AS [Database Name], 3 | [file_id], name, physical_name, type_desc, state_desc, 4 | is_percent_growth, growth, 5 | CONVERT(bigint, growth/128.0) AS [Growth in MB], 6 | CONVERT(bigint, size/128.0) AS [Total Size in MB] 7 | FROM sys.master_files WITH (NOLOCK) 8 | ORDER BY DB_NAME([database_id]) OPTION (RECOMPILE); 9 | 10 | -- Things to look at: 11 | -- Are data files and log files on different drives? 12 | -- Is everything on the C: drive? 13 | -- Is TempDB on dedicated drives? 14 | -- Is there only one TempDB data file? 15 | -- Are all of the TempDB data files the same size? 16 | -- Are there multiple data files for user databases? 17 | -- Is percent growth enabled for any files (which is bad)? 
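-- A minimal follow-up sketch for two of the checks above (the thresholds are arbitrary assumptions):
-- flag files that still use percent growth, or a fixed growth increment of 1 MB or less.
SELECT DB_NAME([database_id]) AS [Database Name],
       name, type_desc, is_percent_growth,
       CONVERT(bigint, growth/128.0) AS [Growth in MB]
FROM sys.master_files WITH (NOLOCK)
WHERE is_percent_growth = 1
   OR (is_percent_growth = 0 AND growth > 0 AND growth <= 128) -- growth is stored in 8 KB pages; 128 pages = 1 MB
ORDER BY DB_NAME([database_id]) OPTION (RECOMPILE);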
-------------------------------------------------------------------------------- /size in memory.sql: -------------------------------------------------------------------------------- 1 | use w 2 | go 3 | select top 100 objectname, indexname, [object_id], index_id, Buffer_MB = SUM(Buffer_MB) 4 | from 5 | ( 6 | SELECT 7 | objectname = obj.[name], 8 | indexname = i.[name], 9 | obj.[object_id], 10 | i.[index_id], 11 | i.[type_desc], 12 | --count(*)AS Buffered_Page_Count , 13 | count(*) * 8192.0 / (1024.0 * 1024.0) as Buffer_MB 14 | -- ,obj.name ,obj.index_id, i.[name] 15 | FROM sys.dm_os_buffer_descriptors AS bd 16 | INNER JOIN 17 | ( 18 | SELECT object_name(object_id) AS name 19 | ,index_id ,allocation_unit_id, object_id 20 | FROM sys.allocation_units AS au 21 | INNER JOIN sys.partitions AS p 22 | ON au.container_id = p.hobt_id 23 | ) AS obj 24 | ON bd.allocation_unit_id = obj.allocation_unit_id 25 | LEFT OUTER JOIN sys.indexes i on i.object_id = obj.object_id AND i.index_id = obj.index_id 26 | 27 | WHERE database_id = db_id() 28 | 29 | 30 | GROUP BY obj.name, obj.index_id , i.[name],i.[type_desc], obj.[object_id], i.index_id 31 | ) x 32 | GROUP BY objectname, indexname, [type_desc], [object_id], index_id 33 | order by Buffer_MB desc 34 | -------------------------------------------------------------------------------- /size.sql: -------------------------------------------------------------------------------- 1 | --Misc queries on database/table size 2 | 3 | --Database files on disk 4 | --Does not work in Azure SQL DB 5 | select d.name, Current_Size_mb = (size*8.)/1024., * from sys.master_files mf 6 | inner join sys.databases d 7 | on mf.database_id = d.database_id 8 | order by Current_Size_mb desc 9 | GO 10 | 11 | --Size of files in current database 12 | select df.name, Current_Size_mb = df.size *8./1024. 13 | from sys.database_files df 14 | order by df.name 15 | 16 | --Tables in current database 17 | --return the number of rows in a table without doing a scan 18 | select tablename 19 | , total_size_mb = SUM(sizemb) -- size of all objects combined 20 | , row_count = sum(case when index_id <= 1 THEN row_count ELSE 0 END) --get rowcount in table from heap or clustered index only 21 | from ( 22 | select 23 | SizeMb= (p.reserved_page_count*8.)/1024. 24 | , tablename = '[' + s.name + '].[' + o.name + ']' 25 | , indexname = i.name, p.row_count 26 | , i.index_id 27 | from sys.dm_db_partition_stats p 28 | inner join sys.objects o on p.object_id = o.object_id 29 | inner join sys.schemas s on s.schema_id = o.schema_id 30 | inner join sys.indexes i on i.object_id = o.object_id and i.index_id = p.index_id 31 | where o.is_ms_shipped = 0 32 | ) x 33 | group by tablename 34 | order by total_size_mb desc 35 | 36 | --Index/partitions in current database 37 | select 38 | tablename = '[' + s.name + '].[' + o.name + ']' 39 | , indexname = i.name 40 | , i.index_id 41 | , SizeMb= (p.reserved_page_count*8.)/1024.
42 | , p.in_row_data_page_count 43 | , p.in_row_used_page_count 44 | , p.reserved_page_count 45 | , p.lob_used_page_count 46 | , p.lob_reserved_page_count 47 | , p.row_overflow_used_page_count 48 | , p.row_overflow_reserved_page_count 49 | , p.used_page_count 50 | , p.reserved_page_count 51 | , p.row_count 52 | , pr.data_compression_desc 53 | , p.partition_number 54 | , rebuildcompress = 55 | CASE WHEN pr.data_compression_desc = 'columnstore' THEN NULL ELSE 56 | 'ALTER INDEX [' + i.name + '] ON [' + s.name + '].[' + o.name + '] REBUILD ' + 57 | CASE WHEN MAX(p.partition_number) OVER (PARTITION by i.name) > 1 THEN 58 | 'PARTITION = ' + cast(p.partition_number as varchar(5)) ELSE '' END + 59 | ' WITH (SORT_IN_TEMPDB = ON 60 | , DATA_COMPRESSION = PAGE) ' + CHAR(10) + CHAR(13) 61 | END 62 | from sys.dm_db_partition_stats p 63 | inner join sys.partitions pr on p.partition_id = pr.partition_id 64 | inner join sys.objects o on p.object_id = o.object_id 65 | inner join sys.schemas s on s.schema_id = o.schema_id 66 | left outer join sys.indexes i on i.object_id = o.object_id and i.index_id = p.index_id 67 | WHERE o.is_ms_shipped = 0 68 | order by SizeMb desc 69 | 70 | -------------------------------------------------------------------------------- /sp_repldone.sql: -------------------------------------------------------------------------------- 1 | select name, log_reuse_wait, log_reuse_Wait_desc from sys.databases where name = 'DataWarehouse' 2 | go 3 | use DataWarehouse 4 | go 5 | 6 | SELECT 7 | 'DatabaseName_____________' = d.name 8 | , Recovery = d.recovery_model_desc 9 | , 'DatabaseFileName_______' = df.name 10 | , 'Location_______________________________________________________________________' = df.physical_name 11 | , df.File_ID 12 | , FileSizeMB = CAST(size/128.0 as Decimal(9,2)) 13 | , SpaceUsedMB = CAST(CAST(FILEPROPERTY(df.name, 'SpaceUsed') AS int)/128. as Decimal(9,2)) 14 | , AvailableMB = CAST(size/128.0 - CAST(FILEPROPERTY(df.name, 'SpaceUsed') AS int)/128.0 as Decimal(9,2)) 15 | , 'Free%' = CAST((((size/128.0) - (CAST(FILEPROPERTY(df.name, 'SpaceUsed') AS int)/128.0)) / (size/128.0) ) * 100. as Decimal(9,2)) 16 | FROM sys.database_files df 17 | cross apply sys.databases d 18 | where d.database_id = DB_ID() 19 | 20 | /* 21 | --If log_reuse_wait_desc = 'REPLICATION' 22 | --and the log file is growing unchecked 23 | --and you are aware of the consequences to replication with sp_repldone (especially to transactional/merge repl), then proceed. 24 | --Should be just fine if only snapshot repl. 25 | --This marks all pending transactions as having been Replicated. It could intentionally be used to tell replication to skip over transactions, not send them to subscribers. 26 | 27 | use DataWarehouse 28 | go 29 | exec sp_repldone null, null, 0,0,1 30 | GO 31 | CHECKPOINT 32 | GO 33 | 34 | */ -------------------------------------------------------------------------------- /space in files.sql: -------------------------------------------------------------------------------- 1 | --Observe space in data and log files 2 | --Pregenerated scripts to shrink and/or grow files. Do not shrink unless an unusual/emergency situation has created an overgrown log file. 
3 | --See also "vlfs analysis.sql" 4 | 5 | DECLARE @TempTable TABLE 6 | ( DatabaseName varchar(128) 7 | ,recovery_model_desc varchar(50) 8 | ,DatabaseFileName varchar(500) 9 | ,FileLocation varchar(500) 10 | ,FileId int 11 | ,type_desc varchar(50) 12 | ,FileSizeMB decimal(19,2) 13 | ,SpaceUsedMB decimal(19,2) 14 | ,AvailableMB decimal(19,2) 15 | ,FreePercent decimal(19,2) 16 | ,shrinkTSQL nvarchar(4000) 17 | ,growTSQL nvarchar(4000) 18 | ) 19 | 20 | --Optional filter for small/unused databases at bottom 21 | 22 | INSERT INTO @TempTable 23 | exec sp_MSforeachdb 'use [?]; 24 | select * 25 | , shrinkTSQL = ''USE [?]; 26 | DBCC SHRINKFILE (N''''''+ DatabaseFileName_______ COLLATE SQL_Latin1_General_CP1_CI_AS +'''''' , 0, TRUNCATEONLY)'' 27 | , growTSQL = ''ALTER DATABASE [''+DatabaseName_____________ COLLATE SQL_Latin1_General_CP1_CI_AS+''] 28 | MODIFY FILE ( NAME = N''''''+DatabaseFileName_______ COLLATE SQL_Latin1_General_CP1_CI_AS +'''''' 29 | , '' + CASE WHEN FileSizeMB < 100 THEN ''SIZE = ''+STR(FileSizeMB+64) 30 | WHEN FileSizeMB < 1000 THEN ''SIZE = ''+STR(FileSizeMB+256) 31 | WHEN FileSizeMB < 10000 THEN ''SIZE = ''+STR(FileSizeMB+1024) 32 | WHEN FileSizeMB < 40000 THEN ''SIZE = ''+STR(FileSizeMB+4092) 33 | ELSE ''SIZE = ''+STR(FileSizeMB+(FileSizeMB*.05)) END +''MB )'' 34 | FROM ( 35 | SELECT 36 | ''DatabaseName_____________'' = d.name 37 | , Recovery = d.recovery_model_desc 38 | , ''DatabaseFileName_______'' = df.name 39 | , Location = df.physical_name 40 | , File_ID = df.File_ID 41 | , df.type_desc 42 | , FileSizeMB = CAST(size/128.0 as Decimal(9,2)) 43 | , SpaceUsedMB = CAST(CAST(FILEPROPERTY(df.name, ''SpaceUsed'') AS int)/128.0 as Decimal(9,2)) 44 | , AvailableMB = CAST(size/128.0 - CAST(FILEPROPERTY(df.name, ''SpaceUsed'') AS int)/128.0 as Decimal(9,2)) 45 | , FreePercent = CAST((((size/128.0) - (CAST(FILEPROPERTY(df.name, ''SpaceUsed'') AS int)/128.0)) / (size/128.0) ) * 100. as Decimal(9,2)) 46 | 47 | FROM sys.database_files df 48 | CROSS APPLY sys.databases d 49 | WHERE d.database_id = DB_ID() 50 | AND d.is_read_only = 0 51 | AND df.size > 0) x; 52 | ' 53 | 54 | SELECT 55 | * 56 | FROM @TempTable 57 | --where [FreePercent] < 5 and FileSizeMB > 6 --Optional filter for small/unused databases 58 | 59 | ORDER BY FreePercent asc, DatabaseName, FileId 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /ssis events.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SparkhoundSQL/sql-server-toolbox/51636a22fe7db848cc12473c8083e2753bb6ab93/ssis events.sql -------------------------------------------------------------------------------- /ssisdb execution query.sql: -------------------------------------------------------------------------------- 1 | --Sometimes it is easier to query the SSISDB directly instead of using SSMS reports. 2 | --Can also be the foundation for custom dashboards, error notifications, etc. 
3 | USE SSISDB 4 | 5 | --Example 1: Query for recent errors 6 | SELECT 7 | om.message 8 | , om.message_time 9 | , em.execution_path 10 | , em.package_name 11 | , em.event_name 12 | , em.message_source_name 13 | , o.start_time 14 | , o.end_time 15 | , o.caller_name 16 | , o.server_name 17 | , o.machine_name 18 | , * 19 | FROM [SSISDB].internal.event_messages em 20 | inner join ssisdb.internal.operations o on em.operation_id =o.operation_id 21 | inner join ssisdb.internal.operation_messages om on om.operation_message_id = em.event_message_id 22 | WHERE om.message_time >= dateadd(day, -1, sysdatetime()) 23 | and event_name = 'OnError' 24 | ORDER BY om.message_time desc, o.operation_id, em.event_message_id; 25 | 26 | --Example 2: Query for rowcounts from a specific data flow over time 27 | SELECT 28 | om.message 29 | , om.message_time 30 | , em.execution_path 31 | , em.package_name 32 | , em.event_name 33 | , em.message_source_name 34 | , o.start_time 35 | , o.end_time 36 | , o.caller_name 37 | , o.server_name 38 | , o.machine_name 39 | , * 40 | FROM [SSISDB].internal.event_messages em 41 | inner join ssisdb.internal.operations o on em.operation_id =o.operation_id 42 | inner join ssisdb.internal.operation_messages om on om.operation_message_id = em.event_message_id 43 | WHERE om.message_time >= dateadd(day, -3, sysdatetime()) 44 | and event_name in ('OnInformation', 'OnProgress') 45 | and message like '%DataFlowName%rows%' 46 | and execution_path = '\Package\Package\Sequence\Data Flow\' 47 | ORDER BY om.message_time desc, o.operation_id, em.event_message_id; -------------------------------------------------------------------------------- /ssisdb package history.sql: -------------------------------------------------------------------------------- 1 | --Since in SQL 2016 individual packages can be deployed without deploying the entire project, 2 | --a query more complex than you'd think is necessary to figure out when an individual package actually changed. 3 | --Deploying a single package in a project updates the project's deployment metadata. 
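--In other words, the query below finds, for each package version_guid, the lowest project_version_lsn in which that package version appears, plus the created_time of that deployment: an approximation of when the package itself last changed.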
4 | 5 | select 6 | project_name = pr.name 7 | , package_name = pa.name 8 | , project_version_lsn 9 | , first_version_created_time = min(ov.created_time) 10 | 11 | FROM ( select version_guid, project_id, name, project_version_lsn = min(project_version_lsn) 12 | from ssisdb.internal.packages 13 | group by project_id, name, version_guid 14 | ) pa 15 | inner join [SSISDB].[internal].object_versions ov on ov.object_version_lsn = pa.project_version_lsn 16 | inner join [SSISDB].[internal].projects pr on pr.project_id = pa.project_id 17 | where 18 | pr.name = 'DataWarehouse' 19 | and pa.name = 'DimCurrency.dtsx' 20 | group by ov.object_version_lsn, pa.name, pa.project_id, project_version_lsn, pr.name 21 | ORDER BY pr.name, pa.name, project_version_lsn desc; -------------------------------------------------------------------------------- /ssrs Subscription catalog inventory.sql: -------------------------------------------------------------------------------- 1 | use reportserver 2 | go 3 | SELECT Catalog.Name AS ReportName 4 | ,'http://serverwhatever/Reports/Pages/Report.aspx?ItemPath=' + Catalog.Path + '&SelectedTabId=PropertiesTab&SelectedSubTabId=SubscriptionsTab' AS ReportSubscriptionMgrUrl 5 | ,Subscriptions.Description AS SubscriptionDescription 6 | ,Subscriptions.LastStatus 7 | ,Subscriptions.LastRunTime 8 | ,'Next Run Date' = CASE next_run_date 9 | WHEN 0 THEN null 10 | ELSE 11 | substring(convert(varchar(15),next_run_date),1,4) + '/' + 12 | substring(convert(varchar(15),next_run_date),5,2) + '/' + 13 | substring(convert(varchar(15),next_run_date),7,2) 14 | END 15 | , 'Next Run Time' = isnull(CASE len(next_run_time) 16 | WHEN 3 THEN cast('00:0' 17 | + Left(right(next_run_time,3),1) 18 | +':' + right(next_run_time,2) as char (8)) 19 | WHEN 4 THEN cast('00:' 20 | + Left(right(next_run_time,4),2) 21 | +':' + right(next_run_time,2) as char (8)) 22 | WHEN 5 THEN cast('0' + Left(right(next_run_time,5),1) 23 | +':' + Left(right(next_run_time,4),2) 24 | +':' + right(next_run_time,2) as char (8)) 25 | WHEN 6 THEN cast(Left(right(next_run_time,6),2) 26 | +':' + Left(right(next_run_time,4),2) 27 | +':' + right(next_run_time,2) as char (8)) 28 | END,'NA') 29 | ,Subscriptions.Parameters 30 | ,[ExtensionSettings] 31 | ,ISNULL( 32 | Convert(XML,[ExtensionSettings]).value('(//ParameterValue/Value[../Name="TO"])[1]','nvarchar(50)') 33 | ,Convert(XML,[ExtensionSettings]).value('(//ParameterValue/Value[../Name="PATH"])[1]','nvarchar(150)') 34 | ) as [To] 35 | , 36 | ISNULL( 37 | Convert(XML,[ExtensionSettings]).value('(//ParameterValue/Value[../Name="RenderFormat"])[1]','nvarchar(50)') 38 | , Convert(XML,[ExtensionSettings]).value('(//ParameterValue/Value[../Name="RENDER_FORMAT"])[1]','nvarchar(50)') 39 | ) as [Render Format] 40 | ,Convert(XML,[ExtensionSettings]).value('(//ParameterValue/Value[../Name="Subject"])[1]','nvarchar(150)') as [Subject] 41 | ,* 42 | FROM [dbo].[ReportSchedule] 43 | INNER JOIN [dbo].[Schedule] 44 | ON ReportSchedule.ScheduleID = Schedule.ScheduleID 45 | INNER JOIN [dbo].[Catalog] 46 | ON ReportSchedule.ReportID = Catalog.ItemID 47 | INNER JOIN [dbo].[Subscriptions] 48 | ON ReportSchedule.SubscriptionID = Subscriptions.SubscriptionID 49 | INNER JOIN [dbo].[Users] 50 | ON Subscriptions.OwnerID = Users.UserID 51 | INNER JOIN msdb.dbo.sysjobs J ON Convert(nvarchar(128),[ReportSchedule].ScheduleID) = J.name 52 | INNER JOIN msdb.dbo.sysjobschedules JS ON J.job_id = JS.job_id 53 | 54 | order by schedule.lastruntime desc 
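/*
Worked example of the next_run_date / next_run_time decoding above (values are hypothetical):
  next_run_date = 20240315 (int, yyyymmdd)                  -> '2024/03/15'
  next_run_time = 93000    (int, h[h]mmss, no leading zero) -> length 5 -> '09:30:00'
*/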
-------------------------------------------------------------------------------- /ssrs Subscription failure emails.sql: -------------------------------------------------------------------------------- 1 | 2 | --TODO: Change sp_send_dbmail info at bottom 3 | 4 | IF EXISTS ( 5 | 6 | --Run this query standalone if desired 7 | select 8 | InstanceName 9 | , ReportPath 10 | , ReportName = c.Name 11 | , RequestType 12 | , TimeStart 13 | , TimeEnd 14 | , Status 15 | , LastStatus 16 | , SubscriptionDescription = s.Description 17 | --, * 18 | from ReportServer.dbo.executionlog2 el 19 | inner join ReportServer.dbo.Catalog c 20 | on el.ReportPath = c.Path 21 | inner join ReportServer.dbo.ReportSchedule a 22 | on c.ItemID = a.ReportID 23 | inner join ReportServer.dbo.subscriptions s 24 | on s.Report_OID = c.ItemID 25 | and a.SubscriptionID = s.SubscriptionID 26 | where requesttype = 'subscription' 27 | and status <> 'rsSuccess' 28 | and (LastRunTime > dateadd(d, -1, getdate()) and TimeEnd > dateadd(d, -1, getdate())) 29 | 30 | ) 31 | BEGIN 32 | 33 | declare @tsql nvarchar(4000) = ' 34 | select 35 | '''' 36 | , '''' 37 | , '''' 38 | , '''' 39 | , '''' 40 | , SubscriptionDescription = ''
'' + InstanceName + '''' + ReportPath+ '''' + convert(varchar(100), TimeStart) + '''' + Status+ '''' + LastStatus + '''' + s.Description + ''
'' 41 | --, * 42 | from ReportServer_GP2k8r2.dbo.executionlog2 el 43 | inner join ReportServer_GP2k8r2.dbo.Catalog c 44 | on el.ReportPath = c.Path 45 | inner join ReportServer_GP2k8r2.dbo.ReportSchedule a 46 | on c.ItemID = a.ReportID 47 | inner join ReportServer_GP2k8r2.dbo.subscriptions s 48 | on s.Report_OID = c.ItemID 49 | and a.SubscriptionID = s.SubscriptionID 50 | where requesttype = ''subscription'' 51 | and status <> ''rsSuccess'' 52 | and (LastRunTime > dateadd(d, -1, getdate()) and TimeEnd > dateadd(d, -1, getdate())) 53 | order by el.timestart desc' 54 | 55 | --Send email 56 | 57 | exec msdb.dbo.sp_send_dbmail @profile_name = 'sparkhound', @recipients = 'dbadministrators@sparkhound.com' 58 | , @from_address = 'ReportServer_GP2k8R2@sparkhound.com', @reply_to = 'dbadministrators@sparkhound.com' 59 | , @subject = 'Failed SSRS subscriptions Report', @query = @tsql, @query_result_header = 0, @body_format ='html', @execute_query_database = 'msdb' 60 | 61 | END -------------------------------------------------------------------------------- /ssrs Subscription start jobs.sql: -------------------------------------------------------------------------------- 1 | select 'exec sp_start_job @job_name = ''' + cast(j.name as varchar(40)) + '''' , s.Description, s.LastRunTime, s.LastStatus, s.EventType, j.description 2 | from msdb.dbo.sysjobs j 3 | inner join msdb.dbo.sysjobsteps js on js.job_id = j.job_id 4 | inner join [ReportServer].[dbo].[Subscriptions] s on js.command like '%' + cast(s.subscriptionid as varchar(40)) + '%' 5 | where j.description = 'This job is owned by a report server process. Modifying this job could result in database incompatibilities. Use Report Manager or Management Studio to update this job.' 6 | -------------------------------------------------------------------------------- /ssrs subscription failure logs.sql: -------------------------------------------------------------------------------- 1 | ----SSRS logs for subscription failures 2 | --1. Check Windows Application Event Log 3 | 4 | --2. Check ReportServer.dbo.ExecutionLog3 5 | select top 1000 * from ReportServer.dbo.ExecutionLog3 where status <> 'rsSuccess' order by timestart desc 6 | 7 | --3. Check SSRS trace Log Files on the server 8 | --https://docs.microsoft.com/en-us/sql/reporting-services/troubleshooting/troubleshoot-reporting-services-subscriptions-and-delivery?view=sql-server-2017 9 | --C:\Program Files\Microsoft SQL Server\instance.mssqlserver\Reporting Services\LogFiles\ReportServerService_.log 10 | --C:\Program Files\Microsoft SQL Server\instance.mssqlserver\Reporting Services\LogFiles\Microsoft.ReportingServices.Portal.WebHost_.log 11 | --or 12 | --C:\Program Files\Microsoft SQL Server Reporting Services\SSRS\LogFiles 13 | 14 | --5. Query below for subscription statuses: 15 | use ReportServer 16 | go 17 | select 18 | ReportName = c.[Name] 19 | , c.Path 20 | , s.LastStatus 21 | , el.RequestType 22 | , s.LastRunTime 23 | , el.TimeStart 24 | , el.TimeEnd 25 | , el.Status 26 | , s.Description 27 | , s.ExtensionSettings 28 | , el.ItemPath 29 | , [Owner] = u.UserName 30 | --, * 31 | FROM dbo.Catalog c 32 | INNER JOIN dbo.ReportSchedule a 33 | on c.ItemID = a.ReportID 34 | INNER JOIN dbo.subscriptions s 35 | on s.Report_OID = c.ItemID 36 | and a.SubscriptionID = s.SubscriptionID 37 | INNER JOIN dbo.[Users] u on s.[OwnerID] = u.[UserID] 38 | LEFT OUTER JOIN dbo.ExecutionLog3 el on el.itempath = c.path 39 | WHERE el.requesttype = 'subscription' 40 | AND laststatus not like '%0 errors.' 
41 | ORDER BY s.LastRunTime desc, el.timestart desc 42 | -------------------------------------------------------------------------------- /ssrs subscriptions status.sql: -------------------------------------------------------------------------------- 1 | use ReportServer 2 | go 3 | SELECT 4 | ReportName = c.[Name] 5 | , c.Path 6 | , s.LastStatus 7 | , el.RequestType 8 | , s.LastRunTime 9 | , el.TimeStart 10 | , el.TimeEnd 11 | , el.Status 12 | , s.Description 13 | , s.ExtensionSettings 14 | , el.ItemPath 15 | , [Owner] = u.UserName 16 | --, * 17 | from dbo.Catalog c 18 | inner join dbo.ReportSchedule a 19 | on c.ItemID = a.ReportID 20 | inner join dbo.subscriptions s 21 | on s.Report_OID = c.ItemID 22 | and a.SubscriptionID = s.SubscriptionID 23 | INNER JOIN dbo.[Users] u on s.[OwnerID] = u.[UserID] 24 | left outer join dbo.ExecutionLog3 el on el.itempath = c.path 25 | where el.requesttype = 'subscription' 26 | 27 | 28 | order by s.LastRunTime desc, el.timestart desc 29 | -------------------------------------------------------------------------------- /sys.dm_io_virtual_file_stats.sql: -------------------------------------------------------------------------------- 1 | --http://blogs.msdn.com/b/dpless/archive/2010/12/01/leveraging-sys-dm-io-virtual-file-stats.aspx?Redirected=true 2 | 3 | select d.name, mf.physical_name 4 | , SizeMb = size_on_disk_bytes /1024./1024. 5 | --, mf.size*8./1024. --same 6 | , io_stall_read_s = fs.io_stall_read_ms /1000. 7 | , io_stall_write_s = fs.io_stall_write_ms /1000. 8 | , io_stall_s = fs.io_stall / 1000. 9 | from sys.dm_io_virtual_file_stats (null,null) fs 10 | inner join sys.master_files mf on fs.file_id = mf.file_id 11 | inner join sys.databases d on d.database_id = mf.database_id and fs.database_id = d.database_id 12 | 13 | order by io_stall desc -------------------------------------------------------------------------------- /sys_dm_os_ring_buffers.sql: -------------------------------------------------------------------------------- 1 | select 2 | [Time] = dateadd(ms, -1 * (dosi.cpu_ticks / (dosi.cpu_ticks/dosi.ms_ticks) - x.[timestamp]), SYSDATETIMEOFFSET ()) 3 | , CPU_SQL = record.value('(./Record/SchedulerMonitorEvent/SystemHealth/ProcessUtilization)[1]', 'int') 4 | , CPU_Idle = record.value('(./Record/SchedulerMonitorEvent/SystemHealth/SystemIdle)[1]', 'int') 5 | FROM (SELECT timestamp, convert(xml, record) AS record 6 | FROM sys.dm_os_ring_buffers 7 | WHERE ring_buffer_type = N'RING_BUFFER_SCHEDULER_MONITOR') AS x 8 | CROSS APPLY sys.dm_os_sys_info AS dosi 9 | ORDER by [Time] desc 10 | 11 | 12 | SELECT 13 | [Time] = dateadd(ms, -1 * (dosi.cpu_ticks / (dosi.cpu_ticks/dosi.ms_ticks) - x.[timestamp]), SYSDATETIMEOFFSET ()) 14 | , MemoryEvent = record.value('(./Record/ResourceMonitor/Notification)[1]', 'varchar(64)') 15 | , Target_Server_Mem_GB = convert(decimal(19,3), record.value('(./Record/MemoryNode/TargetMemory)[1]', 'bigint')/1024./1024.) 16 | , Physical_Server_Mem_GB = convert(decimal(19,3), record.value('(./Record/MemoryRecord/TotalPhysicalMemory)[1]', 'bigint')/1024./1024.) 17 | , Committed_Mem_GB = convert(decimal(19,3), record.value('(./Record/MemoryNode/CommittedMemory)[1]', 'bigint')/1024./1024.) 18 | , Shared_Mem_GB = convert(decimal(19,3), record.value('(./Record/MemoryNode/SharedMemory)[1]', 'bigint')/1024./1024.) 
19 | , MemoryUtilization = record.value('(./Record/MemoryRecord/MemoryUtilization)[1]', 'bigint') 20 | , Available_Server_Mem_GB = convert(decimal(19,3), record.value('(./Record/MemoryRecord/AvailablePhysicalMemory)[1]', 'bigint')/1024./1024.) 21 | FROM (SELECT timestamp, convert(xml, record) AS record 22 | FROM sys.dm_os_ring_buffers 23 | WHERE ring_buffer_type = N'RING_BUFFER_RESOURCE_MONITOR') as x 24 | CROSS APPLY sys.dm_os_sys_info AS dosi 25 | ORDER BY [Time] desc 26 | 27 | /* 28 | 29 | select 30 | cpu_ticks , 31 | (cpu_ticks/ms_ticks), 32 | cpu_ticks / (cpu_ticks/ms_ticks) 33 | from sys.dm_os_sys_info 34 | 35 | */ -------------------------------------------------------------------------------- /tables without clustered indexes.sql: -------------------------------------------------------------------------------- 1 | --TODO Set current database context to desired database. 2 | 3 | --Doesn't work on 2000 databases or databases in 2000 compatibility mode. 4 | 5 | select 6 | [Database Name] = db_name() 7 | , [Table Name] = '[' + s.name + '].[' + o.name + ']' 8 | , [rows] = sum(ps.row_count) 9 | , Already_Has_Identity_column = c.name 10 | , Already_Has_Unique_index = i.name 11 | from 12 | sys.objects o 13 | inner join 14 | sys.schemas s 15 | on o.schema_id = s.schema_id 16 | inner join 17 | sys.dm_db_partition_stats ps 18 | on ps.object_id = o.object_id 19 | and index_id = 0 20 | left outer join 21 | sys.columns c on c.object_id = o.object_id 22 | and c.is_identity = 1 23 | left outer join 24 | sys.indexes i on i.object_id = o.object_id 25 | and (i.is_unique = 1 or i.is_unique_constraint = 1 or i.is_primary_key = 1) 26 | left outer join 27 | sys.index_columns ic on ic.object_id = o.object_id and i.index_id = ic.index_id and ic.column_id = c.column_id 28 | WHERE 29 | o.name <> 'dtproperties' 30 | and is_ms_shipped = 0 31 | and o.type = 'u' 32 | group by s.name, o.name, i.name, c.name 33 | order by rows desc 34 | go 35 | 36 | /* 37 | 38 | create table noCL1 39 | (id int not null IDENTITY(1,1) 40 | ,whatever1 int ) 41 | 42 | 43 | create table noCL2 44 | (id int not null 45 | ,whatever1 int INDEX IDX_CL UNIQUE) 46 | 47 | */ -------------------------------------------------------------------------------- /tde setup.sql: -------------------------------------------------------------------------------- 1 | --1. see toolbox\lab - tde encryption workshop 2014.sql 2 | --2. Note you should also back up the Service Master Key!! 3 | --3. Generate three strong passwords. 4 | --4. See important TODO to copy these files OFFSITE. 5 | USE master 6 | go 7 | CREATE MASTER KEY ENCRYPTION BY PASSWORD = '$123testpassword-VM1'; --The master database master key password 1 8 | GO 9 | --Proof it is now there 10 | SELECT * FROM sys.symmetric_keys where name = '##MS_DatabaseMasterKey##' 11 | GO 12 | CREATE CERTIFICATE TDECert_enctest_2012 13 | WITH SUBJECT = 'Testing TDE Cert' 14 | , START_DATE = '7/30/2019' --Today's Date 15 | , EXPIRY_DATE = '7/30/2099'; --Future Date 16 | GO 17 | --Proof it is now there 18 | SELECT * FROM sys.certificates where name = 'TDECert_enctest_2012' 19 | GO 20 | 21 | --You must take backups for recovery of both the master DB master key and the cert. 22 | BACKUP MASTER KEY --each instance can have its own master key. 23 | TO FILE = 'E:\Program Files\Microsoft SQL Server\MSSQL14.SQL2K17\MSSQL\data\SQLMasterKey_20120314.key' 24 | ENCRYPTION BY PASSWORD = '$123testpassword' --This password is for the new master key backup file. The Master Key's password above is different.
Password 2 25 | 26 | BACKUP CERTIFICATE TDECert_enctest_2012 27 | TO FILE = 'E:\Program Files\Microsoft SQL Server\MSSQL14.SQL2K17\MSSQL\data\TestingTDEcert2014.cer' 28 | WITH PRIVATE KEY ( FILE = 'E:\Program Files\Microsoft SQL Server\MSSQL14.SQL2K17\MSSQL\data\TestingTDEcert2014.key' , --This is a new key file for the cert backup, NOT the same as the key for the MASTER KEY backup above. 29 | ENCRYPTION BY PASSWORD = '$12345testpassword123' ); --This password is for the cert backup's key file. The Master Key's password above is different. Password 3 30 | GO 31 | 32 | USE [enctest] --In this case, enctest is the sample name of the database you want to encrypt with TDE. 33 | go 34 | --Create the key in the TDE database using the server cert we created earlier. 35 | CREATE DATABASE ENCRYPTION KEY 36 | WITH ALGORITHM = AES_256 37 | ENCRYPTION BY SERVER CERTIFICATE TDECert_enctest_2012 38 | GO 39 | --This actually enables TDE on the database. It begins an asynchronous encryption scan; the statement returns immediately and encryption continues in the background. 40 | ALTER DATABASE enctest SET ENCRYPTION ON 41 | GO 42 | 43 | --Proof it is encrypted. 44 | SELECT [name], is_encrypted FROM sys.databases order by is_encrypted desc, name asc 45 | GO 46 | --Then check "tde status.sql" for encryption progress. 47 | 48 | /* IMPORTANT: 49 | Copy the three passwords, MOVE the master key file, the cert backup, and the cert backup key, OFFSITE to secure enterprise storage. 50 | DO NOT LOSE THEM. If you lose these files or passwords, you will NOT be able to restore/recover the database!!! 51 | */ -------------------------------------------------------------------------------- /tde status.sql: -------------------------------------------------------------------------------- 1 | 2 | /* The value 3 represents an encrypted state on the database and transaction logs. */ 3 | SELECT d.name 4 | , d.state_desc 5 | --, encryption_state_desc --SQL 2019+ only 6 | , encryption_state_desc = CASE dek.encryption_state WHEN 0 THEN 'No database encryption key present, no encryption' 7 | WHEN 1 THEN 'Unencrypted' 8 | WHEN 2 THEN 'Encryption in progress' 9 | WHEN 3 THEN 'Encrypted' 10 | WHEN 4 THEN 'Key change in progress' 11 | WHEN 5 THEN 'Decryption in progress' 12 | WHEN 6 THEN 'Protection change in progress (The certificate or asymmetric key that is encrypting the database encryption key is being changed.)' 13 | END 14 | , dek.percent_complete -- Percent complete of the database encryption state change. This will be 0 if there is no state change.
15 | --, encryption_scan_state --SQL 2019+ only 16 | --, encryption_scan_modify_date --SQL 2019+ 17 | , Cert_Name = c.name 18 | , c.pvt_key_encryption_type_desc 19 | , c.issuer_name 20 | , c.subject 21 | , dek.modify_date 22 | , c.pvt_key_last_backup_date 23 | , dek.encryptor_type 24 | , dek.key_algorithm 25 | , dek.key_length 26 | , c.start_date 27 | , c.expiry_date 28 | --, * 29 | FROM sys.databases as d 30 | left outer join sys.dm_database_encryption_keys as dek on dek.database_id = d.database_id 31 | left outer join master.sys.certificates as c on c.thumbprint = dek.encryptor_thumbprint 32 | where encryption_state is not null 33 | GO 34 | -------------------------------------------------------------------------------- /test user permissions.sql: -------------------------------------------------------------------------------- 1 | use w; 2 | 3 | --test my permissions 4 | select * from fn_my_permissions('dbo.ssisyslog', 'OBJECT') order by 1,2,3; 5 | 6 | --Find role membership 7 | exec xp_logininfo 'dbm\william.assaf', 'all' 8 | 9 | /* 10 | --test another's permissions 11 | 12 | execute as login = 'domain\username' --or 'sqlloginname'; 13 | select * from fn_my_permissions('dbo.ssisyslog', 'OBJECT') order by 1,2,3; 14 | REVERT; --VERY IMPORTANT, or you will continue to execute as. 15 | */ -------------------------------------------------------------------------------- /toolbox-stablestate.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SparkhoundSQL/sql-server-toolbox/51636a22fe7db848cc12473c8083e2753bb6ab93/toolbox-stablestate.zip -------------------------------------------------------------------------------- /toolbox.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SparkhoundSQL/sql-server-toolbox/51636a22fe7db848cc12473c8083e2753bb6ab93/toolbox.zip -------------------------------------------------------------------------------- /volume stats.sql: -------------------------------------------------------------------------------- 1 | 2 | 3 | select 4 | volume_letter = UPPER(vs.volume_mount_point) 5 | , volume_name = vs.logical_volume_name 6 | , file_system_type 7 | , drive_size_GB = MAX(CONVERT(decimal(19,2), vs.total_bytes/1024./1024./1024. )) 8 | , drive_free_space_GB = MAX(CONVERT(decimal(19,2), vs.available_bytes/1024./1024./1024. 
)) 9 | , drive_percent_free = MAX(CONVERT(decimal(5,2), vs.available_bytes * 100.0 / vs.total_bytes)) 10 | FROM 11 | sys.master_files AS f CROSS APPLY 12 | sys.dm_os_volume_stats(f.database_id, f.file_id) vs --only return volumes where there is database file (data or log) 13 | GROUP BY vs.volume_mount_point, vs.file_system_type, vs.logical_volume_name 14 | ORDER BY volume_letter 15 | 16 | --exec xp_fixeddrives 17 | 18 | 19 | /* 20 | 21 | E:\ NTFS 300.00 152.94 50.98 22 | F:\ NTFS 300.00 84.08 28.03 23 | */ 24 | -------------------------------------------------------------------------------- /xevent ringbuffer.sql: -------------------------------------------------------------------------------- 1 | WITH CteRingBuffer (XMLData) as 2 | ( SELECT CAST(xet.target_data as XML) as XMLData 3 | FROM sys.dm_xe_session_targets xet INNER JOIN 4 | sys.dm_xe_sessions xe ON (xe.address = xet.event_session_address) 5 | WHERE xe.name = 'system_health' ) 6 | SELECT top 100 e.query('.').value('(/event/@timestamp)[1]', 'datetime2(0)') as "TimeStamp", 7 | e.query('.').value('(/event/data/value)[1]', 'int') as "ErrorNumber", 8 | e.query('.').value('(/event/data/value)[2]', 'int') as "ErrorSeverity", 9 | e.query('.').value('(/event/data/value)[3]', 'int') as "ErrorState", 10 | e.query('.').value('(/event/data/value)[5]', 'varchar(max)') as "ErrorMessage" 11 | FROM cteRingBuffer CROSS APPLY 12 | XMLData.nodes('/RingBufferTarget/event') AS Event(e) 13 | WHERE e.query('.').value('(/event/@name)[1]', 'varchar(255)') = 'error_reported' 14 | AND e.query('.').value('(/event/@timestamp)[1]', 'datetime2(0)') > GETDATE()-14 15 | -------------------------------------------------------------------------------- /zip toolbox.ps1: -------------------------------------------------------------------------------- 1 | #There is a Windows Scheduled Task that runs the command below (a sample Register-ScheduledTask sketch follows this script) 2 | #PowerShell -File "E:\whatever path\toolbox\zip toolbox.ps1" 3 | set-ExecutionPolicy -Scope Process Unrestricted 4 | #Full toolbox 5 | cd C:\Users\william.assaf\OneDrive\toolbox\ 6 | get-childitem .\* -Recurse | Where-Object {$_.FullName -notlike '*\.git*' } | Where-Object {$_.FullName -notlike "*.zip" } | Compress-Archive -DestinationPath .\toolbox.zip -Force 7 | 8 | #Stable state checklist files only 9 | $stablestate = ( 10 | '*autogrow*', 11 | 'Backup History*', 12 | '*Configuration Changes History*', 13 | '*CPU Utilization*', 14 | '*Database Owner*', 15 | '*Database Mail Diag*', 16 | '*Database Settings*', 17 | '*Error Log.sql*', 18 | '*Find Memory Mini Dumps*', 19 | '*Fix Orphaned SID*', 20 | '*Gather Log Events*', 21 | '*Job Owners*', 22 | '*Public Permissions*', 23 | '*Page Life Expectancy*', 24 | '*Sharepoint databases*', 25 | 'Space in files*', 26 | '*Stats out of Date*', 27 | '*TempDB*', 28 | '*VLFs*', 29 | 'Volume Stats*' ); 30 | 31 | get-childitem .\* -Recurse -include $stablestate | Compress-Archive -DestinationPath .\toolbox-stablestate.zip -Force 32 | --------------------------------------------------------------------------------
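The header comment in zip toolbox.ps1 refers to a Windows Scheduled Task that invokes the script. Below is a minimal sketch of how such a task could be registered with the built-in ScheduledTasks cmdlets; the task name, the daily 2 AM trigger, and the use of the OneDrive toolbox path from the script are illustrative assumptions, not values taken from this repository.

# Hypothetical example only: registers a daily Scheduled Task that runs zip toolbox.ps1.
# TaskName, trigger time, and script path are placeholders; adjust for your environment.
$action  = New-ScheduledTaskAction -Execute 'PowerShell.exe' -Argument '-NoProfile -ExecutionPolicy Bypass -File "C:\Users\william.assaf\OneDrive\toolbox\zip toolbox.ps1"'
$trigger = New-ScheduledTaskTrigger -Daily -At 2am
Register-ScheduledTask -TaskName 'Zip toolbox' -Action $action -Trigger $trigger -Description 'Rebuilds toolbox.zip and toolbox-stablestate.zip nightly.'

Depending on the environment, registering the task may require an elevated PowerShell session; by default the task runs under the account that registered it.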