├── RemoveMasterKeyFrombacpac.zip
├── README.md
├── TestConnectivityLatency.ps1
├── ListAllResourcesWithTableAuditing.ps1
├── GenerateDummyDeadlocks
│   ├── DeadlockWithPowerShell.ps1
│   └── DeadlockWithPowerShellV2.ps1
├── Incremental Shrink.txt
├── AzureTempDB_SpaceMonitoring.txt
├── MasterKeyIssue
│   ├── RemoveMasterKeyLT4GB.ps1
│   └── RemoveMasterKeyGT4GB.ps1
├── AzureSQLDB_Diagnostics.txt
└── AzureSQLMaintenance.sql

/RemoveMasterKeyFrombacpac.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yochananrachamim/AzureSQL/HEAD/RemoveMasterKeyFrombacpac.zip
--------------------------------------------------------------------------------

/README.md:
--------------------------------------------------------------------------------
1 | # AzureSQL
2 | 
3 | Scripts and utilities related to Azure SQL Database.
4 | 
5 | AzureSQLMaintenance.sql - index and statistics maintenance for Azure SQL Database: https://techcommunity.microsoft.com/t5/azure-database-support-blog/how-to-maintain-azure-sql-indexes-and-statistics/ba-p/368787
6 | 
7 | 
--------------------------------------------------------------------------------

/TestConnectivityLatency.ps1:
--------------------------------------------------------------------------------
1 | $ServerName = ".database.windows.net"
2 | $User = ""
3 | $Password = ""
4 | $DatabaseName = "master"
5 | $NumOfTests = 100
6 | 
7 | 
8 | $stopwatch = New-Object System.Diagnostics.Stopwatch
9 | $stopwatch.Start()
10 | 
11 | $scsb = New-Object System.Data.SqlClient.SqlConnectionStringBuilder
12 | $scsb["data source"] = $ServerName
13 | $scsb["user"] = $User
14 | $scsb["password"] = $Password
15 | $scsb["initial catalog"] = $DatabaseName
16 | $conn = New-Object System.Data.SqlClient.SqlConnection $scsb
17 | 
18 | $Itereations = $NumOfTests
19 | $TotalMS = 0
20 | $conn.Open()
21 | $cmd = $conn.CreateCommand()
22 | $cmd.CommandText = "SELECT 1"
23 | "Wait..."
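# Note on the loop below: the connection is opened once above, so each timed iteration measures
# only the round trip of "SELECT 1" over the already-established connection; connection-setup time
# is excluded from the reported average. The commented-out Start-Sleep inside the loop can be
# re-enabled to space the probes out instead of running them back to back.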
24 | for($x=0;$x -lt $Itereations; $x++) { 25 | Write-Progress -Activity "Testing connectivity" -PercentComplete (100*$x/$Itereations) -CurrentOperation $x 26 | $stopwatch.Restart() 27 | $cmd.ExecuteNonQuery() | Out-Null 28 | $stopwatch.Stop() 29 | $NonQueryCmdTime = $StopWatch.ElapsedMilliseconds 30 | 31 | #"NonQuery:$NonQueryCmdTime" 32 | $TotalMS = $TotalMS + $NonQueryCmdTime 33 | #Start-Sleep 1 34 | } 35 | $conn.Close() 36 | 37 | 38 | $avg = 1.0*$TotalMS/$Itereations 39 | "AVG of $Itereations iterations: $avg ms" 40 | "Done" -------------------------------------------------------------------------------- /ListAllResourcesWithTableAuditing.ps1: -------------------------------------------------------------------------------- 1 | Login-AzureRmAccount 2 | 3 | $arrayTableFound = @() 4 | 5 | foreach($sub in Get-AzureRmSubscription) 6 | { 7 | Write-Host 'Looking for table auditing in subscription'$sub.Name'('$sub.SubscriptionId')' 8 | 9 | Set-AzureRmContext -SubscriptionName $sub.Name 10 | $sqlServers = Get-AzureRmResourceGroup | Get-AzureRmSqlServer 11 | foreach($sqlServer in $sqlServers) 12 | { 13 | $serverName = $sqlServer.ServerName 14 | Write-Host 'Looking for table auditing on server'$serverName 15 | 16 | $serverAuditingPolicy = $sqlServer | Get-AzureRmSqlServerAuditingPolicy -WarningAction SilentlyContinue 17 | 18 | if ($serverAuditingPolicy.AuditState -eq 'Enabled' -and $serverAuditingPolicy.AuditType -eq 'Table') 19 | { 20 | Write-Host -ForegroundColor Red 'Found table auditing on server'$serverName 21 | $arrayTableFound += ,@("SERVER: $serverName") 22 | } 23 | foreach($sqlDB in $sqlServer | Get-AzureRmSqlDatabase) 24 | { 25 | 26 | if ($sqlDB.DatabaseName -eq 'master') 27 | { 28 | #no support for auditing on master 29 | continue 30 | } 31 | Write-Host 'Looking for table auditing on DB'$sqlDB.DatabaseName 32 | $DBAuditingPolicy = Get-AzureRmSqlDatabaseAuditingPolicy -ServerName $sqlServer.ServerName -DatabaseName $sqlDB.DatabaseName -ResourceGroupName $sqlDB.ResourceGroupName -WarningAction SilentlyContinue 33 | 34 | if ($DBAuditingPolicy.AuditState -eq 'Enabled' -and $DBAuditingPolicy.AuditType -eq 'Table') 35 | { 36 | Write-Host -ForegroundColor Red 'Found table auditing on database'$sqlDB.DatabaseName'Server'$sqlServer.ServerName 37 | $databaseName = $sqlDB.DatabaseName 38 | $arrayTableFound += ,@("DATABASE: $databaseName (on Server $serverName)") 39 | } 40 | } 41 | } 42 | } 43 | 44 | Write-Host -ForegroundColor Green 'Total resources with Table auditing:'$arrayTableFound.Length 45 | 46 | Write-Host -ForegroundColor Red 'The following resources have Table auditing enabled:' 47 | 48 | foreach($item in $arrayTableFound) 49 | { 50 | Write-Host -ForegroundColor Red $item[0] 51 | } 52 | -------------------------------------------------------------------------------- /GenerateDummyDeadlocks/DeadlockWithPowerShell.ps1: -------------------------------------------------------------------------------- 1 | $pServer = '{Azure SQL Server as FQDN}' 2 | $pDatabase = '{DatabaseName}' 3 | $pLoginName = '{LoginName}' 4 | $pPassword = '{Password}' 5 | $pNumberOfDeadlocksToGenerate = 10 6 | 7 | "Using:" 8 | " Server Name: " + $pServer 9 | " Database Name: " +$pDatabase 10 | " Login Name: " +$pLoginName 11 | " Password: ******" 12 | 13 | $iTableName = '##dltab' 14 | $iSQLPreperation = ` 15 | ' 16 | create table ['+ $iTableName +'1] (i int); 17 | create table ['+ $iTableName +'2] (i int); 18 | insert into ['+$iTableName+'1] values(0); 19 | insert into ['+$iTableName+'2] values(0); 20 | 21 | ' 22 | 23 | 
$iSQLSessionA = ` 24 | ' 25 | begin transaction 26 | update ['+$iTableName+'1] set i=1 27 | waitfor delay ''00:00:02'' 28 | update ['+$iTableName+'2] set i=1 29 | commit 30 | ' 31 | 32 | $iSQLSessionB = ` 33 | ' 34 | begin transaction 35 | update ['+$iTableName+'2] set i=2 36 | waitfor delay ''00:00:02'' 37 | update ['+$iTableName+'1] set i=1 38 | commit 39 | ' 40 | 41 | 42 | # Preperation 43 | "Running table preperation command..." 44 | Invoke-Sqlcmd -ServerInstance $pServer -Database $pDatabase -Username $pLoginName -Password $pPassword -Query $iSQLPreperation 45 | 46 | "Starting deadlock loop..." 47 | for($i=1; $i -le $pNumberOfDeadlocksToGenerate; $i++) 48 | { 49 | # Session A 50 | $jobA = Start-Job -ScriptBlock { 51 | Invoke-Sqlcmd -ServerInstance $using:pServer -Database $using:pDatabase -Username $using:pLoginName -Password $using:pPassword -Query $using:iSQLSessionA 52 | } 53 | 54 | Start-Sleep -Seconds 1 55 | 56 | # Session B 57 | $jobB = Start-Job -ScriptBlock { 58 | Invoke-Sqlcmd -ServerInstance $using:pServer -Database $using:pDatabase -Username $using:pLoginName -Password $using:pPassword -Query $using:iSQLSessionB 59 | } 60 | 61 | # Wait until finish 62 | Get-Job | Wait-Job | Out-Null 63 | 64 | # Write output 65 | $jobA | Receive-Job 66 | $jobB | Receive-Job 67 | 68 | # Notify user about the progress 69 | (get-date).DateTime.ToString() + " | " + $i.ToString() + " Deadlock completed." 70 | } 71 | 72 | "Done." 73 | 74 | 75 | 76 | 77 | -------------------------------------------------------------------------------- /Incremental Shrink.txt: -------------------------------------------------------------------------------- 1 | CREATE OR ALTER PROCEDURE usp_IncrementalShrink 2 | @DesiredFileSize int=0, 3 | @ShrinkChunkSize int=5, 4 | @dbFileID int =0 5 | as 6 | begin 7 | /*********************************************** 8 | Incremental Shrink for data file - SQL Server, Azure SQL, Azure Managed Instance 9 | ************************************************/ 10 | /*------------------------------------------------- 11 | Change Log: 12 | 2022-07-12 - Change it from script to stored procedure 13 | - Add functionality to go through all data files 14 | 2022-07-06 - more accurate current size validation. 
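Usage example (illustrative values only; parameters as declared above, sizes in MB):
    exec usp_IncrementalShrink @DesiredFileSize = 10240, @ShrinkChunkSize = 5, @dbFileID = 0
    -- shrinks every data file (@dbFileID = 0 means all ROWS files) toward 10240 MB, 5 MB per step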
15 | 16 | */----------------------------------------------- 17 | set nocount on 18 | declare @AllocatedSpaceMB int 19 | declare @UsedSpaceMB int 20 | declare @UnusedSpaceMB int 21 | declare @ErrorIndication int=0 22 | declare @dbFileType sysname 23 | declare @lastSize int 24 | declare @SqlCMD nvarchar(max) 25 | declare @MSG nvarchar(100) 26 | declare @iFileList table(i int) 27 | declare @iTMP table(i int) 28 | declare @iFileID int 29 | declare @iCurrentSizeTarget int 30 | 31 | 32 | set @MSG = convert(nvarchar,getdate())+' - Starting incremental shrink procedure'; raiserror(@msg,0,0) with nowait 33 | 34 | 35 | /* @dbFileID=0 -> All Files, or actual data file ID */ 36 | insert into @iFileList select file_id from sys.database_files where type=0/*Rows*/ and (@dbFileID=0 or file_id=@dbFileID) 37 | 38 | 39 | -- check if there is paused resumable index operation on this DB 40 | -- existance of these types of operations block the shrink operation from reducing the file size 41 | if (SELECT count(*) FROM sys.index_resumable_operations)>0 set @ErrorIndication=3 42 | 43 | 44 | 45 | 46 | 47 | if @ErrorIndication=3 raiserror('[Error] Paused resumable index rebuild was detected, please abort or complete the operation before running shrink',16,0) with nowait 48 | 49 | 50 | 51 | /*Go throgh all files pending to be shrinked*/ 52 | WHILE (select count(*) from @iFileList)>0 53 | Begin 54 | set @MSG = REPLICATE('-',50); raiserror(@msg,0,0) with nowait 55 | 56 | /*Iterate on specific file*/ 57 | delete top (1) from @iFileList output deleted.i into @iTMP 58 | select top 1 @iFileID=i from @iTMP 59 | 60 | set @MSG = 'Running shrink file on file ID = ' + CONVERT(varchar,@iFileID) +char(13) ; raiserror(@msg,0,0) with nowait 61 | 62 | SELECT 63 | @AllocatedSpaceMB = SIZE/128.0 64 | , @UsedSpaceMB = cast(fileproperty(name, 'SpaceUsed') AS int)/128.0 65 | , @UnusedSpaceMB = (SIZE/128.0) - cast(fileproperty(name, 'SpaceUsed') AS int)/128.0 66 | FROM sys.database_files 67 | WHERE file_id = @iFileID 68 | 69 | set @MSG = char(9)+'Information about file ID = ' + CONVERT(varchar,@iFileID) ; raiserror(@msg,0,0) with nowait 70 | set @MSG = char(9)+char(9)+'Allocated Space MB = ' + CONVERT(varchar,@AllocatedSpaceMB) ; raiserror(@msg,0,0) with nowait 71 | set @MSG = char(9)+char(9)+'Used Space MB = ' + CONVERT(varchar,@UsedSpaceMB) ; raiserror(@msg,0,0) with nowait 72 | set @MSG = char(9)+char(9)+'Unused Space MB = ' + CONVERT(varchar,@UnusedSpaceMB) ; raiserror(@msg,0,0) with nowait 73 | 74 | 75 | 76 | set @lastSize = @AllocatedSpaceMB+1 77 | while @AllocatedSpaceMB > @DesiredFileSize /*check if we got the desired size*/ and @lastSize>@AllocatedSpaceMB /* check if there is progress*/ and @ErrorIndication=0 78 | begin 79 | set @MSG = char(9)+char(9)+char(9)+convert(nvarchar,getdate()) + ' - Calling ShrinkFile' ; raiserror(@msg,0,0) with nowait 80 | 81 | select @lastSize = size/128.0 82 | from sys.database_files 83 | where file_id=@iFileID 84 | 85 | /*Calculate next target size and make sure we do not go below 0*/ 86 | set @iCurrentSizeTarget = @AllocatedSpaceMB-@ShrinkChunkSize 87 | set @iCurrentSizeTarget = iif(@iCurrentSizeTarget>0, @iCurrentSizeTarget,0) 88 | 89 | set @sqlCMD = N'dbcc shrinkfile('+cast(@iFileID as varchar(7))+','+ convert(nvarchar,@iCurrentSizeTarget) +') with no_infomsgs;' 90 | --print @sqlCMD 91 | exec(@sqlCMD) 92 | 93 | select @AllocatedSpaceMB = size/128.0 94 | from sys.database_files 95 | where file_id=@iFileID 96 | 97 | set @MSG = char(9)+char(9)+char(9)+convert(nvarchar,getdate()) + ' - ShrinkFile 
completed. current size is: ' + cast(@AllocatedSpaceMB as varchar(10)) + 'MB'; raiserror(@msg,0,0) with nowait
98 | end
99 | 
100 | delete from @iTMP
101 | End
102 | 
103 | set @MSG = convert(nvarchar,getdate())+' - Finished incremental shrink procedure'; raiserror(@msg,0,0) with nowait
104 | END
105 | 
106 | 
--------------------------------------------------------------------------------

/GenerateDummyDeadlocks/DeadlockWithPowerShellV2.ps1:
--------------------------------------------------------------------------------
1 | #######################################################################################################################
2 | ## DeadLock Generator
3 | ##
4 | ## Description and usage:
5 | ## This PS script is used to generate a high volume of deadlocks.
6 | ## There are no prerequisites on the destination database, and no leftovers will remain in the destination DB.
7 | ##
8 | ## How to use:
9 | ## Set the parameters at the top of the script and run.
10 | ##
11 | ## Yochanan Rachamim - Jan 2023
12 | #######################################################################################################################
13 | 
14 | 
15 | # Parameters
16 | $pNumberOfDeadlocksToGenerate = 10
17 | $pServerName = '{ServerName}.database.windows.net'
18 | $pDatabaseName = 'master'
19 | $pUserName = '{LoginName}'
20 | $pPassword = '{password}'
21 | 
22 | # Initialize variables
23 | $connectionString = 'Server=tcp:{0},1433;Initial Catalog={1};Persist Security Info=False;User ID={2};Password={3};MultipleActiveResultSets=False;Encrypt=True;TrustServerCertificate=False;Connection Timeout=30;' `
24 | -f $pServerName,$pDatabaseName,$pUserName,$pPassword
25 | $iTableName = '##dltab' + (New-Guid).ToString()
26 | $iSafetyWaitms = 100
27 | $iNumberOfExceptions = 0
28 | 
29 | # Create connection objects
30 | $sqlConnection0 = New-Object System.Data.SqlClient.SqlConnection $connectionString
31 | $sqlConnection1 = New-Object System.Data.SqlClient.SqlConnection $connectionString
32 | $sqlConnection2 = New-Object System.Data.SqlClient.SqlConnection $connectionString
33 | 
34 | # Create command objects
35 | $sqlCMD0 = New-Object System.Data.SqlClient.SqlCommand
36 | $sqlCMD1 = New-Object System.Data.SqlClient.SqlCommand
37 | $sqlCMD2 = New-Object System.Data.SqlClient.SqlCommand
38 | 
39 | # Set command's connection
40 | $sqlCMD0.Connection = $sqlConnection0
41 | $sqlCMD1.Connection = $sqlConnection1
42 | $sqlCMD2.Connection = $sqlConnection2
43 | 
44 | # Preparing database environment by creating tables for deadlock reproduction.
45 | $sqlConnection0.Open()
46 | $sqlCMD0.CommandText = '
47 | -- Environment preparation
48 | -- create two tables with one record each, so we can cross-lock the records and generate a deadlock.
49 | CREATE TABLE ['+$iTableName+'1] (i int)
50 | CREATE TABLE ['+$iTableName+'2] (i int)
51 | INSERT INTO ['+$iTableName+'1] VALUES(0)
52 | INSERT INTO ['+$iTableName+'2] VALUES(0)
53 | '
54 | $null = $sqlCMD0.ExecuteNonQuery();
55 | # need to keep Connection0 open as it keeps the temp tables alive.
56 | 
57 | # loop to produce deadlocks.
58 | for ($i = 1; $i -le $pNumberOfDeadlocksToGenerate; $i++) {
59 | try {
60 | "Generating deadlock " + $i.ToString()
61 | 
62 | # make sure connections are opened.
63 | if($sqlConnection1.State -ne "open") {$sqlConnection1.Open()}
64 | if($sqlConnection2.State -ne "open") {$sqlConnection2.Open()}
65 | 
66 | # begin transaction so we will keep the lock on the records.
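# How the deadlock is produced: each session opens a transaction, session 1 updates table 1 while
# session 2 updates table 2, then each session tries to update the row the other transaction still
# holds a lock on (session 1 -> table 2, session 2 -> table 1). SQL Server resolves the cycle by
# choosing one session as the deadlock victim, and that error surfaces in the catch block below.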
67 | $sqlCMD1.Transaction = $sqlConnection1.BeginTransaction()
68 | $sqlCMD2.Transaction = $sqlConnection2.BeginTransaction()
69 | 
70 | $sqlCMD1.CommandText = "update [" + $iTableName +"1] set i=1"
71 | $sqlCMD2.CommandText = "update [" + $iTableName +"2] set i=1"
72 | $null = $sqlCMD1.ExecuteNonQuery()
73 | $null = $sqlCMD2.ExecuteNonQuery()
74 | 
75 | $sqlCMD1.CommandText = "update [" + $iTableName +"2] set i=1"
76 | $sqlCMD2.CommandText = "update [" + $iTableName +"1] set i=1"
77 | $null = $sqlCMD1.ExecuteNonQueryAsync()
78 | # wait a few ms to let the previous command acquire the lock on the row, as we might be faster than the database.
79 | # if we are still faster than the database, increase the SafetyWait time so the lock can be acquired.
80 | Start-Sleep -Milliseconds $iSafetyWaitms
81 | $null = $sqlCMD2.ExecuteNonQuery()
82 | }
83 | catch {
84 | #rollback transactions
85 | "Exception caught.."
86 | $_.Exception.Message.ToString()
87 | $iNumberOfExceptions +=1
88 | }
89 | finally {
90 | 
91 | # closing connections (the victim's connection will be dropped anyway; just to be on the safe side we validate and close both.)
92 | if($sqlCMD1.Transaction.Connection.State -eq "open"){
93 | $sqlCMD1.Transaction.Connection.Close()
94 | Start-Sleep -Milliseconds $iSafetyWaitms
95 | }
96 | if($sqlCMD2.Transaction.Connection.State -eq "open"){
97 | $sqlCMD2.Transaction.Connection.Close()
98 | Start-Sleep -Milliseconds $iSafetyWaitms
99 | }
100 | }
101 | }
102 | 
103 | # closing connection (this is where the temp tables will be dropped by the SQL engine)
104 | $sqlConnection1.Close()
105 | $sqlConnection2.Close()
106 | $sqlConnection0.Close()
107 | 
108 | "Done generating {0} deadlocks" -f $iNumberOfExceptions.ToString()
109 | if($pNumberOfDeadlocksToGenerate -gt $iNumberOfExceptions)
110 | {
111 | "Seems like the number of deadlocks generated is less than requested. This can happen due to slowness on the database side. To overcome this, increase the value of the iSafetyWaitms variable; it gives the database more time to complete each step before proceeding."
112 | }
--------------------------------------------------------------------------------

/AzureTempDB_SpaceMonitoring.txt:
--------------------------------------------------------------------------------
1 | -- (1) get Database name
2 | select db_name() as DatabaseName
3 | 
4 | -- (2) Get SLO Level
5 | select *
6 | from sys.database_service_objectives
7 | 
8 | -- (3) Get TempDB information current and max size
9 | SELECT
10 | FILE_ID,
11 | type_desc,
12 | SizeInMB = format(size*1.0/128,'#,###0'),
13 | MaxSizeinMB = format(max_size*1.0/128,'#,##0')
14 | FROM tempdb.sys.database_files
15 | 
16 | 
17 | -- (4) From the current size, how much is used or free
18 | SELECT
19 | [free space in MB] = format((SUM(unallocated_extent_page_count)*1.0/128),'#,##0'),
20 | [used space in MB] = format((SUM(allocated_extent_page_count)*1.0/128),'#,##0'),
21 | [VersionStore space in MB] = format((SUM(version_store_reserved_page_count)*1.0/128),'#,##0')
22 | FROM tempdb.sys.dm_db_file_space_usage;
23 | 
24 | -- (5) Get Allocations by session.
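-- The CTE that follows nets allocated minus deallocated pages (user objects + internal objects) per
-- session from sys.dm_db_session_space_usage; the counts are 8-KB pages, converted to an approximate MB figure.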
25 | ;with TempDBAlloc 26 | as 27 | ( 28 | SELECT 29 | ((user_objects_alloc_page_count-user_objects_dealloc_page_count) + (internal_objects_alloc_page_count-internal_objects_dealloc_page_count)) / 129 AS tempdb_current_usage_MB, 30 | es.session_id, 31 | es.host_name, 32 | es.program_name, 33 | es.login_name, 34 | es.last_request_end_time 35 | FROM sys.dm_db_session_space_usage ss 36 | join sys.dm_exec_sessions es on es.session_id = ss.session_id 37 | ) 38 | select * 39 | from TempDBAlloc 40 | where tempdb_current_usage_MB>0 41 | order by tempdb_current_usage_MB desc 42 | 43 | 44 | -- (6) get size for temp tables 45 | SELECT 46 | s.Name AS SchemaName, 47 | t.NAME AS TableName, 48 | p.rows AS RowCounts, 49 | SUM(a.total_pages) * 8 AS TotalSpaceKB, 50 | SUM(a.used_pages) * 8 AS UsedSpaceKB, 51 | (SUM(a.total_pages) - SUM(a.used_pages)) * 8 AS UnusedSpaceKB 52 | FROM 53 | tempdb.sys.tables(nolock) t 54 | INNER JOIN 55 | tempdb.sys.schemas(nolock) s ON s.schema_id = t.schema_id 56 | INNER JOIN 57 | tempdb.sys.indexes(nolock) i ON t.OBJECT_ID = i.object_id 58 | INNER JOIN 59 | tempdb.sys.partitions(nolock) p ON i.object_id = p.OBJECT_ID AND i.index_id = p.index_id 60 | INNER JOIN 61 | tempdb.sys.allocation_units(nolock) a ON p.partition_id = a.container_id 62 | WHERE 63 | t.NAME LIKE '#%' -- filter out system tables for diagramming 64 | GROUP BY 65 | t.Name, s.Name, p.Rows 66 | ORDER BY 67 | s.Name, t.Name 68 | 69 | -- (7) Get Log file information 70 | select counter_name,instance_name, SizeInMB=cntr_value/1024 71 | from sys.dm_os_performance_counters 72 | where counter_name in('Log File(s) Size (KB)','Log File(s) Used Size (KB)') 73 | and instance_name='tempdb' 74 | 75 | --(8) Drill down to get information about log consumers 76 | ;with CTE as 77 | (select 78 | DBT.database_id, 79 | SS.session_id, 80 | SS.host_name, 81 | SS.program_name, 82 | SS.login_name, 83 | DBT.transaction_id, 84 | DBT.database_transaction_begin_time, 85 | database_transaction_type_desc = case DBT.database_transaction_type 86 | when 1 then 'Read/write transaction' 87 | when 2 then 'Read-only transaction' 88 | when 3 then 'System transaction' 89 | end, 90 | database_transaction_state_desc = case DBT.database_transaction_state 91 | when 1 then 'The transaction has not been initialized' 92 | when 3 then 'The transaction has been initialized but has not generated any log records' 93 | when 4 then 'The transaction has generated log records' 94 | when 5 then 'The transaction has been prepared' 95 | when 10 then 'The transaction has been committed' 96 | when 11 then 'The transaction has been rolled back' 97 | when 12 then 'The transaction is being committed. (The log record is being generated, but has not been materialized or persisted.' 
98 | end, 99 | SST.open_transaction_count, 100 | TotalLogSpaceReserved = format(1.0*(DBT.database_transaction_log_bytes_reserved+DBT.database_transaction_log_bytes_reserved_system)/1024/1024,'#,##0'), 101 | database_transaction_log_bytes_used_MB = format(1.0*DBT.database_transaction_log_bytes_used/1024/1024,'#,##0'), 102 | database_transaction_log_bytes_reserved_MB = format(1.0*DBT.database_transaction_log_bytes_reserved/1024/1024,'#,##0'), 103 | database_transaction_log_bytes_used_system_MB = format(1.0*DBT.database_transaction_log_bytes_used_system/1024/1024,'#,##0'), 104 | database_transaction_log_bytes_reserved_system_MB = format(1.0*DBT.database_transaction_log_bytes_reserved_system/1024/1024,'#,##0') 105 | From tempdb.sys.dm_tran_database_transactions DBT 106 | left join tempdb.sys.dm_tran_session_transactions SST on DBT.transaction_id = SST.transaction_id 107 | left join tempdb.sys.dm_exec_sessions SS on SS.session_id = SST.session_id 108 | ) 109 | select 110 | database_id, 111 | session_id, 112 | host_name, 113 | program_name, 114 | transaction_id, 115 | database_transaction_begin_time, 116 | database_transaction_type_desc, 117 | database_transaction_state_desc, 118 | TotalLogSpaceReserved, 119 | database_transaction_log_bytes_used_MB, 120 | database_transaction_log_bytes_reserved_MB , 121 | database_transaction_log_bytes_used_system_MB, 122 | database_transaction_log_bytes_reserved_system_MB 123 | from CTE 124 | where transaction_id in (select transaction_id from CTE where TotalLogSpaceReserved>0) 125 | -- in case of using elastic pool you may switch to master and query for the database name of the database ID returned by this query 126 | 127 | 128 | -- another option to monitor TemoDB usage is by using Adam Machanic WhoIsActive stored procedure. 129 | -- http://whoisactive.com/ 130 | 131 | 132 | -------------------------------------------------------------------------------- /MasterKeyIssue/RemoveMasterKeyLT4GB.ps1: -------------------------------------------------------------------------------- 1 | <# 2 | .SYNOPSIS 3 | Patches the model.xml and origin.xml in a .bacpac with the master key information removed. 4 | .DESCRIPTION 5 | When exporting a .bacpac from Azure SQLDB with auditing enabled, the .bacpac will contain a master key without a password in the model.xml. A 6 | master key without a password is an Azure SQLDB only feature, so it's presence prevents being able to import the .bacpac into an on-premise 7 | SQL Server database. This script works around this limitation by extracting the model.xml and origin.xml from the .bacpac, removing the references 8 | to the master key, and then updating the .bacpac with the new model.xml and origin.xml. The resulting .bacpac can then be imported to an on-premise 9 | database. By default, a copy of the original .bacpac is made, and the copy is updated. Using the -skipCopy parameter will skip the copy step. 10 | .EXAMPLE 11 | C:\PS> .\RemoveMasterKey.ps1 -bacpacPath "C:\BacPacs\Test.bacpac" -skipCopy 12 | .PARAMETER bacpacPath 13 | Specifies the path the .bacpac to patch. 14 | .PARAMETER skipCopy 15 | If specified, copies the .bacpac before making updates. 16 | #> 17 | param( 18 | [Parameter(Mandatory=$true, HelpMessage="Specifies the path the .bacpac to patch.")] 19 | [string]$bacpacPath, 20 | [Parameter(Mandatory=$false, HelpMessage="If specified, copies the .bacpac before making updates.")] 21 | [switch]$skipCopy 22 | ) 23 | 24 | 25 | if ($PSVersionTable.PSVersion.Major -lt 4) 26 | { 27 | Write-Host "Unsupported powershell version. 
This script requires powershell version 4.0 or later" 28 | return 29 | } 30 | 31 | 32 | Add-Type -Assembly System.IO.Compression.FileSystem 33 | 34 | 35 | $targetBacpacPath = [System.IO.Path]::GetFullPath($bacpacPath) 36 | if (!$skipCopy) 37 | { 38 | $targetBacpacPath = [System.IO.Path]::Combine( 39 | [System.IO.Path]::GetDirectoryName($targetBacpacPath), 40 | [System.IO.Path]::GetFileNameWithoutExtension($targetBacpacPath) + "-patched" + [System.IO.Path]::GetExtension($targetBacpacPath)) 41 | 42 | Copy-Item $bacpacPath $targetBacpacPath 43 | } 44 | 45 | 46 | $originXmlFile = [System.IO.Path]::Combine([System.IO.Path]::GetDirectoryName($targetBacpacPath), "Origin.xml") 47 | $modelXmlFile = [System.IO.Path]::Combine([System.IO.Path]::GetDirectoryName($targetBacpacPath), "model.xml") 48 | 49 | 50 | # 51 | # Extract the model.xml and Origin.xml from the .bacpac 52 | # 53 | $zip = [IO.Compression.ZipFile]::OpenRead($targetBacpacPath) 54 | foreach ($entry in $zip.Entries) { 55 | if ([string]::Compare($entry.Name, "model.xml", $True) -eq 0) { 56 | [System.IO.Compression.ZipFileExtensions]::ExtractToFile($entry, $modelXmlFile, $true) 57 | break 58 | } 59 | } 60 | foreach ($entry in $zip.Entries) { 61 | if ([string]::Compare($entry.Name, "Origin.xml", $True) -eq 0) { 62 | [System.IO.Compression.ZipFileExtensions]::ExtractToFile($entry, $originXmlFile, $true) 63 | break 64 | } 65 | } 66 | $zip.Dispose() 67 | 68 | 69 | if(![System.IO.File]::Exists($modelXmlFile)) { 70 | Write-Host "Could not extract the model.xml from file " + $targetBacpacPath 71 | return 72 | } 73 | if(![System.IO.File]::Exists($originXmlFile)) { 74 | Write-Host "Could not extract the Origin.xml from file " + $targetBacpacPath 75 | return 76 | } 77 | 78 | 79 | # 80 | # Modify the model.xml 81 | # 82 | [xml]$modelXml = Get-Content $modelXmlFile 83 | $ns = New-Object System.Xml.XmlNamespaceManager($modelXml.NameTable) 84 | $ns.AddNamespace("x", $modelXml.DocumentElement.NamespaceURI) 85 | 86 | 87 | $masterKeyNodes = $modelXml.SelectNodes("//x:DataSchemaModel/x:Model/x:Element[@Type='SqlMasterKey']", $ns) 88 | foreach ($masterKeyNode in $masterKeyNodes) { 89 | $masterKeyNode.ParentNode.RemoveChild($masterKeyNode) 90 | } 91 | 92 | 93 | $sqlDatabaseCredentialNodes = $modelXml.SelectNodes("//x:DataSchemaModel/x:Model/x:Element[@Type='SqlDatabaseCredential']", $ns) 94 | foreach ($sqlDatabaseCredentialNode in $sqlDatabaseCredentialNodes) { 95 | if ($sqlDatabaseCredentialNode.Property.Name -eq "Identity" -and $sqlDatabaseCredentialNode.Property.Value -eq "SHARED ACCESS SIGNATURE") 96 | { 97 | $sqlDatabaseCredentialNode.ParentNode.RemoveChild($sqlDatabaseCredentialNode) 98 | } 99 | } 100 | 101 | 102 | $modelXml.Save($modelXmlFile) 103 | 104 | 105 | # 106 | # Modify the Origin.xml 107 | # 108 | [xml]$originXml = Get-Content $originXmlFile 109 | $ns = New-Object System.Xml.XmlNamespaceManager($originXml.NameTable) 110 | $ns.AddNamespace("x", $originXml.DocumentElement.NamespaceURI) 111 | 112 | 113 | $databaseCredentialNode = $originXml.SelectSingleNode("//x:DacOrigin/x:Server/x:ObjectCounts/x:DatabaseCredential", $ns) 114 | if ($databaseCredentialNode) { 115 | if ($databaseCredentialNode.InnerText -eq "1") { 116 | $databaseCredentialNode.ParentNode.RemoveChild($databaseCredentialNode) 117 | } else { 118 | $databaseCredentialNode.InnerText = $databaseCredentialNode.Value - 1 119 | } 120 | } 121 | 122 | 123 | $masterKeyNode = $originXml.SelectSingleNode("//x:DacOrigin/x:Server/x:ObjectCounts/x:MasterKey", $ns) 124 | if ($masterKeyNode) { 125 | 
$masterKeyNode.ParentNode.RemoveChild($masterKeyNode) 126 | } 127 | 128 | 129 | $modelXmlHash = (Get-FileHash $modelXmlFile -Algorithm SHA256).Hash 130 | $checksumNode = $originXml.SelectSingleNode("//x:DacOrigin/x:Checksums/x:Checksum", $ns) 131 | if ($checksumNode) { 132 | $checksumNode.InnerText = $modelXmlHash 133 | } 134 | 135 | 136 | $originXml.Save($originXmlFile) 137 | 138 | 139 | # 140 | # Update the model.xml and Origin.xml in the .bacpac 141 | # 142 | $zip = [System.IO.Compression.ZipFile]::Open($targetBacpacPath, "Update") 143 | foreach ($entry in $zip.Entries) { 144 | if ([string]::Compare($entry.Name, "Origin.xml", $True) -eq 0) { 145 | $entryName = $entry.FullName 146 | $entry.Delete() 147 | [System.IO.Compression.ZipFileExtensions]::CreateEntryFromFile($zip, $originXmlFile, $entryName) 148 | break 149 | } 150 | } 151 | foreach ($entry in $zip.Entries) { 152 | if ([string]::Compare($entry.Name, "model.xml", $True) -eq 0) { 153 | $entryName = $entry.FullName 154 | $entry.Delete() 155 | [System.IO.Compression.ZipFileExtensions]::CreateEntryFromFile($zip, $modelXmlFile, $entryName) 156 | break 157 | } 158 | } 159 | $zip.Dispose() 160 | 161 | 162 | [System.IO.File]::Delete($modelXmlFile) 163 | [System.IO.File]::Delete($originXmlFile) 164 | 165 | 166 | Write-Host "Completed update to the model.xml and Origin.xml in file "([System.IO.Path]::GetFullPath($targetBacpacPath)) -------------------------------------------------------------------------------- /MasterKeyIssue/RemoveMasterKeyGT4GB.ps1: -------------------------------------------------------------------------------- 1 | <# 2 | .SYNOPSIS 3 | Given a new [file].bacpac, creates a new [file]-patched.bacpac with master key information removed from the model.xml and origin.xml. 4 | .DESCRIPTION 5 | When exporting a .bacpac from Azure SQLDB with auditing enabled, the .bacpac will contain a master key without a password in the model.xml. A 6 | master key without a password is an Azure SQLDB only feature, so it's presence prevents being able to import the .bacpac into an on-premise 7 | SQL Server database. This script works around this limitation by extracting the model.xml and origin.xml from the .bacpac, removing the references 8 | to the master key, and creating a new .bacpac with the updated model.xml and origin.xml. The resulting .bacpac can then be imported to an on-premise 9 | database. 10 | .EXAMPLE 11 | C:\PS> .\RemoveMasterKey.ps1 -bacpacPath "C:\BacPacs\Test.bacpac" # Generates a Test-patched.bacpac file 12 | .PARAMETER bacpacPath 13 | Specifies the path the .bacpac to patch. 14 | #> 15 | param( 16 | [Parameter(Mandatory=$true, HelpMessage="Specifies the path the .bacpac. This file will not be modified.")] 17 | [string]$bacpacPath 18 | ) 19 | 20 | 21 | if ($PSVersionTable.PSVersion.Major -lt 4) { 22 | Write-Host "Unsupported powershell version. 
This script requires powershell version 4.0 or later" 23 | return 24 | } 25 | 26 | 27 | Add-Type -Assembly System.IO.Compression.FileSystem 28 | 29 | 30 | $targetBacpacPath = [System.IO.Path]::Combine( 31 | [System.IO.Path]::GetDirectoryName($bacpacPath), 32 | [System.IO.Path]::GetFileNameWithoutExtension($bacpacPath) + "-patched" + [System.IO.Path]::GetExtension($bacpacPath)) 33 | $originXmlFile = [System.IO.Path]::Combine([System.IO.Path]::GetDirectoryName($targetBacpacPath), "Origin.xml") 34 | $modelXmlFile = [System.IO.Path]::Combine([System.IO.Path]::GetDirectoryName($targetBacpacPath), "model.xml") 35 | 36 | 37 | if ([System.IO.File]::Exists($targetBacpacPath)) { 38 | [System.IO.File]::Delete($targetBacpacPath) 39 | } 40 | 41 | 42 | # 43 | # Extract the model.xml and Origin.xml from the .bacpac 44 | # 45 | $zip = [System.IO.Compression.ZipFile]::OpenRead($bacpacPath) 46 | foreach ($entry in $zip.Entries) { 47 | if ([string]::Compare($entry.Name, "model.xml", $True) -eq 0) { 48 | [System.IO.Compression.ZipFileExtensions]::ExtractToFile($entry, $modelXmlFile, $true) 49 | break 50 | } 51 | } 52 | foreach ($entry in $zip.Entries) { 53 | if ([string]::Compare($entry.Name, "Origin.xml", $True) -eq 0) { 54 | [System.IO.Compression.ZipFileExtensions]::ExtractToFile($entry, $originXmlFile, $true) 55 | break 56 | } 57 | } 58 | $zip.Dispose() 59 | 60 | 61 | if(![System.IO.File]::Exists($modelXmlFile)) { 62 | Write-Host "Could not extract the model.xml from file " + $bacpacPath 63 | return 64 | } 65 | if(![System.IO.File]::Exists($originXmlFile)) { 66 | Write-Host "Could not extract the Origin.xml from file " + $bacpacPath 67 | return 68 | } 69 | 70 | 71 | # 72 | # Modify the model.xml 73 | # 74 | [xml]$modelXml = Get-Content $modelXmlFile 75 | $ns = New-Object System.Xml.XmlNamespaceManager($modelXml.NameTable) 76 | $ns.AddNamespace("x", $modelXml.DocumentElement.NamespaceURI) 77 | 78 | 79 | $masterKeyNodes = $modelXml.SelectNodes("//x:DataSchemaModel/x:Model/x:Element[@Type='SqlMasterKey']", $ns) 80 | foreach ($masterKeyNode in $masterKeyNodes) { 81 | $masterKeyNode.ParentNode.RemoveChild($masterKeyNode) 82 | } 83 | 84 | 85 | $sqlDatabaseCredentialNodes = $modelXml.SelectNodes("//x:DataSchemaModel/x:Model/x:Element[@Type='SqlDatabaseCredential']", $ns) 86 | foreach ($sqlDatabaseCredentialNode in $sqlDatabaseCredentialNodes) { 87 | if ($sqlDatabaseCredentialNode.Property.Name -eq "Identity" -and $sqlDatabaseCredentialNode.Property.Value -eq "SHARED ACCESS SIGNATURE") 88 | { 89 | $sqlDatabaseCredentialNode.ParentNode.RemoveChild($sqlDatabaseCredentialNode) 90 | } 91 | } 92 | 93 | 94 | $modelXml.Save($modelXmlFile) 95 | 96 | 97 | # 98 | # Modify the Origin.xml 99 | # 100 | [xml]$originXml = Get-Content $originXmlFile 101 | $ns = New-Object System.Xml.XmlNamespaceManager($originXml.NameTable) 102 | $ns.AddNamespace("x", $originXml.DocumentElement.NamespaceURI) 103 | 104 | 105 | $databaseCredentialNode = $originXml.SelectSingleNode("//x:DacOrigin/x:Server/x:ObjectCounts/x:DatabaseCredential", $ns) 106 | if ($databaseCredentialNode) { 107 | if ($databaseCredentialNode.InnerText -eq "1") { 108 | $databaseCredentialNode.ParentNode.RemoveChild($databaseCredentialNode) 109 | } else { 110 | $databaseCredentialNode.InnerText = $databaseCredentialNode.Value - 1 111 | } 112 | } 113 | 114 | 115 | $masterKeyNode = $originXml.SelectSingleNode("//x:DacOrigin/x:Server/x:ObjectCounts/x:MasterKey", $ns) 116 | if ($masterKeyNode) { 117 | $masterKeyNode.ParentNode.RemoveChild($masterKeyNode) 118 | } 119 | 120 | 121 
| $modelXmlHash = (Get-FileHash $modelXmlFile -Algorithm SHA256).Hash 122 | $checksumNode = $originXml.SelectSingleNode("//x:DacOrigin/x:Checksums/x:Checksum", $ns) 123 | if ($checksumNode) { 124 | $checksumNode.InnerText = $modelXmlHash 125 | } 126 | 127 | 128 | $originXml.Save($originXmlFile) 129 | 130 | 131 | # 132 | # Create the new .bacpac using the patched model.xml and Origin.xml 133 | # 134 | $zipSource = [System.IO.Compression.ZipFile]::OpenRead($bacpacPath) 135 | $zipTarget = [System.IO.Compression.ZipFile]::Open($targetBacpacPath, "Create") 136 | foreach ($entry in $zipSource.Entries) { 137 | if ([string]::Compare($entry.Name, "Origin.xml", $True) -eq 0) { 138 | [System.IO.Compression.ZipFileExtensions]::CreateEntryFromFile($zipTarget, $originXmlFile, $entry.FullName) 139 | } elseif ([string]::Compare($entry.Name, "model.xml", $True) -eq 0) { 140 | [System.IO.Compression.ZipFileExtensions]::CreateEntryFromFile($zipTarget, $modelXmlFile, $entry.FullName) 141 | } else { 142 | $targetEntry = $zipTarget.CreateEntry($entry.FullName) 143 | $sourceStream = $null 144 | $targetStream = $null 145 | try { 146 | $sourceStream = [System.IO.Stream]$entry.Open() 147 | $targetStream = [System.IO.Stream]$targetEntry.Open() 148 | $sourceStream.CopyTo($targetStream) 149 | } 150 | finally { 151 | if ($targetStream -ne $null) { 152 | $targetStream.Dispose() 153 | } 154 | if ($sourceStream -ne $null) { 155 | $sourceStream.Dispose() 156 | } 157 | } 158 | } 159 | } 160 | $zipSource.Dispose() 161 | $zipTarget.Dispose() 162 | 163 | 164 | [System.IO.File]::Delete($modelXmlFile) 165 | [System.IO.File]::Delete($originXmlFile) 166 | 167 | 168 | Write-Host "Completed update to the model.xml and Origin.xml in file"([System.IO.Path]::GetFullPath($targetBacpacPath)) -------------------------------------------------------------------------------- /AzureSQLDB_Diagnostics.txt: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Updated: 2022-07-13 4 | 5 | + open new query window using SSMS. 6 | + paste this whole script. 7 | + right click on the query windows and then 8 | >> Results to -> Results to file 9 | + execute the query 10 | + select the file to keep the results. 11 | 12 | */ 13 | set nocount on 14 | 15 | --(0) - timestamp and metadata 16 | print '*** General information' 17 | print 'SysDateTime' 18 | select sysdatetime() 19 | print 'ServerName' 20 | select @@servername 21 | print 'DatabaseName' 22 | select db_name() as DatabaseName 23 | 24 | -- Get SLO Level and size 25 | print '*** Get SLO Level and size' 26 | SELECT DATABASEPROPERTYEX(db_name(), 'Collation') AS Collation 27 | SELECT DATABASEPROPERTYEX(db_name(), 'Edition') AS Edition 28 | SELECT DATABASEPROPERTYEX(db_name(), 'ServiceObjective') AS ServiceObjective 29 | SELECT DATABASEPROPERTYEX(db_name(), 'MaxSizeInBytes') AS MaxSizeInBytes 30 | SELECT DATABASEPROPERTYEX(db_name(), 'IsParameterizationForced') AS Parameterization 31 | SELECT @@version 32 | 33 | 34 | --(1) 35 | print '***When were Statistics last updated on all indexes? 
' 36 | select 37 | ObjectSchema = OBJECT_SCHEMA_NAME(s.object_id) 38 | ,ObjectName = object_name(s.object_id) 39 | ,StatsName = s.name 40 | ,sp.last_updated 41 | ,idx.rowcnt CurrentRowCnt 42 | ,sp.rows RowCntWhenStatsTaken 43 | ,sp.rows_sampled 44 | ,sp.modification_counter 45 | ,pct_modified = format((1.0*sp.modification_counter / idx.rowcnt),'p') 46 | ,LastStatsUpdatedWith = iif(sp.rows_sampled=sp.rows,'FullScan','Partial') 47 | ,'UPDATE STATISTICS ['+ OBJECT_SCHEMA_NAME(s.object_id) + '].[' + object_name(s.object_id) + '](' + s.name + ') WITH FULLSCAN' 48 | from sys.stats s cross apply sys.dm_db_stats_properties(s.object_id,s.stats_id) sp 49 | join sys.objects o on s.object_id = o.object_id 50 | left join sys.sysindexes idx on idx.id = s.object_id and idx.indid in (0,1) 51 | where s.object_id>100 and o.schema_id!=4 /*sys*/ and idx.rowcnt>0 52 | order by modification_counter desc 53 | 54 | 55 | 56 | --(2) 57 | print '***Get fragmentation info for all indexes above a certain size in the current database' 58 | -- Note: This could take some time on a very large database 59 | SELECT DB_NAME(database_id) AS [Database Name], OBJECT_NAME(ps.OBJECT_ID) AS [Object Name], 60 | i.name AS [Index Name], ps.index_id, ps.index_type_desc, ps.avg_fragmentation_in_percent, 61 | ps.fragment_count, ps.page_count, i.fill_factor, i.has_filter, i.filter_definition 62 | FROM sys.dm_db_index_physical_stats(DB_ID(),NULL, NULL, NULL ,'LIMITED') AS ps 63 | INNER JOIN sys.indexes AS i WITH (NOLOCK) 64 | ON ps.[object_id] = i.[object_id] 65 | AND ps.index_id = i.index_id 66 | WHERE database_id = DB_ID() 67 | AND page_count > 250 68 | ORDER BY avg_fragmentation_in_percent DESC OPTION (RECOMPILE); 69 | 70 | --(3) 71 | print '***Index Read/Write stats (all tables in current DB) ordered by Reads' 72 | SELECT OBJECT_NAME(s.[object_id]) AS [ObjectName], i.name AS [IndexName], i.index_id, 73 | user_seeks + user_scans + user_lookups AS [Reads], s.user_updates AS [Writes], 74 | i.type_desc AS [IndexType], i.fill_factor AS [FillFactor], i.has_filter, i.filter_definition, 75 | s.last_user_scan, s.last_user_lookup, s.last_user_seek 76 | FROM sys.dm_db_index_usage_stats AS s WITH (NOLOCK) 77 | INNER JOIN sys.indexes AS i WITH (NOLOCK) 78 | ON s.[object_id] = i.[object_id] 79 | WHERE OBJECTPROPERTY(s.[object_id],'IsUserTable') = 1 80 | AND i.index_id = s.index_id 81 | AND s.database_id = DB_ID() 82 | ORDER BY user_seeks + user_scans + user_lookups DESC OPTION (RECOMPILE); -- Order by reads 83 | 84 | 85 | --(4) 86 | print '***full rowset of sys.dm_db_file_space_usage' 87 | select * from sys.dm_db_file_space_usage 88 | go 89 | 90 | print '***Table sizes' 91 | SELECT 92 | s.Name AS SchemaName, 93 | t.NAME AS TableName, 94 | p.rows AS RowCounts, 95 | SUM(a.total_pages) * 8 AS TotalSpaceKB, 96 | SUM(a.used_pages) * 8 AS UsedSpaceKB, 97 | (SUM(a.total_pages) - SUM(a.used_pages)) * 8 AS UnusedSpaceKB 98 | FROM 99 | sys.tables t 100 | INNER JOIN 101 | sys.schemas s ON s.schema_id = t.schema_id 102 | INNER JOIN 103 | sys.indexes i ON t.OBJECT_ID = i.object_id 104 | INNER JOIN 105 | sys.partitions p ON i.object_id = p.OBJECT_ID AND i.index_id = p.index_id 106 | INNER JOIN 107 | sys.allocation_units a ON p.partition_id = a.container_id 108 | WHERE 109 | t.NAME NOT LIKE 'dt%' -- filter out system tables for diagramming 110 | AND t.is_ms_shipped = 0 111 | AND i.OBJECT_ID > 255 112 | GROUP BY 113 | t.Name, s.Name, p.Rows 114 | ORDER BY 115 | s.Name, t.Name 116 | 117 | go 118 | print 'sp_spaceused' 119 | exec sp_spaceused 120 | go 121 | 122 | 
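-- Note on the next section: [IndexAdvantage] is computed as user_seeks * avg_total_user_cost * (avg_user_impact / 100),
-- i.e. it weighs how often the missing index would have been used against its estimated cost and expected improvement.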
123 | -- (5) 124 | print '***missing indexes' 125 | SELECT 126 | id.[object_id] AS [ObjectID] 127 | ,id.[statement] AS [FullyQualifiedObjectName] 128 | ,id.[equality_columns] AS [EqualityColumns] 129 | ,id.[inequality_columns] AS [InEqualityColumns] 130 | ,id.[included_columns] AS [IncludedColumns] 131 | ,gs.[unique_compiles] AS [UniqueCompiles] 132 | ,gs.[user_seeks] AS [UserSeeks] 133 | ,gs.[user_scans] AS [UserScans] 134 | ,gs.[last_user_seek] AS [LastUserSeekTime] 135 | ,gs.[last_user_scan] AS [LastUserScanTime] 136 | ,gs.[avg_total_user_cost] AS [AvgTotalUserCost] 137 | ,gs.[avg_user_impact] AS [AvgUserImpact] 138 | ,gs.[system_seeks] AS [SystemSeeks] 139 | ,gs.[system_scans] AS [SystemScans] 140 | ,gs.[last_system_seek] AS [LastSystemSeekTime] 141 | ,gs.[last_system_scan] AS [LastSystemScanTime] 142 | ,gs.[avg_total_system_cost] AS [AvgTotalSystemCost] 143 | ,gs.[avg_system_impact] AS [AvgSystemImpact] 144 | ,gs.[user_seeks] * gs.[avg_total_user_cost] * (gs.[avg_user_impact] * 0.01) AS [IndexAdvantage] 145 | ,'CREATE INDEX [Missing_IXNC_' + OBJECT_NAME(id.[object_id]) + '_' + REPLACE(REPLACE(REPLACE(ISNULL(id.[equality_columns], ''), ', ', '_'), '[', ''), ']', '') + CASE 146 | WHEN id.[equality_columns] IS NOT NULL 147 | AND id.[inequality_columns] IS NOT NULL 148 | THEN '_' 149 | ELSE '' 150 | END + REPLACE(REPLACE(REPLACE(ISNULL(id.[inequality_columns], ''), ', ', '_'), '[', ''), ']', '') + '_' + LEFT(CAST(NEWID() AS [nvarchar](64)), 5) + ']' + ' ON ' + id.[statement] + ' (' + ISNULL(id.[equality_columns], '') + CASE 151 | WHEN id.[equality_columns] IS NOT NULL 152 | AND id.[inequality_columns] IS NOT NULL 153 | THEN ',' 154 | ELSE '' 155 | END + ISNULL(id.[inequality_columns], '') + ')' + ISNULL(' INCLUDE (' + id.[included_columns] + ')', '') AS [ProposedIndex] 156 | ,CAST(CURRENT_TIMESTAMP AS [smalldatetime]) AS [CollectionDate] 157 | FROM [sys].[dm_db_missing_index_group_stats] gs WITH (NOLOCK) 158 | INNER JOIN [sys].[dm_db_missing_index_groups] ig WITH (NOLOCK) 159 | ON gs.[group_handle] = ig.[index_group_handle] 160 | INNER JOIN [sys].[dm_db_missing_index_details] id WITH (NOLOCK) 161 | ON ig.[index_handle] = id.[index_handle] 162 | ORDER BY [IndexAdvantage] DESC 163 | OPTION (RECOMPILE); 164 | 165 | 166 | -- (6) 167 | 168 | print '***Get Average Waits for Database'; 169 | WITH [Waits] AS 170 | (SELECT 171 | [wait_type], 172 | [wait_time_ms] / 1000.0 AS [WaitS], 173 | ([wait_time_ms] - [signal_wait_time_ms]) / 1000.0 AS [ResourceS], 174 | [signal_wait_time_ms] / 1000.0 AS [SignalS], 175 | [waiting_tasks_count] AS [WaitCount], 176 | 100.0 * [wait_time_ms] / SUM ([wait_time_ms]) OVER() AS [Percentage], 177 | ROW_NUMBER() OVER(ORDER BY [wait_time_ms] DESC) AS [RowNum] 178 | FROM sys.dm_db_wait_stats 179 | WHERE [wait_type] NOT IN ( 180 | N'BROKER_EVENTHANDLER', N'BROKER_RECEIVE_WAITFOR', 181 | N'BROKER_TASK_STOP', N'BROKER_TO_FLUSH', 182 | N'BROKER_TRANSMITTER', N'CHECKPOINT_QUEUE', 183 | N'CHKPT', N'CLR_AUTO_EVENT', 184 | N'CLR_MANUAL_EVENT', N'CLR_SEMAPHORE', 185 | N'DBMIRROR_DBM_EVENT', N'DBMIRROR_EVENTS_QUEUE', 186 | N'DBMIRROR_WORKER_QUEUE', N'DBMIRRORING_CMD', 187 | N'DIRTY_PAGE_POLL', N'DISPATCHER_QUEUE_SEMAPHORE', 188 | N'EXECSYNC', N'FSAGENT', 189 | N'FT_IFTS_SCHEDULER_IDLE_WAIT', N'FT_IFTSHC_MUTEX', 190 | N'HADR_CLUSAPI_CALL', N'HADR_FILESTREAM_IOMGR_IOCOMPLETION', 191 | N'HADR_LOGCAPTURE_WAIT', N'HADR_NOTIFICATION_DEQUEUE', 192 | N'HADR_TIMER_TASK', N'HADR_WORK_QUEUE', 193 | N'KSOURCE_WAKEUP', N'LAZYWRITER_SLEEP', 194 | N'LOGMGR_QUEUE', N'ONDEMAND_TASK_QUEUE', 195 | 
N'PWAIT_ALL_COMPONENTS_INITIALIZED', 196 | N'QDS_PERSIST_TASK_MAIN_LOOP_SLEEP', 197 | N'QDS_CLEANUP_STALE_QUERIES_TASK_MAIN_LOOP_SLEEP', 198 | N'REQUEST_FOR_DEADLOCK_SEARCH', N'RESOURCE_QUEUE', 199 | N'SERVER_IDLE_CHECK', N'SLEEP_BPOOL_FLUSH', 200 | N'SLEEP_DBSTARTUP', N'SLEEP_DCOMSTARTUP', 201 | N'SLEEP_MASTERDBREADY', N'SLEEP_MASTERMDREADY', 202 | N'SLEEP_MASTERUPGRADED', N'SLEEP_MSDBSTARTUP', 203 | N'SLEEP_SYSTEMTASK', N'SLEEP_TASK', 204 | N'SLEEP_TEMPDBSTARTUP', N'SNI_HTTP_ACCEPT', 205 | N'SP_SERVER_DIAGNOSTICS_SLEEP', N'SQLTRACE_BUFFER_FLUSH', 206 | N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', 207 | N'SQLTRACE_WAIT_ENTRIES', N'WAIT_FOR_RESULTS', 208 | N'WAITFOR', N'WAITFOR_TASKSHUTDOWN', 209 | N'WAIT_XTP_HOST_WAIT', N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG', 210 | N'WAIT_XTP_CKPT_CLOSE', N'XE_DISPATCHER_JOIN', 211 | N'XE_DISPATCHER_WAIT', N'XE_TIMER_EVENT') 212 | ) 213 | SELECT 214 | [W1].[wait_type] AS [WaitType], 215 | CAST ([W1].[WaitS] AS DECIMAL (16, 2)) AS [Wait_S], 216 | CAST ([W1].[ResourceS] AS DECIMAL (16, 2)) AS [Resource_S], 217 | CAST ([W1].[SignalS] AS DECIMAL (16, 2)) AS [Signal_S], 218 | [W1].[WaitCount] AS [WaitCount], 219 | CAST ([W1].[Percentage] AS DECIMAL (5, 2)) AS [Percentage], 220 | CAST (([W1].[WaitS] / [W1].[WaitCount]) AS DECIMAL (16, 4)) AS [AvgWait_S], 221 | CAST (([W1].[ResourceS] / [W1].[WaitCount]) AS DECIMAL (16, 4)) AS [AvgRes_S], 222 | CAST (([W1].[SignalS] / [W1].[WaitCount]) AS DECIMAL (16, 4)) AS [AvgSig_S] 223 | FROM [Waits] AS [W1] 224 | INNER JOIN [Waits] AS [W2] 225 | ON [W2].[RowNum] <= [W1].[RowNum] 226 | GROUP BY [W1].[RowNum], [W1].[wait_type], [W1].[WaitS], 227 | [W1].[ResourceS], [W1].[SignalS], [W1].[WaitCount], [W1].[Percentage] 228 | HAVING SUM ([W2].[Percentage]) - [W1].[Percentage] < 95; -- percentage threshold 229 | GO 230 | 231 | 232 | --(7) - Currently active queries 233 | print '*** exec requests' 234 | select * from sys.dm_exec_requests 235 | 236 | print '*** exec sessions' 237 | SELECT 238 | c.session_id, c.net_transport, c.encrypt_option, 239 | c.auth_scheme, s.host_name, s.program_name, 240 | s.client_interface_name, s.login_name, s.nt_domain, 241 | s.nt_user_name, s.original_login_name, c.connect_time, 242 | s.login_time 243 | FROM sys.dm_exec_connections AS c 244 | JOIN sys.dm_exec_sessions AS s 245 | ON c.session_id = s.session_id 246 | 247 | 248 | --(8) - db stats 249 | print '*** content of dm_db_resource_stats' 250 | select * from sys.dm_db_resource_stats 251 | 252 | --(9) 253 | print '*** current blocking and running batches' 254 | SELECT 255 | sql_text.text, 256 | locks.resource_type, 257 | locks.resource_subtype, 258 | locks.resource_description, 259 | locks.resource_associated_entity_id, 260 | locks.request_mode, 261 | locks.request_status, 262 | ses.login_name, 263 | ses.original_login_name, 264 | ses.login_time, 265 | ses.host_name, 266 | ses.program_name, 267 | ses.last_request_start_time 268 | FROM sys.dm_tran_locks locks 269 | JOIN sys.dm_exec_sessions ses 270 | ON locks.request_session_id = ses.session_id 271 | JOIN sys.sysprocesses pr 272 | ON ses.session_id = pr.spid 273 | CROSS APPLY sys.dm_exec_sql_text(pr.sql_handle) sql_text; 274 | 275 | --(10) - database properties 276 | print '*** sys.databases' 277 | select * from sys.databases 278 | 279 | --(11) deadlocks 280 | print '*** deadlock' 281 | SET QUOTED_IDENTIFIER ON; 282 | WITH CTE AS ( 283 | SELECT CAST(event_data AS XML) AS target_data_XML 284 | FROM sys.fn_xe_telemetry_blob_target_read_file('dl', null, null, null) 285 | ) 286 | SELECT 
target_data_XML.value('(/event/@timestamp)[1]', 'DateTime2') AS Timestamp,
287 | target_data_XML.query('/event/data[@name=''xml_report'']/value/deadlock') AS deadlock_xml,
288 | target_data_XML.query('/event/data[@name=''database_name'']/value').value('(/value)[1]', 'nvarchar(100)') AS db_name
289 | FROM CTE
290 | 
291 | --(12)
292 | Print '*** database_scoped_configurations'
293 | select * from sys.database_scoped_configurations
294 | 
295 | --(13)
296 | Print '*** Persistent version store usage stats'
297 | select db_name(database_id),* from sys.dm_tran_persistent_version_store_stats
298 | 
299 | --(0) - timestamp
300 | select sysdatetime()
301 | 
--------------------------------------------------------------------------------

/AzureSQLMaintenance.sql:
--------------------------------------------------------------------------------
1 | /* Azure SQL Maintenance - Maintenance script for Azure SQL Database */
2 | /* This script is provided AS IS; please review the code before executing it in a production environment */
3 | /* For any issue or suggestion please email to: yocr@microsoft.com */
4 | /*
5 | ***********************************************
6 | Current Version Date: 2025-11-06
7 | ***********************************************
8 | 
9 | Change Log:
10 | 2025-11-04 - Add User Override options. (exclude or override the command, or add an extra command to the maintenance operation)
11 | 2024-09-23 - Avoid rebuilding heaps on external tables as this is not needed and not possible.
12 | 2024-09-18 - Preserve xml compression in case this was used for the index.
13 | 2024-04-18 - Add internal variable to control SORT_IN_TEMPDB, change the way the alter index command is being built to make it more flexible.
14 | 2023-07-13 - KB4551220 - skip resumable operation for indexes that have a filter
15 | 2022-11-08 - Ignore table-valued functions for index maintenance. Thanks to https://github.com/Mitch-Wheat for suggesting that
16 | 2022-10-06 - Fix help text that was mixed up, thanks Holger for pointing that out.
17 | 2022-10-03 - Add [whatif] as the first debug option, fix - do not skip stats for reorganize.
18 | 2022-04-25 - Set data type for @debug to nvarchar(10) as per comment I got from Paul McMillan - note that @debug is not in use at the moment.
19 | 2022-01-30 - As per comment from Tariq, remove the dbo schema name from the procedure and use the user default.
20 | 2021-12-08 - Fix issue #19 on GitHub
21 | 2021-01-07
22 | + some bug fixes - see GitHub for more information
23 | */
24 | 
25 | if object_id('AzureSQLMaintenance') is null
26 | exec('create procedure AzureSQLMaintenance as /*dummy procedure body*/ select 1;')
27 | GO
28 | ALTER PROCEDURE [AzureSQLMaintenance]
29 | (
30 | @operation nvarchar(10) = null,
31 | @mode nvarchar(10) = 'smart',
32 | @ResumableIndexRebuild bit = 0,
33 | @RebuildHeaps bit = 0,
34 | @LogToTable bit = 0,
35 | @debug nvarchar(10) = 'off'
36 | )
37 | as
38 | begin
39 | set nocount on;
40 | 
41 | ---------------------------------------------
42 | --- Variables and precondition checks
43 | ---------------------------------------------
44 | 
45 | set quoted_identifier on;
46 | declare @idxIdentifierBegin char(1), @idxIdentifierEnd char(1);
47 | declare @statsIdentifierBegin char(1), @statsIdentifierEnd char(1);
48 | 
49 | declare @msg nvarchar(max);
50 | declare @minPageCountForIndex int = 40;
51 | declare @OperationTime datetime2 = sysdatetime();
52 | declare @KeepXOperationInLog int =3;
53 | declare @ScriptHasAnError int = 0;
54 | declare @ResumableIndexRebuildSupported int;
55 | declare @indexStatsMode sysname;
56 | declare @LowFragmentationBoundry int = 5;
57 | declare @HighFragmentationBoundry int = 30;
58 | declare @SORT_IN_TEMPDB bit = 1; /* 1=Activate SORT_IN_TEMPDB , 0= do not activate SORT_IN_TEMPDB while rebuilding indexes */
59 | 
60 | /*
61 | Add your manual settings here
62 | */
63 | 
64 | /* make sure parameters are set correctly */
65 | set @operation = lower(@operation)
66 | set @mode = lower(@mode)
67 | set @debug = lower(@debug)
68 | 
69 | if @mode not in ('smart','dummy') or @mode is null
70 | set @mode = 'smart'
71 | 
72 | ---------------------------------------------
73 | --- Begin
74 | ---------------------------------------------
75 | 
76 | if @operation not in ('index','statistics','all') or @operation is null
77 | begin
78 | raiserror('@operation (nvarchar(10)) [mandatory]',0,0)
79 | raiserror('    Select operation to perform:',0,0)
80 | raiserror('        "index" to perform index maintenance',0,0)
81 | raiserror('        "statistics" to perform statistics maintenance',0,0)
82 | raiserror('        "all" to perform indexes and statistics maintenance',0,0)
83 | raiserror(' ',0,0)
84 | raiserror('@mode(nvarchar(10)) [optional]',0,0)
85 | raiserror('    optionally you can supply a second parameter for operation mode: ',0,0)
86 | raiserror('        "smart" (Default) uses a smart decision about which indexes or stats should be touched.',0,0)
87 | raiserror('        "dummy" goes through all indexes and statistics regardless of their modifications or fragmentation.',0,0)
88 | raiserror(' ',0,0)
89 | raiserror('@ResumableIndexRebuild(bit) [optional]',0,0)
90 | raiserror('    Optionally you can choose to rebuild indexes as a resumable operation: ',0,0)
91 | raiserror('        "0" (Default) using non-resumable index rebuild.',0,0)
92 | raiserror('        "1" using resumable index rebuild when it is supported.',0,0)
93 | raiserror(' ',0,0)
94 | raiserror('@RebuildHeaps(bit) [optional]',0,0)
95 | raiserror('    Rebuild HEAPS to fix the forwarded-records issue on tables with no clustered index',0,0)
96 | raiserror('        0 - (Default) do not rebuild heaps',0,0)
97 | raiserror('        1 - Rebuild heaps based on @mode parameter, @mode=dummy will rebuild all heaps',0,0)
98 | raiserror(' ',0,0)
99 | raiserror('@LogToTable(bit) [optional]',0,0)
100 | raiserror('    Optionally allows you to turn on logging ',0,0)
101 | raiserror('        0 - (Default) do not log operation to table',0,0)
102 | raiserror('        1 - log operation to table',0,0)
103 | raiserror('        for the logging option only the last 3 executions will be kept by default. this can easily be changed in the procedure body.',0,0)
104 | raiserror('        Log table will be created automatically if it does not exist.',0,0)
105 | raiserror(' ',0,0)
106 | raiserror('@debug [optional]',0,0)
107 | raiserror('    Allows debugging features.',0,0)
108 | raiserror('        off - (Default) debug option is off',0,0)
109 | raiserror('        whatif - Remarks out all commands so they will not be executed, helps with understanding the commands to be executed',0,0)
110 | raiserror('        * in any case debug is used, user tables will be created to help with reviewing the process: cmdQueue, idxBefore and statsBefore ',0,0)
111 | raiserror(' ',0,0)
112 | raiserror('User Override settings:',0,0)
113 | raiserror('    You are allowed to override the default behavior as follows ',0,0)
114 | raiserror('        - add a command to be executed in addition to the maintenance script',0,0)
115 | raiserror('        - Exclude either a whole schema, table, index or statistics from an operation',0,0)
116 | raiserror('        - force a command of your own, such as a sample percent for stats update or a specific index',0,0)
117 | raiserror('        for more information open the code and go to line 145 to get some samples and available options.',0,0)
118 | raiserror(' ',0,0)
119 | raiserror('Example:',0,0)
120 | raiserror('    exec AzureSQLMaintenance ''all'', @LogToTable=1',0,0)
121 | end
122 | else
123 | begin
124 | 
125 | ---------------------------------------------
126 | --- Prepare log table
127 | ---------------------------------------------
128 | 
129 | /* Prepare Log Table */
130 | if object_id('AzureSQLMaintenanceLog') is null and @LogToTable=1
131 | begin
132 | create table AzureSQLMaintenanceLog (id bigint primary key identity(1,1), OperationTime datetime2, command varchar(4000),ExtraInfo varchar(4000), StartTime datetime2, EndTime datetime2, StatusMessage varchar(1000));
133 | end
134 | 
135 | /* Prepare User override table if not exists */
136 | if object_id('AzureSQLMaintenanceOverride') is null
137 | begin
138 | create table AzureSQLMaintenanceOverride(
139 | PK int identity primary key,
140 | ApplyOnObjectType nvarchar(10) null, /* schema / table / index / statistics */
141 | TableSchema sysname null,
142 | TableName sysname null,
143 | IndexName sysname null,
144 | StatisticsName sysname null,
145 | Operation nvarchar(10) null, /* index / statistics / Null */
146 | OverrideName varchar(100) not null, /* Exclude / Manual / AddCommand */
147 | AdditionalSettings varchar(max) null,
148 | IsSample bit not null,
149 | Remark varchar(1000) null
150 | ,constraint Add_Command_Cannot_Be_Applied_On_Object check( (lower(OverrideName) =lower('AddCommand') and ApplyOnObjectType is null ) or (ApplyOnObjectType is not null and lower(OverrideName) !=lower('AddCommand')) )
151 | ,constraint Cannot_Apply_Index_Operation_On_Statistics check(lower(Operation)!=lower('Index') OR (lower(Operation)=lower('Index') and StatisticsName is null ))
152 | ,constraint Cannot_Apply_Statistics_Operation_On_Index check(lower(Operation)!=lower('Statistics') OR (lower(Operation)=lower('Statistics') and IndexName is null ))
153 | ,constraint At_Schema_Level_Can_Only_Exclue check(lower(ApplyOnObjectType) != lower('Schema') or (lower(ApplyOnObjectType)=lower('Schema') and lower(OverrideName) =lower('Exclude')))
154 | )
155 | 
156 | /*
157 | Some sample configurations for user override.
158 | 159 | * Note: a mistake in applying the override will most likely just be ignored, so please test the settings to confirm they have been applied correctly. 160 | Below are some samples you can use. 161 | 162 | insert into AzureSQLMaintenanceOverride 163 | select NULL,NULL,NULL,NULL,NULL,NULL,'AddCommand','-- ReadMe',1,'Please read the samples below to better understand the options you can override ' 164 | union all select null,NULL,NULL,NULL,NULL,NULL,'AddCommand','SELECT 1',1,NULL 165 | union all select 'Schema','dbo',NULL,NULL,NULL,'Index','exclude',NULL,1,NULL 166 | union all select 'Schema','dbo',NULL,NULL,NULL,'Statistics','exclude',NULL,1,NULL 167 | union all select 'Table','dbo','tab1',NULL,NULL,'Index','exclude',NULL,1,NULL 168 | union all select 'Table','dbo','tab1',NULL,NULL,'Statistics','exclude',NULL,1,NULL 169 | union all select 'Table','dbo','tab1',NULL,'stats1','Statistics','exclude',NULL,1,NULL 170 | union all select 'Index','dbo','tab1','idx1',NULL,'Index','exclude',NULL,1,NULL 171 | union all select 'Statistics','dbo','tab1',NULL,'stats1','Statistics','exclude',NULL,1,NULL 172 | union all select 'Table','dbo','tab2',NULL,NULL,'Index','Manual',NULL,1,NULL 173 | union all select 'Table','dbo','tab2',NULL,NULL,'Statistics','Manual',NULL,1,NULL 174 | union all select 'Table','dbo','tab2',NULL,'stats2','Statistics','Manual',NULL,1,NULL 175 | union all select 'Index','dbo','tab2','idx2',NULL,'Index','Manual',NULL,1,'Use a command template with placeholders in curly braces such as: ALTER INDEX [{IndexName}] ON [{SchemaName}].[{TableName}] rebuild; ' 176 | union all select 'Statistics','dbo','tab2',NULL,'stats2','Statistics','Manual',NULL,1,NULL 177 | 178 | 179 | insert into AzureSQLMaintenanceOverride values('Table','SalesLT','Customer',null,null,'Statistics','Manual','UPDATE STATISTICS [{SchemaName}].[{TableName}] ([{StatisticsName}]) WITH FULLSCAN; --manual row2',1,null) 180 | insert into AzureSQLMaintenanceOverride values('Statistics','dbo','tab1',null,'idx1','Statistics','Manual','UPDATE STATISTICS [{SchemaName}].[{TableName}] ([{StatisticsName}]) WITH FULLSCAN; -- manual row 1',1,null) 181 | 182 | insert into AzureSQLMaintenanceOverride values('Table','SalesLT','Customer',null,null,'Index','Manual','Alter index [{IndexName}] on [{SchemaName}].[{TableName}] rebuild;',1,null) 183 | insert into AzureSQLMaintenanceOverride values('Index','dbo','tab1','idx1',null,'Index','Manual','Alter index [{IndexName}] on [{SchemaName}].[{TableName}] rebuild;',1,null) 184 | 185 | 186 | insert into AzureSQLMaintenanceOverride values(NULL,NULL,NULL,NULL,NULL,NULL,'AddCommand','-- Adding command 1 out of 2 to execution ',1,null) 187 | insert into AzureSQLMaintenanceOverride values(NULL,NULL,NULL,NULL,NULL,NULL,'AddCommand','-- Adding command 2 out of 2 to execution ',1,null) 188 | */ 189 | end 190 | 191 | --------------------------------------------- 192 | --- Resume operation 193 | --------------------------------------------- 194 | 195 | /*Check if there is an operation to resume*/ 196 | if OBJECT_ID('AzureSQLMaintenanceCMDQueue') is not null 197 | begin 198 | if 199 | /*resume information exists*/ exists(select * from AzureSQLMaintenanceCMDQueue where ID=-1) 200 | begin 201 | /*resume operation confirmed*/ 202 | set @operation='resume' -- set operation to resume; this can only be done by the proc itself, it cannot be passed as a parameter 203 | 204 | -- restore operation parameters 205 | select top 1 206 | @LogToTable = JSON_VALUE(ExtraInfo,'$.LogToTable') 207 | ,@mode = 
JSON_VALUE(ExtraInfo,'$.mode') 208 | ,@ResumableIndexRebuild = JSON_VALUE(ExtraInfo,'$.ResumableIndexRebuild') 209 | from AzureSQLMaintenanceCMDQueue 210 | where ID=-1 211 | 212 | raiserror('-----------------------',0,0) 213 | set @msg = 'Resuming previous operation' 214 | raiserror(@msg,0,0) 215 | raiserror('-----------------------',0,0) 216 | end 217 | else 218 | begin 219 | -- table [AzureSQLMaintenanceCMDQueue] exists but resume information does not exist 220 | -- this might happen if execution was interrupted between collecting index & statistics information and executing the commands. 221 | -- to fix that we drop the table now; it will be recreated later 222 | DROP TABLE [AzureSQLMaintenanceCMDQueue]; 223 | end 224 | end 225 | 226 | 227 | --------------------------------------------- 228 | --- Report operation parameters 229 | --------------------------------------------- 230 | 231 | /*Write operation parameters*/ 232 | raiserror('-----------------------',0,0) 233 | set @msg = 'set operation = ' + @operation; 234 | raiserror(@msg,0,0) 235 | set @msg = 'set mode = ' + @mode; 236 | raiserror(@msg,0,0) 237 | set @msg = 'set ResumableIndexRebuild = ' + cast(@ResumableIndexRebuild as varchar(1)); 238 | raiserror(@msg,0,0) 239 | set @msg = 'set RebuildHeaps = ' + cast(@RebuildHeaps as varchar(1)); 240 | raiserror(@msg,0,0) 241 | set @msg = 'set LogToTable = ' + cast(@LogToTable as varchar(1)); 242 | raiserror(@msg,0,0) 243 | set @msg = 'set debug = ' + @debug; 244 | raiserror(@msg,0,0) 245 | raiserror('-----------------------',0,0) 246 | end 247 | 248 | if @LogToTable=1 insert into AzureSQLMaintenanceLog values(@OperationTime,null,null,sysdatetime(),sysdatetime(),'Starting operation: Operation=' +@operation + ' Mode=' + @mode + ' Keep log for last ' + cast(@KeepXOperationInLog as varchar(10)) + ' operations' ) 249 | 250 | -- create the command queue table; if the table already exists we are resuming an operation detected at an earlier stage. 
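/* For reference: the resume record is the row with ID=-1 in AzureSQLMaintenanceCMDQueue; it keeps the original call parameters as JSON in the ExtraInfo column and exists only while an operation is pending. A pending operation can be inspected with a query such as: select JSON_VALUE(ExtraInfo,'$.operation') as operation, JSON_VALUE(ExtraInfo,'$.mode') as mode, JSON_VALUE(ExtraInfo,'$.LogToTable') as LogToTable, JSON_VALUE(ExtraInfo,'$.ResumableIndexRebuild') as ResumableIndexRebuild from AzureSQLMaintenanceCMDQueue where ID=-1; */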
251 | if @operation!='resume' 252 | create table AzureSQLMaintenanceCMDQueue (ID int identity primary key,txtCMD nvarchar(max),ExtraInfo varchar(max)) 253 | 254 | --------------------------------------------- 255 | --- Check if engine supports resumable index operation 256 | --------------------------------------------- 257 | if @ResumableIndexRebuild=1 258 | begin 259 | if cast(SERVERPROPERTY('EngineEdition')as int)>=5 or cast(SERVERPROPERTY('ProductMajorVersion')as int)>=14 260 | begin 261 | set @ResumableIndexRebuildSupported=1; 262 | end 263 | else 264 | begin 265 | set @ResumableIndexRebuildSupported=0; 266 | set @msg = 'Resumable index rebuild is not supported on this database' 267 | raiserror(@msg,0,0) 268 | if @LogToTable=1 insert into AzureSQLMaintenanceLog values(@OperationTime,null,null,sysdatetime(),sysdatetime(),@msg) 269 | end 270 | end 271 | 272 | 273 | --------------------------------------------- 274 | --- Index maintenance 275 | --------------------------------------------- 276 | if @operation in('index','all') 277 | begin 278 | /**/ 279 | if @mode='smart' and @RebuildHeaps=1 280 | set @indexStatsMode = 'SAMPLED' 281 | else 282 | set @indexStatsMode = 'LIMITED' 283 | 284 | raiserror('Get index information...(wait)',0,0) with nowait; 285 | /* Get Index Information */ 286 | /* using inner join - this eliminates indexes that we cannot maintain such as indexes on functions */ 287 | select 288 | idxs.[object_id] 289 | ,ObjectSchema = OBJECT_SCHEMA_NAME(idxs.object_id) 290 | ,ObjectName = object_name(idxs.object_id) 291 | ,IndexName = idxs.name 292 | ,idxs.type 293 | ,idxs.type_desc 294 | ,idxs.has_filter 295 | ,p.xml_compression 296 | ,i.avg_fragmentation_in_percent 297 | ,i.page_count 298 | ,i.index_id 299 | ,i.partition_number 300 | ,i.avg_page_space_used_in_percent 301 | ,i.record_count 302 | ,i.ghost_record_count 303 | ,i.forwarded_record_count 304 | ,null as OnlineOpIsNotSupported 305 | ,0 as ObjectDoesNotSupportResumableOperation 306 | ,cast(0 as bit) as SortInTempDB 307 | ,case when ps.data_space_id IS NULL then 0 else 1 end as IsPartitioned 308 | ,case when et.object_id is NULL then 0 else 1 end as IsExternalTable 309 | ,0 as SkipIndex 310 | ,replicate(' ',20) as OperationToTake 311 | ,replicate(' ',128) as SkipReason 312 | ,0 as UserOverride 313 | ,replicate(' ',1024) as UserOverrideTemplate 314 | into #idxBefore 315 | from sys.indexes idxs 316 | left join sys.partition_schemes ps ON idxs.data_space_id = ps.data_space_id 317 | inner join sys.objects obj on idxs.object_id = obj.object_id 318 | left join sys.partitions p on p.object_id = obj.object_id and p.index_id = idxs.index_id 319 | left join sys.external_tables et on obj.object_id = et.object_id 320 | inner join sys.dm_db_index_physical_stats(DB_ID(),NULL, NULL, NULL ,@indexStatsMode) i on i.object_id = idxs.object_id and i.index_id = idxs.index_id and p.partition_number=i.partition_number 321 | where idxs.type in (0 /*HEAP*/,1/*CLUSTERED*/,2/*NONCLUSTERED*/,5/*CLUSTERED COLUMNSTORE*/,6/*NONCLUSTERED COLUMNSTORE*/) 322 | and (alloc_unit_type_desc = 'IN_ROW_DATA' /*avoid LOB_DATA or ROW_OVERFLOW_DATA*/ or alloc_unit_type_desc is null /*for ColumnStore indexes*/) 323 | and OBJECT_SCHEMA_NAME(idxs.object_id) != 'sys' 324 | and idxs.is_disabled=0 325 | and obj.type_desc != 'TF' /* Ignore table-valued functions */ 326 | and not exists (select * from sys.external_tables as et where et.object_id = obj.object_id) /* as added by alasdaircs to avoid external tables */ 327 | order by i.avg_fragmentation_in_percent desc, 
i.page_count desc 328 | 329 | -- mark XML indexes so they are not rebuilt online 330 | update #idxBefore set OnlineOpIsNotSupported=1 where [object_id] in (select [object_id] from #idxBefore where [type]=3 /*XML Indexes*/) 331 | 332 | -- mark clustered indexes for tables with 'text','ntext','image' to rebuild offline 333 | update #idxBefore set OnlineOpIsNotSupported=1 334 | where index_id=1 /*clustered*/ and [object_id] in ( 335 | select object_id 336 | from sys.columns c join sys.types t on c.user_type_id = t.user_type_id 337 | where t.name in ('text','ntext','image') 338 | ) 339 | 340 | -- do all as offline for box editions that do not support online operations 341 | update #idxBefore set OnlineOpIsNotSupported=1 342 | where /* Editions that do not support online operations, in case this is used on an on-premises server */ 343 | convert(varchar(100),serverproperty('Edition')) like '%Express%' 344 | or convert(varchar(100),serverproperty('Edition')) like '%Standard%' 345 | or convert(varchar(100),serverproperty('Edition')) like '%Web%' 346 | 347 | -- Use a non-resumable operation when the index contains a computed column or a timestamp data type 348 | update idx set ObjectDoesNotSupportResumableOperation=1 349 | from #idxBefore idx join sys.index_columns ic on idx.object_id = ic.object_id and idx.index_id=ic.index_id 350 | join sys.columns c on ic.object_id=c.object_id and ic.column_id=c.column_id 351 | where c.is_computed=1 or system_type_id=189 /*TimeStamp column*/ 352 | 353 | -- Disable resumable operation for indexes that have a filter (filtered indexes) (KB4551220) 354 | update idx set ObjectDoesNotSupportResumableOperation=1 355 | from #idxBefore idx 356 | where idx.has_filter=1 357 | 358 | -- set SkipIndex=1 if conditions for maintenance are not met 359 | -- this is used to identify if stats need to be updated or not. 360 | -- Check#1 - if table is too small 361 | update #idxBefore set SkipIndex=1,SkipReason='Maintenance is not needed as table is too small' 362 | where ( 363 | /*Table is small*/ 364 | (page_count<=@minPageCountForIndex) 365 | ) 366 | and @mode != 'dummy' /*for Dummy mode we do not want to skip anything */ 367 | 368 | -- Check#2 - if table is not small and fragmentation % is too low 369 | update #idxBefore set SkipIndex=1,SkipReason='Maintenance is not needed as fragmentation % is low' 370 | where ( 371 | /*Table is big enough - but fragmentation is less than 5%*/ 372 | (page_count>@minPageCountForIndex and avg_fragmentation_in_percent<@LowFragmentationBoundry) 373 | ) 374 | and @mode != 'dummy' /*for Dummy mode we do not want to skip anything */ 375 | 376 | -- Skip columnstore indexes 377 | update #idxBefore set SkipIndex=1,SkipReason='Columnstore index' 378 | where ( 379 | type in ( 380 | 5/*Clustered columnstore index*/, 381 | 6/*Nonclustered columnstore index*/ 382 | ) 383 | ) 384 | and @mode != 'dummy' /*for Dummy mode we do not want to skip anything */ 385 | 386 | /***/ 387 | update #idxBefore set OperationToTake = 388 | case when 389 | ( 390 | avg_fragmentation_in_percent between @LowFragmentationBoundry and @HighFragmentationBoundry and @mode = 'smart')/* index fragmentation condition */ 391 | or 392 | (@mode='dummy' and type in (5,6))/* Columnstore indexes in dummy mode -> reorganize them */ 393 | then 394 | 'REORGANIZE' 395 | else 396 | 'REBUILD' 397 | end 398 | 399 | -- Choose when to use SORT_IN_TEMPDB, based on the variable and on whether a resumable operation is used, as SORT_IN_TEMPDB is not supported for resumable operations. 
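/* For reference, with the defaults (@SORT_IN_TEMPDB=1, @ResumableIndexRebuild=0) the rebuild command generated further below looks roughly like: ALTER INDEX [IX_Sample] ON [dbo].[SampleTable] REBUILD WITH(MAXDOP=1,ONLINE=ON,SORT_IN_TEMPDB=ON); whereas with a resumable rebuild SORT_IN_TEMPDB is turned off: ALTER INDEX [IX_Sample] ON [dbo].[SampleTable] REBUILD WITH(MAXDOP=1,ONLINE=ON,RESUMABLE=ON,SORT_IN_TEMPDB=OFF); ([IX_Sample] and [dbo].[SampleTable] are placeholder names used for illustration only.) */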
400 | update idx set SortInTempDB=1 401 | from #idxBefore idx 402 | where 403 | ( 404 | /* Internal variable instructs to use SORT_IN_TEMPDB and resumable operation was not activated*/ 405 | /* Resumable operation cannot use sort in tempDB as tempdb is not persisted*/ 406 | @SORT_IN_TEMPDB=1 and @ResumableIndexRebuild = 0 407 | ) 408 | 409 | -- Apply User override settings for indexes at schema level. ***test*** 410 | update idx set SkipIndex=1, SkipReason='User override at schema level' 411 | from #idxBefore idx 412 | join AzureSQLMaintenanceOverride ovr 413 | on ovr.ApplyOnObjectType='Schema' and ovr.IsSample=0 and ovr.Operation='Index' and ovr.OverrideName='Exclude' 414 | and ovr.TableSchema = idx.ObjectSchema 415 | 416 | -- Apply User override settings for indexes at table level. ***test*** 417 | update idx set SkipIndex=1, SkipReason='User override at table level' 418 | from #idxBefore idx 419 | join AzureSQLMaintenanceOverride ovr 420 | on ovr.ApplyOnObjectType='Table' and ovr.IsSample=0 and ovr.Operation='Index' and ovr.OverrideName='Exclude' 421 | and ovr.TableSchema = idx.ObjectSchema 422 | and ovr.TableName = idx.ObjectName 423 | 424 | -- Apply User override settings for indexes at index level. ***test*** 425 | update idx set SkipIndex=1, SkipReason='User override at index level' 426 | from #idxBefore idx 427 | join AzureSQLMaintenanceOverride ovr 428 | on ovr.ApplyOnObjectType='Index' and ovr.IsSample=0 and ovr.Operation='Index' and ovr.OverrideName='Exclude' 429 | and ovr.TableSchema = idx.ObjectSchema 430 | and ovr.TableName = idx.ObjectName 431 | and ovr.IndexName = idx.IndexName 432 | 433 | -- Tag rows that have a manual override so we can handle them differently later - This is for table level. ***test*** 434 | update idx set UserOverride=1, UserOverrideTemplate = ovr.AdditionalSettings 435 | from #idxBefore idx 436 | join AzureSQLMaintenanceOverride ovr 437 | on ovr.ApplyOnObjectType='table' and ovr.IsSample=0 and ovr.Operation='Index' and ovr.OverrideName='Manual' 438 | and ovr.TableSchema = idx.ObjectSchema 439 | and ovr.TableName = idx.ObjectName 440 | 441 | -- Tag rows that have a manual override so we can handle them differently later - This is for index level. 
***test*** 442 | update idx set UserOverride=1, UserOverrideTemplate = ovr.AdditionalSettings 443 | from #idxBefore idx 444 | join AzureSQLMaintenanceOverride ovr 445 | on ovr.ApplyOnObjectType='Index' and ovr.IsSample=0 and ovr.Operation='Index' and ovr.OverrideName='Manual' 446 | and ovr.TableSchema = idx.ObjectSchema 447 | and ovr.TableName = idx.ObjectName 448 | and ovr.IndexName = idx.IndexName 449 | 450 | 451 | raiserror('---------------------------------------',0,0) with nowait 452 | raiserror('Index Information:',0,0) with nowait 453 | raiserror('---------------------------------------',0,0) with nowait 454 | 455 | select @msg = count(*) from #idxBefore 456 | set @msg = 'Total Indexes: ' + @msg 457 | raiserror(@msg,0,0) with nowait 458 | 459 | select @msg = avg(avg_fragmentation_in_percent) from #idxBefore where page_count>@minPageCountForIndex 460 | set @msg = 'Average Fragmentation: ' + @msg 461 | raiserror(@msg,0,0) with nowait 462 | 463 | select @msg = sum(iif(avg_fragmentation_in_percent>=@LowFragmentationBoundry and page_count>@minPageCountForIndex,1,0)) from #idxBefore 464 | set @msg = 'Fragmented Indexes: ' + @msg 465 | raiserror(@msg,0,0) with nowait 466 | 467 | 468 | raiserror('---------------------------------------',0,0) with nowait 469 | 470 | 471 | /* Choose the identifier to be used based on the existing object names 472 | this came up from objects that contain '[' within the object name 473 | such as "EPK[export].[win_sourceofwealthbpf]" as an index name 474 | if we use '[' as the identifier it will produce an incorrect identifier name 475 | */ 476 | if exists( 477 | select 1 478 | from #idxBefore 479 | where IndexName like '%[%' or IndexName like '%]%' 480 | or ObjectSchema like '%[%' or ObjectSchema like '%]%' 481 | or ObjectName like '%[%' or ObjectName like '%]%' 482 | ) 483 | begin 484 | set @idxIdentifierBegin = '"' 485 | set @idxIdentifierEnd = '"' 486 | end 487 | else 488 | begin 489 | set @idxIdentifierBegin = '[' 490 | set @idxIdentifierEnd = ']' 491 | end 492 | 493 | /*Handle User Override for indexes*/ 494 | insert into AzureSQLMaintenanceCMDQueue(txtCMD,ExtraInfo) 495 | select 496 | txtCMD = REPLACE(REPLACE(REPLACE('/*User Override*/'+UserOverrideTemplate,'{TableName}',idx.ObjectName),'{SchemaName}',idx.ObjectSchema),'{IndexName}',idx.IndexName) 497 | ,ExtraInfo = 'User Override command' 498 | from #idxBefore idx 499 | where SkipIndex=0 and type != 0 /*Avoid HEAPS*/ and UserOverride=1 /*User Override requested*/ 500 | 501 | 502 | /* create queue for indexes */ 503 | insert into AzureSQLMaintenanceCMDQueue(txtCMD,ExtraInfo) 504 | select 505 | txtCMD = 'ALTER INDEX ' + @idxIdentifierBegin + IndexName + @idxIdentifierEnd + ' ON '+ @idxIdentifierBegin + ObjectSchema + @idxIdentifierEnd +'.'+ @idxIdentifierBegin + ObjectName + @idxIdentifierEnd + ' ' + 506 | OperationToTake+ ' ' + 507 | case when IsPartitioned = 1 then 'PARTITION=' + CAST(partition_number AS varchar(10)) + ' ' else '' end + 508 | case when OperationToTake='REBUILD' 509 | then 'WITH(MAXDOP=1' + 510 | case when OnlineOpIsNotSupported=1 then ',ONLINE=OFF' else ',ONLINE=ON' end + 511 | case when @ResumableIndexRebuild=1 and @ResumableIndexRebuildSupported=1 and ObjectDoesNotSupportResumableOperation=0 then ',RESUMABLE=ON' else '' end /* Default for RESUMABLE is OFF so nothing needs to be added when it is off - Thanks to mperdeck */ + 512 | case when SortInTempDB=1 then ',SORT_IN_TEMPDB=ON' else ',SORT_IN_TEMPDB=OFF' end + 513 | case when xml_compression=1 then ',XML_COMPRESSION=ON' else '' end + 514 | ')' 515 | else /* Operation is reorganize */ '' 
end + 516 | ';' 517 | , ExtraInfo = 518 | 'Taking Action: ' + OperationToTake + ' ' + 519 | case when type in (5,6) then 520 | 'Dummy mode therefore reorganize columnstore indexes' 521 | else 522 | 'Current fragmentation: ' + format(avg_fragmentation_in_percent/100,'p')+ ' with ' + cast(page_count as nvarchar(20)) + ' pages' 523 | end 524 | from #idxBefore 525 | where SkipIndex=0 and type != 0 /*Avoid HEAPS*/ and UserOverride=0 /*No User Override*/ 526 | 527 | --------------------------------------------- 528 | --- Index - Heaps 529 | --------------------------------------------- 530 | 531 | /* create queue for heaps */ 532 | if @RebuildHeaps=1 533 | begin 534 | insert into AzureSQLMaintenanceCMDQueue(txtCMD,ExtraInfo) 535 | select 536 | txtCMD = 'ALTER TABLE ' + @idxIdentifierBegin + ObjectSchema + @idxIdentifierEnd +'.'+ @idxIdentifierBegin + ObjectName + @idxIdentifierEnd + ' REBUILD ' + 537 | case when IsPartitioned = 1 then 'PARTITION=' + CAST(partition_number AS varchar(10)) + ' ' else '' end + ';' 538 | , ExtraInfo = 'Rebuilding heap - forwarded records ' + cast(forwarded_record_count as varchar(100)) + ' out of ' + cast(record_count as varchar(100)) + ' records in the table' 539 | from #idxBefore 540 | where 541 | type = 0 /*heaps*/ 542 | and IsExternalTable=0 /*cannot rebuild external tables*/ 543 | and 544 | ( 545 | @mode='dummy' 546 | or 547 | (1.0*forwarded_record_count/nullif(record_count,0)>0.3) /* 30% of record count; multiplied by 1.0 to avoid integer division */ 548 | or 549 | (forwarded_record_count>105000) /* for tables with > 350K rows don't wait for 30%, just run the maintenance once we reach ~105K forwarded records */ 550 | ) 551 | end /* create queue for heaps */ 552 | end 553 | 554 | 555 | 556 | --------------------------------------------- 557 | --- Statistics maintenance 558 | --------------------------------------------- 559 | 560 | if @operation in('statistics','all') 561 | begin 562 | /* Get stats for the database */ 563 | raiserror('Get statistics information...',0,0) with nowait; 564 | select 565 | ObjectSchema = OBJECT_SCHEMA_NAME(s.object_id) 566 | ,ObjectName = object_name(s.object_id) 567 | ,s.object_id 568 | ,s.stats_id 569 | ,StatsName = s.name 570 | ,sp.last_updated 571 | ,sp.rows 572 | ,sp.rows_sampled 573 | ,sp.modification_counter 574 | , i.type 575 | , i.type_desc 576 | ,0 as SkipStatistics /* 0=do not skip, 1=skip unless user override, 2=skip anyway*/ 577 | ,replicate(' ',128) as SkipReason 578 | ,0 as UserOverride 579 | ,replicate(' ',1024) as UserOverrideTemplate 580 | into #statsBefore 581 | from sys.stats s cross apply sys.dm_db_stats_properties(s.object_id,s.stats_id) sp 582 | left join sys.indexes i on sp.object_id = i.object_id and sp.stats_id = i.index_id 583 | where OBJECT_SCHEMA_NAME(s.object_id) != 'sys' and /*Modified stats or Dummy mode*/(isnull(sp.modification_counter,0)>0 or @mode='dummy') 584 | order by sp.last_updated asc 585 | 586 | /* Skip statistics that are handled by an index rebuild: 587 | when an index is rebuilt its stats are already updated as part of the rebuild -> therefore that index is skipped here; 588 | for reorganize or for indexes with low fragmentation the index operation does not update stats */ 589 | if @operation= 'all' 590 | update _stats set SkipStatistics=1 591 | from #statsBefore _stats 592 | join #idxBefore _idx 593 | on _idx.ObjectSchema = _stats.ObjectSchema 594 | and _idx.ObjectName = _stats.ObjectName 595 | and _idx.IndexName = _stats.StatsName 596 | where _idx.SkipIndex=0 and _idx.OperationToTake='REBUILD' 597 | 598 | /*Skip statistics for Columnstore indexes*/ 599 | update #statsBefore 
set SkipStatistics=2 600 | where type in (5,6) /*Column store indexes*/ 601 | 602 | /*Skip statistics if a resumable operation is paused on the same object*/ 603 | if @ResumableIndexRebuildSupported=1 604 | begin 605 | update _stats set SkipStatistics=2 606 | from #statsBefore _stats join sys.index_resumable_operations iro on _stats.object_id=iro.object_id and _stats.stats_id=iro.index_id 607 | end 608 | 609 | -- Apply User override settings for Statistics at schema level. ***test*** 610 | update _stats set SkipStatistics=2, SkipReason='User override at schema level' 611 | from #statsBefore _stats 612 | join AzureSQLMaintenanceOverride ovr 613 | on ovr.ApplyOnObjectType='schema' and ovr.IsSample=0 and ovr.Operation = 'Statistics' and ovr.OverrideName='exclude' 614 | and ovr.TableSchema = _stats.ObjectSchema 615 | 616 | -- Apply User override settings for Statistics at table level. ***test*** 617 | update _stats set SkipStatistics=2, SkipReason='User override at table level' 618 | from #statsBefore _stats 619 | join AzureSQLMaintenanceOverride ovr 620 | on ovr.ApplyOnObjectType='table' and ovr.IsSample=0 and ovr.Operation = 'Statistics' and ovr.OverrideName='exclude' 621 | and ovr.TableSchema = _stats.ObjectSchema 622 | and ovr.TableName = _stats.ObjectName 623 | 624 | -- Apply User override settings for Statistics at statistics level. ***test*** 625 | update _stats set SkipStatistics=2, SkipReason='User override at Statistics level' 626 | from #statsBefore _stats 627 | join AzureSQLMaintenanceOverride ovr 628 | on ovr.ApplyOnObjectType='statistics' and ovr.IsSample=0 and ovr.Operation = 'Statistics' and ovr.OverrideName='exclude' 629 | and ovr.TableSchema = _stats.ObjectSchema 630 | and ovr.TableName = _stats.ObjectName 631 | and ovr.StatisticsName = _stats.StatsName 632 | 633 | -- Tag rows that have a manual override so we can handle them differently later - This is for table level. ***test*** 634 | update _stats set UserOverride=1, UserOverrideTemplate = ovr.AdditionalSettings 635 | from #statsBefore _stats 636 | join AzureSQLMaintenanceOverride ovr 637 | on ovr.ApplyOnObjectType='table' and ovr.IsSample=0 and ovr.Operation='Statistics' and ovr.OverrideName='Manual' 638 | and ovr.TableSchema = _stats.ObjectSchema 639 | and ovr.TableName = _stats.ObjectName 640 | 641 | -- Tag rows that have a manual override so we can handle them differently later - This is for statistics level. 
***test*** 642 | update _stats set UserOverride=1, UserOverrideTemplate = ovr.AdditionalSettings 643 | from #statsBefore _stats 644 | join AzureSQLMaintenanceOverride ovr 645 | on ovr.ApplyOnObjectType='Statistics' and ovr.IsSample=0 and ovr.Operation='Statistics' and ovr.OverrideName='Manual' 646 | and ovr.TableSchema = _stats.ObjectSchema 647 | and ovr.TableName = _stats.ObjectName 648 | and ovr.StatisticsName = _stats.StatsName 649 | 650 | raiserror('---------------------------------------',0,0) with nowait 651 | raiserror('Statistics Information:',0,0) with nowait 652 | raiserror('---------------------------------------',0,0) with nowait 653 | 654 | select @msg = sum(modification_counter) from #statsBefore 655 | set @msg = 'Total Modifications: ' + @msg 656 | raiserror(@msg,0,0) with nowait 657 | 658 | select @msg = sum(iif(modification_counter>0,1,0)) from #statsBefore 659 | set @msg = 'Modified Statistics: ' + @msg 660 | raiserror(@msg,0,0) with nowait 661 | 662 | raiserror('---------------------------------------',0,0) with nowait 663 | 664 | /* Choose the identifier to be used based on existing object name */ 665 | if exists( 666 | select 1 667 | from #statsBefore 668 | where StatsName like '%[%' or StatsName like '%]%' 669 | or ObjectSchema like '%[%' or ObjectSchema like '%]%' 670 | or ObjectName like '%[%' or ObjectName like '%]%' 671 | ) 672 | begin 673 | set @statsIdentifierBegin = '"' 674 | set @statsIdentifierEnd = '"' 675 | end 676 | else 677 | begin 678 | set @statsIdentifierBegin = '[' 679 | set @statsIdentifierEnd = ']' 680 | end 681 | 682 | /*Handle User Override for Statistics objects*/ 683 | insert into AzureSQLMaintenanceCMDQueue(txtCMD,ExtraInfo) 684 | select 685 | txtCMD = REPLACE(REPLACE(REPLACE('/*User Override*/'+UserOverrideTemplate,'{TableName}',_stats.ObjectName),'{SchemaName}',_stats.ObjectSchema),'{StatisticsName}',_stats.StatsName) 686 | ,ExtraInfo = 'User Override statistics update command' 687 | from #statsBefore _stats 688 | where SkipStatistics in (0,1 /*exclude 2*/) and UserOverride=1 /*User Override requested*/ 689 | 690 | 691 | /* create queue for update stats */ 692 | insert into AzureSQLMaintenanceCMDQueue(txtCMD,ExtraInfo) 693 | select 694 | txtCMD = 'UPDATE STATISTICS '+ @statsIdentifierBegin + ObjectSchema + @statsIdentifierEnd + '.'+@statsIdentifierBegin + ObjectName + @statsIdentifierEnd +' (' + @statsIdentifierBegin + StatsName + @statsIdentifierEnd + ') WITH FULLSCAN;' 695 | , ExtraInfo = '#rows:' + cast([rows] as varchar(100)) + ' #modifications:' + cast(modification_counter as varchar(100)) + ' modification percent: ' + format((1.0 * modification_counter/ rows ),'p') 696 | from #statsBefore 697 | where SkipStatistics=0; 698 | end 699 | 700 | if @operation in('statistics','index','all','resume') 701 | begin 702 | 703 | declare @SQLCMD nvarchar(max); 704 | declare @ID int; 705 | declare @ExtraInfo nvarchar(max); 706 | 707 | /*handle debug options*/ 708 | if @debug!='off' 709 | begin 710 | 711 | /*When whatif is used, comment out all commands so they are listed but not executed*/ 712 | if @debug='whatif' 713 | begin 714 | update AzureSQLMaintenanceCMDQueue set txtCMD = '--' + txtCMD 715 | end 716 | 717 | /*keep debug table snapshots*/ 718 | drop table if exists idxBefore 719 | drop table if exists statsBefore 720 | drop table if exists cmdQueue 721 | if object_id('tempdb..#idxBefore') is not null select * into idxBefore from #idxBefore 722 | if object_id('tempdb..#statsBefore') is not null select * into statsBefore from #statsBefore 723 | if object_id('AzureSQLMaintenanceCMDQueue') 
is not null select * into cmdQueue from AzureSQLMaintenanceCMDQueue 724 | end 725 | 726 | /*Save current execution parameters in case resume is needed */ 727 | if @operation!='resume' 728 | begin 729 | set @ExtraInfo = (select top 1 LogToTable = @LogToTable, operation=@operation, operationTime=@OperationTime, mode=@mode, ResumableIndexRebuild = @ResumableIndexRebuild from sys.tables for JSON path, WITHOUT_ARRAY_WRAPPER) 730 | set identity_insert AzureSQLMaintenanceCMDQueue on 731 | insert into AzureSQLMaintenanceCMDQueue(ID,txtCMD,ExtraInfo) values(-1,'parameters to be used by resume code path',@ExtraInfo) 732 | set identity_insert AzureSQLMaintenanceCMDQueue off 733 | end 734 | 735 | if @operation in('statistics','index','all') 736 | begin 737 | /*Add extra commands from User override */ 738 | insert into AzureSQLMaintenanceCMDQueue 739 | select AdditionalSettings, 'User Added Command' from AzureSQLMaintenanceOverride where OverrideName='AddCommand' and IsSample=0 740 | end 741 | 742 | 743 | --------------------------------------------- 744 | --- Executing commands 745 | --------------------------------------------- 746 | /* 747 | needed to rebuild indexes on computed columns; 748 | if ANSI_WARNINGS is set to OFF we might get the following exception: 749 | Msg 1934, Level 16, State 1, Line 2 750 | ALTER INDEX failed because the following SET options have incorrect settings: 'ANSI_WARNINGS'. Verify that SET options are correct for use with indexed views and/or indexes on computed columns and/or filtered indexes and/or query notifications and/or XML data type methods and/or spatial index operations. 751 | */ 752 | SET ANSI_WARNINGS ON; 753 | 754 | raiserror('Start executing commands...',0,0) with nowait 755 | declare @T table(ID int, txtCMD nvarchar(max),ExtraInfo nvarchar(max)); 756 | while exists(select * from AzureSQLMaintenanceCMDQueue where ID>0) 757 | begin 758 | update top (1) AzureSQLMaintenanceCMDQueue set txtCMD=txtCMD output deleted.* into @T where ID>0; 759 | select top (1) @ID = ID, @SQLCMD = txtCMD, @ExtraInfo=ExtraInfo from @T 760 | raiserror(@SQLCMD,0,0) with nowait 761 | if @LogToTable=1 insert into AzureSQLMaintenanceLog values(@OperationTime,@SQLCMD,@ExtraInfo,sysdatetime(),null,'Started') 762 | begin try 763 | exec(@SQLCMD) 764 | if @LogToTable=1 update AzureSQLMaintenanceLog set EndTime = sysdatetime(), StatusMessage = 'Succeeded' where id=SCOPE_IDENTITY() 765 | end try 766 | begin catch 767 | set @ScriptHasAnError=1; 768 | set @msg = 'FAILED : ' + CAST(ERROR_NUMBER() AS VARCHAR(50)) + ERROR_MESSAGE(); 769 | raiserror(@msg,0,0) with nowait 770 | if @LogToTable=1 update AzureSQLMaintenanceLog set EndTime = sysdatetime(), StatusMessage = @msg where id=SCOPE_IDENTITY() 771 | end catch 772 | delete from AzureSQLMaintenanceCMDQueue where ID = @ID; 773 | delete from @T 774 | end 775 | drop table AzureSQLMaintenanceCMDQueue; 776 | end 777 | 778 | --------------------------------------------- 779 | --- Clean old records from log table 780 | --------------------------------------------- 781 | if @LogToTable=1 782 | begin 783 | delete from AzureSQLMaintenanceLog 784 | from 785 | AzureSQLMaintenanceLog L join 786 | (select distinct OperationTime from AzureSQLMaintenanceLog order by OperationTime desc offset @KeepXOperationInLog rows) F 787 | ON L.OperationTime = F.OperationTime 788 | insert into AzureSQLMaintenanceLog values(@OperationTime,null,cast(@@rowcount as varchar(100))+ ' rows purged from log table because number of operations to keep is set to: ' + cast( @KeepXOperationInLog 
as varchar(100)),sysdatetime(),sysdatetime(),'Cleanup Log Table') 789 | end 790 | 791 | if @ScriptHasAnError=0 raiserror('Done',0,0) 792 | if @LogToTable=1 insert into AzureSQLMaintenanceLog values(@OperationTime,null,null,sysdatetime(),sysdatetime(),'End of operation') 793 | if @ScriptHasAnError=1 raiserror('Script has errors - please review the log.',16,1) 794 | end 795 | GO 796 | print 'Execute AzureSQLMaintenance to get help' 797 | 798 | 799 | /* 800 | Examples 801 | 802 | 1. Run through all indexes and statistics and take a smart decision about the steps taken for each object 803 | exec AzureSQLMaintenance 'all' 804 | 805 | 1.1 Log to a table and use resumable index rebuild where supported 806 | exec AzureSQLMaintenance 'all', @LogToTable=1, @ResumableIndexRebuild=1 807 | 808 | 809 | 2. Run through all indexes and statistics with no limitation (even non-modified objects will be rebuilt or updated) 810 | exec AzureSQLMaintenance 'all','dummy' 811 | 812 | 813 | 3. Run smart maintenance only for statistics 814 | exec AzureSQLMaintenance 'statistics' 815 | 816 | 817 | 4. Run smart maintenance only for indexes 818 | exec AzureSQLMaintenance 'index' 819 | 820 | */ 821 | --------------------------------------------------------------------------------