├── .aux └── count.ps1 ├── .github └── workflows │ └── embed.yml ├── AI ├── GradientDescedent.sql └── spDbaGPT.sql ├── AlwaysOn ├── DatabaseReplicaStates.sqlcmd.sql └── GenerateRoutingURL.sql ├── Assembly └── AssemblyBinariesPath.sql ├── Audit └── QueryAuditFile.sql ├── Backups ├── AddLogicDevice.sql ├── Backup │ ├── BackupDetailed.sql │ ├── BackupVolumetria_PorPeriodo.sql │ ├── Backup_DestinationInfo.sql │ ├── Backup_PolicyEstimator.sql │ ├── Backuphistory.sql │ ├── CheckBackupDoing.sql │ ├── CheckBackupLatency.sql │ ├── FrequencyAnalyzer_v2.sql │ ├── InfoBackups_Agg.sql │ ├── LastBackupSize.sql │ ├── Timeline.sql │ ├── olds │ │ ├── Media Tamanho Backup Semanal.sql │ │ ├── Procedure Backup.temp.sql │ │ ├── RelatorioBackup_PorPeriodo.sql │ │ ├── TamLastBackups.sql │ │ ├── [RECENCIA_NOV2014]sp_BackupBanco.sql │ │ ├── backup_log_sql2000.sql │ │ ├── lista de backups recentes.sql │ │ ├── relatorio_backup_lastbackups.sql │ │ └── relatorio_backup_simple.sql │ ├── relatorio_backup_lastbackups.sql │ ├── sp_DoBackup.sql │ └── valid_backup_schedule.sql └── Restore │ ├── Data_Ultimo_Restore.sql │ ├── InfoUltimoRestoreEx.sql │ ├── LastRestores.sql │ ├── MasterManual │ └── CreateLogins.sql │ ├── RestoreAvancado.sql │ ├── RestoreFrequency.sql │ ├── RestoreFrequencyWithSizings.sql │ ├── RestoreReport.sql │ ├── RestoreSequence.sql │ └── prcRestauraBanco.sql ├── CDC └── ValidarCdcRoleMembers.sql ├── CLR ├── Lab.cs ├── LoadCSC.ps1 ├── SqlLabClr.create.sql ├── SqlLabClr.dll └── compile.ps1 ├── CMS ├── CMSProperties │ ├── DynamicPivotProperties.sql │ ├── ExplorerServerPath.sql │ ├── QueryProperties.sql │ ├── README.md │ ├── cmsprops.CMSProperties.tab.sql │ ├── cmsprops.cpInstanceProperties.vw.sql │ ├── cmsprops.prcGetInstance.proc.sql │ ├── cmsprops.prcSetProperty.sql │ ├── cmsprops.sch.sql │ ├── cpFullInstanceProperties.vw.sql │ ├── cpInstances.vw.sql │ └── dbo.FullServerPath.vw.sql └── ConcederAcesso_CMS.sql ├── CPU ├── CPUActivityEx.sql ├── CPUDelta.sql ├── CPUDeltaEx.sql ├── CPUDelta_Custom.sql ├── CPUDelta_GroupByObject.sql ├── CPUTest-Simple.sql ├── CPUTime_Schedulers.sql ├── CPU_usadasporsessao.sql ├── CurrentTasks_VsRunnable_Aggregado.sql ├── NativeCompiledTest.sql ├── QuerStatsDelta.sql ├── RequestDelta.sql ├── TasksActivity.sql ├── ThreadScheduleInfo.sql ├── TryCalcCPU_MultipleThreads.sql ├── Ver Schedulers Possivel CPU Bottlenecks.sql └── threads.sql ├── Checkdb └── PerTableCheck.sql ├── Collations ├── CollationAsciiTable.sql ├── CollationPrecedence_CollationSensitity_Explained.sql ├── PrefCollations_CodePage.sql └── collate_playing_DrawLibrarySQL.sql ├── Colunas └── FirstColText.sql ├── Constraints └── Foreign Keys │ ├── DescobrirFK-Sem-Indice.sql │ ├── GEraCreateFk-AllFks.sql │ ├── Gerar Creates e Drops de Fks.sql │ ├── Informacoes de FKS.sql │ ├── Tabelas Filhas.sql │ └── fnGeraCreateFK.sql ├── Criptografia └── demo-DesmistificandoCriptografia.sql ├── Database ├── EstimarUltimoUsoBase.sql ├── LastGoodCheckDB.sql ├── SizePerDatabase.sql ├── TamanhoTodosBancos_2000+.sql └── UsageSizeInfo.sql ├── DatabaseMail ├── DatabaseMail_CriarProfile_DBA.sql └── DatabaseMail_TestEmail.sql ├── Dockerfile ├── Dumps └── LastDumps.sql ├── Endpoints └── endpoints.sql ├── Index └── sp_skindex.sql ├── Jobs ├── JobHistoryStatistics.sql ├── JobsXProxy.sql └── obter_ultimas_falhas.sql ├── LICENSE ├── Misc ├── Fuckill.sql ├── HighQueryCompilationTime.sql ├── I-Like-Memory.sql ├── NotIn-Null.sql ├── ProcurarTexto.sql └── sp.ForceOledbWait.sql ├── Modulos ├── DependencyChain.sql └── ProcuraEmTodosModulos.sql ├── 
PowerAlerts ├── ErrorInfo.sql └── README.md ├── README.md ├── Schemas └── ChangeSchemaOwners.sql ├── SqlLibEmbeddings ├── README.md ├── SqlOperations.ps1 ├── embed.ps1 ├── query.ps1 ├── start-embedding.ps1 ├── tab.Scripts.sql └── util.ps1 ├── Tables └── TableSize2.sql └── docker-compose.yml /.aux/count.ps1: -------------------------------------------------------------------------------- 1 | param( 2 | [switch]$Update 3 | ) 4 | 5 | $ErrorActionPreference = "Stop" 6 | 7 | $Committed = @(git ls-files | ?{ 8 | $it = Get-Item $_ -EA SilentlyContinue; 9 | $it.name -like '*.sql' 10 | }).count 11 | $TotalFiles = @(gci -rec *.sql).count 12 | 13 | 14 | [int]$PercentCompleted = $Committed*100/$TotalFiles 15 | $TitleEscaped = [Uri]::EscapeDataString("$Committed/$TotalFiles scripts"); 16 | 17 | $Url = "https://progress-bar.xyz/$PercentCompleted/?width=200&title=$TitleEscaped" 18 | $ReadmeImage = "![Progresso]($Url)" 19 | 20 | 21 | $ReadmeLines = Get-Content .\README.md 22 | 23 | for ($i = 0; $i -lt $ReadmeLines.length; $i++) 24 | { 25 | if($ReadmeLines[$i] -match '^!\[Progresso\].+'){ 26 | $ReadmeLines[$i] = $ReadmeImage 27 | break; 28 | } 29 | } 30 | 31 | if($i -ge $ReadmeLines.length){ 32 | $ReadmeLines = @($ReadmeImage) + $ReadmeLines 33 | } 34 | 35 | $ReadmeLines 36 | 37 | write-host "---" 38 | write-host $Committed $TotalFiles $PercentCompleted 39 | write-host $Url 40 | 41 | if($Update){ 42 | $ReadmeLines | Set-Content .\README.md -Encoding UTF8 43 | } -------------------------------------------------------------------------------- /.github/workflows/embed.yml: -------------------------------------------------------------------------------- 1 | name: embed 2 | on: 3 | push: 4 | tags: 5 | - embed-* 6 | 7 | jobs: 8 | embed: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - name: Checkout 12 | uses: actions/checkout@v4 13 | - name: embed 14 | shell: pwsh 15 | env: 16 | HF_API_TOKEN: ${{ secrets.HF_API_TOKEN }} 17 | SQL_SERVER: ${{ secrets.SQL_SERVER }} 18 | SQL_DB: ${{ secrets.SQL_DB }} 19 | SQL_USER: ${{ secrets.SQL_USER }} 20 | SQL_PASS: ${{ secrets.SQL_PASS }} 21 | run: ./SqlLibEmbeddings/start-embedding.ps1 -------------------------------------------------------------------------------- /AI/GradientDescedent.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/AI/GradientDescedent.sql -------------------------------------------------------------------------------- /AI/spDbaGPT.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/AI/spDbaGPT.sql -------------------------------------------------------------------------------- /AlwaysOn/DatabaseReplicaStates.sqlcmd.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | 8 | Este script foi criado para rodar no SSMS, usando o SQLCMD mode. 9 | 10 | Com AlwaysON, é bem comum você querer consultar as várias réplicas ao mesmo tempo. 11 | Tem várias maneiras de fazer isso: Você poderia criar um CMS, usar powershell, etc. 12 | 13 | 14 | Uma delas, é usar o SQLCMD mode do Management Studio. 15 | Nesse modo, você tem algumas sintaxes especiais no editor que pode, no meio do script, conectar em outro servidor! 
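Um esboço mínimo de como isso fica no editor (nomes de servidor/instância hipotéticos, só para ilustrar o :connect):

    :connect SERVIDOR\REPLICA1
    SELECT @@SERVERNAME AS replica_atual;
    GO
    :connect SERVIDOR\REPLICA2
    SELECT @@SERVERNAME AS replica_atual;
    GO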
16 | Neste caso, é bem útil quando você quer executar uma mesma query em diferentes instância e comparar o resultado ali na mesma tela. 17 | Isso fica bem produtivo! 18 | 19 | Para habilitar o SQLCMD Mode no SSMS, vá em Query -> SQLCMD Mode (clica e o ícone vai ficar destacado, indicando que ativou). 20 | O comando ":connect" é disponibilizado via SQLCMD mode. Com ele você pode se conectar em uma instância específica. 21 | 22 | Note que para funcionar, a máquina onde você está rodando o SSMS deve conseguir chegar na instância que está tentando conectar. 23 | Se, por exemplo, está dentro do servidor, então ele tem que ter permissão. 24 | 25 | MAis sobre o SQLCMD Mode: https://learn.microsoft.com/en-us/sql/tools/sqlcmd/edit-sqlcmd-scripts-query-editor?view=sql-server-ver16 26 | 27 | ## Sobre o script 28 | 29 | Este script traz informações de cada banco de dados de cada AG em cada replica. 30 | Se você rodar na primária, ele vai ter 1 linha para cada base de cada AG de cada réplica. 31 | Se você rodar na secundária, vai ter 1 linha para cada base de cada AG associado este secundário. 32 | 33 | A coluna is_local indica se a linha é referente a instãncia onde você está executando o script. 34 | quando 1, então é a linha correspondente a instância atual. 35 | 36 | 37 | Sobre a DMV: 38 | https://learn.microsoft.com/en-us/sql/relational-databases/system-dynamic-management-views/sys-dm-hadr-database-replica-states-transact-sql 39 | 40 | 41 | No caso abaixo, a última vez que usei o script era pra analisar o sincronismo com a secundária. 42 | Eu queria entender a diferença de dados entre o primário e o secundário. 43 | 44 | Na primaira, filtrei is_local = 0, pois queria as linhas com os dados de sincronização com a secundária. 45 | Na secundária, filtrei is_local = 1, pois queria as linhas com os dados da própria secundária. 46 | 47 | 48 | Geralmente, quando está tudo ok na comunicação, os valores são bem próximos. 49 | Se estiver muito discrepante, pode indicar algum problema na comunicação (lentidão da rede, probs do sql, bugs, etc.) 50 | */ 51 | 52 | 53 | :connect SERVER\INSTANCIA1 -- No meu caso, era a primaria 54 | SELECT @@servername,D.name,d.log_reuse_wait_desc,synchronization_state_desc,log_send_rate,redo_queue_size,redo_rate,last_commit_time,last_hardened_time,last_sent_time,last_sent_lsn,last_redone_time 55 | ,last_received_time 56 | fROM sys.dm_hadr_database_replica_states RS 57 | INNER JOIN sys.databases D on D.database_id = RS.database_id 58 | where is_local = 0 59 | order by D.name 60 | GO 61 | 62 | 63 | :connect SERVER\INSTANCIA2 -- No meu caso, era a secundária 64 | SELECT @@servername,D.name,d.log_reuse_wait_desc,synchronization_state_desc,log_send_rate,redo_queue_size,redo_rate,last_commit_time,last_hardened_time,last_sent_time,last_sent_lsn,last_redone_time 65 | ,last_received_time 66 | fROM sys.dm_hadr_database_replica_states RS 67 | INNER JOIN sys.databases D on D.database_id = RS.database_id 68 | where is_local = 1 69 | order by D.name 70 | GO 71 | 72 | 73 | -------------------------------------------------------------------------------- /AlwaysOn/GenerateRoutingURL.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Tone Poem (Blog Microsoft) 5 | 6 | # Detalhes 7 | 8 | Este script foi criado para gerar as read-only routing URL de AGs. 9 | Eu acho que usei isso 1x. 10 | Mas é bem interessante como o autor faz algumas validaçõs... 11 | Estou mantendo esse pois talvez seja útil para alguém aí! 
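Lembrete: a URL sozinha não basta. O roteamento read-only só acontece se a réplica primária tiver uma READ_ONLY_ROUTING_LIST configurada e se a aplicação conectar no listener com ApplicationIntent=ReadOnly. Um esboço complementar (nomes de AG e de réplicas hipotéticos):

    ALTER AVAILABILITY GROUP [MeuAG]
    MODIFY REPLICA ON N'REPLICA1' WITH
    (PRIMARY_ROLE (READ_ONLY_ROUTING_LIST = (N'REPLICA2', N'REPLICA1')));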
12 | 13 | */ 14 | 15 | -- From: https://blogs.msdn.microsoft.com/mattn/2012/04/25/calculating-read_only_routing_url-for-alwayson/ 16 | ----Read-only routing url generation script. 17 | ----Connect to each replica in your AlwaysOn cluster and run this script to get the read_only_routing_url for the replica. 18 | ----Then set this to the read_only_routing_url for the availability group replica => 19 | -- alter availability group MyAvailabilityGroup modify replica on N'ThisReplica' with (secondary_role(read_only_routing_url=N'')) 20 | print 'Read-only-routing url script v.2012.1.24.1' 21 | 22 | print 'This SQL Server instance version is [' + cast(serverproperty('ProductVersion') as varchar(256)) + ']' 23 | 24 | if (ServerProperty('IsClustered') = 1) 25 | begin 26 | print 'This SQL Server instance is a clustered SQL Server instance.' 27 | end 28 | else 29 | begin 30 | print 'This SQL Server instance is a standard (not clustered) SQL Server instance.' 31 | end 32 | 33 | if (ServerProperty('IsHadrEnabled') = 1) 34 | begin 35 | print 'This SQL Server instance is enabled for AlwaysOn.' 36 | end 37 | else 38 | begin 39 | print 'This SQL Server instance is NOT enabled for AlwaysOn.' 40 | end 41 | 42 | --Detect SQL Azure instance. 43 | declare @is_sql_azure bit 44 | set @is_sql_azure = 0 45 | 46 | begin try 47 | set @is_sql_azure = 1 48 | exec('declare @i int set @i = sql_connection_mode()') 49 | print 'This SQL Server instance is a Sql Azure instance.' 50 | end try 51 | begin catch 52 | set @is_sql_azure = 0 53 | print 'This SQL Server instance is NOT a Sql Azure instance.' 54 | end catch 55 | 56 | --Check that this is SQL 11 or later, otherwise fail fast. 57 | if (@@microsoftversion / 0x01000000 < 11 or @is_sql_azure > 0) 58 | begin 59 | print 'This SQL Server instance does not support read-only routing, exiting script.' 60 | end 61 | else 62 | begin -- if server supports read-only routing 63 | 64 | -- Fetch the dedicated admin connection (dac) port. 65 | -- Normally it's always port 1434, but to be safe here we fetch it from the instance. 66 | -- We use this later to exclude the admin port from read_only_routing_url. 67 | declare @dac_port int 68 | declare @reg_value varchar(255) 69 | exec xp_instance_regread 70 | N'HKEY_LOCAL_MACHINE', 71 | N'SOFTWARE\Microsoft\Microsoft SQL Server\\MSSQLServer\SuperSocketNetLib\AdminConnection\Tcp', 72 | N'TcpDynamicPorts', 73 | @reg_value output 74 | 75 | set @dac_port = cast(@reg_value as int) 76 | 77 | print 'This SQL Server instance DAC (dedicated admin) port is ' + cast(@dac_port as varchar(255)) 78 | if (@dac_port = 0) 79 | begin 80 | print 'Note a DAC port of zero means the dedicated admin port is not enabled.' 81 | end 82 | 83 | -- Fetch ListenOnAllIPs value. 84 | -- If set to 1, this means the instance is listening to all IP addresses. 85 | -- If set to 0, this means the instance is listening to specific IP addresses. 86 | declare @listen_all int 87 | exec xp_instance_regread 88 | N'HKEY_LOCAL_MACHINE', 89 | N'SOFTWARE\Microsoft\Microsoft SQL Server\\MSSQLServer\SuperSocketNetLib\Tcp', 90 | N'ListenOnAllIPs', 91 | @listen_all output 92 | 93 | if (@listen_all = 1) 94 | begin 95 | print 'This SQL Server instance is listening to all IP addresses (default mode).' 96 | end 97 | else 98 | begin 99 | print 'This SQL Server instance is listening to specific IP addresses (ListenOnAllIPs is disabled).' 100 | end 101 | 102 | -- Check for dynamic port configuration, not recommended with read-only routing. 
103 | declare @tcp_dynamic_ports varchar(255) 104 | exec xp_instance_regread 105 | N'HKEY_LOCAL_MACHINE', 106 | N'SOFTWARE\Microsoft\Microsoft SQL Server\\MSSQLServer\SuperSocketNetLib\Tcp\IPAll', 107 | N'TcpDynamicPorts', 108 | @tcp_dynamic_ports output 109 | 110 | if (@tcp_dynamic_ports = '0') 111 | begin 112 | print 'This SQL Server instance is listening on a dynamic tcp port, this is NOT A RECOMMENDED CONFIGURATION when using read-only routing, because the instance port can change each time the instance is restarted.' 113 | end 114 | else 115 | begin 116 | print 'This SQL Server instance is listening on fixed tcp port(s) (it is not configured for dynamic ports), this is a recommended configuration when using read-only routing.' 117 | end 118 | 119 | -- Calculate the server domain and instance FQDN. 120 | -- We use @server_domain later to build the FQDN to the clustered instance. 121 | declare @instance_fqdn varchar(255) 122 | declare @server_domain varchar(255) 123 | 124 | -- Get the instance FQDN using the xp_getnetname API 125 | -- Note all cluster nodes must be in same domain, so this works for calculating cluster FQDN. 126 | set @instance_fqdn = '' 127 | exec xp_getnetname @instance_fqdn output, 1 128 | 129 | -- Remove embedded null character at end if found. 130 | declare @terminator int 131 | set @terminator = charindex(char(0), @instance_fqdn) --1 132 | if (@terminator > 0) 133 | begin 134 | set @instance_fqdn = substring(@instance_fqdn, 1, @terminator) 135 | end 136 | 137 | -- Build @server_domain using @instance_fqdn. 138 | set @server_domain = @instance_fqdn 139 | 140 | -- Remove trailing portion to extract domain name. 141 | set @terminator = charindex('.', @server_domain) 142 | if (@terminator > 0) 143 | begin 144 | set @server_domain = substring(@server_domain, @terminator+1, datalength(@server_domain)) 145 | end 146 | print 'This SQL Server instance resides in domain ''' + @server_domain + '''' 147 | 148 | if (ServerProperty('IsClustered') = 1) 149 | begin 150 | -- Fetch machine name, which for a clustered SQL instance returns the network name of the virtual server. 151 | -- Append @server_domain to build the FQDN. 152 | set @instance_fqdn = cast(serverproperty('MachineName') as varchar(255)) + '.' + @server_domain 153 | end 154 | 155 | declare @ror_url varchar(255) 156 | declare @instance_port int 157 | 158 | set @ror_url = '' 159 | 160 | -- Get first available port for instance. 161 | select 162 | top 1 -- Select first matching port 163 | @instance_port = port 164 | from sys.dm_tcp_listener_states 165 | where 166 | type=0 -- Type 0 = TSQL (to avoid mirroring endpoint) 167 | and 168 | state=0 -- State 0 is online 169 | and 170 | port <> @dac_port -- Avoid DAC port (admin port) 171 | and 172 | -- Avoid availability group listeners 173 | ip_address not in (select ip_address from sys.availability_group_listener_ip_addresses agls) 174 | group by port 175 | order by port asc -- Pick first port in ascending order 176 | 177 | -- Check if there are multiple ports and warn if this is the case. 
178 | declare @list_of_ports varchar(max) 179 | set @list_of_ports = '' 180 | 181 | select 182 | @list_of_ports = @list_of_ports + 183 | case datalength(@list_of_ports) 184 | when 0 then cast(port as varchar(max)) 185 | else ',' + cast(port as varchar(max)) 186 | end 187 | from sys.dm_tcp_listener_states 188 | where 189 | type=0 -- Type 0 = TSQL (to avoid mirroring endpoint) 190 | and 191 | state=0 -- State 0 is online 192 | and 193 | port <> @dac_port -- Avoid DAC port (admin port) 194 | and 195 | -- Avoid availability group listeners 196 | ip_address not in (select ip_address from sys.availability_group_listener_ip_addresses agls) 197 | group by port 198 | order by port asc 199 | 200 | print 'This SQL Server instance FQDN (Fully Qualified Domain Name) is ''' + @instance_fqdn + '''' 201 | print 'This SQL Server instance port is ' + cast(@instance_port as varchar(10)) 202 | 203 | set @ror_url = 'tcp://' + @instance_fqdn + ':' + cast(@instance_port as varchar(10)) 204 | 205 | print '****************************************************************************************************************' 206 | print 'The read_only_routing_url for this SQL Server instance is ''' + @ror_url + '''' 207 | print '****************************************************************************************************************' 208 | 209 | -- If there is more than one instance port (unusual) list them out just in case. 210 | if (charindex(',', @list_of_ports) > 0) 211 | begin 212 | print 'Note there is more than one instance port, the list of available instance ports for read_only_routing_url is (' + @list_of_ports + ')' 213 | print 'The above URL just uses the first port in the list, but you can use any of these available ports.' 214 | end 215 | 216 | end -- if server supports read-only routing 217 | go -------------------------------------------------------------------------------- /Assembly/AssemblyBinariesPath.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Retorna informações sobre assemblies! 8 | A coluna content é útil para recriar um assembly se precisar! 9 | 10 | */ 11 | SELECT 12 | * 13 | FROM 14 | sys.assembly_files -------------------------------------------------------------------------------- /Audit/QueryAuditFile.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | 8 | Este script lista diversas informações relevantes que podem existir em um arquivo de audit. 9 | Eu faço uam conversão para hora local (por padrão, a hora no audio é GMT). E trago as colunas mais relevantes (Geralmente). 10 | Adicionalmente, faço join com algumas dmvs para converter o id das operações em algo mais legível. 11 | 12 | Não esqueça de trocar o caminho do audit no parâmetro de fn_get_audit_file 13 | Lemebre-se que estes arquivos devem estar acessíveis pelo processo da sua instância, isto é, o processo deve conseguir chegar nesse caminho, o que incluir ter as permissões corretas do usuário que roda o serviço. 14 | */ 15 | 16 | --> Check Audit file! 
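-- Esboço (hipotético) de um audit que gera arquivos como os lidos abaixo, caso precise recriar um;
-- ajuste nome, caminho e os action groups conforme o seu caso:
--
--   CREATE SERVER AUDIT [Audit_Exemplo] TO FILE (FILEPATH = N'C:\Caminho\Audios\', MAXSIZE = 100 MB, MAX_ROLLOVER_FILES = 10);
--   CREATE SERVER AUDIT SPECIFICATION [AuditSpec_Exemplo] FOR SERVER AUDIT [Audit_Exemplo] ADD (FAILED_LOGIN_GROUP) WITH (STATE = ON);
--   ALTER SERVER AUDIT [Audit_Exemplo] WITH (STATE = ON);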
17 | SELECT 18 | A.EventTimeLocal 19 | ,sequence_number 20 | ,succeeded 21 | ,session_id 22 | ,server_principal_name 23 | ,ACT.name as ActionName 24 | ,database_name 25 | ,object_name 26 | ,statement 27 | ,CONVERT(XML,additional_information) as AdditionaInfo 28 | ,A.class_type 29 | FROM 30 | ( 31 | SELECT TOP 600 32 | CONVERT(datetime,SWITCHOFFSET(convert(datetimeoffset,event_time),'-03:00')) EventTimeLocal 33 | ,A.* 34 | FROM 35 | master.sys.fn_get_audit_file('C:\Caminho\Audios\PrefixoArquivo*',NULL,NULL) A --> Mudar nome do arquivo! 36 | ORDER BY 37 | 1 DESC 38 | ) A 39 | LEFT JOIN 40 | sys.dm_audit_class_type_map AM 41 | ON AM.class_type = A.class_type 42 | LEFT JOIN 43 | sys.dm_audit_actions ACT 44 | ON ACT.action_id = A.action_id 45 | AND ACT.class_desc = AM.securable_class_desc 46 | 47 | --select * from sys.dm_server_audit_status 48 | --select * from sys.dm_xe_sessions where address = 0x000000000738B671 49 | --select * from sys.dm_xe_session_object_columns 50 | 51 | --select * from sys.dm_xe_objects where name = 'audit_event' 52 | --select * from sys.dm_xe_packages where guid = 'F235752A-D5C0-4C9A-A735-9C3B6F6E43B1' 53 | 54 | 55 | 56 | -------------------------------------------------------------------------------- /Backups/AddLogicDevice.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Adiciona um logical device. 8 | Geralmente fazemos backup direto para um arquivo. 9 | Mas o comando suporta BACKUP para um "device lógico", que é um arquivo fixo, e você referencia por um nome interno, ao invés do caminho. 10 | 11 | Eu confesso que raramente usei isso, mas devo ter guardado isso aqui para ter um exemplo dos parâmetros ou para resolver algum problema. 12 | O comentário com '--> Obsoleto', se não me engano, são parâmetros obsoletos. 13 | 14 | */ 15 | 16 | sp_addumpdevice 17 | @devtype = 'disk' 18 | ,@logicalname = 'DIR_PADRAO_BANCOS' 19 | ,@physicalname = 'C:\Program Files\Microsoft SQL Server\MSSQL10.MSSQLSERVER\MSSQL\DATA\' 20 | --> Obsoleto 21 | --,@cntrltype = 22 | --,@devstatus = 23 | 24 | 25 | -------------------------------------------------------------------------------- /Backups/Backup/BackupDetailed.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Lista a sequência de backups a partir do último backup full anterior a uma determinada data. 8 | Útil para uma rápida conferência dos backups feitos e dos destinos para onde foram gravados. 9 | Caso queira filtrar por banco, etc., adicione o filtro na primeira query (veja o exemplo comentado logo abaixo). 10 | 11 | */ 12 | 13 | IF OBJECT_ID('tempdb..#BackupBaseInfo') IS NOT NULL 14 | DROP TABLE #BackupBaseInfo; 15 | 16 | --> Aqui vamos carregar o backup_set_id encontrado com o filtro. 17 | -- Vamos filtrar somente os que estão após esse!
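-- Exemplo (hipotético) de filtro extra por banco, para acrescentar no WHERE da query abaixo:
--   AND BS.database_name = N'MeuBanco'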
18 | SELECT 19 | BS.database_name 20 | ,BaseSetId = MAX(BS.backup_set_id) 21 | INTO 22 | #BackupBaseInfo 23 | FROM 24 | msdb..backupset BS 25 | WHERE 26 | BS.is_copy_only = 0 27 | AND 28 | BS.backup_finish_date < '20250101' 29 | AND 30 | bs.type = 'D' 31 | GROUP BY 32 | BS.database_name 33 | OPTION(RECOMPILE) 34 | 35 | 36 | SELECT 37 | BS.database_name 38 | ,BS.backup_set_id 39 | ,BS.type 40 | ,BS.backup_finish_date 41 | ,BS.is_copy_only 42 | ,BS.is_snapshot 43 | ,BS.compressed_backup_size 44 | ,BS.backup_size 45 | ,BS.differential_base_guid 46 | ,BS.backup_set_uuid 47 | ,FS.* 48 | ,F.* 49 | FROM 50 | #BackupBaseInfo BB 51 | JOIN 52 | msdb..backupset BS 53 | ON BS.database_name = bb.database_name 54 | AND BS.backup_set_id >= BB.BaseSetId 55 | CROSS APPLY ( 56 | SELECT 57 | NumDevices = COUNT(*) 58 | FROM 59 | msdb..backupmediafamily BMF 60 | WHERE 61 | BMF.media_set_id = BS.media_set_id 62 | ) FS 63 | CROSS APPLY ( 64 | SELECT 65 | bmf.physical_device_name + char(13)+char(10) as 'data()' 66 | FROM 67 | msdb..backupmediafamily BMF 68 | WHERE 69 | BMF.media_set_id = BS.media_set_id 70 | ORDER BY 71 | BMF.media_family_id 72 | FOR XML PATH(''),TYPE 73 | ) F(devices) 74 | ORDER BY 75 | BS.database_name 76 | ,BS.backup_set_id -------------------------------------------------------------------------------- /Backups/Backup/BackupVolumetria_PorPeriodo.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Durante muito tempo eu senti falta de um script que me desse uma visão dos backups da instância. 8 | Cansei de ver clientes me perguntarem: Quanto de backup faz por dia, por mês, por ano, etc. 9 | Este script foi uma tentativa de conseguir responder isso mais rápido! 10 | 11 | Eu fiz ele em inglês, pois na época estava estudando muito inglês e queria forçar tentatr escrever (até hoje faço isso). 12 | Por isso, vai ver muito o inglês errado! 13 | 14 | Mas o script é bem legal! 15 | Você ajusta alguns parâmetros e consegue ter uma visão muito legal dos seus backups. 16 | Ele usa basicamnete a tabela do msdb..backupset. 17 | Se ela tiver muito grande, o script pode demorar um pouco. Importante monitorar a execução. 18 | 19 | 20 | eu ajustei a doc dos parâmetros para ficar em português para que você consiga usar! 21 | 22 | 23 | 24 | 25 | */ 26 | 27 | -- Created by Rodrigo Ribeiro Gomes (www.thesqltimes.com) 28 | DECLARE 29 | @RefDate datetime 30 | ,@period varchar(10) 31 | ,@GroupByDB bit 32 | ,@groupPeriod bit 33 | ,@reporttype int 34 | ,@considerCopyOnly bit 35 | ,@CustomWhere nvarchar(max) 36 | 37 | -- Veja a descrição abaixo para entender melhor como usar esses parâmetros! 38 | 39 | -- Especifique aqui uma data de referencia. Somente os backups a partir dessa data serão considerados! 40 | SET @RefDate = DATEADD(MONTH,-6,GETDATE()) 41 | 42 | -- Controla como será exibido os resultados 43 | -- 1 - Será exibido um resultado detalhado, onde você poderá ver por banco, etc. 44 | -- 2 - Será feito um resumo. Nesse modo alguns parâmetros abaixo são ignorados. 45 | SET @reporttype = 1 46 | 47 | -- Especifica um periodo (os mesmos valores aceitáveis pela função DATEDIFF). 48 | SET @period = 'DAY'; 49 | 50 | -- Se 1, o resultado final será gerado por banco de dados. Se 0, será agrupado pra instância inteira. 51 | SET @GroupByDB = 1; --> If 1, groups by database. If 0, groups all in instance. 52 | 53 | -- Se 1, agrupa pelo período (por exemplo, você vai ter os backups feitos no mesmo dia). 
Se 0, ignora o período. 54 | SET @groupPeriod = 0; 55 | 56 | -- Considera backups COPY_ONLY. Geralmente, para volumetria de produção, você não quer saber estes. 57 | SET @considerCopyOnly = 0; 58 | 59 | --> Cláusula where cusotmizada para ser aplicada na tabela backupset. 60 | -- Permite que você usa um filtro personalizado para escolher quais backups considerar. 61 | SET @CustomWhere = NULL; --> You can write a custom msdb filter. This filter will be applied before filters above. 62 | 63 | /* 64 | Descrição dos resultados: 65 | 66 | 67 | @ReportType = 1 (detalhado) 68 | No modo 1, o script vai coletar todos os backups feitos desde @RefData. 69 | O script vai agrupar por tipo de backup (coluna type de msdb..backupset). 70 | 71 | Se você especificar @GroupByDB = 1, o script também agrupa por banco de dados. 72 | Se você especificar @GroupByPeriod = 1, o script vai agrupar pelo período especificado em @Period. 73 | 74 | A coluna BackupPeriod representa esse período. 75 | Por exemplo, se @Period é 'MONTH', e o backup foi feito em '2024-01-31 00:18:20.000', BackupPeriod será '2024-01-01'. 76 | 77 | Você pode combinar @groupPeriod e @GroupByDB. As possibilidades são: 78 | @GroupByDB = 1 @groupPeriod =1 79 | Isso vai mostrar as info de backup por banco e por período, desde @RefData. É útil para uma visão detalhada. 80 | @GroupByDB = 1 @groupPeriod = 0 81 | Isso vai mostrar as informações por banco, desde @RefData. Útil para uma visão de cada banco. 82 | @GroupByDB = 0 @groupPeriod = 1 83 | Isso vai mostrar todos os backups agrpados por período, desde @RefData. 84 | Útil para uma visão geral da instância e em período específicos (mensal, por hora, semanal, etc.) 85 | @GroupByDB = 0 @groupPeriod = 0 86 | Isso via mostrar uma visão geral da instância, desde @RefDate. 87 | 88 | 89 | 90 | Colunas: 91 | 92 | ServerInstance - Nome da instância 93 | BackupSource - Nome do banco. Será null se @GroupByDB = 0 94 | BackupPeriod - A primeira data/hora do período. Exemplo: se @period = MONTH, então YEAR-MONTH-01 00:00:00. 95 | Se @period = DAY, então YEAR-MONTH-DAY 00:00:00 96 | type - Tipo de backup (mesmo de msdb..backupset) 97 | lastBackup - A data do último backup nesse período. Se @groupPeriod = 0, então será a data do último backup. 98 | backupSize - O tamanho total em MB 99 | avgSize - Tamanho médio de cada backup no período. 100 | backupCompressedSize - Tamanho total do backup, comprimido, em MB 101 | avgCompressedSize - Tamanho médio de cada backup comprimido 102 | backupCount - Quantidade total de backups feitas 103 | CompressionSavings - Taxa de compressão. Exemplo: 0.79 é 79% de compressão. Formula 1-(backupCompressedSize/backupSize) 104 | 105 | @ReportType = 2 106 | 107 | In this mode, script will agregates information by period. Using this options enable @groupByPeriod automatically. 108 | The script will groups data by period, then groups by database or not. 109 | 110 | Neste modo o script vai agregar a informação por período. 111 | Habilitando esse modo, automaticamente, habilita @GroupPeriod. 112 | 113 | O script primeiro agrupa por período, e depois por banco (se @groupByDb = 1). 114 | Esse modo é bem útil para ver a periodicidade de backup e responde perguntas do tipo: 115 | - Quanto de backup faço por período? (por dia, por mês, etc.) 116 | - E, você pode responder para um banco específico 117 | 118 | 119 | Columns: 120 | 121 | BackupSource - Nome do banco, se @groupByDB = 1 122 | type - Tipo de Backup 123 | Periodicity - Periodicidade. Baseado em @Period. 
Se @Period é MONTH, significa "mensal" (mas a coluna mostrará o mesmo valor de @period). Aqui é interessante entender que é como se você estivesse querendos saber quando de backup eu tenho "mensalmente" para esta instancia ou essa base? Ou "diariamente" 124 | LastOnPeriod - Último backup feito desde @RefDate 125 | AvgSizeOnPeriodicity -Tamanho médio do backup feito no periodo, em MB. Exemplo, se periodicity é MONTH (mensal), e essa coluna mostra o valor 100000, isso significa que foi uma média de 100GB por mês 126 | AvgCompressedSizeOnPeriodicity - O mesmo que o anterior, porém considerando o tamanho comprimido. 127 | BackupCountOnPeriod - Total de backups desde @RefDate. 128 | AvgCountPerPeriodicity - Quantidade média feita no período.Por exemplo, se periodicity é MONTH (mensal), e essa coluna mostra 1000, isso significa que foi uma média de 1000 backups por mês! 129 | */ 130 | 131 | --------- 132 | 133 | IF OBJECT_ID('tempdb..#BackupFinalData') IS NOT NULL 134 | DROP TABLE #BackupFinalData; 135 | 136 | IF OBJECT_ID('tempdb..#BackupInfo') IS NOT NULL 137 | DROP TABLE #BackupInfo; 138 | 139 | CREATE TABLE #BackupInfo( 140 | backup_set_id bigint 141 | ,database_name sysname 142 | ,backup_finish_date datetime 143 | ,type varchar(5) 144 | ,backup_size numeric(20,0) 145 | ,compressed_backup_size numeric(20,0) 146 | 147 | ) 148 | 149 | -- Validations... 150 | IF @reporttype = 2 151 | SET @groupPeriod = 1; 152 | 153 | IF LEN(@CustomWhere ) = 0 154 | SET @CustomWhere = NULL; 155 | 156 | -- BackupPeriod: Maps a date to a specific period based on passed parameters. 157 | IF OBJECT_ID('tempdb..#BackupPeriod') IS NOT NULL 158 | DROP TABLE #BackupPeriod; 159 | 160 | CREATE TABLE #BackupPeriod( 161 | originalDate datetime -- Represent the original date 162 | ,periodDate datetime -- Represent the date in the period requested 163 | ) 164 | 165 | 166 | 167 | 168 | DECLARE 169 | @cmd nvarchar(4000) 170 | ,@compressedExpr nvarchar(100) 171 | ,@copyOnly nvarchar(500) 172 | ,@sqlVersion int 173 | ; 174 | 175 | -- Getting SQL Version 176 | SELECT @sqlVersion = LEFT(V.Ver,CHARINDEX('.',V.Ver)-1) FROM (SELECT CONVERT(varchar(30),SERVERPROPERTY('ProductVersion')) as Ver) V 177 | 178 | 179 | -- If supports compression, then add compression column. 180 | 181 | IF EXISTS(SELECT * FROM msdb.INFORMATION_SCHEMA.COLUMNS C WHERE C.TABLE_NAME = 'backupset' AND COLUMN_NAME = 'compressed_backup_size') 182 | SET @compressedExpr = 'BS.compressed_backup_size'; 183 | ELSE 184 | SET @compressedExpr = 'BS.backup_size'; 185 | 186 | IF EXISTS(SELECT * FROM msdb.INFORMATION_SCHEMA.COLUMNS C WHERE C.TABLE_NAME = 'backupset' AND COLUMN_NAME = 'is_copy_only') 187 | SET @copyOnly = 'BS.is_copy_only = 0'; 188 | ELSE 189 | SET @copyOnly = NULL; 190 | 191 | IF ISNULL(@considerCopyOnly,0) = 1 192 | SET @copyOnly = NULL; 193 | 194 | --> Query for collect base backup data 195 | SET @cmd = N' 196 | INSERT INTO 197 | #BackupInfo 198 | SELECT -- The DISTINCT remove duplicates generated by join 199 | BS.backup_set_id 200 | ,BS.database_name 201 | ,BS.backup_finish_date 202 | ,BS.type 203 | ,BS.backup_size 204 | ,'+@compressedExpr+' as compressedSize 205 | FROM 206 | ( 207 | SELECT 208 | * 209 | FROM 210 | msdb.dbo.backupset BS 211 | WHERE 212 | 1 = 1 213 | -- #CustomWhereFilter 214 | '+ISNULL(' AND ('+@CustomWhere+')','')+' 215 | ) BS 216 | WHERE 217 | BS.backup_finish_date >= @RefDate 218 | 219 | '+ISNULL('AND '+@copyOnly,'')+' 220 | ' 221 | -- Run Query! 
222 | EXEC sp_executesql @cmd,N'@RefDate datetime',@RefDate; 223 | 224 | 225 | 226 | -- Converting backup dates to period dates... 227 | DECLARE @PeriodMinutes int; 228 | 229 | SET @cmd = N' 230 | SET @PeriodMinutes = DATEDIFF(MI,''19000101'',DATEADD('+@period+',1,''19000101'')) 231 | 232 | INSERT INTO 233 | #BackupPeriod 234 | SELECT 235 | D.backup_finish_date AS originalDate 236 | ,DATEADD('+@period+',DATEDIFF('+@period+',''19000101'',D.backup_finish_date),''19000101'') as periodDate 237 | FROM 238 | ( 239 | SELECT DISTINCT 240 | backup_finish_date 241 | FROM 242 | #BackupInfo BI 243 | ) D 244 | '; 245 | EXEC sp_executesql @cmd,N'@PeriodMinutes int OUTPUT',@PeriodMinutes OUTPUT; 246 | 247 | 248 | SELECT 249 | @@SERVERNAME as ServerInstance 250 | ,B.BackupSource 251 | ,B.BackupPeriod 252 | ,B.type 253 | ,MAX(B.backup_finish_date) lastBackup 254 | ,CONVERT(decimal(17,2),SUM(backup_size/1024/1024)) backupSize 255 | ,CONVERT(decimal(17,2),AVG(backup_size/1024/1024)) avgSize 256 | ,CONVERT(decimal(17,2),SUM(compressed_backup_size/1024/1024)) backupCompressedSize 257 | ,CONVERT(decimal(17,2),AVG(compressed_backup_size/1024/1024)) avgCompressedSize 258 | ,CONVERT(bigint,COUNT(backup_set_id)) backupCount 259 | INTO 260 | #BackupFinalData 261 | FROM 262 | ( 263 | SELECT 264 | CASE 265 | WHEN @groupPeriod = 1 THEN BP.periodDate 266 | ELSE NULL 267 | END BackupPeriod 268 | ,CASE 269 | WHEN @GroupByDB = 1 THEN BI.database_name 270 | ELSE NULL 271 | END as BackupSource 272 | ,BI.* 273 | FROM 274 | #BackupInfo BI 275 | INNER JOIN 276 | #BackupPeriod BP 277 | ON BP.originalDate = BI.backup_finish_date 278 | ) B 279 | GROUP BY 280 | B.BackupSource 281 | ,B.BackupPeriod 282 | ,B.type 283 | 284 | IF @reporttype = 1 --'PERIOD_DETAILED' 285 | SELECT 286 | * 287 | ,1-CONVERT(decimal(3,2),backupCompressedSize/backupSize) as CompressionSavings 288 | FROM 289 | #BackupFinalData BFD 290 | ORDER BY 291 | BFD.BackupSource 292 | ,BFD.BackupPeriod 293 | ,BFD.type 294 | 295 | IF @reporttype = 2 --'PERIOD_STATS' 296 | SELECT 297 | PS.* 298 | ,AvgFreq = F.Formmatted 299 | FROM 300 | ( 301 | SELECT 302 | BFD.BackupSource 303 | ,BFD.type 304 | ,@period As Periodicity 305 | ,MAX(lastBackup) aS LastOnPeriod 306 | ,AVG(backupSize) AS AvgSizeOnPeriodicity 307 | ,AVG(backupCompressedSize) AS AvgCompressedSizeOnPeriodicity 308 | ,COUNT(*) AS Periods 309 | ,SUM(backupCount) AS BackupCountOnPeriod 310 | ,CEILING(AVG(backupCount*1.)) AS AvgCountPerPeriodicit 311 | FROM 312 | #BackupFinalData BFD 313 | GROUP BY 314 | BFD.BackupSource 315 | ,BFD.type 316 | 317 | WITH ROLLUP 318 | ) PS 319 | CROSS APPLY ( 320 | SELECT 321 | Formmatted = ISNULL(NULLIF(t.Y+'y','0y'),'') 322 | +ISNULL(NULLIF(t.Mo+'mo','0mo'),'') 323 | +ISNULL(NULLIF(t.D+'d','0d'),'') 324 | +ISNULL(NULLIF(t.H+'h','0h'),'') 325 | +ISNULL(NULLIF(t.M+'m','0m'),'') 326 | +ISNULL(NULLIF(t.S+'s','0s'),'') 327 | FROM 328 | ( 329 | SELECT 330 | CONVERT(varchar(10),(FC.AvgFreq%60)) as S 331 | ,CONVERT(varchar(10),(FC.AvgFreq/60)%60) as M 332 | ,CONVERT(varchar(10),(FC.AvgFreq/3600)%24) as H 333 | ,CONVERT(varchar(10),(FC.AvgFreq/86400)%30) as D 334 | ,CONVERT(varchar(10),(FC.AvgFreq/2592000)%12) as Mo 335 | ,CONVERT(varchar(10),(FC.AvgFreq/31104000)) as Y 336 | FROM 337 | ( 338 | SELECT AvgFreq = CONVERT(int,@PeriodMinutes/AvgCountPerPeriodicit)*60 339 | ) FC 340 | ) t 341 | ) F 342 | ORDER BY 343 | BackupSource 344 | ,type 345 | 346 | 347 | -------------------------------------------------------------------------------- /Backups/Backup/Backup_DestinationInfo.sql: 
-------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Essa é uma das primeiras versões de um script para trazer a sequência de backups e logs para um restore fácil. 8 | Tenho um melhor (pasta Restore). Mas fica aqui para ideias! 9 | */ 10 | 11 | 12 | select database_name,type,begins_log_chain,backup_finish_date,is_copy_only,has_incomplete_metadata,d.recovery_model_desc,bmf.physical_device_name,o.* 13 | ,backup_size 14 | 15 | from msdb..backupset bs 16 | inner join 17 | sys.databases d 18 | on d.name = bs.database_name 19 | inner join 20 | msdb..backupmediafamily bmf 21 | on bmf.media_set_id = bs.media_set_id 22 | outer apply(select top 1 bs2.backup_finish_date as LastFull from msdb..backupset bs2 where bs2.database_name = bs.database_name and bs2.type = 'D' 23 | order by bs2.backup_finish_date desc 24 | ) o 25 | where 26 | d.name = 'master' and type = 'L' --> ajustar filtros 27 | order by 28 | bs.backup_finish_date desc 29 | 30 | -------------------------------------------------------------------------------- /Backups/Backup/Backup_PolicyEstimator.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Esse aqui eu não lembro, mas, pelo nome, foi alguma tentativa frustrada de criar algo que estimasse a política de backup, com base no log. 8 | Mantendo aqui para, quem sabe, um dia, voltar e terminar! 9 | 10 | 11 | 12 | */ 13 | 14 | 15 | ;WITH backups AS 16 | ( 17 | SELECT 18 | BS.backup_set_id 19 | ,BS.database_name 20 | ,BS.type 21 | ,BS.backup_finish_date 22 | FROM 23 | msdb.dbo.backupset BS 24 | WHERE 25 | BS.type IN ('L') 26 | AND 27 | BS.backup_finish_date >= '20150601' AND BS.backup_finish_date < '20150701' 28 | AND 29 | BS.is_copy_only = 0 30 | ) 31 | SELECT 32 | BS.* 33 | ,BSA.backup_finish_date 34 | ,DATEDIFF(MINUTE,BSA.backup_finish_date,BS.backup_finish_date) DiffMinutes 35 | ,DATEDIFF(HOUR,BSA.backup_finish_date,BS.backup_finish_date) DiffHours 36 | ,DATEDIFF(DAY,BSA.backup_finish_date,BS.backup_finish_date) DiffDias 37 | FROM 38 | backups BS 39 | CROSS APPLY 40 | ( 41 | SELECT TOP 1 42 | * 43 | FROM 44 | backups BSA 45 | WHERE 46 | BSA.database_name = BS.database_name 47 | AND 48 | BSA.type = BS.type 49 | AND 50 | BSA.backup_finish_date <= BS.backup_finish_date 51 | AND 52 | BSA.backup_set_id != BS.backup_set_id 53 | ORDER BY 54 | BSA.backup_finish_date DESC 55 | ) BSA 56 | ORDER BY 57 | BS.database_name 58 | ,BS.type 59 | ,BS.backup_finish_date -------------------------------------------------------------------------------- /Backups/Backup/Backuphistory.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Traz o histórico de backups de um banco específico, junto com o local. 8 | Útil para ter uma visão um pouco mais detalhada e rápida de como os backups estão sendo feitos, quando e onde foram feitos.
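Com o caminho retornado na coluna destination, dá para conferir o arquivo diretamente (caminho hipotético, só um esboço):

    RESTORE HEADERONLY FROM DISK = N'X:\Backups\MeuBanco_FULL.bak';
    RESTORE VERIFYONLY FROM DISK = N'X:\Backups\MeuBanco_FULL.bak';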
9 | 10 | */ 11 | 12 | SELECT 13 | BS.backup_set_id 14 | ,BS.database_name 15 | ,BS.backup_finish_date 16 | ,BS.type 17 | ,BS.is_copy_only 18 | ,MF.destination 19 | ,DaysAgo = DATEDIFF(DAY,backup_finish_date,CURRENT_TIMESTAMP) 20 | FROM 21 | msdb..backupset BS 22 | OUTER APPLY 23 | ( 24 | SELECT 25 | BMF.physical_device_name+NCHAR(13)+NCHAR(10) 26 | FROM 27 | msdb..backupmediafamily BMF 28 | WHERE 29 | BMF.media_set_id = BS.media_set_id 30 | FOR XML PATH(''),TYPE 31 | ) MF(destination) 32 | 33 | WHERE --> Ajusto manualmente os filtros conforme o caso! 34 | 35 | BS.database_name = 'master' --> coloque aqui os bancos que quer consultar 36 | AND 37 | BS.is_copy_only = 0 38 | AND 39 | BS.type = 'D' --> somente full, mas pode ajustar, ex,: bs.type in ('L','D') 40 | ORDER BY 41 | BS.backup_set_id DESC -------------------------------------------------------------------------------- /Backups/Backup/CheckBackupDoing.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Report simples e rápido para saber o último backup de cada base por tipo. 8 | Uso esse quando eu quero uma visã rápida e simples se o backup está sendo feito! 9 | 10 | Somente banco que estão ONLINE. 11 | 12 | */ 13 | IF OBJECT_ID('tempdb..#BackupInfo') IS NOT NULL 14 | DROP TABLE #BackupInfo; 15 | 16 | IF OBJECT_ID('tempdb..#DatabaseLastBackups') IS NOT NULL 17 | DROP TABLE #DatabaseLastBackups; 18 | 19 | SELECT 20 | BS.database_name 21 | ,BS.type 22 | ,BS.backup_finish_date 23 | INTO 24 | #BackupInfo 25 | FROM 26 | msdb..backupset BS 27 | WHERE 28 | BS.backup_set_id = ( 29 | SELECT TOP 1 30 | BS2.backup_set_id 31 | FROM 32 | msdb..backupset BS2 33 | WHERE 34 | BS2.database_name = BS.database_name 35 | AND 36 | BS2.type = BS.type 37 | AND 38 | EXISTS ( 39 | SELECT 40 | * 41 | FROM 42 | msdb..backupmediafamily BMF 43 | WHERE 44 | BMF.media_set_id = BS2.media_set_id 45 | ) 46 | AND BS2.is_copy_only = 0 47 | ORDER BY 48 | BS2.backup_set_id DESC 49 | ) 50 | 51 | 52 | SELECT 53 | D.NAME 54 | ,BI.* 55 | INTO 56 | #DatabaseLastBackups 57 | FROM 58 | sysdatabases D 59 | LEFT JOIN 60 | ( 61 | SELECT 62 | BI.database_name 63 | ,MAX(CASE WHEN BI.type = 'D' THEN BI.backup_finish_date END) as LastFullBackup 64 | ,MAX(CASE WHEN BI.type = 'I' THEN BI.backup_finish_date END) as LastDiffBackup 65 | ,MAX(CASE WHEN BI.type = 'L' THEN BI.backup_finish_date END) as LastLogBackup 66 | FROM 67 | #BackupInfo BI 68 | GROUP BY 69 | BI.database_name 70 | ) BI 71 | ON BI.database_name = D.name 72 | WHERE 73 | d.name not in ('tempdb','model') 74 | AND 75 | (ISNULL(DATABASEPROPERTYEX(d.name,'IsOffline'),0) = 0 AND ISNULL(DATABASEPROPERTYEX(d.name,'Status'),'ONLINE') = 'ONLINE' ) 76 | 77 | SELECT 78 | * 79 | ,DATEDIFF(DAY,LastFullBackup,CURRENT_TIMESTAMP) TimePassedFull 80 | FROM 81 | #DatabaseLastBackups 82 | ORDER BY 83 | LastFullBackup 84 | 85 | 86 | -------------------------------------------------------------------------------- /Backups/Backup/CheckBackupLatency.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Esse foi um script que fiz para facilitar o acesso a info de backups de qualquer banco. 8 | Eu nem lembro pq deixei em ingles, eu acho que era pela prática (vide pelá péssima gramática e sintaxe). 
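Exemplo de chamada (nomes e valores hipotéticos, só um esboço), usando o XML para dar uma latência mais agressiva a um banco específico:

    EXEC sp_CheckLastBackup
         @LatencyFULL = '7 DAY'
        ,@LatencyDIFF = '1 DAY'
        ,@LatencyLOG = '1 HOUR'
        ,@LatencyTableXML = '<db name="BancoCritico" Full="1 DAY" Diff="4 HOUR" Log="15 MINUTE" />'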
9 | 10 | Acho que após esse script,e u fiz outros melhores, mas, mantive aqui pois há algumas técnicas legais que usei para resolver algumas coisinhas. 11 | Fica de referência e quem sabe não uso futuramente em alguns novos scripts 12 | 13 | A palavra "Latency" do nome do arquivo acho que foi usada incorremtanete... 14 | O objetivo principal era responder: "Tem quanto tempo que não faço um backup X do banco B?" 15 | 16 | */ 17 | 18 | USE master 19 | GO 20 | 21 | IF OBJECT_ID('dbo.sp_CheckLastBackup') IS NULL 22 | EXEC('CREATE PROCEDURE sp_CheckLastBackup AS SELECT 0 AS StubVersion'); 23 | EXEC sp_MS_marksystemobject sp_CheckLastBackup; 24 | GO 25 | 26 | ALTER PROCEDURE sp_CheckLastBackup 27 | ( 28 | @LatencyFULL varchar(100) = '7 DAY' 29 | ,@LatencyDIFF varchar(100) = '1 DAY' 30 | ,@LatencyLOG varchar(100) = '1 HOUR' 31 | ,@MediaFilter varchar(8000) = '%' 32 | ,@IncludeCopyOnly bit = 0 33 | ,@LatencyTable varchar(250) = NULL 34 | ,@LatencyTableXML varchar(8000) = NULL 35 | ) 36 | AS 37 | 38 | /** 39 | This scripts validates backups by checking the time passed since last backup time. This time difference is the latency. 40 | You can specify the time that you can validate by backup type or by database. This time is called the CheckLatency. 41 | 42 | If latency of a specific database is out of CheckLatency, then this database/backyp type pair is marked as invalid. 43 | The action take for invalid is send a report. Futher, new actions can be add. 44 | 45 | Latency Expressions 46 | Latency expressions are simply a way of you specify the check latency. 47 | You make this by using same values are DATEDIFF function. 48 | For example, you can specify latencies of 7 DAY, 1 MONTH, 5 WEEK. 49 | 50 | **/ 51 | -- TestParams... 52 | --DECLARE 53 | -- @Database sysname 54 | -- ,@LatencyFULL varchar(100) 55 | -- ,@LatencyDIFF varchar(100) 56 | -- ,@LatencyLOG varchar(100) 57 | -- ,@MediaFilter varchar(8000) 58 | -- ,@IncludeCopyOnly bit 59 | -- ,@LatencyTable varchar(250) 60 | -- ,@LatencyTableXML varchar(8000) 61 | 62 | --SET @LatencyFULL = '7 DAY'; 63 | --SET @LatencyDIFF = '1 DAY'; 64 | --SET @LatencyLOG = '1 HOUR'; 65 | --SET @MediaFilter = '%' 66 | --SET @IncludeCopyOnly = 0; 67 | --SET @LatencyTableXML = '' 68 | 69 | -- Will will store latency infomration on this table. 70 | DECLARE 71 | @LatencyNumber int 72 | ,@LatencyPeriod varchar(100) 73 | ,@FullTimeLimit datetime 74 | ,@DiffTimeLimit datetime 75 | ,@LogTimeLimit datetime 76 | ,@tsql nvarchar(4000) 77 | ,@tsql_CopyOnly nvarchar(200) 78 | 79 | IF OBJECT_ID('tempdb..#DatabaseLatency') IS NOT NULL 80 | DROP TABLE #DatabaseLatency; 81 | CREATE TABLE #DatabaseLatency(id int NOT NULL PRIMARY KEY IDENTITY,databaseName varchar(200) UNIQUE, FullExpr varchar(100), DiffExpr varchar(100), LogExpr varchar(100), FullBaseTime datetime, DiffBaseTime datetime, LogBaseTime datetime) 82 | 83 | 84 | -- This is implemented as a varchar for comptaibility reasons with sql 2000. XML data type dont exists on 2000... 
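-- Formato esperado de @LatencyTableXML (nomes de banco hipotéticos), conforme o .nodes('//db') logo abaixo:
--   <dbs>
--     <db name="Banco1" Full="7 DAY" Diff="1 DAY" Log="1 HOUR" />
--     <db name="Banco2" Full="1 DAY" Diff="4 HOUR" Log="15 MINUTE" />
--   </dbs>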
85 | IF @LatencyTableXML IS NOT NULl 86 | BEGIN 87 | -- XML FORMAT: 88 | SET @tsql = ' 89 | DECLARE @XMLTable XML; 90 | SET @XMLTable = CONVERT(XML,@LatencyXML) 91 | 92 | SELECT 93 | DBNodes.x.value(''@name'',''varchar(200)'') 94 | ,DBNodes.x.value(''@Full'',''varchar(100)'') 95 | ,DBNodes.x.value(''@Diff'',''varchar(100)'') 96 | ,DBNodes.x.value(''@Log'',''varchar(100)'') 97 | FROM 98 | @XMLTable.nodes(''//db'') DBNodes(x) 99 | ' 100 | 101 | INSERT INTO #DatabaseLatency(databaseName,FullExpr,DiffExpr,LogExpr) EXEC sp_executesql @tsql,N'@LatencyXML varchar(8000)',@LatencyTableXML; 102 | 103 | END ELSE IF OBJECT_ID(@LatencyTable) IS NOT NULL 104 | BEGIN 105 | SET @tsql = 'SELECT * FROM '+@LatencyTable; 106 | INSERT INTO #DatabaseLatency(databaseName,FullExpr,DiffExpr,LogExpr) EXEC sp_executesql @tsql; 107 | END 108 | 109 | 110 | 111 | -- If a default isnot rpesent, insert the paraemters... 112 | IF NOT EXISTS(SELECT * FROM #DatabaseLatency WHERE databaseName IS NULL) 113 | INSERT INTO #DatabaseLatency(FullExpr,DiffExpr,LogExpr) VALUES(@LatencyFULL,@LatencyDIFF,@LatencyLOG) 114 | 115 | DECLARE 116 | @CurrentId int 117 | ,@col_FullExpr varchar(100) 118 | ,@col_DiffExpr varchar(100) 119 | ,@col_LogExpr varchar(100) 120 | SET @CurrentId = 1; 121 | 122 | WHILE EXISTS(SELECT * FROM #DatabaseLatency WHERE id >= @CurrentId) 123 | BEGIN 124 | SELECT 125 | @col_FullExpr = FullExpr 126 | ,@col_DiffExpr = DiffExpr 127 | ,@col_LogExpr = LogExpr 128 | FROM 129 | #DatabaseLatency 130 | WHERE 131 | id = @CurrentId 132 | 133 | --First, remove extra spaces... 134 | SET @col_FullExpr = REPLACE(REPLACE(REPLACE(@col_FullExpr, ' ', '*^'), '^*', ''), '*^', ' '); 135 | SET @col_DiffExpr = REPLACE(REPLACE(REPLACE(@col_DiffExpr, ' ', '*^'), '^*', ''), '*^', ' '); 136 | SET @col_LogExpr = REPLACE(REPLACE(REPLACE(@col_LogExpr, ' ', '*^'), '^*', ''), '*^', ' '); 137 | 138 | -- Next, lets separate number and data part... 139 | SET @LatencyNumber = LEFT(@col_FullExpr,CHARINDEX(' ',@col_FullExpr)) 140 | SET @LatencyPeriod = RIGHT(@col_FullExpr,LEN(@col_FullExpr)-CHARINDEX(' ',@col_FullExpr)) 141 | SET @tsql = 'SET @ReturnValue = DATEADD('+@LatencyPeriod+',-@LatencyNumber,CURRENT_TIMESTAMP)' 142 | EXEC sp_executesql @tsql,N'@ReturnValue datetime OUTPUT,@LatencyNumber int',@FullTimeLimit OUTPUT,@LatencyNumber ; 143 | 144 | -- Next, lets separate number and data part... 145 | SET @LatencyNumber = LEFT(@col_DiffExpr,CHARINDEX(' ',@col_DiffExpr)) 146 | SET @LatencyPeriod = RIGHT(@col_DiffExpr,LEN(@col_DiffExpr)-CHARINDEX(' ',@col_DiffExpr)) 147 | SET @tsql = 'SET @ReturnValue = DATEADD('+@LatencyPeriod+',-@LatencyNumber,CURRENT_TIMESTAMP)' 148 | EXEC sp_executesql @tsql,N'@ReturnValue datetime OUTPUT,@LatencyNumber int',@DiffTimeLimit OUTPUT,@LatencyNumber ; 149 | 150 | -- Next, lets separate number and data part... 151 | SET @LatencyNumber = LEFT(@col_LogExpr,CHARINDEX(' ',@col_LogExpr)) 152 | SET @LatencyPeriod = RIGHT(@col_LogExpr,LEN(@col_LogExpr)-CHARINDEX(' ',@col_LogExpr)) 153 | SET @tsql = 'SET @ReturnValue = DATEADD('+@LatencyPeriod+',-@LatencyNumber,CURRENT_TIMESTAMP)' 154 | EXEC sp_executesql @tsql,N'@ReturnValue datetime OUTPUT,@LatencyNumber int',@LogTimeLimit OUTPUT,@LatencyNumber ; 155 | 156 | UPDATE 157 | #DatabaseLatency 158 | SET 159 | FullBaseTime = @FullTimeLimit 160 | ,DiffBaseTime = @DiffTimeLimit 161 | ,LogBaseTime = @LogTimeLimit 162 | WHERE 163 | id = @CurrentId 164 | 165 | SET @CurrentId = @CurrentId + 1; 166 | END 167 | 168 | 169 | 170 | 171 | -- Get last backups... 
172 | IF OBJECT_ID('tempdb..#BackupInfo') IS NOT NULL 173 | DROP TABLE #BackupInfo; 174 | CREATE TABLE #BackupInfo(database_name sysname, type varchar(5), backup_finish_date datetime) 175 | 176 | IF OBJECT_ID('tempdb..#DatabaseLastBackups') IS NOT NULL 177 | DROP TABLE #DatabaseLastBackups; 178 | 179 | 180 | IF EXISTS(SELECT * FROM msdb.INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'backupset' AND COLUMN_NAME = 'is_copy_only') 181 | BEGIN 182 | SET @tsql_CopyOnly = 'BS.copy_only = 0'; 183 | 184 | IF @IncludeCopyOnly = 0 185 | SET @tsql_CopyOnly = NULL; 186 | END 187 | 188 | 189 | 190 | SET @tsql = N' 191 | SELECT 192 | BS.database_name 193 | ,BS.type 194 | ,BS.backup_finish_date 195 | FROM 196 | msdb..backupset BS 197 | WHERE 198 | BS.backup_set_id = ( 199 | SELECT TOP 1 200 | BS2.backup_set_id 201 | FROM 202 | msdb..backupset BS2 203 | WHERE 204 | BS2.database_name = BS.database_name 205 | AND 206 | BS2.type = BS.type 207 | AND 208 | EXISTS ( 209 | SELECT 210 | * 211 | FROM 212 | msdb..backupmediafamily BMF 213 | WHERE 214 | BMF.media_set_id = BS2.media_set_id 215 | AND 216 | BMF.physical_device_name like @MediaFilter 217 | ) 218 | ORDER BY 219 | BS2.backup_set_id DESC 220 | ) 221 | 222 | '+ISNULL('AND'+@tsql_CopyOnly,'')+' 223 | ' 224 | 225 | INSERT INTO #BackupInfo 226 | EXEC sp_executesql @tsql,N'@MediaFilter varchar(8000)',@Mediafilter; 227 | 228 | SELECT 229 | D.NAME 230 | ,BI.* 231 | INTO 232 | #DatabaseLastBackups 233 | FROM 234 | sysdatabases D 235 | LEFT JOIN 236 | ( 237 | SELECT 238 | BI.database_name 239 | ,MAX(CASE WHEN BI.type = 'D' THEN BI.backup_finish_date END) as LastFullBackup 240 | ,MAX(CASE WHEN BI.type = 'I' THEN BI.backup_finish_date END) as LastDiffBackup 241 | ,MAX(CASE WHEN BI.type = 'L' THEN BI.backup_finish_date END) as LastLogBackup 242 | FROM 243 | #BackupInfo BI 244 | GROUP BY 245 | BI.database_name 246 | ) BI 247 | ON BI.database_name = D.name 248 | WHERE 249 | d.name not in ('tempdb','model') 250 | AND 251 | (ISNULL(DATABASEPROPERTYEX(d.name,'IsOffline'),0) = 0 AND ISNULL(DATABASEPROPERTYEX(d.name,'Status'),'ONLINE') = 'ONLINE' ) 252 | 253 | IF OBJECT_ID('tempdb..#BackupStatus') IS NOT NULL 254 | DROP TABLE #BackupStatus; 255 | 256 | SELECT 257 | DLB.* 258 | ,ISNULL(DLT.FullBaseTime,DLTDEF.FullBaseTime) FullBaseTime 259 | ,ISNULL(DLT.DiffBaseTime,DLTDEF.DiffBaseTime) DiffBaseTime 260 | ,ISNULL(DLT.LogBaseTime,DLTDEF.LogBaseTime) LogBaseTime 261 | ,CASE 262 | WHEN ISNULL(LastFullBackup,0) < ISNULL(DLT.FullBaseTime,DLTDEF.FullBaseTime) THEN 1 263 | ELSE 0 264 | END as FullOutOfTime 265 | ,CASE 266 | WHEN ISNULL(LastDiffBackup,0) < ISNULL(DLT.DiffBaseTime,DLTDEF.DiffBaseTime) THEN 1 267 | ELSE 0 268 | END as DiffOutOfTime 269 | ,CASE 270 | WHEN ISNULL(LastLogBackup,0) < ISNULL(DLT.LogBaseTime,DLTDEF.LogBaseTime) THEN 1 271 | ELSE 0 272 | END as LogOutOfTime 273 | INTO 274 | #BackupStatus 275 | FROM 276 | #DatabaseLastBackups DLB 277 | LEFT JOIN 278 | #DatabaseLatency DLTDEF 279 | ON DLTDEF.databaseName IS NULL 280 | LEFT JOIN 281 | #DatabaseLatency DLT 282 | ON DLT.databaseName = DLB.database_name 283 | 284 | 285 | SELECT 286 | * 287 | FROM 288 | #BackupStatus BS 289 | WHERE 290 | BS.FullOutOfTime = 1 291 | OR 292 | BS.LogOutOfTime = 1 293 | OR 294 | BS.DiffOutOfTime = 1 295 | 296 | -------------------------------------------------------------------------------- /Backups/Backup/FrequencyAnalyzer_v2.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Author 4 | Rodrigo Ribeiro Gomes 5 | 6 | # DEtalhes 7 | 
Tenta descobrir a frequencia com que o backup é feito! 8 | 9 | */ 10 | 11 | -- Created by Rodrigo Ribeiro Gomes (www.thesqltimes.com) 12 | DECLARE 13 | @RefDate datetime 14 | ,@considerCopyOnly bit 15 | ,@CustomWhere nvarchar(max) 16 | ,@FullTolerance int 17 | ,@DiffTolerance int 18 | ,@LogTolerance int 19 | 20 | 21 | SET @RefDate = '20250101'; 22 | SET @considerCopyOnly = 0; --> Control if script must include backups taken with "COPY_ONLY" (2005+) option. 23 | SET @CustomWhere = NULL; --> You can write a custom msdb filter. This filter will be applied before filters above. 24 | SET @FullTolerance = 60 --> controla o arrendondamento... 25 | SET @DiffTolerance = 60 26 | SET @LogTolerance = 60 27 | 28 | /* 29 | Results descriptions: 30 | 31 | 32 | */ 33 | 34 | --------- 35 | 36 | IF OBJECT_ID('tempdb..#BackupFinalData') IS NOT NULL 37 | DROP TABLE #BackupFinalData; 38 | 39 | IF OBJECT_ID('tempdb..#BackupInfo') IS NOT NULL 40 | DROP TABLE #BackupInfo; 41 | 42 | CREATE TABLE #BackupInfo( 43 | backup_set_id bigint 44 | ,database_name sysname 45 | ,backup_finish_date datetime 46 | ,type varchar(5) 47 | ,backup_size numeric(20,0) 48 | ,compressed_backup_size numeric(20,0) 49 | 50 | ) 51 | 52 | IF LEN(@CustomWhere ) = 0 53 | SET @CustomWhere = NULL; 54 | 55 | 56 | DECLARE 57 | @cmd nvarchar(4000) 58 | ,@compressedExpr nvarchar(100) 59 | ,@copyOnly nvarchar(500) 60 | ,@sqlVersion int 61 | ; 62 | 63 | -- Getting SQL Version 64 | SELECT @sqlVersion = LEFT(V.Ver,CHARINDEX('.',V.Ver)-1) FROM (SELECT CONVERT(varchar(30),SERVERPROPERTY('ProductVersion')) as Ver) V 65 | 66 | 67 | -- If supports compression, then add compression column. 68 | 69 | IF EXISTS(SELECT * FROM msdb.INFORMATION_SCHEMA.COLUMNS C WHERE C.TABLE_NAME = 'backupset' AND COLUMN_NAME = 'compressed_backup_size') 70 | SET @compressedExpr = 'BS.compressed_backup_size'; 71 | ELSE 72 | SET @compressedExpr = 'BS.backup_size'; 73 | 74 | IF EXISTS(SELECT * FROM msdb.INFORMATION_SCHEMA.COLUMNS C WHERE C.TABLE_NAME = 'backupset' AND COLUMN_NAME = 'is_copy_only') 75 | SET @copyOnly = 'BS.is_copy_only = 0'; 76 | ELSE 77 | SET @copyOnly = NULL; 78 | 79 | IF ISNULL(@considerCopyOnly,0) = 1 80 | SET @copyOnly = NULL; 81 | 82 | --> Query for collect base backup data 83 | SET @cmd = N' 84 | INSERT INTO 85 | #BackupInfo 86 | SELECT -- The DISTINCT remove duplicates generated by join 87 | BS.backup_set_id 88 | ,BS.database_name 89 | ,BS.backup_finish_date 90 | ,BS.type 91 | ,BS.backup_size 92 | ,'+@compressedExpr+' as compressedSize 93 | FROM 94 | ( 95 | SELECT 96 | * 97 | FROM 98 | msdb.dbo.backupset BS 99 | WHERE 100 | 1 = 1 101 | -- #CustomWhereFilter 102 | '+ISNULL(' AND ('+@CustomWhere+')','')+' 103 | ) BS 104 | WHERE 105 | BS.backup_finish_date >= @RefDate 106 | 107 | '+ISNULL('AND '+@copyOnly,'')+' 108 | ' 109 | -- Run Query! 110 | EXEC sp_executesql @cmd,N'@RefDate datetime',@RefDate; 111 | 112 | CREATE CLUSTERED INDEX ixCluster ON #BackupInfo(database_name,type,backup_set_id); 113 | 114 | IF OBJECT_ID('tempdb..#RawFreqAgg') IS NOT NULL 115 | DROP TABLE #RawFreqAgg; 116 | 117 | IF OBJECT_ID('tempdb..#FreqInfo') IS NOT NULL 118 | DROP TABLE #FreqInfo; 119 | 120 | --> Aqui vamos calcular o intervalo do backup... 121 | --> E vamos normalizar para um múltiplo da tolerancia... 122 | --- Ex,: Toleracnia é 3600 (1h), e backup feito( 3720)... 
é considerado como 3600 123 | SELECT 124 | BI.* 125 | ,OriginalFreq 126 | ,Freq = CASE BI.type 127 | WHEN 'D' THEN CONVERT(int,OriginalFreq/@FullTolerance) * @FullTolerance 128 | WHEN 'I' THEN CONVERT(int,OriginalFreq/@DiffTolerance)* @DiffTolerance 129 | WHEN 'L' THEN CONVERT(int,OriginalFreq/@LogTolerance)* @LogTolerance 130 | END 131 | INTO 132 | #FreqInfo 133 | FROM 134 | #BackupInfo BI 135 | --> pegar o backup anterior a este! 136 | CROSS APPLY 137 | ( 138 | SELECT TOP 1 139 | * 140 | ,OriginalFreq = DATEDIFF(SS,BIA.backup_finish_date,BI.backup_finish_date) 141 | FROM 142 | #BackupInfo BIA 143 | WHERE 144 | BIA.backup_set_id < BI.backup_set_id 145 | AND 146 | BIA.database_name = BI.database_name 147 | AND 148 | BIA.type = BI.type 149 | ORDER BY 150 | BIA.backup_set_id DESC 151 | ) BA 152 | 153 | IF OBJECT_ID('tempdb..#FreqTable') IS NOT NULL 154 | DROP TABLE #FreqTable; 155 | 156 | --. Agora vamos encontrar a frequencia mais provavel! 157 | --> Um banco pode ter passado por varias frequencias diferentes (ser feito todo dia 1h, mas em um dia anromal, ter sido feito a cada 2h). 158 | --> Vamos identificar essas repeticoes e escolher a que mais se repete! 159 | 160 | --> Primeiro, vamos calculaas frequencias distintas e sus qtds. 161 | SELECT 162 | * 163 | --> Seq é 1 para a freq mais repetida... Se 2 frequencias são iguais, entao usa que tem o maior backup set id. 164 | ,Seq = ROW_NUMBER() OVER(PARTITION BY database_name,type ORDER BY FreqCount DESC,LastBackuupId DESC) 165 | INTO 166 | #FreqTable 167 | FROM 168 | ( 169 | SELECT 170 | database_name 171 | ,type 172 | ,Freq 173 | ,Freqcount = COUNT(*) 174 | ,LastBackuupId = MAX(backup_set_id) 175 | FROM 176 | #FreqInfo 177 | GROUP BY 178 | database_name 179 | ,type 180 | ,Freq 181 | ) F 182 | 183 | CREATE UNIQUE CLUSTERED INDEX ixCluster ON #FreqTable(database_name,type,Seq); 184 | 185 | 186 | --> Agora vamos escolher as que são responsaveis por 50% do total! 187 | -- Por isso, pode aparecer mais de um! 188 | SELECT 189 | * 190 | FROM 191 | ( 192 | SELECT 193 | FT.* 194 | ,TF.TotalFreq 195 | ,PrevFrqPerc = rf.PrevFreqCount*1.0/tf.TotalFreq 196 | ,CurrentFreqPerc = ft.Freqcount*1.0/tf.TotalFreq 197 | FROM 198 | #FreqTable FT 199 | CROSS APPLY 200 | ( 201 | --> Total de frequencias para o mesmo banco/tipo 202 | SELECT 203 | TotalFreq = SUM(Freqcount) 204 | FROM 205 | #FreqTable FTB 206 | WHERE 207 | FTB.database_name = FT.database_name 208 | AND 209 | FTB.type = FT.type 210 | ) TF 211 | OUTER APPLY 212 | ( 213 | SELECT 214 | PrevFreqCount = ISNULL((SUM(Freqcount)*1.),0) 215 | FROM 216 | #FreqTable FTB 217 | WHERE 218 | FTB.database_name = FT.database_name 219 | AND 220 | FTB.type = FT.type 221 | AND 222 | FTB.Seq < FT.Seq 223 | ) RF 224 | where 225 | --> Vamos escolher somente cujo as freq anteriores seja menor que 50%, significa que té a atual, é responsável por 50% do total! 
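-- A worked example of this 50% rule (the counts are hypothetical, only to illustrate the filter below):
-- suppose one database has log-backup intervals that round to 3600s in 14 samples, 7200s in 4 and 900s in 2 (TotalFreq = 20).
-- Ordered by FreqCount, the cumulative count of the PREVIOUS rows is 0, 14 and 18 (0%, 70% and 90% of the total),
-- so only the 3600s row passes PrevFreqCount/TotalFreq < 0.5 and is reported as the likely frequency.
-- If the counts were more evenly split (say 8, 7 and 5), the first two rows would pass and both would be listed.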
226 | rf.PrevFreqCount*1.0/tf.TotalFreq < 0.5 227 | ) FF 228 | CROSS APPLY 229 | ( 230 | 231 | SELECT 232 | FreqFormatted = 'A cada: '+ISNULL(NULLIF(t.Y+'y','0y'),'') 233 | +ISNULL(NULLIF(t.Mo+'mo','0mo'),'') 234 | +ISNULL(NULLIF(t.D+'d','0d'),'') 235 | +ISNULL(NULLIF(t.H+'h','0h'),'') 236 | +ISNULL(NULLIF(t.M+'m','0m'),'') 237 | +ISNULL(NULLIF(t.S+'s','0s'),'') 238 | FROM 239 | ( 240 | SELECT 241 | CONVERT(varchar(10),(RF.RawFreq%60)) as S 242 | ,CONVERT(varchar(10),(RF.RawFreq/60)%60) as M 243 | ,CONVERT(varchar(10),(RF.RawFreq/3600)%24) as H 244 | ,CONVERT(varchar(10),(RF.RawFreq/86400)%30) as D 245 | ,CONVERT(varchar(10),(RF.RawFreq/2592000)%12) as Mo 246 | ,CONVERT(varchar(10),(RF.RawFreq/31104000)) as Y 247 | FROM 248 | ( 249 | SELECT RawFreq = FF.Freq 250 | ) RF 251 | ) t 252 | ) F 253 | order by 254 | database_name,type,seq 255 | 256 | 257 | 258 | 259 | 260 | 261 | -------------------------------------------------------------------------------- /Backups/Backup/InfoBackups_Agg.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # autor 4 | Rodrigo ribeiro gomes 5 | 6 | # Detalhes 7 | Outro script para trazer info rápida de backups dos bancos. 8 | Você vai notar (ou já deve ter notado) que existem variações desse aqui nessa pasta... 9 | 10 | 11 | 12 | */ 13 | 14 | SELECT 15 | D.name 16 | ,COUNT(CASE WHEN BS.type = 'D' AND BS.is_copy_only = 0 THEN BS.backup_set_id END) as TotalBackupsFull 17 | ,COUNT(CASE WHEN BS.type = 'I' AND BS.is_copy_only = 0 THEN BS.backup_set_id END) as TotalBackupsDiff 18 | ,COUNT(CASE WHEN BS.type = 'L' AND BS.is_copy_only = 0 THEN BS.backup_set_id END) as TotalBackupsLog 19 | ,MAX(CASE WHEN BS.type = 'D' AND BS.is_copy_only = 0 THEN BS.backup_finish_date END) as LastBackupFull 20 | ,MAX(CASE WHEN BS.type = 'I' AND BS.is_copy_only = 0 THEN BS.backup_finish_date END) as LastBackupDiff 21 | ,MAX(CASE WHEN BS.type = 'L' AND BS.is_copy_only = 0 THEN BS.backup_finish_date END) as LastBackupLog 22 | ,COUNT(CASE WHEN BS.type = 'D' AND BS.is_copy_only = 0 AND MS.is_compressed = 1 THEN BS.backup_set_id END) as TotalBackupsFullCompressed 23 | ,COUNT(CASE WHEN BS.type = 'I' AND BS.is_copy_only = 0 AND MS.is_compressed = 1 THEN BS.backup_set_id END) as TotalBackupsDiffCompressed 24 | ,COUNT(CASE WHEN BS.type = 'L' AND BS.is_copy_only = 0 AND MS.is_compressed = 1 THEN BS.backup_set_id END) as TotalBackupsLogCompressed 25 | ,SUM(CASE WHEN BS.type = 'D' AND BS.is_copy_only = 0 THEN BS.compressed_backup_size END)/1024.00/1024.00 as TamanhoBackupsFull 26 | ,SUM(CASE WHEN BS.type = 'I' AND BS.is_copy_only = 0 THEN BS.compressed_backup_size END)/1024.00/1024.00 as TamanhoBackupsFull 27 | ,SUM(CASE WHEN BS.type = 'L' AND BS.is_copy_only = 0 THEN BS.compressed_backup_size END)/1024.00/1024.00 as TamanhoBackupsFull 28 | FROM 29 | sys.databases D 30 | LEFT JOIN 31 | ( 32 | msdb.dbo.backupset BS 33 | INNER JOIN 34 | msdb.dbo.backupmediaset MS 35 | ON MS.media_set_id = BS.media_set_id 36 | AND BS.is_copy_only = 0 37 | ) 38 | ON BS.database_name = D.name 39 | GROUP BY 40 | D.name 41 | ORDER BY 42 | D.name -------------------------------------------------------------------------------- /Backups/Backup/LastBackupSize.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Author 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Lista o tamanho do último backup de cada banco (de cada tipo). 
8 | Ultimo para rápida conferencia de tamanho 9 | 10 | */ 11 | 12 | SELECT 13 | BS.database_name 14 | ,BS.type 15 | ,CompressedSize = sum(bs.compressed_backup_size/1024./1024/1024) -- em GB 16 | ,NormalSize = sum(bs.backup_size/1024./1024/1024) -- em GB 17 | FROM 18 | msdb..backupset BS 19 | WHERE 20 | bs.type IN ('D') 21 | and 22 | is_copy_only = 0 23 | AND 24 | BS.backup_set_id = ( SELECT MAX(BSi.backup_set_id) from msdb..backupset BSI where BSI.database_name = bs.database_name and bsi.type = bs.type and bsi.is_copy_only = 0 ) 25 | group by 26 | -- 2 agrupamentos = total e por banco/tipo 27 | grouping sets ( 28 | (),( 29 | BS.database_name 30 | ,BS.type) 31 | ) -------------------------------------------------------------------------------- /Backups/Backup/Timeline.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Aqui é mais um script para trazer info de backup de todos os bancos, tamanho, média, etc. 8 | Provavelmente é um dos promórdios da miha carreira. O script de volumetria é bem melhor que esse. 9 | MAs, esse é mais simples, então acho justo manter! 10 | */ 11 | 12 | 13 | DECLARE 14 | @DataInicial datetime 15 | ,@DataFinal datetime 16 | 17 | SELECT 18 | --> Inicio do ano atual 19 | @DataInicial = DATEADD(YYYY,DATEDIFF(YYYY,'19000101',CURRENT_TIMESTAMP),'19000101') 20 | --> Inicio do mes atual ( a query filtra <, o que pegara somente o mes anterior pra tras). 21 | ,@DataFinal = DATEADD(MM,DATEDIFF(MM,'19000101',CURRENT_TIMESTAMP),'19000101') 22 | 23 | SELECT 24 | BTL.MesBackup 25 | ,BTL.banco 26 | ,BTL.type 27 | ,SUM(BTL.BackupSize) TamanhoBackups 28 | ,COUNT(*) QtdBackups 29 | ,AVG(BTL.BackupSize) MediaTamanho 30 | FROM 31 | ( 32 | SELECT 33 | DATEADD(MM,DATEDIFF(MM,'19000101',BS.backup_finish_date),'19000101') as MesBackup 34 | ,BS.database_name as banco 35 | ,BS.type 36 | ,BS.compressed_backup_size/1024.00/1024.00 as BackupSize 37 | FROM 38 | msdb.dbo.backupset BS 39 | WHERE 40 | BS.backup_finish_date >= @DataInicial 41 | AND 42 | BS.backup_finish_date <= @DataFinal 43 | ) BTL 44 | GROUP BY 45 | BTL.MesBackup 46 | ,BTL.banco 47 | ,BTL.type 48 | ORDER BY 49 | BTL.banco 50 | ,BTL.MesBackup 51 | OPTION(RECOMPILE) -------------------------------------------------------------------------------- /Backups/Backup/olds/Media Tamanho Backup Semanal.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Script para ter uma visão do total e tamanho de backups da semana 8 | 9 | 10 | */ 11 | 12 | select 13 | avg(soma) as MediaSemanal 14 | from 15 | ( 16 | SELECT 17 | faixas.maiorigualque miq 18 | ,faixas.menorque mq 19 | ,sum(bs.backup_size) Soma 20 | from 21 | ( 22 | SELECT DISTINCT 23 | DATEADD(DAY,-6,cast( CONVERT(VARCHAR(10),backup_finish_date,103) AS DATETIME)) as maiorigualque 24 | ,DATEADD( DAY,1,cast( CONVERT(VARCHAR(10),backup_finish_date,103) AS DATETIME)) as menorque 25 | FROM 26 | msdb..backupset 27 | WHERE 28 | DATEPART(weekday,backup_finish_date) = 1 29 | ) as faixas 30 | inner join msdb..backupset bs on bs.backup_finish_date >= faixas.maiorigualque and bs.backup_finish_date < faixas.menorque 31 | group by 32 | faixas.maiorigualque 33 | ,faixas.menorque 34 | 35 | ) as somatoria 36 | 37 | 38 | --order by 39 | --bs.backup_finish_date 40 | --ORDER BY 41 | -- DATEADD( DAY,1,cast( CONVERT(VARCHAR(10),backup_finish_date,103) AS DATETIME)) 42 | 43 | 44 | 45 | --ORDER 
BY 46 | -- YEAR(backup_finish_date) 47 | --backup_start_date 48 | --backup_finish_date 49 | --print @@datefirst 50 | --SELECT (dw, GETDATE()) 51 | --SELECT DISTINCT backup_finish_date,DATEPART(weekday,backup_finish_date) FROM msdb..backupset order by backup_finish_date -------------------------------------------------------------------------------- /Backups/Backup/olds/Procedure Backup.temp.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/Backups/Backup/olds/Procedure Backup.temp.sql -------------------------------------------------------------------------------- /Backups/Backup/olds/RelatorioBackup_PorPeriodo.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Uma das primeiras versões que criei para obter info de backup 8 | 9 | 10 | */ 11 | 12 | SELECT 13 | B.Data 14 | ,COUNT(B.backup_set_id) 15 | ,COUNT(CASE WHEN B.type = 'D' THEN B.backup_set_id END) as QtdFull 16 | ,COUNT(CASE WHEN B.type = 'I' THEN B.backup_set_id END) as QtdDiff 17 | ,COUNT(CASE WHEN B.type = 'L' THEN B.backup_set_id END) as QtdLog 18 | ,SUM(CASE WHEN B.type = 'D' THEN B.compressed_backup_size END)/1024/1024 as TamFull 19 | ,SUM(CASE WHEN B.type = 'I' THEN B.compressed_backup_size END)/1024/1024 as TamDiff 20 | ,SUM(CASE WHEN B.type = 'L' THEN B.compressed_backup_size END)/1024/1024 as TamLog 21 | FROM 22 | ( 23 | SELECT 24 | --DATEADD(DD,DATEDIFF(DD,'19000101',backup_finish_date),'19000101') as Data 25 | CONVERT(date,backup_finish_date) as Data 26 | ,BS.* 27 | FROM 28 | msdb.dbo.backupset BS 29 | WHERE 30 | is_copy_only = 0 31 | AND 32 | BS.backup_finish_date >= '20150201' --> Ajustar a data aqui 33 | ) B 34 | GROUP BY 35 | Data 36 | ORDER BY 37 | Data 38 | 39 | -------------------------------------------------------------------------------- /Backups/Backup/olds/TamLastBackups.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Uma das primeiras versões que criei para obter o tamanho do último backup! 
8 | 9 | 10 | */ 11 | SELECT 12 | * 13 | FROM 14 | ( 15 | SELECT 16 | BS.database_name 17 | ,BS.type 18 | ,BS.backup_finish_date 19 | ,BS.compressed_backup_size/1024.00/1024.00 AS TamMB 20 | ,ROW_NUMBER() OVER(PARTITION BY BS.database_name,BS.type ORDER BY BS.backup_finish_date DESC) as LastRn 21 | FROM 22 | msdb.dbo.backupset BS 23 | INNER JOIN 24 | msdb.dbo.backupmediaset MS 25 | ON MS.media_set_id = BS.media_set_id 26 | AND BS.is_copy_only = 0 27 | ) BL 28 | WHERE 29 | BL.LastRn = 1 -------------------------------------------------------------------------------- /Backups/Backup/olds/[RECENCIA_NOV2014]sp_BackupBanco.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/Backups/Backup/olds/[RECENCIA_NOV2014]sp_BackupBanco.sql -------------------------------------------------------------------------------- /Backups/Backup/olds/backup_log_sql2000.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Um dos primeiros scrips para info de backup 8 | 9 | 10 | */ 11 | 12 | SELECT 13 | DiaBackup 14 | ,database_name 15 | ,sum(backup_size) 16 | FROM 17 | (select *,DATEADD(DD,DATEDIFF(DD,'19000101',backup_finish_date),'19000101') Diabackup from msdb.dbo.backupset) D 18 | WHERE 19 | type = 'D' 20 | and 21 | backup_finish_date >= '20150501' 22 | group by 23 | DiaBackup 24 | ,database_name 25 | order by 26 | database_name,DiaBackup desc 27 | 28 | option(maxdop 1) -------------------------------------------------------------------------------- /Backups/Backup/olds/lista de backups recentes.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/Backups/Backup/olds/lista de backups recentes.sql -------------------------------------------------------------------------------- /Backups/Backup/olds/relatorio_backup_lastbackups.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Uma das primeiras versões que criei para obter info de backup 8 | 9 | 10 | */ 11 | 12 | DECLARE 13 | @Ontem datetime 14 | ,@Hoje datetime 15 | ; 16 | SET @Ontem = CONVERT(VARCHAR(8),CURRENT_TIMESTAMP-1,112) 17 | SET @Hoje = CONVERT(VARCHAR(8),CURRENT_TIMESTAMP,112) 18 | 19 | SELECT 20 | D.name 21 | ,B.* 22 | FROM 23 | sys.databases D 24 | OUTER APPLY 25 | ( 26 | SELECT 27 | COUNT(*) as TotalBacukups 28 | ,MAX(CASE WHEN BS.type = 'D' THEN BS.backup_finish_date END) as LastBackupFull 29 | ,MAX(CASE WHEN BS.type = 'I' THEN BS.backup_finish_date END) as LastBackupDiff 30 | ,MAX(CASE WHEN BS.type = 'L' THEN BS.backup_finish_date END) as LastBackupLog 31 | ,MAX(CASE WHEN BS.Seq = 1 AND BS.type = 'L' THEN BS.backup_finish_date END) as LastBackupLogAntesHoje 32 | FROM 33 | ( 34 | SELECT 35 | * 36 | ,CASE 37 | WHEN BS.backup_finish_date < @Hoje THEN ROW_NUMBER() OVER( PARTITION BY BS.name,BS.type ORDER BY CONVERT(datetime,CONVERT(VARCHAR(8),BS.backup_finish_date,112)) DESC ) 38 | ELSE NULL 39 | END as Seq 40 | FROM 41 | msdb.dbo.backupset BS 42 | WHERE 43 | BS.database_name = D.name 44 | ) BS 45 | WHERE 46 | BS.database_name = D.name 47 | ) B 48 | OPTION(RECOMPILE) -------------------------------------------------------------------------------- 
/Backups/Backup/olds/relatorio_backup_simple.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Mais uma versão para obter info de backup e datas! 8 | 9 | 10 | */ 11 | 12 | SELECT 13 | * 14 | FROM 15 | ( 16 | SELECT 17 | * 18 | ,DATEDIFF(dd,B.LastBackupFull,CURRENT_TIMESTAMP) as DiaUltimoFull 19 | ,DATEDIFF(dd,B.LastBackupDiff,CURRENT_TIMESTAMP) as DiaUltimoDiff 20 | ,DATEDIFF(Mi,B.LastBackupLog,CURRENT_TIMESTAMP) as MinutosUltimoLog 21 | FROM 22 | ( 23 | SELECT 24 | D.name 25 | --,MAX(CASE WHEN BS.Seq = 1 THEN BMF.physical_device_name END) as Caminho 26 | ,MAX(CASE WHEN BS.type = 'D' THEN BS.backup_finish_date END) as LastBackupFull 27 | ,MAX(CASE WHEN BS.type = 'I' THEN BS.backup_finish_date END) as LastBackupDiff 28 | ,MAX(CASE WHEN BS.type = 'L' THEN BS.backup_finish_date END) as LastBackupLog 29 | FROM 30 | sys.databases D 31 | LEFT JOIN 32 | ( 33 | SELECT 34 | * 35 | --,ROW_NUMBER() OVER(PARTITION BY BS.database_name,BS.type ORDER BY BS.backup_finish_date DESC) Seq 36 | FROM 37 | msdb.dbo.backupset BS 38 | ) BS 39 | ON BS.database_name = D.name 40 | AND BS.is_copy_only = 0 41 | --LEFT JOIN 42 | --msdb.dbo.backupmediafamily BMF 43 | -- ON BMF.media_set_id = BS.media_set_id 44 | WHERE 45 | D.name NOT IN ('model','tempdb') 46 | GROUP BY 47 | D.name 48 | ) B 49 | ) F 50 | ORDER BY 51 | DiaUltimoFull DESC 52 | 53 | 54 | -------------------------------------------------------------------------------- /Backups/Backup/relatorio_backup_lastbackups.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Mais um script que devo ter criado no inicio pra trazer info dos ultimso backups de um banco 8 | */ 9 | 10 | DECLARE 11 | @Ontem datetime 12 | ,@Hoje datetime 13 | ; 14 | SET @Ontem = CONVERT(VARCHAR(8),CURRENT_TIMESTAMP-1,112) 15 | SET @Hoje = CONVERT(VARCHAR(8),CURRENT_TIMESTAMP,112) 16 | 17 | SELECT 18 | D.name 19 | ,B.* 20 | FROM 21 | sys.databases D 22 | OUTER APPLY 23 | ( 24 | SELECT 25 | COUNT(*) as TotalBacukups 26 | ,MAX(CASE WHEN BS.type = 'D' THEN BS.backup_finish_date END) as LastBackupFull 27 | ,MAX(CASE WHEN BS.type = 'I' THEN BS.backup_finish_date END) as LastBackupDiff 28 | ,MAX(CASE WHEN BS.type = 'L' THEN BS.backup_finish_date END) as LastBackupLog 29 | ,MAX(CASE WHEN BS.Seq = 1 AND BS.type = 'L' THEN BS.backup_finish_date END) as LastBackupLogAntesHoje 30 | FROM 31 | ( 32 | SELECT 33 | * 34 | ,CASE 35 | WHEN BS.backup_finish_date < @Hoje THEN ROW_NUMBER() OVER( PARTITION BY BS.name,BS.type ORDER BY CONVERT(datetime,CONVERT(VARCHAR(8),BS.backup_finish_date,112)) DESC ) 36 | ELSE NULL 37 | END as Seq 38 | FROM 39 | msdb.dbo.backupset BS 40 | WHERE 41 | BS.database_name = D.name 42 | ) BS 43 | WHERE 44 | BS.database_name = D.name 45 | ) B 46 | OPTION(RECOMPILE) -------------------------------------------------------------------------------- /Backups/Backup/sp_DoBackup.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | 8 | Eu acho que essa foi uma das primeiras procs que criei para fazer backup. 9 | Eu queria ter algo que chegasse fácil em um ambiente e caso nao tivesse backup ja colocava pra rodar. 10 | 11 | Acho que hoje ja existe solucoes muito melhores... 12 | Mas, ainda sim, por ser um script bem simples, pode ser útil para quebrar um galho por aí. 
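A minimal usage sketch (the folder and database names below are placeholders; the parameters are the ones declared in the procedure):

    EXEC sp_DoBackup
        @folder = 'D:\Backups'
        ,@database = 'MyDatabase'
        ,@backupType = 'FULL'  -- FULL, DIFF or LOG
        ,@Mode = 1             -- 1 = only PRINT the BACKUP command, 2 = execute it, 3 = print and execute

Running with @Mode = 1 first lets you review the generated BACKUP DATABASE/LOG statement before anything touches the server.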
13 | */ 14 | 15 | USE master 16 | GO 17 | 18 | IF OBJECT_ID('dbo.sp_DoBackup') IS NULL 19 | EXEC('CREATE PROCEDURE sp_DoBackup AS SELECT 0 AS StubVersion'); 20 | EXEC sp_MS_marksystemobject sp_DoBackup; 21 | GO 22 | 23 | ALTER PROCEDURE sp_DoBackup 24 | ( 25 | @folder nvarchar(1000) = NULL 26 | ,@database nvarchar(1000) = NULL 27 | ,@backupType varchar(50) = 'FULL' 28 | ,@Compression bit = 1 29 | ,@CopyOnly bit = 1 30 | ,@Mode smallint = 1 -- 1 - Print Only | 2 - Execute | 3 - Print and Execute 31 | ,@ReturnFileName bit = 0 32 | ) 33 | AS 34 | 35 | --DECLARE 36 | -- @folder nvarchar(1000) 37 | -- ,@database nvarchar(1000) 38 | -- ,@backupType varchar(50) 39 | -- ,@Compression bit 40 | -- ,@CopyOnly bit 41 | -- ,@Mode smallint 42 | 43 | --SET @folder = '\\10.1.114.30\sql\Import'; 44 | --SET @database = 'msdb'; 45 | --SET @Mode = 1; 46 | --SET @Compression = 1; 47 | 48 | ------------- 49 | 50 | -- Validating @Database 51 | IF @database IS NULL 52 | SET @database = DB_NAME() 53 | 54 | -- Validating @folder 55 | IF @folder IS NULL 56 | SET @folder = ''; 57 | 58 | IF RIGHT(@folder,1) NOT IN ('\','/') 59 | SET @folder = @folder + '\'; 60 | 61 | IF RIGHT(@folder,1) NOT IN ('\','/') 62 | SET @folder = @folder + '\'; 63 | 64 | -- Validating @BackupType 65 | IF @backupType IS NULL 66 | SET @backupType = 'FULL'; 67 | 68 | IF @backupType NOT IN ('FULL','LOG','DIFF') 69 | BEGIN 70 | RAISERROR('Invalid backup type: %s',16,1,@backupType); 71 | return; 72 | END 73 | 74 | DECLARE 75 | @cmdBackup nvarchar(4000) 76 | ,@serverName nvarchar(200) 77 | ,@timestamp varchar(20) 78 | ,@finalFilename varchar(600) 79 | ,@tsql_compression varchar(200) 80 | ,@tsql_backupType varchar(20) 81 | ,@tsql_DiffWith nvarchar(100) 82 | ,@tsql_CopyOnly nvarchar(100) 83 | ,@FileExtension varchar(15) 84 | ; 85 | 86 | -- Collecting auxiliary informations 87 | SET @serverName = REPLACE(@@SERVERNAME,'\','-'); 88 | SET @timestamp = REPLACE(REPLACE(REPLACE(REPLACE(CONVERT(varchar(23),CURRENT_TIMESTAMP,121),'-',''),':',''),' ',''),'.','') 89 | 90 | -- Determining features... 91 | IF @Compression = 1 92 | SELECT @tsql_compression = 'COMPRESSION' FROM msdb.INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'backupset' AND COLUMN_NAME = 'compressed_backup_size' 93 | -- Determining features... 
94 | IF @CopyOnly = 1 95 | SELECT @tsql_CopyOnly = 'COPY_ONLY' FROM msdb.INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'backupset' AND COLUMN_NAME = 'is_copy_only' 96 | 97 | -- Determining extensions 98 | SET @FileExtension = CASE 99 | WHEN @backupType = 'LOG' THEN 'trn' 100 | ELSE 'bak' 101 | END 102 | 103 | -- Determing backup type tsql 104 | SET @tsql_backupType = CASE WHEN @backupType IN ('FULL','DIFF') THEN 'DATABASE' ELSE 'LOG' END; 105 | SET @tsql_DiffWith = CASE WHEN @backupType IN ('DIFF') THEN 'DIFFERENTIAL' ELSE NULL END; 106 | SET @finalFilename = @folder+@serverName+'_'+@database+'_'+@backupType+'_'+@timestamp+'.'+@FileExtension 107 | 108 | SET @cmdBackup = ' 109 | BACKUP '+@tsql_backupType+' 110 | ['+@database+'] 111 | TO DISK = '''+@finalFilename+''' 112 | WITH 113 | STATS = 10 114 | '+ISNULL(','+@tsql_compression,'')+' 115 | '+ISNULL(','+@tsql_DiffWith,'')+' 116 | '+ISNULL(','+@tsql_CopyOnly,'')+' 117 | '; 118 | 119 | IF @Mode in (1,3) 120 | PRINT @cmdBackup 121 | 122 | IF @Mode in (2,3) 123 | EXEC(@cmdBackup) 124 | 125 | IF @ReturnFileName = 1 126 | SELECT @finalFilename as backupfile -------------------------------------------------------------------------------- /Backups/Backup/valid_backup_schedule.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Rapaz... esse é um script que nunca terminei (ou devo ter feito algum outro). 8 | O objetivo desse era responder: Meus backups estão sendo feitos no scheduel esperado? 9 | 10 | PRa fazer isso, eu iria ter que consultar a sysschedules, e fazer uns cálculos... 11 | Nao lembro realmente se desistir no meio do caminhjopela complexidade ou comecei algium outro... 12 | 13 | Mas deixei aqui, para ou alguém pegar e terminar ou eu mesmo, no futuro, resolver andar com isso! 14 | 15 | A maior diversão desse script é brincar com a msdb..sysschedules, que é onde o sql guarda as info dos schedules dos sql agent 16 | */ 17 | 18 | 19 | DECLARE @testDate datetime 20 | SET @testDate =CURRENT_TIMESTAMP; 21 | 22 | SET DATEFIRST 7; -- Sunday is the first day of month... 23 | 24 | DECLARE @WeekDaysMappings TABLE(weekDay smallint, freqIntervalNumber int); 25 | INSERT INTO @WeekDaysMappings VALUES(1,1); 26 | INSERT INTO @WeekDaysMappings VALUES(2,2); 27 | INSERT INTO @WeekDaysMappings VALUES(3,4); 28 | INSERT INTO @WeekDaysMappings VALUES(4,8); 29 | INSERT INTO @WeekDaysMappings VALUES(5,16); 30 | INSERT INTO @WeekDaysMappings VALUES(6,32); 31 | INSERT INTO @WeekDaysMappings VALUES(7,64); 32 | 33 | SELECT 34 | S.name 35 | ,@testDate [testdate] 36 | ,CONVERT(datetime,CONVERT(varchar(10),S.active_start_date)) as StartDate 37 | ,S.freq_type 38 | ,S.freq_interval 39 | ,S.freq_recurrence_factor 40 | 41 | ,-- day is elegible? 
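-- A quick worked example of the weekly bitmask used below (weekday values documented for msdb.dbo.sysschedules):
-- Sunday = 1, Monday = 2, Tuesday = 4, Wednesday = 8, Thursday = 16, Friday = 32, Saturday = 64.
-- A weekly schedule that runs on Monday, Wednesday and Friday stores freq_interval = 2 + 8 + 32 = 42,
-- so the test WDM.freqIntervalNumber & S.freq_interval = WDM.freqIntervalNumber matches exactly those three weekdays.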
42 | ( 43 | SELECT 44 | ISNULl((SELECT 'BYDAILY' WHERE DATEDIFF(DAY,convert(VARCHAR(10),S.active_start_date),@testDate)%NULLIF(S.freq_interval,0) = 0),'') 45 | + 46 | ISNULL((SELECT 'BYWEEKDAY' FROM @WeekDaysMappings WDM WHERE WDM.weekDay = DATEPART(WEEKDAY,@testDate) AND WDM.freqIntervalNumber & S.freq_interval = WDM.freqIntervalNumber 47 | AND DATEDIFF(WEEK,convert(VARCHAR(10),S.active_start_date),@testDate)%NULLIF(S.freq_recurrence_factor,0) = 0 48 | ),'') 49 | ) AS DayEligibleReason 50 | ,DATEDIFF(WEEK,convert(VARCHAR(10),S.active_start_date),@testDate) PasssedTime 51 | ,DATEADD(WEEK,S.freq_recurrence_factor,convert(VARCHAR(10),S.active_start_date)) 52 | FROM 53 | msdb..sysschedules S 54 | 55 | /* 56 | 64 1 57 | 0 0 0 0 0 0 0 58 | */ 59 | 60 | select dateadd(week,20,'20150824') -------------------------------------------------------------------------------- /Backups/Restore/Data_Ultimo_Restore.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Script simple para obter o ultimo restore de cada banco registrado na tabela de restores 8 | 9 | 10 | */ 11 | 12 | USE MSDB 13 | GO 14 | 15 | SELECT 16 | RH.DESTINATION_DATABASE_NAME as Banco 17 | ,rh.restore_type as Tipo 18 | ,MAX(rh.restore_date) as Data 19 | FROM 20 | msdb..restorehistory rh 21 | WHERE 22 | rh.restore_type = 'D' 23 | GROUP BY 24 | RH.DESTINATION_DATABASE_NAME 25 | ,rh.restore_type 26 | HAVING 27 | MAX(rh.restore_date) < DATEADD(d,-1,getDate()) 28 | ORDER BY 29 | Banco 30 | ,Tipo -------------------------------------------------------------------------------- /Backups/Restore/InfoUltimoRestoreEx.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Informações de data de restore 8 | 9 | 10 | */ 11 | SELECT 12 | * 13 | FROM 14 | ( 15 | SELECT DISTINCT 16 | RH.destination_database_name as name 17 | FROM 18 | msdb..restorehistory RH 19 | ) D 20 | OUTER APPLY 21 | ( 22 | SELECT TOP 1 23 | BS.backup_finish_date as DataDosDados 24 | ,RH.restore_date as DataExecucaoRestore 25 | FROM 26 | msdb..restorehistory RH 27 | INNER JOIN 28 | msdb..backupset BS 29 | ON BS.backup_set_id = RH.backup_set_id 30 | WHERE 31 | RH.destination_database_name = D.name 32 | ORDER BY 33 | RH.restore_date DESC 34 | ) RH 35 | --WHERE 36 | -- D.NAME in ('master') --> nome dos bancos 37 | 38 | -------------------------------------------------------------------------------- /Backups/Restore/LastRestores.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/Backups/Restore/LastRestores.sql -------------------------------------------------------------------------------- /Backups/Restore/MasterManual/CreateLogins.sql: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Script para recuperar informações dos logins a partir de um banco master restautado. 8 | PAra usar, você deve obter o backup do banco do master e restautar com o nome OldMaster (se mudar, ajuste no script). 9 | Exemplo: 10 | restore database OldMaster from disk = 'CaminhoBackup' 11 | with 12 | move 'master' to 'CaminhoRestore\OldMaster.mdf' 13 | ,move 'mastlog' to 'CaminhoRestore\OldMaster.ldf' 14 | 15 | 16 | Conecte como DAC e rode esse script. 
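For example, one simple way to open a DAC session is sqlcmd with the -A switch (or the ADMIN: prefix in SSMS);
the server name and authentication below are placeholders, adjust them to your environment:

    sqlcmd -A -S MyServer -E -d master

The DAC matters here because sys.sysxlgns and the other base tables queried below are only visible from the dedicated admin connection.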
17 | Esse scriopt é bem útil em cenários de recuperação, onde você, por algum motivo, precisou remontar o servidor e não pode restauar a master. 18 | Com esse script, você tem mais chance de recuperar os logins SQL junsto com as senhas e configurações. 19 | Já me ajudou bem a minimizar o impacto e manter as apps fincionando com a mesma senha, mesmo quando ngm sabia. 20 | */ 21 | 22 | 23 | USE OldMaster 24 | go 25 | 26 | SELECT 27 | * 28 | ,'CREATE LOGIN '+QUOTENAME(name) 29 | +CASE 30 | WHEN type in ('G','U') THEN ' FROM WINDOWS ' 31 | WHEN type = 'S' THEN 32 | +' WITH PASSWORD = '+CONVERT(varchar(max),pwdhash,1)+' HASHED, SID = '+convert(VARCHAR(MAX),p.sid,1)+' ' 33 | +',CHECK_POLICY = '+CheckPolicy 34 | +',CHECK_EXPIRATION = '+CheckExpiration 35 | +isnull(',CREDENTIAL = '+CredentialName,'') 36 | END 37 | +',DEFAULT_DATABASE = '+quotename(dbname) 38 | +',DEFAULT_LANGUAGE = '+lang 39 | 40 | +CASE WHEN IsDisabled = 1 THEN ';ALTER LOGIN '+quotename(name)+' DISABLE' ELSE '' END 41 | FROM 42 | ( 43 | select 44 | p.name 45 | ,dbname 46 | ,pwdhash 47 | ,sid 48 | ,lang 49 | ,CheckPolicy = CASE WHEN convert(bit, p.status & 0x10000) = 1 THEN 'ON' ELSE 'OFF' END 50 | ,CheckExpiration = CASE WHEN convert(bit, p.status & 0x20000) = 1 THEN 'ON' ELSE 'OFF' END 51 | ,IsDisabled = convert(bit, p.status & 0x80) 52 | ,CredentialName = co.name 53 | ,p.type 54 | from 55 | sys.sysxlgns p 56 | LEFT JOIN 57 | sys.syssingleobjrefs r ON r.depid = p.id AND r.class = 63 AND r.depsubid = 0 58 | LEFT JOIN 59 | sys.sysclsobjs co on co.id = r.indepid and co.class = 57and co.type='' 60 | WHERE 61 | p.type IN ('U','G','S') 62 | AND NOT EXISTS ( 63 | SELECT * FROM sys.server_principals SP WHERE SP.name = p.name 64 | ) 65 | ) P 66 | 67 | 68 | -------------------------------------------------------------------------------- /Backups/Restore/RestoreAvancado.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/Backups/Restore/RestoreAvancado.sql -------------------------------------------------------------------------------- /Backups/Restore/RestoreFrequency.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Traz informações da frequência de restore de um banco 8 | 9 | 10 | */ 11 | 12 | SELECT 13 | d.NAME 14 | ,RF.AvgRestorePeerWeek 15 | FROM 16 | sys.databases D 17 | LEFT JOIN 18 | ( 19 | SELECT 20 | RG1.DatabaseName 21 | ,RestoreCount 22 | ,AvgRestorePeerWeek = ISNULL(RestoreCount/NULLIF(ElapsedTime,0),0) 23 | ,LastRestore 24 | ,FirstRestore 25 | FROM 26 | ( 27 | SELECT 28 | RR.DatabaseName 29 | --,RR.StartWeek 30 | ,RestoreCount = COUNT(*) 31 | ,LastRestore = MAX(RR.RestoreDate) 32 | ,FirstRestore = MIN(RR.RestoreDate) 33 | ,ElapsedTime = DATEDIFF(WK,MIN(RR.RestoreDate),CURRENT_TIMESTAMP) 34 | FROM 35 | ( 36 | SELECT 37 | DatabaseName = RH.destination_database_name 38 | ,RestoreDate = RH.restore_date 39 | ,StartWeek = DATEADd(WK,DATEDIFF(WK,0,RH.restore_date),0)-1 40 | FROM 41 | msdb..restorehistory RH 42 | WHERE 43 | RH.restore_type = 'D' 44 | AND 45 | RH.restore_date >= '20160101' 46 | ) RR 47 | GROUP BY 48 | RR.DatabaseName 49 | --,RR.StartWeek 50 | ) RG1 51 | ) RF 52 | ON RF.DatabaseName = D.name 53 | WHERE 54 | D.database_id > 4 55 | AND 56 | ISNULL(RF.AvgRestorePeerWeek,0) = 0 -------------------------------------------------------------------------------- 
/Backups/Restore/RestoreFrequencyWithSizings.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Traz informações da frequência de restoe de um banco 8 | 9 | 10 | */ 11 | 12 | IF OBJECT_ID('tempdb..#TamanhoBancos') IS NOT NULL 13 | DROP TABLE #TamanhoBancos; 14 | 15 | CREATE TABLE 16 | #TamanhoBancos( Banco sysname, TamanhoTotalPag int, TamanhoUsadoPag int ); 17 | 18 | EXEC sp_MSforeachdb ' 19 | USE [?]; 20 | 21 | INSERT INTO #TamanhoBancos 22 | SELECT 23 | db_name() 24 | ,SUM(size) 25 | ,SUM(FILEPROPERTY(name,''SpaceUsed'')) 26 | FROM 27 | sys.database_files 28 | ' 29 | 30 | SELECT 31 | d.NAME 32 | ,RF.AvgRestorePeerWeek 33 | ,TB.TamanhoUsadoPag/128.00 34 | FROM 35 | sys.databases D 36 | JOIN 37 | #TamanhoBancos TB 38 | ON TB.Banco = D.name 39 | LEFT JOIN 40 | ( 41 | SELECT 42 | RG1.DatabaseName 43 | ,RestoreCount 44 | ,AvgRestorePeerWeek = ISNULL(RestoreCount/NULLIF(ElapsedTime,0),0) 45 | ,LastRestore 46 | ,FirstRestore 47 | FROM 48 | ( 49 | SELECT 50 | RR.DatabaseName 51 | --,RR.StartWeek 52 | ,RestoreCount = COUNT(*) 53 | ,LastRestore = MAX(RR.RestoreDate) 54 | ,FirstRestore = MIN(RR.RestoreDate) 55 | ,ElapsedTime = DATEDIFF(WK,MIN(RR.RestoreDate),CURRENT_TIMESTAMP) 56 | FROM 57 | ( 58 | SELECT 59 | DatabaseName = RH.destination_database_name 60 | ,RestoreDate = RH.restore_date 61 | ,StartWeek = DATEADd(WK,DATEDIFF(WK,0,RH.restore_date),0)-1 62 | FROM 63 | msdb..restorehistory RH 64 | WHERE 65 | RH.restore_type = 'D' 66 | AND 67 | RH.restore_date >= '20160101' 68 | ) RR 69 | GROUP BY 70 | RR.DatabaseName 71 | --,RR.StartWeek 72 | ) RG1 73 | ) RF 74 | ON RF.DatabaseName = D.name 75 | WHERE 76 | D.database_id > 4 77 | AND 78 | ISNULL(RF.AvgRestorePeerWeek,0) = 0 -------------------------------------------------------------------------------- /Backups/Restore/RestoreReport.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Informações de restore de um banco específico ou com algum outro critério de filtros. 8 | 9 | 10 | */ 11 | 12 | SELECT 13 | rh.destination_database_name as DatabaseName 14 | ,RH.restore_date as DataExecucaoRestore 15 | ,Bs.backup_finish_date as DataBackup 16 | FROM 17 | msdb..restorehistory RH JOIN msdb..backupset BS ON BS.backup_set_id = RH.backup_set_id 18 | WHERE 19 | --> Filtros para refinar 20 | 21 | rh.destination_database_name = 'master' --> Nome do Banco 22 | --AND 23 | --RH.user_name = 'UserRestore' 24 | ORDER BY 25 | RH.restore_date DESC -------------------------------------------------------------------------------- /Backups/Restore/RestoreSequence.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Retorna a sequência de RESTORE para um banco! 8 | Útil para rapidamente montar os comandos de restore! 9 | Pode te ajudar a ganhar tempo para executar um restore de emergência! 10 | Informe o banco e o full máximo, e a partir disso, o script descobre os difs e logs para serem usados! 11 | 12 | */ 13 | -- Preencha essa tabela com os nomes dos bancos que queria! 14 | -- Deixei uma temp table para facilitar popular com algum script se você precisar! 
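-- For example (the LIKE pattern is just an illustration, adjust it to your naming convention), instead of the
-- single-name SELECT further down you could populate it straight from sys.databases:
--   SELECT name INTO #BasesRestore FROM sys.databases WHERE name LIKE 'App%';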
15 | IF OBJECT_ID('tempdb..#BasesRestore') IS NOT NULL 16 | DROP TABLE #BasesRestore; 17 | 18 | 19 | 20 | -->>>> PARÂMETROS DO SCRIPT 21 | -- Basta popular a tabela #BasesRestore(coluna name) com o nome dos bancos que quer. 1 banco por linha. 22 | 23 | SELECT 'NomeBanco' name INTO #BasesRestore; 24 | 25 | DECLARE 26 | @BackupPath nvarchar(max) = 'C:\Origem' --> diretorio onde os backupa estarão 27 | ,@ReplacePath nvarchar(max) = 'C:\Destino' --> diretorio original 28 | ,@MaxFullDate datetime = GETDATE() --> Usar um full com, no máximo, essa data. A partir disso, pegará os logs digs! 29 | -- Use isso para que o script limite qual full vai usar, permitindo você escolher um full anterior ao mais recente! 30 | 31 | 32 | 33 | ---- DAQUI PRA FRENTE NÃO PRECISA MAIS ALTERAR --- 34 | -- Se a query ficar muito lenta, esse índice me ajudou... 35 | -- Mas, crie por sua própria conta e risco, visto que não é recomendando mexer em tabelas mantidas pela microsoft!!!! 36 | -- --create index ixtemp on backupset(database_name,type,is_copy_only) with(data_compression = page) 37 | 38 | 39 | 40 | USE msdb; 41 | 42 | SELECT 43 | DatabaseName = R.name 44 | ,LastFull = b.backup_finish_date 45 | ,b.FullSizeGB 46 | ,LastDiff = d.backup_finish_date 47 | ,d.DiffSizeGB 48 | ,LastLog.* 49 | ,FullRestoreSql = 'RESTORE DATABASE '+quotename(r.name)+' from disk = '''+b.FullRestorePath+''' WITH NORECOVERY,stats = 10' 50 | ,FullRestoreDiff = 'RESTORE DATABASE '+quotename(r.name)+' from disk = '''+d.DiffRestorePath+''' WITH NORECOVERY,stats = 10' 51 | ,LogsRestore = L.restoreslog 52 | FROM 53 | #BasesRestore R 54 | CROSS APPLY ( 55 | select top 1 bs.backup_finish_date,backup_set_id 56 | ,FullRestorePath = REpLACE(f.physical_device_name,@ReplacePath,@BackupPath) 57 | ,FullSizeGB = bs.compressed_backup_size/1024/1024/1024 58 | From backupset bs 59 | join backupmediafamily f 60 | on f.media_set_id = bs.media_set_id 61 | 62 | where type = 'D' and database_name = R.name 63 | and is_copy_only = 0 64 | and backup_finish_date < ISNULL(@MaxFullDate,GETDATE()) 65 | order by backup_set_id desc 66 | ) b 67 | OUTER APPLY ( 68 | select top 1 bs.backup_finish_date,backup_set_id 69 | ,DiffRestorePath = REpLACE(f.physical_device_name,@ReplacePath,@BackupPath) 70 | ,DiffSizeGB = bs.compressed_backup_size/1024/1024/1024 71 | From backupset bs 72 | join backupmediafamily f 73 | on f.media_set_id = bs.media_set_id 74 | 75 | where type = 'I' and database_name = R.name 76 | and backup_finish_date > b.backup_finish_date 77 | and is_copy_only = 0 78 | order by backup_set_id desc 79 | ) d 80 | outer APPLY ( 81 | select 82 | [data()] = 'RESTORE LOG '+quotename(r.name)+' from disk = '''+LogRestorePath+''' WITH NORECOVERY,stats = 10'+CHAR(13)+CHAR(10) 83 | from ( 84 | select bs.backup_finish_date 85 | ,LogRestorePath = REpLACE(f.physical_device_name,@ReplacePath,@BackupPath) 86 | From backupset bs 87 | join backupmediafamily f 88 | on f.media_set_id = bs.media_set_id 89 | 90 | where type = 'L' and database_name = R.name 91 | and is_copy_only = 0 92 | AND backup_finish_date > isnull(d.backup_finish_date,b.backup_finish_date) 93 | ) rl 94 | order by backup_finish_date 95 | FOR XML PATH(''),type 96 | ) l(restoreslog) 97 | outer APPLY ( 98 | select 99 | FirstLog = min(bs.backup_finish_date) 100 | ,LastLog = max(bs.backup_finish_date) 101 | ,TotalLogs = count(*) 102 | ,TotalLogSizeGB = sum(compressed_backup_size/1024/1024/1024) 103 | From backupset bs 104 | join backupmediafamily f 105 | on f.media_set_id = bs.media_set_id 106 | where type = 'L' and 
database_name = R.name 107 | and is_copy_only = 0 108 | AND backup_finish_date > isnull(d.backup_finish_date,b.backup_finish_date) 109 | ) LastLog 110 | 111 | -------------------------------------------------------------------------------- /Backups/Restore/prcRestauraBanco.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/Backups/Restore/prcRestauraBanco.sql -------------------------------------------------------------------------------- /CDC/ValidarCdcRoleMembers.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | 8 | Este script checa se alguns usuários estão nos roles do CDC (Change Data Capture). 9 | Para ter acesso a algumas tabelas do CDC, usuários que não sysadmin, ou db_owner, preciam estar em roles específicas 10 | 11 | ESte script vai em todos os bancos d euma instância que tem CDC habiltiado, e checa se os logins especificados são membros diretos dessas roles. 12 | 13 | Caso não sejam, ele gera um script com sqlcmd para inclusão. 14 | eu não lembro porque não gerei apenas o comando direto para rodar no SSMS. Devia ter algum motivo na épica. 15 | */ 16 | 17 | IF OBJECT_ID('tempdb..#RolesCdc') IS NOT NULL 18 | DROP TABLE #RolesCdc; 19 | 20 | CREATE TABLE #RolesCdc(Banco sysname, Login sysname, Usuario sysname,RoleName sysname, IsMembro bit) 21 | 22 | IF OBJECT_ID('tempdb..#UsuariosCheck') IS NOT NULL 23 | DROP TABLE #UsuariosCheck; 24 | 25 | CREATE TABLE #UsuariosCheck(NomeLogin sysname); 26 | INSERT INTO #UsuariosCheck VALUES('User1'); --> Ajustar esse INSERT para os usuário que queria validar! Colocar o nome do LOGIN. 
27 | INSERT INTO #UsuariosCheck VALUES('User2'); 28 | 29 | EXEC sp_MSforeachdb ' 30 | 31 | USE [?]; 32 | 33 | IF NOT EXISTS( SELECT * FROM sys.databases WHERE database_id = DB_ID() AND is_cdc_enabled = 1 ) 34 | RETURN; 35 | 36 | DECLARE 37 | @RoleCdc sysname 38 | 39 | 40 | INSERT INTO 41 | #RolesCdc 42 | SELECT 43 | DB_NAME() 44 | ,L.NomeLogin 45 | ,U.name 46 | ,RCDC.role_name 47 | ,CASE WHEN RM.role_principal_id IS NULL THEN 0 ELSE 1 END as Existe 48 | FROM 49 | #UsuariosCheck L 50 | INNER JOIN 51 | sys.database_principals U 52 | ON U.sid = SUSER_SID(L.NomeLogin) 53 | CROSS JOIN 54 | ( 55 | select distinct 56 | role_name 57 | FROM 58 | cdc.change_tables 59 | ) RCDC 60 | LEFT JOIN 61 | sys.database_role_members RM 62 | ON RM.member_principal_id = U.principal_id 63 | AND rm.role_principal_id = USER_ID(RCDC.role_name) 64 | 65 | ' 66 | 67 | 68 | SELECT 69 | Banco 70 | ,Login 71 | ,'sqlcmd -S '+@@servername+' -d '+Banco+' -Q "SET NOCOUNT OFF;EXEC sp_addrolemember ''cdc_read'','''+Login+'''" ' 72 | FROM 73 | #RolesCdc 74 | where 75 | IsMembro = 0 -------------------------------------------------------------------------------- /CLR/Lab.cs: -------------------------------------------------------------------------------- 1 | using Microsoft.SqlServer.Server; 2 | using System.Data.SqlClient; 3 | using System.Threading; 4 | using System.Data; 5 | using System.Data.SqlTypes; 6 | 7 | 8 | public class SqlLab { 9 | 10 | public static int Sleep(int ms, int rnd){ 11 | Thread.Sleep(ms); 12 | return ms; 13 | } 14 | 15 | 16 | [SqlFunction(DataAccess = DataAccessKind.Read)] 17 | public static int SelectTable(string sql){ 18 | 19 | SqlDataReader r; 20 | DataSet ds = new DataSet(); 21 | 22 | using (SqlConnection c = new SqlConnection("context connection=true")) 23 | { 24 | c.Open(); 25 | SqlCommand cmd = new SqlCommand(sql, c); 26 | 27 | r = cmd.ExecuteReader(); 28 | 29 | while(!r.IsClosed) 30 | ds.Tables.Add().Load(r); 31 | 32 | return (int)ds.Tables[0].Rows[0].ItemArray[0]; 33 | } 34 | } 35 | 36 | [SqlProcedure] 37 | public static void ResultAsXml(string sql,out SqlString result){ 38 | 39 | SqlDataReader r; 40 | DataSet ds = new DataSet(); 41 | 42 | using (SqlConnection c = new SqlConnection("context connection=true")) 43 | { 44 | c.Open(); 45 | SqlCommand cmd = new SqlCommand(sql, c); 46 | 47 | r = cmd.ExecuteReader(); 48 | 49 | while(!r.IsClosed) 50 | ds.Tables.Add().Load(r); 51 | } 52 | 53 | 54 | DataTable t = ds.Tables[0]; 55 | System.IO.StringWriter writer = new System.IO.StringWriter(); 56 | 57 | t.WriteXml(writer, XmlWriteMode.IgnoreSchema, false); 58 | result = writer.ToString(); 59 | } 60 | 61 | } -------------------------------------------------------------------------------- /CLR/LoadCSC.ps1: -------------------------------------------------------------------------------- 1 | 2 | <# 3 | .SYNOPSIS 4 | Get availble list of csc compilers! 5 | #> 6 | function Get-CSCompiler { 7 | [CmdLetBinding()] 8 | param() 9 | 10 | $NETVersions = $Env:SystemRoot + '\Microsoft.NET\Framework' 11 | 12 | gci $NETVersions | ? 
{$_.Name -like 'v*' -and (Test-Path ($_.FullName + '\csc.exe'))} | %{ 13 | 14 | 15 | [void]($_ | Add-Member -Type Noteproperty -name IsCurrent -Value $false -Force); 16 | 17 | if( (Get-CSCompilerCurrentPath) -eq $_.FullName){ 18 | $_.IsCurrent = $true; 19 | } 20 | 21 | return $_; 22 | } | select Name,IsCurrent,FullName 23 | } 24 | 25 | <# 26 | .SYNOPSIS 27 | Get current CSC path 28 | #> 29 | function Get-CSCompilerCurrentPath { 30 | [CmdLetBinding()] 31 | param() 32 | return $Global:CsCompilerPath; 33 | } 34 | 35 | <# 36 | .SYNOPSIS 37 | Set current csc compiler! 38 | #> 39 | function Set-CSCompiler { 40 | [CmdLetBinding()] 41 | param($version = '*', [switch]$Last = $False) 42 | 43 | $Elegible = @(Get-CSCompiler | ? { $_.Name -like $version+'*' }); 44 | 45 | if(!$Last -and $Elegible.count -gt 1){ 46 | throw "$Version matches more that one version: $($Elegible | %{$_.Name})" 47 | } 48 | 49 | 50 | 51 | if($Last){ 52 | $Global:CscompilerPath = $Elegible[-1].FullName; 53 | } else { 54 | $Global:CscompilerPath = $Elegible[0].FullName; 55 | } 56 | 57 | } 58 | 59 | Set-CSCompiler -Last; 60 | #Get the path to the c# compiler and build path to it... 61 | $csc = Get-CSCompilerCurrentPath 62 | if($csc){ 63 | Set-Alias -Name csc -Value "$csc\csc.exe" -Scope 1; 64 | write-host "Try invoke csc and start compiling =)" 65 | } 66 | 67 | 68 | 69 | -------------------------------------------------------------------------------- /CLR/SqlLabClr.create.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | 8 | Esse script foi criado como um pequeno Lab para aprender a usar CLR. 9 | O Dirceu Resende é o único doido que conheço que usa isso. 10 | #Brinks #Dirceu ❤️ (Dirceu é o mito do SQL... Se você não conhece o blog dele, acesse em https://dirceuresende.com) 11 | 12 | 13 | Eu tenho uma preguiça de instalar o Visual Studio so pra compilar CLR rs. 14 | Então, eu criei um powershell que me ajuda niso.. Ele usa um compilador nativo de C# que vem no Windows... 15 | Recentemente, eu descobri que esse compilador é meio antiguinho... Então, pode não surportar as sintaxes mais recente do c#... 16 | 17 | Mas pra um CLR básico, quebra um galho, ainda mais se você quer aprender... 18 | Então, o jeito mais fácil de usar tudo isso aqui é assim: 19 | 20 | 1) Abre um powershell 21 | 2) Rode o arquivo compile.ps1 22 | CLR\compile.ps1 23 | 3) ele vai gerar o mesmo cnoteúdo desse arquivo, porém com a dll compilada e com o caminho completo! 24 | 4) Se você quiser alterar o CLR, mexa no arquivo Lab.cs e execute o passo 2 novamente. 25 | 26 | OBS: Eu criei a função Sleep para explorar o internals de funcionamento do plano de execução. Um dia eu mostro isso! 
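One extra note that is not part of the original steps, just a reminder that may or may not apply to your instance:
CLR has to be enabled (sp_configure 'clr enabled', 1; RECONFIGURE;) and, on SQL Server 2017+, the
'clr strict security' option can block an unsigned lab assembly like this one unless you relax it or trust the dll.
After the objects are created, a quick smoke test is something like SELECT dbo.SleepRow(1000, 0); which should take
about one second to return.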
27 | 28 | */ 29 | DROP FUNCTION if exists ResultAsXml,SleepRow 30 | GO 31 | 32 | DROP ASSEMBLY IF EXISTS SqlLabClr 33 | GO 34 | 35 | CREATE ASSEMBLY SqlLabClr FROM 'SqlLabClr.dll'; 36 | GO 37 | 38 | DROP FUNCTION IF EXISTS SleepRow; 39 | GO 40 | 41 | CREATE FUNCTION SleepRow(@ms int, @random int) RETURNS INT 42 | AS EXTERNAL NAME SqlLabClr.SqlLab.Sleep; 43 | GO 44 | 45 | 46 | DROP FUNCTION IF EXISTS SelectTable; 47 | GO 48 | 49 | CREATE FUNCTION SelectTable(@TableName varchar(500)) RETURNS INT 50 | AS EXTERNAL NAME SqlLabClr.SqlLab.SelectTable; 51 | GO 52 | 53 | CREATE FUNCTION ResultAsXml(@sql nvarchar(max)) RETURNS nvarchar(max) 54 | AS EXTERNAL NAME SqlLabClr.SqlLab.ResultAsXml; 55 | GO 56 | 57 | 58 | -------------------------------------------------------------------------------- /CLR/SqlLabClr.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/CLR/SqlLabClr.dll -------------------------------------------------------------------------------- /CLR/compile.ps1: -------------------------------------------------------------------------------- 1 | $ErrorActionPreference = "Stop"; 2 | 3 | push-location 4 | 5 | cd $PsScriptRoot; 6 | 7 | try { 8 | 9 | .\LoadCSC.ps1 10 | 11 | write-host "Compilando Lab.cs..." 12 | csc -nologo -out:SqlLabClr.dll -target:library Lab.cs 13 | 14 | $FilePath = @(Get-Item "$PsScriptRoot\SqlLabClr.dll").FullName 15 | 16 | $CreateAssembly = Get-Content .\SqlLabClr.create.sql -Raw 17 | 18 | write-host "==== RODE O CÓDIGO ABAIXO NO SEU SQL ====" 19 | 20 | $CreateAssembly = $CreateAssembly.replace('SqlLabClr.dll',$FilePath ) 21 | 22 | write-host $CreateAssembly 23 | } finally { 24 | pop-location 25 | } -------------------------------------------------------------------------------- /CMS/CMSProperties/DynamicPivotProperties.sql: -------------------------------------------------------------------------------- 1 | DECLARE 2 | @PropNames varchar(200) 3 | ,@tsql nvarchar(max) 4 | ,@FilterExpression nvarchar(4000) 5 | 6 | IF OBJECT_ID('tempdb..#PropertiesPivoted') IS NOT NULL 7 | DROP TABLE #PropertiesPivoted; 8 | 9 | 10 | SELECT 11 | @PropNames = ISNULL(@PropNames + ',','') + QUOTENAME(P.propName) 12 | FROM 13 | cmsprops.CMSProperties P 14 | 15 | IF @FilterExpression IS NULL 16 | SET @FilterExpression = '1 = 1'; 17 | 18 | 19 | SET @tsql = N' 20 | SELECT 21 | P.* 22 | FROM 23 | ( 24 | SELECT 25 | * 26 | FROM 27 | cmsprops.cpFullInstanceProperties FIP 28 | ) F 29 | PIVOT 30 | ( 31 | MAX(F.propValue) FOR F.propName IN ('+@PropNames+') 32 | ) P 33 | WHERE 34 | ( 35 | '+REPLACE(@FilterExpression,'"','''')+' 36 | ) 37 | ' 38 | 39 | EXEC sp_Executesql @tsql; -------------------------------------------------------------------------------- /CMS/CMSProperties/ExplorerServerPath.sql: -------------------------------------------------------------------------------- 1 | 2 | WITH ServersParents AS 3 | ( 4 | SELECT 5 | S.server_id 6 | ,S.server_group_id as parentGroup 7 | ,CONVERT(varchar(max),S.name) as ServerPath 8 | ,CONVERT(bigint,1) as HierarchyLevel 9 | FROM 10 | msdb..sysmanagement_shared_registered_servers S 11 | JOIN 12 | msdb..sysmanagement_shared_server_groups SG 13 | ON SG.server_group_id = S.server_group_id 14 | 15 | UNION ALL 16 | 17 | SELECT 18 | SP.server_id 19 | ,SG.parent_id 20 | ,CONVERT(varchar(max),SG.name+'/'+SP.ServerPath) as ServerPath 21 | ,SP.HierarchyLevel+1 as HierarchyLevel 22 | FROM 23 | ServersParents SP 24 | INNER JOIN 25 | 
msdb..sysmanagement_shared_server_groups SG 26 | ON SG.server_group_id = SP.parentGroup 27 | WHERE 28 | SG.parent_id IS NOT NULL 29 | ) 30 | SELECT 31 | S.server_id 32 | ,S.ServerPath 33 | FROM 34 | ( 35 | SELECT 36 | * 37 | ,ROW_NUMBER() OVER(PARTITION BY SP.server_id ORDER BY SP.HierarchyLevel DESC) Lastency 38 | FROM 39 | ServersParents SP 40 | ) S 41 | WHERE 42 | S.Lastency = 1 -------------------------------------------------------------------------------- /CMS/CMSProperties/QueryProperties.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/CMS/CMSProperties/QueryProperties.sql -------------------------------------------------------------------------------- /CMS/CMSProperties/README.md: -------------------------------------------------------------------------------- 1 | # CMS Properties 2 | 3 | Esse foi um projeto que criei em um ambiente com várias instâncias SQL Server. 4 | Eu gerenciava essas instâncias usando um recurso que existe há muito tempo no SQL + Management Studio: Central Management Server (CMS). 5 | 6 | Para acessar isso você vai, no SSMS, em View -> Registered Servers. 7 | Então, você vai ver uma opção Central Management Servers. Clica com o botão diretio, escolher `Register Central Management Server`. 8 | E aí você escolhe uma instância que irá servir como instância central. 9 | Então, posteriormente, você pode registrar instâncias. 10 | 11 | O CMS usa o msdb para guardar estes dados. 12 | Toda essa pasta que eu criei foi para adicionar um recurso: tags, que eu carinhosamente chamei de `CMS Properties`. 13 | A ideia era,na descrição das instâncias adicionar propriedades no formato `[Nome:Valor]`. 14 | 15 | Então, eu poderia ler isso de dentro do T-SQL, criando um jeito de categorizar e rotular minhas diferentes instâncias. 16 | Se não me falha a memória, eu categorizava de várias maneiras... Exemplos: [Time=Abc] (nome do time resonsável pela instância). 17 | 18 | Enfim, como tem tanto tempo que eu usei isso, eu não lembro exatamente para que eu usava. 19 | Nem sei se isso faz sentido hoje em dia. 20 | 21 | De qualquer modo, eu resolvi deixar os scripts e publicá-los devido a quantidade de coisas interessantes: 22 | 23 | - Tabelas internas do msdb que podem ser úteis para alguém 24 | - Exemplos de queries dinâmicas 25 | - Exemplos de PIVOT 26 | 27 | Pode ter algo reaproveitável aí para alguém! 
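A rough example of how it was meant to be used (the names here are made up, just to illustrate): an instance registered in the CMS with the description `[Time:Vendas][Ambiente:Prod]` exposes the properties `Time` and `Ambiente`. Once those property names are registered in `cmsprops.CMSProperties`, they come back pivoted as columns, so a call like `EXEC cmsprops.prcGetInstance @FilterExpression = '[Ambiente] = "Prod"'` (the proc swaps double quotes for single quotes) returns only the production instances.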
-------------------------------------------------------------------------------- /CMS/CMSProperties/cmsprops.CMSProperties.tab.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE [cmsprops].[CMSProperties]( 2 | [propName] [nvarchar](300) NOT NULL 3 | ,[propDescription] [varchar](8000) NULL 4 | ,CONSTRAINT [pkCMSProperties] PRIMARY KEY CLUSTERED ([propName] ASC) 5 | ) -------------------------------------------------------------------------------- /CMS/CMSProperties/cmsprops.cpInstanceProperties.vw.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/CMS/CMSProperties/cmsprops.cpInstanceProperties.vw.sql -------------------------------------------------------------------------------- /CMS/CMSProperties/cmsprops.prcGetInstance.proc.sql: -------------------------------------------------------------------------------- 1 | IF OBJECT_ID('cmsprops.prcGetInstance') IS NULL 2 | EXEC('CREATE PROCEDURE [cmsprops].[prcGetInstance] AS SELECT 1 as StubVersion') 3 | GO 4 | 5 | 6 | ALTER PROCEDURE 7 | [cmsprops].[prcGetInstance](@FilterExpression nvarchar(3000) = NULL) 8 | AS 9 | DECLARE 10 | @PropNames varchar(200) 11 | ,@tsql nvarchar(max) 12 | 13 | IF OBJECT_ID('tempdb..#PropertiesPivoted') IS NOT NULL 14 | DROP TABLE #PropertiesPivoted; 15 | 16 | 17 | SELECT 18 | @PropNames = ISNULL(@PropNames + ',','') + QUOTENAME(P.propName) 19 | FROM 20 | cmsprops.CMSProperties P 21 | 22 | IF @FilterExpression IS NULL 23 | SET @FilterExpression = '1 = 1'; 24 | 25 | 26 | SET @tsql = N' 27 | SELECT 28 | P.* 29 | FROM 30 | ( 31 | SELECT 32 | * 33 | FROM 34 | cmsprops.cpFullInstanceProperties FIP 35 | ) F 36 | PIVOT 37 | ( 38 | MAX(F.propValue) FOR F.propName IN ('+@PropNames+') 39 | ) P 40 | WHERE 41 | ( 42 | '+REPLACE(@FilterExpression,'"','''')+' 43 | ) 44 | ' 45 | 46 | EXEC sp_Executesql @tsql; 47 | 48 | GO 49 | 50 | 51 | -------------------------------------------------------------------------------- /CMS/CMSProperties/cmsprops.prcSetProperty.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/CMS/CMSProperties/cmsprops.prcSetProperty.sql -------------------------------------------------------------------------------- /CMS/CMSProperties/cmsprops.sch.sql: -------------------------------------------------------------------------------- 1 | CREATE SCHEMA [cmsprops] 2 | 3 | -------------------------------------------------------------------------------- /CMS/CMSProperties/cpFullInstanceProperties.vw.sql: -------------------------------------------------------------------------------- 1 | IF OBJECT_ID('cmsprops.cpFullInstanceProperties') IS NOT NULL 2 | EXEC('DROP VIEW cmsprops.cpFullInstanceProperties'); 3 | GO 4 | 5 | CREATE VIEW 6 | [cmsprops].[cpFullInstanceProperties] 7 | AS 8 | SELECT 9 | I.serverId 10 | ,I.connectionName 11 | ,I.displayName 12 | ,CP.propName 13 | ,IP.propValue 14 | FROM 15 | cmsprops.cpInstances I 16 | CROSS JOIN 17 | cmsprops.CMSProperties CP 18 | LEFT JOIN 19 | cmsprops.cpInstanceProperties IP 20 | ON IP.server_id = I.serverId 21 | AND IP.propName = CP.propName 22 | 23 | 24 | GO 25 | 26 | 27 | -------------------------------------------------------------------------------- /CMS/CMSProperties/cpInstances.vw.sql: -------------------------------------------------------------------------------- 
1 | IF OBJECT_ID('cmsprops.cpInstances') IS NOT NULL 2 | EXEC('DROP VIEW [cmsprops].[cpInstances]'); 3 | GO 4 | 5 | CREATE VIEW 6 | [cmsprops].[cpInstances] 7 | AS 8 | SELECT 9 | S.server_id AS serverId 10 | ,S.name AS displayName 11 | ,S.server_name AS connectionName 12 | ,S.description AS instanceDescription 13 | FROM 14 | msdb..sysmanagement_shared_registered_servers S 15 | GO 16 | 17 | 18 | -------------------------------------------------------------------------------- /CMS/CMSProperties/dbo.FullServerPath.vw.sql: -------------------------------------------------------------------------------- 1 | CREATE VIEW 2 | [dbo].[FullServerPath] 3 | AS 4 | WITH ServersParents AS 5 | ( 6 | SELECT 7 | S.server_id 8 | ,S.server_group_id as parentGroup 9 | ,CONVERT(varchar(max),S.name) as ServerPath 10 | ,CONVERT(bigint,1) as HierarchyLevel 11 | FROM 12 | msdb..sysmanagement_shared_registered_servers S 13 | JOIN 14 | msdb..sysmanagement_shared_server_groups SG 15 | ON SG.server_group_id = S.server_group_id 16 | 17 | UNION ALL 18 | 19 | SELECT 20 | SP.server_id 21 | ,SG.parent_id 22 | ,CONVERT(varchar(max),SG.name+'/'+SP.ServerPath) as ServerPath 23 | ,SP.HierarchyLevel+1 as HierarchyLevel 24 | FROM 25 | ServersParents SP 26 | INNER JOIN 27 | msdb..sysmanagement_shared_server_groups SG 28 | ON SG.server_group_id = SP.parentGroup 29 | WHERE 30 | SG.parent_id IS NOT NULL 31 | ) 32 | SELECT 33 | S.server_id 34 | ,S.ServerPath 35 | FROM 36 | ( 37 | SELECT 38 | * 39 | ,ROW_NUMBER() OVER(PARTITION BY SP.server_id ORDER BY SP.HierarchyLevel DESC) Lastency 40 | FROM 41 | ServersParents SP 42 | ) S 43 | WHERE 44 | S.Lastency = 1 45 | 46 | -------------------------------------------------------------------------------- /CMS/ConcederAcesso_CMS.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/CMS/ConcederAcesso_CMS.sql -------------------------------------------------------------------------------- /CPU/CPUActivityEx.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Essa é uma das primeiras versões que criei para obter info PRECISA de CPU de uma maneira agregada! 
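A short note on reading the output (based only on what the query below does): each row is one user session, every
system session (session_id < 50) is collapsed under the -50 identificador, the extra row produced by WITH ROLLUP is
the instance-wide total, and Working = 1 means that session's worker was the active, non-idle worker of its scheduler
at the moment of the snapshot.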
8 | 9 | */ 10 | 11 | SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; 12 | 13 | 14 | SELECT 15 | OS.identificador 16 | ,MIN(OS.start_time) as MenorTempoStart 17 | ,SUM(OS.cpu_time) as CPUTime 18 | ,COUNT(OS.task_address) as NumTasks 19 | ,COUNT(OS.worker_address) as NumWorkers 20 | ,COUNT(CASE WHEN OS.scheduler_idle = 1 AND OS.ActiveWorker = 1 THEN OS.scheduler_address END) as ActiveIdles 21 | ,MAX(OS.Working) as Working 22 | ,MAX(OS.os_thread_id) as ThreadID 23 | ,MAX(OS.thread_address) as ThreadAddr 24 | ,MAX(OS.CurrentRequest) as CurrentRequest 25 | ,AVG(CPUFactor) as CPUFactor 26 | FROM 27 | ( 28 | SELECT 29 | CASE 30 | WHEN R.session_id < 50 THEN -50 31 | ELSE R.session_id 32 | END as identificador 33 | ,R.session_id 34 | ,R.request_id 35 | ,R.start_time 36 | ,R.status as request_status 37 | ,R.command 38 | ,R.wait_type 39 | ,R.wait_time 40 | ,R.cpu_time 41 | ,T.task_address 42 | ,T.task_state 43 | ,W.worker_address 44 | ,W.state 45 | ,S.scheduler_address 46 | ,S.status as scheduler_status 47 | ,S.is_idle as scheduler_idle 48 | ,CASE 49 | WHEN S.active_worker_address = W.worker_address THEN 1 50 | ELSE 0 51 | END AS ActiveWorker 52 | ,CASE 53 | WHEN S.active_worker_address = W.worker_address AND S.is_idle = 0 THEN 1 54 | ELSE 0 55 | END as Working 56 | ,TH.os_thread_id 57 | --,DENSE_RANK() OVER(PARTITION BY R.session_id,R.request_id ORDER BY W.quantum_used-W.start_quantum DESC) as RankThreadTime 58 | ,CASE 59 | WHEN R.session_id = @@SPID THEN 1 60 | ELSE 0 61 | END as CurrentRequest 62 | ,TH.thread_address 63 | ,R.cpu_time*1.00/NULLIF(R.total_elapsed_time,0) as CPUFactor 64 | FROM 65 | sys.dm_exec_requests R 66 | LEFT JOIN 67 | ( 68 | sys.dm_os_tasks T 69 | INNER JOIN 70 | sys.dm_os_workers W 71 | ON W.worker_address = T.worker_address 72 | INNER JOIN 73 | sys.dm_os_threads TH 74 | ON TH.worker_address = W.worker_address 75 | INNER JOIN 76 | sys.dm_os_schedulers S 77 | ON S.scheduler_address = W.scheduler_address 78 | ) 79 | ON R.session_id = T.session_id 80 | AND R.request_id = T.request_id 81 | ) OS 82 | 83 | GROUP BY 84 | identificador WITH ROLLUP 85 | ORDER BY 86 | Working DESC,CPUTime DESC 87 | 88 | 89 | 90 | 91 | --SELECT * FROM sys.dm_os_threads; 92 | --select * from sys.dm_os_sys_info 93 | --select * from sys.dm_os_schedulers 94 | 95 | --SP_WHOISACTIVE 96 | 97 | /* 98 | select t.session_id,th.os_thread_id from sys.dm_os_threads th 99 | inner join 100 | sys.dm_os_workers W 101 | on w.worker_address = th.worker_address 102 | inner join 103 | sys.dm_os_tasks t 104 | on t.task_address = w.task_address 105 | where th.os_thread_id = 11712 106 | **/ 107 | 108 | 109 | 110 | --sp_whoisactive @delta_interval = 2 111 | 112 | -------------------------------------------------------------------------------- /CPU/CPUDelta.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/CPU/CPUDelta.sql -------------------------------------------------------------------------------- /CPU/CPUDeltaEx.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Um CPU Delta um pouco mais enfeitado... 8 | Faz várias coletas,ao invés de 2 e depois calcula o delta entre a primeira e a última! 9 | Não lembro exatamente o porque faço várias coletas, mas acho que era pra garantir ter o máximo de info intermediária para usar se precisar! 
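With the defaults used below (@collectsCount = 20, @TimeToCollect = 1000 ms), the loop snapshots sys.dm_exec_requests roughly every 50 ms (1000/20) over a one-second window, and the delta is taken between each request's first and last snapshot, so CPUUsed is approximately the CPU time that request consumed inside that window.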
10 | 11 | */ 12 | 13 | DECLARE 14 | @collectsCount int 15 | ,@TimeToCollect varchar(12) 16 | ,@CPU100MS smallint 17 | ; 18 | 19 | SET @collectsCount = 20; 20 | SET @TimeToCollect = 1000; 21 | SET @CPU100MS = 1000; 22 | 23 | ----> Internal structures 24 | IF OBJECT_ID('tempdb..#requests') IS NOT NULL 25 | DROP TABLE #requests; 26 | 27 | IF OBJECT_ID('tempdb..#processed') IS NOT NULL 28 | DROP TABLE #processed; 29 | 30 | IF OBJECT_ID('tempdb..#report') IS NOT NULL 31 | DROP TABLE #report; 32 | 33 | DECLARE 34 | @TimePerCollect int 35 | ,@CollectTimeFormat varchar(13) 36 | ,@StartTime datetime 37 | ,@CollectNumber bigint 38 | ; 39 | 40 | --> COLLECT PHASE 41 | 42 | -- Calculates time need for wait between collects. 43 | SET @TimePerCollect = @TimeToCollect/@collectsCount; 44 | -- Store the final time to wait between collects, in the format 'HH:MM:SS.MMM' 45 | SET @CollectTimeFormat = RIGHT(CONVERT(varchar(23),DATEADD(ms,@TimePerCollect,0),121),12); 46 | 47 | 48 | 49 | --> Initialize @CollectNumber 50 | SET @CollectNumber = 1; 51 | -- Initialize the temp table. 52 | SELECT @CollectNumber as CollectNumber,CURRENT_TIMESTAMP as CollectTimestamp,* INTO #requests FROM sys.dm_exec_requests WHERE 1 = 2; 53 | 54 | SET @StartTime = CURRENT_TIMESTAMP; 55 | WHILE( DATEDIFF(ms,@StartTime,CURRENT_TIMESTAMP) <= @TimeToCollect ) 56 | BEGIN 57 | --> Get the data! 58 | INSERT INTO 59 | #requests 60 | SELECT 61 | @CollectNumber,CURRENT_TIMESTAMP 62 | ,* 63 | FROM 64 | sys.dm_exec_requests; 65 | 66 | --> Increase... 67 | SET @CollectNumber = @CollectNumber + 1; 68 | 69 | --. Waitfor time to collect again. 70 | WAITFOR DELAY @CollectTimeFormat; 71 | END 72 | 73 | 74 | --> BUILD PHASE 75 | 76 | SELECT 77 | F.session_id 78 | ,F.request_id 79 | ,F.start_time 80 | ,F.task_address 81 | ,F.sql_handle 82 | ,~CONVERT(bit,L.session_id) Finished 83 | ,L.cpu_time - F.cpu_time as CPUUsed 84 | ,l.cpu_time CPUAcc 85 | INTO 86 | #processed 87 | FROM 88 | #requests F 89 | OUTER APPLY 90 | ( 91 | SELECT TOP 1 92 | * 93 | FROM 94 | #requests L 95 | WHERE 96 | L.session_id = F.session_id 97 | AND 98 | L.request_id = F.request_id 99 | AND 100 | L.start_time = F.start_time 101 | AND 102 | L.task_address = F.task_address 103 | AND 104 | L.CollectNumber > F.CollectNumber --> Last must be a greater number than the first! 105 | ORDER BY 106 | L.CollectNumber DESC 107 | ) L 108 | WHERE 109 | F.CollectNumber = 1 110 | 111 | --> Generating reporting...
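-- Note: EstimatedCPU below is CPUUsed*100 / (cpu_count * @CPU100MS), i.e. the request's share of the whole box over the sampled window, treating @CPU100MS ms of worker time as 100% of one scheduler;
-- CPUUsedInterval in the final SELECT is the simpler per-request percentage CPUUsed*100 / @TimeToCollect.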
112 | 113 | SELECT 114 | P.session_id 115 | ,P.request_id 116 | ,P.start_time 117 | ,QINFO.ProcName 118 | ,P.CPUUsed 119 | ,(P.CPUUsed*100.00)/(SI.cpu_count*@CPU100MS) as EstimatedCPU 120 | ,P.CPUAcc 121 | INTO 122 | #report 123 | FROM 124 | #processed P 125 | CROSS JOIN 126 | sys.dm_os_sys_info SI 127 | INNER JOIN 128 | ( 129 | SELECT 130 | QH.sql_handle 131 | ,OBJECT_NAME(ST.objectid,ST.dbid) as ProcName 132 | FROM 133 | ( 134 | SELECT DISTINCT 135 | sql_handle 136 | FROM 137 | #processed 138 | ) QH 139 | CROSS APPLY 140 | sys.dm_exec_sql_text(QH.sql_handle) ST 141 | ) QINFO 142 | ON QINFO.sql_handle = P.sql_handle 143 | ORDER BY 144 | CPUUsed DESC 145 | 146 | 147 | --> Final data for caller use 148 | 149 | SELECT 150 | * 151 | --,DATEDIFF(MS,R.start_time,CURRENT_TIMESTAMP) Live 152 | ,CPUUsed*100.00/@TimeToCollect as CPUUsedInterval 153 | --,CPUAcc*100.00/DATEDIFF(MS,R.start_time,CURRENT_TIMESTAMP) as CPUUsedLive 154 | FROM 155 | #report R 156 | ORDER BY 157 | CPUUsed DESC -------------------------------------------------------------------------------- /CPU/CPUDelta_Custom.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Versão light do cpu delta! 8 | DEve ter sido um dos primeiros scripts que eu fiz para ter isso... 9 | .\RequestsDElta.sql é mais completo! 10 | */ 11 | 12 | 13 | IF OBJECT_ID('tempdb..#first') IS NOT NULL 14 | DROP TABLE #first; 15 | 16 | IF OBJECT_ID('tempdb..#second') IS NOT NULL 17 | DROP TABLE #second; 18 | 19 | 20 | SELECT * INTO #first FROM sys.dm_exec_requests 21 | WAITFOR DELAY '00:00:01.000' 22 | SELECT * INTO #second FROM sys.dm_exec_requests 23 | 24 | 25 | SELECT 26 | S.session_id 27 | ,s.request_id 28 | ,s.start_time 29 | ,'#' as [#] 30 | ,F.session_id 31 | ,F.request_id 32 | ,F.start_time 33 | ,(S.cpu_time - F.cpu_time) as CPUGasto 34 | FROM 35 | #second S 36 | INNER JOIN 37 | #first F 38 | ON F.session_id = S.session_id 39 | AND F.request_id = S.request_id 40 | AND F.start_time = S.start_time 41 | AND F.task_address = S.task_address 42 | ORDER BY 43 | CPUGasto DESC 44 | 45 | 46 | SELECT 47 | SUM(CASE WHEN S.session_id IS NULL THEN 1 ELSE 0 END) as QtdRquestsNovos 48 | ,SUM(CASE WHEN F.session_id IS NULL THEN 1 ELSE 0 END) as QtdRquestsFinalizados 49 | ,COUNT(*) as TotalCollects 50 | FROM 51 | #second S 52 | FULL JOIN 53 | #first F 54 | ON F.session_id = S.session_id 55 | AND F.request_id = S.request_id 56 | AND F.start_time = S.start_time 57 | AND F.task_address = S.task_address -------------------------------------------------------------------------------- /CPU/CPUDelta_GroupByObject.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/CPU/CPUDelta_GroupByObject.sql -------------------------------------------------------------------------------- /CPU/CPUTest-Simple.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/CPU/CPUTest-Simple.sql -------------------------------------------------------------------------------- /CPU/CPUTime_Schedulers.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Traz quais sessões estão usando scheduler e a respectiva thread no SO. 
8 | E se for em paralelo, traz a thread que mais consome de CPU de todas as threads da query! 9 | 10 | */ 11 | 12 | SELECT 13 | r.session_id 14 | ,r.cpu_time 15 | ,E.* 16 | FROM 17 | sys.dm_exec_requests r 18 | CROSS APPLY 19 | ( 20 | SELECT 21 | COUNT(EX.scheduler_address) as SchedulerUso 22 | ,MAX(CASE WHEN EX.THRankTime = 1 THEN EX.ThreadID END) as MaxThread 23 | ,COUNT(EX.worker_address) as NumWorkers 24 | FROM 25 | ( 26 | SELECT 27 | S.scheduler_address 28 | ,TH.os_thread_id AS ThreadID 29 | ,ROW_NUMBER() OVER(ORDER BY TH.kernel_time + TH.usermode_time DESC) as THRankTime 30 | ,W.worker_address 31 | from 32 | sys.dm_os_tasks T 33 | INNER JOIN 34 | sys.dm_os_workers W 35 | ON W.worker_address = T.worker_address 36 | INNER JOIN 37 | sys.dm_os_threads TH 38 | ON TH.worker_address = T.worker_address 39 | left JOIN 40 | sys.dm_os_schedulers S 41 | ON W.worker_address = S.active_worker_address 42 | AND w.state = 'RUNNING' 43 | WHERE 44 | T.session_id = r.session_id 45 | AND 46 | T.request_id = R.request_id 47 | ) EX 48 | ) E 49 | WHERE 50 | --R.session_id > 50 51 | --AND 52 | R.session_id <> @@SPID 53 | ORDER BY 54 | E.SchedulerUso DESC, R.cpu_time DESC 55 | 56 | 57 | --sp_whoisactive 58 58 | 59 | /* 60 | select t.session_id,th.os_thread_id from sys.dm_os_threads th 61 | inner join 62 | sys.dm_os_workers W 63 | on w.worker_address = th.worker_address 64 | inner join 65 | sys.dm_os_tasks t 66 | on t.task_address = w.task_address 67 | where t.session_id = 1 68 | **/ 69 | -------------------------------------------------------------------------------- /CPU/CPU_usadasporsessao.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Traz informações agregadas dos schedulers e threads usados por cada sessão! 8 | Visão rápida de paralelismo. 9 | Hoje, na RequestsDelta.sql, isso já está lá bem mais completo! 10 | 11 | */ 12 | 13 | SELECT 14 | --TH.os_thread_id 15 | --,TH.affinity 16 | --,W.state 17 | --,TK.task_state 18 | TK.session_id_ex 19 | ,COUNT(distinct TK.task_address) as Tasks 20 | ,COUNT(distinct CASE WHEN TK.task_state = 'RUNNING' THEN TK.task_address END) as TasksRunning 21 | ,COUNT(DISTINCT W.worker_address) as NumWorkers 22 | ,COUNT(DISTINCT CASE WHEN TK.task_state = 'RUNNING' THEN S.scheduler_address END) as NumSchedulersUso 23 | ,COUNT(DISTINCT TH.os_thread_id) as NumThreads 24 | FROM 25 | ( 26 | SELECT 27 | * 28 | ,CASE 29 | WHEN TK.session_id > 50 THEN session_id 30 | ELSE -50 31 | END as session_id_ex 32 | FROM 33 | sys.dm_os_tasks TK 34 | ) TK 35 | JOIN 36 | sys.dm_os_workers W 37 | ON W.worker_address = TK.worker_address 38 | JOIN 39 | sys.dm_os_schedulers S 40 | ON S.scheduler_address = W.scheduler_address 41 | JOIN 42 | sys.dm_os_threads TH 43 | ON TH.worker_address = W.worker_address 44 | WHERE 45 | TK.session_id <> @@SPID 46 | GROUP BY 47 | TK.session_id_ex WITH ROLLUP 48 | ORDER BY 49 | CASE WHEN session_id_ex IS NULL THEN 1 ELSE 0 end desc,NumSchedulersUso DESC 50 | 51 | -------------------------------------------------------------------------------- /CPU/CurrentTasks_VsRunnable_Aggregado.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Visão rápida e simples da fila de CPU... 8 | runnable = pronto pra rodar mas está aguardando vez na CPU! 
9 | Se isso aqui tá próximo de current tasks e não baixa ou sobe e desce, é estranho, significa que alguma coisa travou algum scheduler... 10 | Pode ser dump, drivers e até bug do sql! 11 | */ 12 | 13 | select 14 | SUM(current_tasks_count) 15 | ,SUM(runnable_tasks_count) 16 | from 17 | sys.dm_os_schedulers with(nolock) 18 | where 19 | status = 'VISIBLE ONLINE' -------------------------------------------------------------------------------- /CPU/NativeCompiledTest.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/CPU/NativeCompiledTest.sql -------------------------------------------------------------------------------- /CPU/QuerStatsDelta.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | O mesmo que RequestsDelta, porém usando a sys.dm_exec_query_stats... 8 | ISto é, consigo olhar o que há rodou há pouco segundos... 9 | Isso pode me ajudar a pegar queries extremamente rápidas, mas que rodam aos montes e consomem muita CPU na soma! 10 | */ 11 | 12 | 13 | IF OBJECT_ID('tempdb..#StatsBefore') IS NOT NULL 14 | DROP TABLE #StatsBefore; 15 | 16 | IF OBJECT_ID('tempdb..#StatsAfter') IS NOT NULL 17 | DROP TABLE #StatsAfter; 18 | 19 | IF OBJECT_ID('tempdb..#QueryStatsDelta') IS NOT NULL 20 | DROP TABLE #QueryStatsDelta; 21 | 22 | declare @Start datetime = DATEADD(SS,-1,GETDATE()) 23 | 24 | select 25 | query_hash 26 | ,last_worker_time 27 | ,execution_count 28 | ,st.text 29 | ,creation_time 30 | ,last_execution_time 31 | ,qs.plan_handle 32 | ,qs.statement_start_offset 33 | ,qs.statement_end_offset 34 | ,AvgCpuTime = total_worker_time*1.00/execution_count 35 | ,total_worker_time 36 | ,CollectTime = getdate() 37 | ,ObjectName = OBJECT_NAME(st.objectid,st.dbid) 38 | ,DbName = DB_NAME(st.dbid) 39 | into 40 | #StatsBefore 41 | from 42 | sys.dm_exec_query_stats qs 43 | cross apply 44 | sys.dm_exec_sql_text(qs.sql_handle) st 45 | where 46 | qs.last_execution_time >= @Start 47 | 48 | waitfor delay '00:00:01' 49 | 50 | select 51 | query_hash 52 | ,last_worker_time 53 | ,execution_count 54 | ,st.text 55 | ,creation_time 56 | ,last_execution_time 57 | ,qs.plan_handle 58 | ,qs.statement_start_offset 59 | ,qs.statement_end_offset 60 | ,AvgCpuTime = total_worker_time*1.00/execution_count/1000. 61 | ,total_worker_time 62 | ,CollectTime = getdate() 63 | ,ObjectName = OBJECT_NAME(st.objectid,st.dbid) 64 | ,DbName = DB_NAME(st.dbid) 65 | into 66 | #StatsAfter 67 | from 68 | sys.dm_exec_query_stats qs 69 | cross apply 70 | sys.dm_exec_sql_text(qs.sql_handle) st 71 | where 72 | qs.last_execution_time >= @Start 73 | 74 | 75 | SELECT 76 | DbName = ISNULL(A.DbName,pa.DbName) 77 | ,t.batch 78 | ,Trecho = q.qx 79 | ,A.ObjectName 80 | ,C.Interval 81 | ,LastCpu = CONVERT(decimal(10,2),A.last_worker_time/1000.) 82 | ,ExecDelta = A.execution_count - B.execution_count 83 | ,C.CpuDelta 84 | ,AvgDelta = CONVERT(decimal(10,2),C.CpuDelta/(Interval/1000.)) 85 | ,[CpuDelta%] = CONVERT(int,C.CpuDelta*100/Interval) 86 | ,[AvgDelta%] = CONVERT(int,(CONVERT(decimal(10,2),C.CpuDelta/(Interval/1000.))/1000)*100) 87 | ,CpuBefore = B.total_worker_time/1000. 88 | ,CpuAfter = A.total_worker_time/1000. 
89 | ,ExecBefore = B.execution_count 90 | ,ExecAfter = A.execution_count 91 | ,A.plan_handle 92 | ,A.query_hash 93 | ,A.creation_time 94 | ,A.last_execution_time 95 | into 96 | #QueryStatsDelta 97 | FROM 98 | #StatsAfter A 99 | LEFT JOIN 100 | #StatsBefore B 101 | ON B.plan_handle = A.plan_handle 102 | AND B.statement_start_offset = A.statement_start_offset 103 | and B.statement_end_offset = A.statement_end_offset 104 | CROSS APPLY ( 105 | SELECT 106 | CpuDelta = CONVERT(decimal(10,2),(A.total_worker_time - B.total_worker_time)/1000.) 107 | ,Interval = DATEDIFF(MS,B.CollectTime,A.CollectTime) 108 | ) C 109 | cross apply ( 110 | select 111 | DbName = DB_NAME(CONVERT(int,value)) from sys.dm_exec_plan_attributes(A.plan_handle) pa 112 | where pa.attribute = 'dbid' 113 | ) pa 114 | cross apply ( 115 | select 116 | [processing-instruction(q)] = ( 117 | REPLACE 118 | ( 119 | REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE( 120 | REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE( 121 | REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE( 122 | CONVERT 123 | ( 124 | NVARCHAR(MAX), 125 | A.text COLLATE Latin1_General_Bin2 126 | ), 127 | NCHAR(31),N'?'),NCHAR(30),N'?'),NCHAR(29),N'?'),NCHAR(28),N'?'),NCHAR(27),N'?'),NCHAR(26),N'?'),NCHAR(25),N'?'),NCHAR(24),N'?'),NCHAR(23),N'?'),NCHAR(22),N'?'), 128 | NCHAR(21),N'?'),NCHAR(20),N'?'),NCHAR(19),N'?'),NCHAR(18),N'?'),NCHAR(17),N'?'),NCHAR(16),N'?'),NCHAR(15),N'?'),NCHAR(14),N'?'),NCHAR(12),N'?'), 129 | NCHAR(11),N'?'),NCHAR(8),N'?'),NCHAR(7),N'?'),NCHAR(6),N'?'),NCHAR(5),N'?'),NCHAR(4),N'?'),NCHAR(3),N'?'),NCHAR(2),N'?'),NCHAR(1),N'?'), 130 | NCHAR(0), 131 | N'' 132 | ) 133 | ) 134 | for xml path(''),TYPE 135 | ) t(batch) 136 | cross apply ( 137 | select 138 | [processing-instruction(q)] = ( 139 | REPLACE 140 | ( 141 | REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE( 142 | REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE( 143 | REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(REPLACE( 144 | CONVERT 145 | ( 146 | NVARCHAR(MAX), 147 | SUBSTRING(A.text,A.statement_start_offset/2 + 1, ISNULL((NULLIF(A.statement_end_offset,-1) - A.statement_start_offset)/2 + 1,LEN(A.text)) ) COLLATE Latin1_General_Bin2 148 | ), 149 | NCHAR(31),N'?'),NCHAR(30),N'?'),NCHAR(29),N'?'),NCHAR(28),N'?'),NCHAR(27),N'?'),NCHAR(26),N'?'),NCHAR(25),N'?'),NCHAR(24),N'?'),NCHAR(23),N'?'),NCHAR(22),N'?'), 150 | NCHAR(21),N'?'),NCHAR(20),N'?'),NCHAR(19),N'?'),NCHAR(18),N'?'),NCHAR(17),N'?'),NCHAR(16),N'?'),NCHAR(15),N'?'),NCHAR(14),N'?'),NCHAR(12),N'?'), 151 | NCHAR(11),N'?'),NCHAR(8),N'?'),NCHAR(7),N'?'),NCHAR(6),N'?'),NCHAR(5),N'?'),NCHAR(4),N'?'),NCHAR(3),N'?'),NCHAR(2),N'?'),NCHAR(1),N'?'), 152 | NCHAR(0), 153 | N'' 154 | ) 155 | ) 156 | for xml path(''),TYPE 157 | ) q(qx) 158 | 159 | SELECT 160 | * 161 | FROM 162 | #QueryStatsDelta 163 | WHERE 164 | ExecDelta > 0 165 | ORDER BY 166 | CpuDelta DESC 167 | 168 | 169 | select AvgCpuPercent = c.TotalCPU*100/(si.cpu_count*1000) 170 | ,TotalCPU 171 | ,EstCpuCnt = TotalCPU/1000 172 | ,MaxCPUTime = (si.cpu_count*MaxInterval) 173 | ,TotalCpu = si.cpu_count 174 | from 175 | ( SELECT TotalCPU = SUM(CpuDelta), MaxInterval = MAX(Interval) from #QueryStatsDelta ) c 176 | cross join 177 | sys.dm_os_sys_info si 178 | 179 | select 180 | q.* 181 | ,qh.batch 182 | ,qh.Trecho 183 | ,qh.ObjectName 184 | from ( 185 | SELECT 186 | query_hash 187 | ,Qtd = count(*) 188 | ,CpuTotal = sum(CpuDelta) 189 | FROM 190 | 
#QueryStatsDelta 191 | group by 192 | query_hash 193 | ) q 194 | outer apply ( 195 | select top 1 * from #QueryStatsDelta d where d.query_hash = q.query_hash 196 | ) qh 197 | order by 198 | CpuTotal desc -------------------------------------------------------------------------------- /CPU/RequestDelta.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/CPU/RequestDelta.sql -------------------------------------------------------------------------------- /CPU/TasksActivity.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Retorna apenas as sessões que estão em running, isto é, o worker associado está de fato executando no scheduler (state = running). 8 | Note que não necessariamente ele estará na cpu do SO, pois isso não é controlado pelo SQL! 9 | Se aqui tem muita coisa, e no seu Windows tá consumo baixo de CPU do sql, tem algo muito estranho... 10 | Pode ser, por exemplo, algum AV ou driver, DPC, etc. 11 | */ 12 | 13 | SELECT 14 | * 15 | FROM 16 | ( 17 | SELECT 18 | T.session_id 19 | ,COUNT(W.worker_address) as QtdWorkers 20 | ,COUNT(CASE WHEN S.is_idle = 0 and w.state = 'running' then W.worker_address END) as QtdRunningWorks 21 | FROM 22 | ( 23 | SELECT 24 | T.task_address 25 | ,CASE 26 | WHEN T.session_id < 51 THEN t.session_id 27 | ELSE t.session_id 28 | END as session_id 29 | FROM 30 | sys.dm_os_tasks T 31 | ) T 32 | INNER JOIN 33 | sys.dm_os_workers W 34 | ON W.task_address = T.task_address 35 | LEFT JOIN 36 | sys.dm_os_schedulers S 37 | ON s.active_worker_address = W.worker_address 38 | GROUP BY 39 | T.session_id WITH ROLLUP 40 | ) TAT 41 | WHERE 42 | TAT.QtdRunningWorks > 0 43 | ORDER BY 44 | QtdWorkers DESC -------------------------------------------------------------------------------- /CPU/ThreadScheduleInfo.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Mais algumas informações de threads. 8 | Fiz isso apenas para ter de prontidão caso precisasse... Mas, não lembro de usar isso comfrequência... 
9 | Se nao me engano, foi mais para estudos 10 | */ 11 | 12 | SELECT 13 | R.session_id 14 | ,DbName = DB_NAME(R.database_id) 15 | ,R.command 16 | ,R.wait_type 17 | ,R.total_elapsed_time 18 | ,R.wait_time 19 | ,R.cpu_time 20 | ,WSigTime = CASE WHEN W.wait_started_ms_ticks = 0 THEN SI.ms_ticks - NULLIF(W.wait_resumed_ms_ticks,0) ELSE 0 END 21 | ,WWaitTime = SI.ms_ticks -NULLIF(W.wait_started_ms_ticks,0) 22 | ,Start_quantum 23 | ,RqStatus = R.status 24 | ,WkStatus = W.state 25 | ,W.is_preemptive 26 | ,S.scheduler_id 27 | ,S.is_idle 28 | ,S.runnable_tasks_count 29 | ,S.cpu_id 30 | ,S.status 31 | FROM 32 | sys.dm_exec_requests R 33 | LEFT JOIN ( 34 | sys.dm_os_tasks T 35 | INNER JOIN 36 | sys.dm_os_workers W 37 | ON W.worker_address = T.worker_address 38 | INNER JOIN 39 | sys.dm_os_schedulers S 40 | ON S.scheduler_address = W.scheduler_address 41 | ) ON T.request_id = R.request_id 42 | AND T.session_id = R.session_id 43 | CROSS JOIN sys.dm_os_sys_info AS SI 44 | WHERE 45 | R.scheduler_id IS NOT NULL 46 | ORDER BY 47 | scheduler_id -------------------------------------------------------------------------------- /CPU/TryCalcCPU_MultipleThreads.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Tentando calcular o uso de CPU usando as DMVs de os_thrads, que contém as infos de cpu direto da API do Windows, para cada thread. 8 | Aqui eu estava tentando obter mais precisão no consumo de CPU, mas, raramente usei essa. 9 | */ 10 | 11 | IF OBJECT_ID('tempdb..#CPUTimes') IS NOT NULL 12 | DROP TABLE #CPUTimes; 13 | 14 | SELECT 15 | TK.session_id 16 | ,TK.task_address 17 | ,TK.exec_context_id 18 | ,T.os_thread_id 19 | ,T.usermode_time 20 | ,T.kernel_time 21 | ,TS = CURRENT_TIMESTAMP 22 | INTO 23 | #CPUTimes 24 | FROM 25 | sys.dm_os_workers W 26 | JOIN 27 | sys.dm_os_threads T 28 | on T.worker_address = W.worker_address 29 | JOIN 30 | sys.dm_os_tasks TK 31 | ON TK.worker_address = W.worker_Address 32 | WHERE 33 | TK.session_id > 50 34 | 35 | WAITFOR DELAY '00:00:01.000'; --> Aguarda 1 segundo (intervalo de monitoramento) 36 | 37 | 38 | SELECT 39 | R.session_id 40 | ,R.task_address 41 | ,R.exec_context_id 42 | ,Intervalo = DATEDIFF(ms,TS,CURRENT_TIMESTAMP) 43 | ,R.usermode_time-U.usermode_time as CPUSpend 44 | ,R.kernel_time-U.kernel_time as KernelSpend 45 | ,(R.usermode_time+R.kernel_time)-(U.usermode_time+U.kernel_time) as TotalSpend 46 | FROM 47 | ( 48 | SELECT 49 | TK.session_id 50 | ,TK.task_address 51 | ,TK.exec_context_id 52 | ,T.os_thread_id 53 | ,T.usermode_time 54 | ,T.kernel_time 55 | FROM 56 | sys.dm_os_workers W 57 | JOIN 58 | sys.dm_os_threads T 59 | on T.worker_address = W.worker_address 60 | JOIN 61 | sys.dm_os_tasks TK 62 | ON TK.worker_address = W.worker_Address 63 | WHERE 64 | TK.session_id > 50 65 | ) R 66 | LEFT JOIN 67 | #CPUTimes U 68 | ON R.session_id = U.session_id 69 | AND R.exec_context_id = U.exec_context_id 70 | AND R.os_thread_id = U.os_thread_id 71 | ORDER BY 72 | TotalSpend DESC -------------------------------------------------------------------------------- /CPU/Ver Schedulers Possivel CPU Bottlenecks.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/CPU/Ver Schedulers Possivel CPU Bottlenecks.sql -------------------------------------------------------------------------------- /CPU/threads.sql: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/CPU/threads.sql -------------------------------------------------------------------------------- /Checkdb/PerTableCheck.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | 8 | Executa um DBCC CHECKTABLE para cada tabela de um banco. 9 | Especifique o nome do banco na variável @DatabaseName. 10 | 11 | Isso já me foi muito útil quando encontrei casos de corrupção e queria saber quais tabelas estavam intactas. 12 | Rodando para cada tabela, eu poderia saber onde falharia, e depois analisaria com calma. 13 | 14 | Me ajudou muito em casos extremos, onde o cliente não tinha backup, e queria salvar o máximo de coisas possíveis. 15 | Sabendo quais tabelas estão boas, isso me guia para decidir quais dados eu vou perder e qual estratégia vou usar! 16 | */ 17 | 18 | DECLARE 19 | @PhysicalOnly bit = 0 20 | ,@DatabaseName sysname = 'NomeBanco' 21 | 22 | 23 | IF @DatabaseName IS NULL 24 | BEGIN 25 | RAISERROR('Null db',16,1); 26 | RETURN; 27 | END 28 | 29 | if object_id('tempdb..#Tables') IS NOT NULL 30 | DROP TABLE #Tables; 31 | 32 | CREATE TABLE #Tables( Id int identity primary key, DatabaseName sysname, TableName sysname, TotalSize bigint, FullName nvarchar(1000) ); 33 | 34 | DECLARE @Sql nvarchar(max); 35 | 36 | 37 | SET @Sql = @DatabaseName+'..sp_executesql'; 38 | 39 | INSERT INTO #Tables 40 | exec @Sql N' 41 | SELECT 42 | DB_NAME() 43 | ,T.name 44 | ,S.TotalSize 45 | ,FullName = QUOTENAME(C.name)+''.''+QUOTENAME(T.name) 46 | FROM 47 | sys.tables T 48 | JOIN 49 | sys.schemas C 50 | ON C.schema_id = T.schema_id 51 | CROSS APPLY 52 | ( 53 | SELECT 54 | TotalRows = SUM(P.rows) 55 | ,TotalSize = SUM(au.total_pages) 56 | FROM 57 | sys.partitions P 58 | JOIN 59 | sys.allocation_units AU 60 | ON AU.container_id = P.partition_id 61 | WHERE 62 | P.object_id = T.object_id 63 | AND 64 | P.index_id <= 1 65 | ) S 66 | ' 67 | 68 | 69 | 70 | DECLARE 71 | @Id int = 0 72 | ,@FullName sysname 73 | ,@colDbname sysname 74 | 75 | 76 | WHILE 1 = 1 77 | BEGIN 78 | SELECT top 1 79 | @Id = Id 80 | ,@FullName = FullName 81 | ,@colDbname = DatabaseName 82 | FROM 83 | #Tables 84 | WHERE 85 | Id > @Id 86 | ORDER BY 87 | Id 88 | 89 | IF @@ROWCOUNT = 0 90 | BREAK; 91 | 92 | SET @sql = 'USE '+@colDbname+'; DBCC CHECKTABLE('''+@FullName+''') WITH NO_INFOMSGS'; 93 | 94 | IF @PhysicalOnly = 1 95 | SET @sql += ',PHYSICAL_ONLY' 96 | 97 | RAISERROR('Running on table %s, sql: %s',0,1,@FullName,@sql) WITH NOWAIT; 98 | exec(@sql); 99 | 100 | IF @@ERROR != 0 101 | RAISERROR(' Table corrupted: %s',0,1,@FullName) WITH NOWAIT; 102 | 103 | END 104 | 105 | 106 | -------------------------------------------------------------------------------- /Collations/CollationAsciiTable.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/Collations/CollationAsciiTable.sql -------------------------------------------------------------------------------- /Collations/CollationPrecedence_CollationSensitity_Explained.sql: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/Collations/CollationPrecedence_CollationSensitity_Explained.sql -------------------------------------------------------------------------------- /Collations/PrefCollations_CodePage.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Traz um collation de cada Code Page! 8 | 9 | */ 10 | 11 | SELECT 12 | CodePage 13 | ,NAME 14 | ,COLLATIONPROPERTY(name,'ComparisonStyle') 15 | FROM 16 | ( 17 | SELECT 18 | NAME 19 | ,CodePage 20 | ,ROW_NUMBER() OVER(PARTITION BY CodePage ORDER BY Name) [Top] 21 | FROM 22 | ( 23 | SELECT 24 | NAME 25 | ,COLLATIONPROPERTY(Name,'CodePage') as CodePage 26 | FROM 27 | ( 28 | select 29 | NAME 30 | ,PATINDEX('%[_]CP%[_]%',NAME) as CPStart 31 | from 32 | fn_helpcollations() C 33 | WHERE 34 | C.name like '%pref%' 35 | ) C 36 | ) CINF 37 | ) CL 38 | WHERE 39 | CL.[Top] = 1 40 | 41 | 42 | 43 | -------------------------------------------------------------------------------- /Collations/collate_playing_DrawLibrarySQL.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/Collations/collate_playing_DrawLibrarySQL.sql -------------------------------------------------------------------------------- /Colunas/FirstColText.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/Colunas/FirstColText.sql -------------------------------------------------------------------------------- /Constraints/Foreign Keys/DescobrirFK-Sem-Indice.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/Constraints/Foreign Keys/DescobrirFK-Sem-Indice.sql -------------------------------------------------------------------------------- /Constraints/Foreign Keys/GEraCreateFk-AllFks.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/Constraints/Foreign Keys/GEraCreateFk-AllFks.sql -------------------------------------------------------------------------------- /Constraints/Foreign Keys/Gerar Creates e Drops de Fks.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/Constraints/Foreign Keys/Gerar Creates e Drops de Fks.sql -------------------------------------------------------------------------------- /Constraints/Foreign Keys/Informacoes de FKS.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/Constraints/Foreign Keys/Informacoes de FKS.sql -------------------------------------------------------------------------------- /Constraints/Foreign Keys/Tabelas Filhas.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/Constraints/Foreign Keys/Tabelas Filhas.sql 
-------------------------------------------------------------------------------- /Constraints/Foreign Keys/fnGeraCreateFK.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/Constraints/Foreign Keys/fnGeraCreateFK.sql -------------------------------------------------------------------------------- /Criptografia/demo-DesmistificandoCriptografia.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/Criptografia/demo-DesmistificandoCriptografia.sql -------------------------------------------------------------------------------- /Database/EstimarUltimoUsoBase.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Descrição 7 | Essa foi um tentativa de uma query para estimar o último uso de banco de dados. 8 | O segredo aqui é usar a DMV sys.dm_db_index_usage_stats que contém um log com a última desde o restart. 9 | Então, a informação não é 100% precisa, mas serve apenas como um norte rápido para uma base que é muito usada. 10 | 11 | */ 12 | 13 | SELECT 14 | CURRENT_TIMESTAMP as CollectionTime 15 | ,DI.* 16 | ,SI.* 17 | FROM 18 | ( 19 | SELECT 20 | DB_NAME(US.database_id) as DatabaseName 21 | ,MAX(LACT.LastDate) as LastUse 22 | FROM 23 | sys.dm_db_index_usage_stats US 24 | OUTER APPLY 25 | ( 26 | SELECT 27 | MAX(ACT.ActionDate) as LastDate 28 | FROM 29 | ( 30 | SELECT US.last_user_lookup as ActionDate 31 | UNION ALL 32 | SELECT US.last_user_scan 33 | UNION ALL 34 | SELECT US.last_user_seek 35 | UNION ALL 36 | SELECT US.last_user_update 37 | ) ACT 38 | ) LACT 39 | GROUP BY 40 | DB_NAME(US.database_id) 41 | ) DI 42 | CROSS JOIN 43 | ( 44 | SELECT 45 | (SELECT create_date FROM sys.databases D WHERE d.name = 'tempdb') as ServerStartTime 46 | ) SI 47 | -------------------------------------------------------------------------------- /Database/LastGoodCheckDB.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Descrição 7 | Traz o último CHECKDB feito em cada banco de dados. 8 | DBCC DBINFO é um comando não documentado e, portanto, em alguma versão futura do SQL esse script pode não funcionar! 9 | Até o sql 2022, tudo certo. 10 | */ 11 | 12 | 13 | drop table if exists #dbccinfo; 14 | 15 | create table #dbccinfo( DbName nvarchar(1000), ParentObject varchar(200), Object varchar(1000), Field varchar(1000), Value varchar(1000) ) 16 | 17 | -- Poderia usar sp_Msforeach db aqui também. 
18 | -- Não usei por cotna de um erro que tive em alguns testes com ela (provavelmente relacionado a algum caracter especial no nome do banco) 19 | set nocount on; 20 | declare @cmd nvarchar(max) = ( 21 | select 22 | N'USE '+quotename(d.name)+'; 23 | raiserror(''Collecting Db '+d.name+''',0,1) with nowait; 24 | insert into #dbccinfo(ParentObject, Object, Field, Value) 25 | exec(''dbcc dbinfo with tableresults,no_infomsgs'') 26 | update #dbccinfo set DBName = db_name() where DBName is null 27 | ' 28 | from 29 | sys.databases D 30 | where 31 | d.state_desc = 'ONLINE' 32 | for xml path,type 33 | ).value('.','nvarchar(max)') 34 | 35 | exec(@cmd); 36 | 37 | select * from #DbccInfo where Field like '%dbi_dbccLastKnownGood%' -------------------------------------------------------------------------------- /Database/SizePerDatabase.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Descrição 7 | Query rápida para trazer o tamanho real de todos os bancos da instância. 8 | No final, traz o tamanho de cada banco e o total da instância. 9 | Tudo em MB 10 | 11 | */ 12 | 13 | 14 | IF OBJECT_ID('tempdb..#TamanhoBancos') IS NOT NULL 15 | DROP TABLE #TamanhoBancos; 16 | 17 | CREATE TABLE 18 | #TamanhoBancos( Banco sysname, TamanhoTotalPag int, TamanhoUsadoPag int ); 19 | 20 | EXEC sp_MSforeachdb ' 21 | USE [?]; 22 | 23 | INSERT INTO #TamanhoBancos 24 | SELECT 25 | db_name() 26 | ,SUM(size) 27 | ,SUM(FILEPROPERTY(name,''SpaceUsed'')) 28 | FROM 29 | sys.database_files 30 | ' 31 | 32 | select 33 | Banco 34 | ,sum(TamanhoTotalPag)/128.0 as Total 35 | ,sum(TamanhoUsadoPag)/128.0 as Usado 36 | from #TamanhoBancos 37 | 38 | GROUP BY 39 | Banco with rollup 40 | ; 41 | 42 | -------------------------------------------------------------------------------- /Database/TamanhoTodosBancos_2000+.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Descrição 7 | Uma versão do tamanho de todos os bancos que também funciona no SQL 2000 8 | O segredo é trocar por sysfiles. 9 | 10 | */ 11 | 12 | 13 | IF OBJECT_ID('tempdb..#TamanhoBancos') IS NOT NULL 14 | DROP TABLE #TamanhoBancos; 15 | 16 | CREATE TABLE 17 | #TamanhoBancos( Banco sysname, TamanhoTotalPag int, TamanhoUsadoPag int ); 18 | 19 | EXEC sp_MSforeachdb ' 20 | USE [?]; 21 | 22 | 23 | INSERT INTO #TamanhoBancos 24 | SELECT 25 | DB_NAME() 26 | ,SUM(size) 27 | ,SUM(FILEPROPERTY(name,''SpaceUsed'')) 28 | FROM 29 | sysfiles 30 | ' 31 | 32 | SELECT 33 | @@SERVERNAME as ServerName 34 | ,banco 35 | ,ISNULL(SERVERPROPERTY('ComputerNamePhysicalNetBIOS'),SERVERPROPERTY('MachineName')) as ComputerName 36 | --,count(*) as DatabaseCount 37 | ,TamanhoTotalPag/128 as Total 38 | ,TamanhoUsadoPag/128 as Usado 39 | FROM 40 | #TamanhoBancos 41 | ORDER BY 42 | Usado DESc 43 | ; 44 | 45 | -------------------------------------------------------------------------------- /Database/UsageSizeInfo.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Descrição 7 | Traz o tamanho e o último acesso (ESTIMADO) de cada banco. 
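The size comes from sys.database_files (collected via sp_MSforeachdb) and the last-access date from sys.dm_db_index_usage_stats (the max of last_user_seek/scan/lookup), so the estimate only reflects activity since the last instance restart, and databases with no tracked reads show a NULL UltimoUso.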
8 | 9 | */ 10 | 11 | 12 | IF OBJECT_ID('tempdb..#TamanhoBancos') IS NOT NULL 13 | DROP TABLE #TamanhoBancos; 14 | 15 | CREATE TABLE 16 | #TamanhoBancos( Banco sysname, TamanhoTotalPag int, TamanhoUsadoPag int ); 17 | 18 | EXEC sp_MSforeachdb ' 19 | USE [?]; 20 | 21 | INSERT INTO #TamanhoBancos 22 | SELECT 23 | db_name() 24 | ,SUM(size) 25 | ,SUM(FILEPROPERTY(name,''SpaceUsed'')) 26 | FROM 27 | sys.database_files 28 | ' 29 | 30 | SELECT 31 | Instancia = @@SERVERNAME 32 | ,Banco 33 | ,TamTotal = TamanhoTotalPag/128.0 34 | ,DataCriacao = D.create_date 35 | ,UltimoUso = LU.LastUse 36 | FROM 37 | #TamanhoBancos TB 38 | JOIN 39 | sys.databases D 40 | ON D.name = TB.Banco COLLATE Latin1_General_CI_AI 41 | OUTER APPLY 42 | ( 43 | SELECT 44 | DBName = DB_NAME(IUS.database_id) 45 | ,LastUse = MAX(I.LastUse) 46 | FROM 47 | sys.dm_db_index_usage_stats IUS 48 | CROSS APPLY 49 | ( 50 | SELECT 51 | LastUse = MAX(U.LastUserOp) 52 | FROM 53 | ( 54 | SELECT IUS.last_user_seek 55 | UNION ALL 56 | SELECT IUS.last_user_scan 57 | UNION ALL 58 | SELECT IUS.last_user_lookup 59 | ) U(LastUserOp) 60 | WHERE 61 | U.LastUserOp is not null 62 | ) I 63 | WHERE 64 | I.LastUse IS NOT NULL 65 | AND 66 | DB_NAME(IUS.database_id) = TB.Banco COLLATE Latin1_General_CI_AI 67 | GROUP BY 68 | IUS.database_id 69 | ) LU -------------------------------------------------------------------------------- /DatabaseMail/DatabaseMail_CriarProfile_DBA.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Descrição 7 | Script de exemplo de como criar um profiler no database mail. 8 | Não foi eu quem criou ele. 9 | Provavelmente peguei de alguma interface que gera ou de alguma fonte na internet. 10 | 11 | 12 | */ 13 | 14 | 15 | ------------------------------------------------------------- 16 | -- Database Mail Simple Configuration Template. 17 | -- 18 | -- This template creates a Database Mail profile, an SMTP account and 19 | -- associates the account to the profile. 20 | -- The template does not grant access to the new profile for 21 | -- any database principals. Use msdb.dbo.sysmail_add_principalprofile 22 | -- to grant access to the new profile for users who are not 23 | -- members of sysadmin. 24 | ------------------------------------------------------------- 25 | 26 | DECLARE @profile_name sysname, 27 | @profile_description nvarchar(256), 28 | @account_name sysname, 29 | @SMTP_servername sysname, 30 | @email_address NVARCHAR(128), 31 | @display_name NVARCHAR(128); 32 | 33 | --- ALTERAR OS DADOS AQUI: 34 | 35 | -- Profile name. Replace with the name for your profile 36 | SET @profile_name = 'DBA'; 37 | SET @profile_description = 'Profile default para ser usado pelos scripts de monitoramento do DBA' 38 | 39 | -- Account information. Replace with the information for your account. 40 | 41 | SET @account_name = 'SQL Server'; 42 | SET @SMTP_servername = 'Servidor SMTP'; 43 | SET @email_address = 'Email do Remetente'; 44 | SET @display_name = 'Nome do Remetente'; 45 | 46 | 47 | 48 | 49 | 50 | -- DAQUI PRA FRENTE, NÃO PRECISA ALTERAR NADA!!!! 51 | 52 | 53 | -- Verify the specified account and profile do not already exist. 
54 | IF EXISTS (SELECT * FROM msdb.dbo.sysmail_profile WHERE name = @profile_name) 55 | BEGIN 56 | RAISERROR('The specified Database Mail profile (%s) already exists.', 16, 1, @profile_name); 57 | GOTO done; 58 | END; 59 | 60 | IF EXISTS (SELECT * FROM msdb.dbo.sysmail_account WHERE name = @account_name ) 61 | BEGIN 62 | RAISERROR('The specified Database Mail account (%s) already exists.', 16, 1, @account_name) ; 63 | GOTO done; 64 | END; 65 | 66 | -- Start a transaction before adding the account and the profile 67 | BEGIN TRANSACTION ; 68 | 69 | DECLARE @rv INT; 70 | 71 | -- Add the account 72 | EXECUTE @rv=msdb.dbo.sysmail_add_account_sp 73 | @account_name = @account_name, 74 | @email_address = @email_address, 75 | @display_name = @display_name, 76 | @mailserver_name = @SMTP_servername; 77 | 78 | IF @rv<>0 79 | BEGIN 80 | RAISERROR('Failed to create the specified Database Mail account (%s): %d', 16, 1,@account_name,@rv) ; 81 | GOTO done; 82 | END 83 | 84 | 85 | 86 | -- Add the profile 87 | EXECUTE @rv=msdb.dbo.sysmail_add_profile_sp 88 | @profile_name = @profile_name 89 | ,@description = @profile_description 90 | ; 91 | 92 | IF @rv<>0 93 | BEGIN 94 | RAISERROR('Failed to create the specified Database Mail profile (%s): %d', 16, 1, @rv); 95 | ROLLBACK TRANSACTION; 96 | GOTO done; 97 | END; 98 | 99 | -- Associate the account with the profile. 100 | EXECUTE @rv=msdb.dbo.sysmail_add_profileaccount_sp 101 | @profile_name = @profile_name, 102 | @account_name = @account_name, 103 | @sequence_number = 1 ; 104 | 105 | IF @rv<>0 106 | BEGIN 107 | RAISERROR('Failed to associate the speficied profile (%s) with the specified account (%s): %d', 16, 1, @profile_name,@account_name,@rv) ; 108 | ROLLBACK TRANSACTION; 109 | GOTO done; 110 | END; 111 | 112 | COMMIT TRANSACTION; 113 | 114 | done: 115 | 116 | GO -------------------------------------------------------------------------------- /DatabaseMail/DatabaseMail_TestEmail.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Descrição 7 | Script para testar o envio de mail usando database mail. 8 | O teste é um simples envio. Ajuste o nome do profile e o email nos parâmetros abaixo. 9 | 10 | 11 | */ 12 | 13 | IF OBJECT_ID('msdb.dbo.sp_send_dbmail') IS NULL 14 | RETURN; 15 | 16 | DECLARE @HtmlFinal nvarchar(4000); 17 | 18 | SET @HtmlFinal = N' 19 | Este email foi enviado como um teste do servidor: '+CONVERT(nvarchar(500),@@SERVERNAME)+', 20 | ' 21 | 22 | EXEC msdb.dbo.sp_send_dbmail 23 | @profile_name = 'Nome do profile' 24 | ,@recipients = 'COLOQUE O EMAIL DE DESTINO AQUI' 25 | ,@subject = N'TESTE DATABASE MAIL' 26 | ,@body = @HtmlFinal 27 | ,@body_format = 'HTML' 28 | 29 | 30 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM mcr.microsoft.com/powershell:ubuntu-22.04 2 | 3 | RUN apt update 4 | RUN apt install -y git 5 | 6 | RUN pwsh -NonInteractive -Command "Install-Module -Force powershai" 7 | 8 | WORKDIR /sqlserverlib-search 9 | COPY . . -------------------------------------------------------------------------------- /Dumps/LastDumps.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Descricao 7 | Retorna os crash dumps que ocorreram na instância e o intervalo, em dias, entre eles. 
8 | Usei muito especialmente com os dumps do sql 2019, onde precisava acompanhar o tempo que ficavm sem o dump após alguma ação. 9 | Quando saia um CU, eu atualizava o SQL e ficava de olho nessa query para saber se o dumps frequentes paravam de acontecer. 10 | Eu acompanhava pelos "DaysPassed" para saber o tempo que ficou sem dump e saber se tinhamos um novo recorde, o q poderia indicar correção. 11 | 12 | 13 | 14 | */ 15 | 16 | select 17 | d.* 18 | ,DaysPassed = DATEDIFF(dd,A.creation_time,D.creation_time) 19 | ,a.creation_time 20 | From 21 | ( 22 | SELECT 23 | d.filename 24 | ,d.creation_time 25 | FROM 26 | sys.dm_server_memory_dumps D 27 | 28 | union all 29 | 30 | SELECT 31 | NULL 32 | ,GETDATE() 33 | ) d 34 | CROSS APPLY ( 35 | SELECT TOP 1 36 | * 37 | FROM 38 | sys.dm_server_memory_dumps Da 39 | where 40 | da.creation_time < D.creation_time 41 | ORDER BY 42 | DA.creation_time DESC 43 | ) A 44 | order by 45 | DaysPassed desc -------------------------------------------------------------------------------- /Endpoints/endpoints.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/Endpoints/endpoints.sql -------------------------------------------------------------------------------- /Jobs/JobHistoryStatistics.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Descrição 7 | Obtém a quantidade de histórico (em linhas) por job! 8 | Já usei para achar qual job estava mais ocupando espaço nos logs 9 | 10 | */ 11 | SELECT 12 | JH.name 13 | ,JHS.* 14 | FROM 15 | ( 16 | SELECT 17 | job_id 18 | ,LogCount = count(*) 19 | FROM 20 | msdb..sysjobhistory 21 | GROUP BY 22 | job_id 23 | ) JHS 24 | INNER JOIN 25 | msdb..sysjobs JH 26 | ON JH.job_id = JHS.job_id 27 | ORDER BY 28 | LogCount desc -------------------------------------------------------------------------------- /Jobs/JobsXProxy.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/Jobs/JobsXProxy.sql -------------------------------------------------------------------------------- /Jobs/obter_ultimas_falhas.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Descrição 7 | Obtém informacoes dos jobs, cujo última execução foi falha. 8 | ATENÇÃO: Esse script pode demorar um pouco e dependendo da quantidade de histórico, pode causar alguma pressão no seu ambiente. 9 | Acredito que tenha um espaço para otimizações e não me lembro exatamente o porquê fiz usando row_number com partition. 
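The ROW_NUMBER() OVER (PARTITION BY J.name ORDER BY DataStart DESC) keeps only each job's most recent step execution (Rn = 1), and run_status = 0 then restricts that latest run to failures; the OUTER APPLY only concatenates the job's schedule names for display.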
10 | 11 | */ 12 | SELECT 13 | CONVERT(varchar(150),J.name) as NomeJOB 14 | ,CONVERT(varchar(150),j.step_name) as NomeSTEP 15 | ,CONVERT(datetime,j.DataStart) as DataStart 16 | ,CONVERT(varchar(8),J.Duracao) as Duracao 17 | ,CONVERT(varchar(500),STUFF(S.agendamentos,1,3,'')) as Agendamentos 18 | ,CONVERT(int,J.run_status) as run_status 19 | FROM 20 | ( 21 | SELECT 22 | * 23 | ,ROW_NUMBER() OVER(PARTITION BY J.name ORDER BY J.DataStart DESC) Rn 24 | FROM 25 | ( 26 | SELECT 27 | J.name 28 | ,J.job_id 29 | ,JH.step_name 30 | ,CONVERT(DATETIME, CONVERT(CHAR(8), run_date, 112) + ' ' 31 | + STUFF(STUFF(RIGHT('000000' + CONVERT(VARCHAR(8), run_time), 6), 5, 0, ':'), 3, 0, ':'), 121 ) DataStart 32 | ,STUFF(STUFF(RIGHT('000000'+CONVERT(varchar(8),JH.run_duration),6),3,0,':'),6,0,':') as Duracao 33 | ,JH.message 34 | ,JH.run_status 35 | FROM 36 | msdb.dbo.sysjobhistory JH 37 | INNER JOIN 38 | msdb.dbo.sysjobs J 39 | ON J.job_id = JH.job_id 40 | WHERE 41 | JH.step_id <> 0 42 | ) J 43 | ) J 44 | OUTER APPLY ( 45 | 46 | SELECT 47 | ' | '+S.name as 'data()' 48 | FROM 49 | msDb.dbo.sysjobschedules JS 50 | JOIN 51 | msdb.dbo.sysschedules S 52 | ON S.schedule_id = JS.schedule_id 53 | WHERE 54 | JS.job_id = J.job_id 55 | FOR XML PATH('') 56 | ) S(agendamentos) 57 | 58 | WHERE 59 | J.Rn = 1 60 | AND 61 | J.run_status = 0 62 | ORDER BY 63 | DataStart DESC -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /Misc/Fuckill.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Isso script foi uma leve brinacadeira para responder o post do Erik Darlking 8 | Eu mantive ele aqui apenas pela ideia de você pode executar coisas no proprio servidor, em outra sessao, apenas com t-sql 9 | Nao me lembro onde isso possa ser útil no dia a dia, mas, vai que tem algum maluco ai! 
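A ideia do truque, como dá para ver no código abaixo: criar um linked server apontando para o próprio @@SERVERNAME, habilitar RPC e disparar o KILL da própria sessão via EXEC(...) AT, ou seja, o comando acaba rodando em outra sessão da mesma instância.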
10 | 11 | Post do Erik: 12 | https://www.linkedin.com/posts/erik-darling-data_mind-your-business-sql-activity-7300627709641015297-OCqo 13 | */ 14 | 15 | declare @Me varchar(10) = @@spid 16 | declare @killMe varchar(100) = 'kill '+@me; 17 | 18 | -- exec sp_dropserver 'Fuckill' 19 | EXEC sp_addlinkedserver @server = N'Fuckill',@srvproduct=N'' 20 | ,@provider=N'SQLNCLI11',@datasrc=@@SERVERNAME,@catalog=N'master' 21 | exec sp_serveroption 'Fuckill',N'rpc','true'; 22 | exec sp_serveroption 'Fuckill',N'rpc out','true'; 23 | EXEC sp_addlinkedsrvlogin N'Fuckill', @locallogin = NULL , @useself = N'True', @rmtuser = N'' 24 | 25 | select @@spid 26 | exec(@killMe) at Fuckill 27 | 28 | -------------------------------------------------------------------------------- /Misc/HighQueryCompilationTime.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/Misc/HighQueryCompilationTime.sql -------------------------------------------------------------------------------- /Misc/I-Like-Memory.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/Misc/I-Like-Memory.sql -------------------------------------------------------------------------------- /Misc/NotIn-Null.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | O objetivo desse código é mostrar como o NULL pode ser prejudicial ao ser usado em filtros, especialmente NOT IN! 8 | 9 | */ 10 | 11 | 12 | drop table if exists #vendas; 13 | create table #vendas ( 14 | DataVenda datetime 15 | ,TotalVenda decimal(10,5) 16 | ,ip varchar(100) 17 | ) 18 | 19 | drop table if exists #regras; 20 | create table #regras ( 21 | Hora int 22 | ,ip varchar(100) 23 | ,email varchar(100) 24 | ,dns varchar(100) 25 | ,username varchar(100) 26 | ) 27 | 28 | insert into #vendas 29 | values 30 | ('20240101 15:41',1500,'1.2.3.4') 31 | ,('20240101 16:41',1500,'1.2.3.4') 32 | ,('20240102 16:00',3000,'4.5.6.7') 33 | ,('20240102 15:39',150,'1.1.1.1') 34 | ,('20240102 15:39',3000,'2.2.2.2') 35 | 36 | insert into #regras(Hora,ip,email,dns,username) 37 | values 38 | (15,'1.1.1.1',null,null,null) 39 | ,(16,null,null,'*.test',null) 40 | 41 | select 42 | Dia = convert(date,DataVenda) 43 | ,TotalVenda = sum(TotalVenda) 44 | from 45 | #vendas v 46 | where 47 | v.ip not in ( 48 | select b.ip from #regras b 49 | where b.Hora = datepart(hh,DataVenda) 50 | ) 51 | group by 52 | convert(date,DataVenda) 53 | 54 | -- fix 1: is not null 55 | select 56 | Dia = convert(date,DataVenda) 57 | ,TotalVenda = sum(TotalVenda) 58 | from 59 | #vendas v 60 | where 61 | v.ip not in ( 62 | select b.ip from #regras b 63 | where b.Hora = datepart(hh,DataVenda) 64 | and b.ip is not null 65 | ) 66 | group by 67 | convert(date,DataVenda) 68 | 69 | -- fix 2: isnull 70 | select 71 | Dia = convert(date,DataVenda) 72 | ,TotalVenda = sum(TotalVenda) 73 | from 74 | #vendas v 75 | where 76 | v.ip not in ( 77 | select isnull(b.ip,'') from #regras b 78 | where b.Hora = datepart(hh,DataVenda) 79 | ) 80 | group by 81 | convert(date,DataVenda) 82 | 83 | -- fix 3: not exists 84 | select 85 | Dia = convert(date,DataVenda) 86 | ,TotalVenda = sum(TotalVenda) 87 | from 88 | #vendas v 89 | where 90 | not exists ( 91 | select * from #regras b 92 | where b.Hora = datepart(hh,DataVenda) 93 | 
and v.ip = b.ip 94 | ) 95 | group by 96 | convert(date,DataVenda) 97 | 98 | -- fix 4: ANSI_NULLS (dificilmente você vai querer mexer nisso, até porque é deprecated! Mas está aí só pra saber!) 99 | SET ANSI_NULLS OFF; 100 | GO 101 | 102 | select 103 | Dia = convert(date,DataVenda) 104 | ,TotalVenda = sum(TotalVenda) 105 | from 106 | #vendas v 107 | where 108 | v.ip not in ( 109 | select b.ip from #regras b 110 | where b.Hora = datepart(hh,DataVenda) 111 | ) 112 | group by 113 | convert(date,DataVenda) 114 | GO 115 | 116 | SET ANSI_NULLS ON; 117 | go 118 | 119 | 120 | 121 | 122 | -------------------------------------------------------------------------------- /Misc/ProcurarTexto.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Procura uma string em todas as colunas de texto do banco atual. 8 | IMPORTANTE: Conecte-se como DAC, pois ele irá buscar em tabelas internas acessíveis apenas como DAC! 9 | IMPORTANTE 2: O script pode causar uma enorme pressão e table scans. 10 | 11 | Eu uso bastante esse script para descobrir onde o SQL Server pode guardar alguma informação de metadado. 12 | E também já usei pra ajudar clientes a descobrirem em qual tabela um determinado valor está. 13 | O mais seguro é rodar em um backup restaurado em um ambiente de testes, mas eu já rodei em produção, sabendo dos riscos e monitorando. 14 | Cada caso é um caso, e eu só recomendo você rodar em produção se tiver experiência e conhecimento do ambiente. 15 | */ 16 | DECLARE 17 | @PalavraProcurar nvarchar(max) = '' --> COLOQUE AQUI O TRECHO DA STRING QUE DESEJA PESQUISAR 18 | DECLARE 19 | @CMD NVARCHAR(MAX); 20 | 21 | SELECT 22 | @CMD = REPLACE('$'+dado,'$UNION ALL','') 23 | FROM 24 | ( 25 | SELECT 26 | 'UNION ALL SELECT top 1 convert(varchar(8000),'+quotename(C.NAME)+') collate LATIN1_GENERAL_CI_AI AS Valor '+ 27 | ',convert(VARCHAR(500),'+QUOTENAME(C.name,'''')+' ) collate LATIN1_GENERAL_CI_AI AS coluna '+ 28 | ',convert(VARCHAR(500),'+QUOTENAME(object_schema_name(C.object_id)+'.'+object_name(C.object_id),'''')+' ) collate LATIN1_GENERAL_CI_AI AS tabela '+ 29 | ',DB_NAME() collate LATIN1_GENERAL_CI_AI AS banco '+ 30 | 31 | 'FROM '+quotename(object_schema_name(C.object_id))+'.'+quotename(object_name(C.object_id))+ 32 | ' WHERE CONVERT(nvarchar(max),'+quotename(C.name)+') collate LATIN1_GENERAL_CI_AI like '+quotename('%'+@PalavraProcurar+'%','''')+' collate LATIN1_GENERAL_CI_AI ' as 'text()' 33 | FROM 34 | sys.all_columns C 35 | inner join 36 | sys.all_objects T 37 | on T.object_id = C.object_id 38 | and T.type_desc IN ('USER_TABLE','SYSTEM_TABLE','INTERNAL_TABLE') 39 | WHERE 40 | type_name(C.system_type_id) IN ('char','text','nvarchar','varchar','ntext','sql_variant') 41 | for xml path('') 42 | ) T(dado) 43 | 44 | print @cmd 45 | exec(@CMD) -------------------------------------------------------------------------------- /Misc/sp.ForceOledbWait.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/Misc/sp.ForceOledbWait.sql -------------------------------------------------------------------------------- /Modulos/DependencyChain.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Descrição 7 | Esse script já me ajudou muitas e muitas vezes para achar as dependências de um objeto, como view, procedure, etc.
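(Exemplo de uso, apenas ilustrativo: troque o OBJECT_ID('schema.NomeTabela') no filtro da CTE abaixo pelo objeto que deseja analisar, por exemplo OBJECT_ID('dbo.vwVendasConsolidadas') -- nome hipotético.)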
8 | Você passa o nome da view, e então ele usa a DMV sys.sql_expression_dependencies recursivamente para achar todas as dependências. 9 | O legal é que ele acha as dependências em outros bancos, então é bem útil. 10 | 11 | As colunas retornadas são: 12 | referenced_id = object_id da dependência 13 | ReferencedObject = nome do objeto referenciado (sem o esquema) 14 | type_desc = Tipo do objeto referenciado 15 | RefLevel = O nível de dependência. 1 é dependência direta (está direto no código do objeto que você colocou). 16 | 2, ele encontrou em um dos objetos de nível 1 17 | 3, ele encontrou em um dos objetos de nível 2 e por aí vai! 18 | RefChain = Uma representação visual da dependência, partindo do objeto que você passou até este atual. 19 | Com isso você consegue ver todos os objetos referenciados até chegar neste da linha respectiva. 20 | 21 | */ 22 | 23 | 24 | ;WITH Depends AS ( 25 | SELECT 26 | E.referencing_id 27 | ,E.referenced_id 28 | ,E.referenced_database_name 29 | ,ReferencedObject = CONVERT(nvarchar(1000),ISNULL(E.referenced_server_name+'.','')+ISNULL(E.referenced_database_name+'.','')+ISNULL(E.referenced_schema_name+'.','')+ISNULL(E.referenced_entity_name,'')) 30 | ,RefLevel = CONVERT(bigint,1) 31 | ,RefChain = CONVERT(nvarchar(max), OBJECT_NAME(E.referencing_id)+'->'+E.referenced_entity_name ) 32 | FROM 33 | sys.sql_expression_dependencies E 34 | WHERE 35 | E.referencing_id = OBJECT_ID('schema.NomeTabela') 36 | 37 | 38 | UNION ALL 39 | 40 | SELECT 41 | E.referencing_id 42 | ,E.referenced_id 43 | ,E.referenced_database_name 44 | ,ReferencedObject = CONVERT(nvarchar(1000),ISNULL(E.referenced_server_name+'.','')+ISNULL(E.referenced_database_name+'.','')+ISNULL(E.referenced_schema_name+'.','')+ISNULL(E.referenced_entity_name,'')) 45 | ,RefLevel = CONVERT(bigint,D.RefLevel + 1) 46 | ,RefChain = CONVERT(nvarchar(max), D.RefChain+'->'+E.referenced_entity_name ) 47 | FROM 48 | Depends D 49 | INNER JOIN 50 | sys.sql_expression_dependencies E 51 | ON E.referencing_id = D.referenced_id 52 | WHERE 53 | E.referenced_minor_id = 0 54 | 55 | ) 56 | SELECT 57 | D.referenced_id 58 | ,D.ReferencedObject 59 | ,O.type_desc 60 | ,D.RefLevel 61 | ,D.RefChain 62 | FROM 63 | Depends D 64 | LEFT JOIN 65 | sys.objects O 66 | ON (D.referenced_database_name IS NULL OR D.referenced_database_name = DB_NAME()) 67 | AND D.referenced_id = O.object_id 68 | ORDER BY 69 | D.RefLevel 70 | -------------------------------------------------------------------------------- /Modulos/ProcuraEmTodosModulos.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rrg92/sqlserver-lib/d1f734b921061ce26abc290bf0404f5f67d0dcab/Modulos/ProcuraEmTodosModulos.sql -------------------------------------------------------------------------------- /PowerAlerts/ErrorInfo.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | 8 | Obtém informações sobre erros que ocorreram no banco! 9 | O Power Alerts coleta os erros periodicamente. 10 | O Job é o 'PowerRoutine - Load XEvent Database Error' 11 | Se o erro que você está procurando ocorreu recentemente, é importante rodar o job para garantir que ele coletou o mais recente. 12 | Por padrão, esse job roda somente na madrugada para minimizar impactos.
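Se precisar rodar o job manualmente, um exemplo (assumindo o nome padrão acima; ajuste ao seu ambiente): EXEC msdb.dbo.sp_start_job @job_name = N'PowerRoutine - Load XEvent Database Error';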
13 | Ao rodar no meio de expediente, monitore a execução para garantir que ele não esteja atrapalhando algum outro processo, pois, 14 | dependendo da quantidade de erros gerados, ele pode consumir um recurso de CPU significativo. 15 | 16 | */ 17 | USE Traces --> Geralmente o Power Alerts é instalado num banco chamado Traces 18 | GO 19 | 20 | --> Primeiro identifique o id (LogId) do erro 21 | SELECT 22 | * 23 | FROM 24 | PowerRoutine_Log_DB_Error 25 | where 26 | err_timestamp >= DATEADD(HH,-24,GETDATE()) --> Ajuste o filtro conforme queira 27 | 28 | --> Traduz a stack de um erro (se aconteceu em procedure que é chamada por outras, etc.) 29 | -- isso aqui traz exatamente onde ocorreu (incluindo a linha na proc ou batch original)! 30 | select 31 | Query = Stmt 32 | ,ProcName 33 | ,NumLinha = FrameLine 34 | from 35 | vwPowerRoutine_DbErrorFrames 36 | where 37 | LogId = 4725078 --> Obter o LogId da query acima! 38 | order by 39 | LogId,FrameLevel desc -------------------------------------------------------------------------------- /PowerAlerts/README.md: -------------------------------------------------------------------------------- 1 | # Scripts do Power Alerts 2 | 3 | O [Power Alerts] é uma solução de monitoramento escrita em T-SQL pela [Power Tuning]. 4 | São mais de 80 mil linhas de código e vários alertas para monitorar o máximo de itens de uma instância SQL Server! 5 | 6 | Este diretório contém scripts que eu fui usando (ou uso até hoje) em minhas análises em instâncias que possuem o Power Alerts. 7 | 8 | Os scripts aqui não são, necessariamente, padrões da Power Tuning; são scripts que eu, enquanto DBA de um ambiente com esta solução, usei para me ajudar a encontrar algo. 9 | 10 | O Power Alerts, além de alertar quando há um problema, também faz uma série de coletas! 11 | Então, diante de um problema, saber consultar essas informações pode ajudar o DBA a identificar problemas e responder muito mais rapidamente a incidentes! 12 | 13 | ![Power Alerts](https://poweralerts.com.br/wp-content/uploads/2023/08/Power-Alerts-Alerta-Log-Full.png) 14 | ![Power Alerts](https://poweralerts.com.br/wp-content/uploads/2023/09/Power-Reports.png) 15 | 16 | # Um pouco mais sobre o Power Alerts 17 | 18 | O Power Alerts começou como scripts SQL gratuitos que o [Fabrício Lima] criou para ajudar na consultoria dele, há mais de 10 anos. 19 | Ele viu que empresas, principalmente de pequeno porte, eram muito mais abertas a uma solução de monitoramento simples, em que era só chegar e "dar um F5" no SQL para começar a monitorar, ao invés de ter que prover infraestrutura, servidor, etc. 20 | Então, aqueles scripts viraram a principal ferramenta de monitoramento de muitas empresas com SQL Server pelo Brasil! 21 | 22 | A ideia é simples: instale no seu SQL Server e ele começa a coletar várias coisas e monitora diversos itens, enviando alertas por e-mail quando algo atinge o threshold. 23 | 24 | A versão 3, a qual chamamos de Power Alerts v3, manteve essa mesma essência, ao mesmo tempo que adicionamos muito mais elementos e rotinas, o que ajudou a tornar essa ferramenta essencial para quem tem SQL Server! 25 | Hoje, o Power Alerts, além de monitorar diversos aspectos, realiza várias coletas! 26 | Quem é DBA há algum tempo sabe o ouro que é você ter coletas periódicas: queries, erros, tamanho, etc. 27 | E a cada nova versão, podem surgir novas rotinas que ajudam a trazer mais e mais informações! 28 | O Power Alerts é completo assim porque ele é uma solução criada e mantida por DBA para DBAs...
29 | Portanto, ele vai atingir em cheio as dores de um ambiente SQL. 30 | 31 | Uma outra grande vantagem de o Power Alerts ser centralizado no T-SQL é a portabilidade e flexibilidade. 32 | Todos os dados que são gerados no e-mail ficam em tabelas, que qualquer DBA pode consultar à sua livre disposição. 33 | É só pensar nas DMVs do SQL: o que seria do DBA sem essas DMVs para que ele possa fazer comandos em cima delas e criar o monitoramento? 34 | O Power Alerts estende isso, trazendo os dados relevantes dessas DMVs para consulta histórica! 35 | E, como estão em tabelas, você consegue ler isso e disponibilizar em qualquer aplicação que consiga se conectar a um SQL Server! 36 | 37 | Eu, Rodrigo, pude participar da elaboração do core dessa versão, onde trouxemos diversas melhorias de performance e funcionalidades. 38 | Uma delas é a possibilidade de enviar gráficos diretamente do SQL, complementando os relatórios e alertas gerados pela ferramenta. 39 | 40 | Por falar em e-mail, sim, é assim que o Power Alerts notifica usuários: e-mail. 41 | 42 | Mas Rodrigo, em pleno 2025, vocês ainda enviam e-mail? 43 | Claro! Quem não recebe e-mail hoje em dia? Assim como o próprio SQL Server é uma tecnologia "antiga", mas bastante usada, o e-mail tem todo o seu potencial. 44 | É simples, extremamente compatível e você pode acessar de qualquer dispositivo. 45 | 46 | Graças a isso, qualquer empresa com apenas um SQL já consegue ter acesso a um monitoramento mínimo e de qualidade, rico em informações textuais e visuais, apenas com um F5. 47 | Empresas grandes, que querem algo mais elaborado, podem ter acesso a um dashboard do Power BI, e em algum momento, o Power Alerts também terá integrações com outras ferramentas, como Grafana. 48 | O fato é que o Power Alerts é para qualquer ambiente, do pequeno ao grande: ele vai ajudar o DBA a ser mais proativo e ainda o auxilia quando precisar ser reativo! 49 | 50 | ## Power Alerts vs Zabbix 51 | 52 | Falando em Grafana... o Power Alerts não é um concorrente do seu Zabbix (ou similar), ok? 53 | Ele é um complemento! Imagine que o Power Alerts seja o plugin que seu Zabbix precisa para ter o monitoramento mais completo de SQL. 54 | 55 | Geralmente, os templates do Zabbix são bem padrão, a menos que você tenha gastado horas customizando para a sua empresa. 56 | Os admins de Zabbix (e infra em geral) têm vários problemas para se preocupar e diferentes tipos de ativos para monitorar e, obviamente, não têm a expertise de um DBA para monitorar os itens certos. 57 | Eu digo isso com tranquilidade pois já trabalhei em uma multinacional em que usamos Zabbix para monitorar o SQL e eu fui o responsável por customizar esse monitoramento e contornar as diversas barreiras que o Zabbix criava! 58 | Inclusive, através desse monitoramento, gerávamos relatórios sensacionais, como KPIs específicos para o SQL, graças a essas customizações associadas ao poder do Zabbix. 59 | 60 | Com o Power Alerts, você consegue ter um "agente" dentro do seu SQL coletando e gerando métricas. 61 | Essas coletas podem ser enviadas ao Zabbix. 62 | Além disso, a estrutura padrão de alertas do Power Alerts foi pensada justamente para ser facilmente adaptada ao Zabbix. 63 | O Power Alerts é orientado a alertas... Cada alerta possui severidade (familiar?). 64 | Essa simples estrutura permite que você use o Power Alerts com o seu Zabbix! 65 | 66 | Em algum momento, a Power Tuning deve disponibilizar algo relacionado ao Zabbix, e digo isso porque eu fui encarregado de fazer essa ponte.
67 | Estou na fase de pesquisa e testes para trazer algo bem bacana! 68 | 69 | 70 | [Power Alerts]: https://poweralerts.com.br 71 | [Power Tuning]: https://powertuning.com.br 72 | [Fabrício Lima]: https://www.fabriciolima.net/blog/ 73 | 74 | 75 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![Progresso](https://progress-bar.xyz/21/?width=200&title=105%2F495%20scripts) 2 | # SQL Server Lib 3 | 4 | > [!TIP] 5 | > **NOVIDADE**: Pesquise o melhor script para sua necessidade com IA aqui: https://huggingface.co/spaces/rrg92/sqlserver-lib-assistant 6 | > Basicamente esse GitHub está indexado para que uma IA possa te ajudar a achar o que precisa! 7 | > É só descrever o que precisa! 8 | > Lembrando que isso é uma v1, então os resultados podem não ser precisos, e depende se o repo tem um script que tem relação com o assunto! 9 | > Em breve faço um post explicando como tudo isso funciona! 10 | 11 | Olá! Eu sou DBA SQL Server há mais de 10 anos e se tem uma coisa que me ajudou muito nessa jornada foi ter sempre um script pronto para me ajudar a investigar problemas! 12 | Sem isso, seria muito mais difícil responder rápido a muitos incidentes, salas de crises e qualquer outro problema que tive com SQL Server. 13 | 14 | Como a comunidade técnica me ajudou a construir isso, através de blogs que li, fóruns, dicas, etc., eu acho justo compartilhar toda essa biblioteca que eu montei ao longo dos anos! 15 | 16 | São mais de 490 scripts (e crescendo), e tem de tudo: desde scripts simples para estressar a CPU até scripts que consultam informações mais avançadas. 17 | ![image](https://github.com/user-attachments/assets/24534a25-c297-4652-85d1-017485112ef9) 18 | 19 | --- 20 | 21 | Porém, antes de publicar, eu preciso revisar cada script. Tirar algum filtro que ficou com alguma info sensível, etc. 22 | Eu poderia fazer isso com IA, mas resolvi fazer melhor: Toda semana vou escolher um ou um grupo de scripts, revisar, adicionar alguns comentários e publicar aqui. 23 | E, devo postar algo no [blog TheSqlTimes](https://thesqltimes.com) ou no [linkedin] falando um pouco mais sobre os scripts. 24 | 25 | --- 26 | 27 | Vou fazer isso até que tudo esteja sincronizado aqui, e após isso, é só manter e adicionar novos (pois até hoje eu adiciono ou ajusto algum script desse diretório). 28 | E, o mais legal é que você pode me ajudar a manter isso também: Se tiver alguma correção, ideia, etc., fique a vontade para submeter um Pull Request, que eu reviso. 29 | 30 | [linkedin]: https://www.linkedin.com/in/rodrigoribeirogomes/ 31 | 32 | 33 | # Dicas (Awesome SQL Server) 34 | 35 | Eu vou aproveitar este repositório e deixar aqui uma série de links de referência e dicas de ferramentas (fazer algo parecido com as `awersome list do github`). 
36 | 37 | > [!NOTE] 38 | > Você pode submeter um PR alterando com alterações sugeridas nessa lista 39 | > Irei revisar cada modificação sugerida 40 | 41 | 42 | ## Ferramentas 43 | 44 | - [SQL Server Management Studio](https://learn.microsoft.com/en-us/sql/ssms/download-sql-server-management-studio-ssms?view=sql-server-ver16)(SSMS) 45 | - [SQL Server Developer/Evaluation](https://www.microsoft.com/en-us/sql-server/sql-server-downloads) 46 | 47 | 48 | ## Conteúdo 49 | 50 | - [Blog Oficial SQL Server](https://www.microsoft.com/en-us/sql-server/blog/) 51 | - [Blog Oficial DEV Azure SQL](https://devblogs.microsoft.com/azure-sql/) 52 | - [Hub de Blogs pt-BR](https://mssqlserver.com.br/) 53 | - [Dirceu Resende](https://dirceuresende.com.br) 54 | - [The SQL Times](https://thesqltimes.com) 55 | - [Fabrício Lima](https://www.fabriciolima.net/blog/) 56 | - [Luiz Lima](https://luizlima.net/luiz-vitor-foto-blog/) 57 | - [Power Tuning](https://powertuning.com.br/blog/) 58 | - [SQL Server Version List](https://sqlserverbuilds.blogspot.com/) 59 | - [Porto SQL](https://portosql.wordpress.com/) 60 | - [Diego Nogare](https://diegonogare.net/) 61 | - [Reginaldo Silva](https://blogdojamal.wordpress.com/) 62 | - [Simple Talk](https://www.red-gate.com/simple-talk/) 63 | - [SQL Server Execution Plan Reference](https://sqlserverfast.com/epr/) 64 | - [Éder Lelis](https://ederlelis.com.br/blog/) 65 | - [Edvaldo Castro](https://edvaldocastro.com/) 66 | - [Gustavo Maia](https://gustavomaiaaguiar.wordpress.com/) 67 | - [Blog do Luti](https://luticm.blogspot.com/) 68 | -------------------------------------------------------------------------------- /Schemas/ChangeSchemaOwners.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Descrição 7 | Script simpels para gerar o comando de ALTER OWNER dos schemas! 8 | 9 | */ 10 | 11 | USE master 12 | GO 13 | 14 | IF OBJECT_ID('tempdb..#SchemasOwners') IS NOT NULL 15 | DROP TABLE #SchemasOwners; 16 | 17 | --> Query apenas para gerar a tabela temporaria com a estrutura abaixo. 18 | SELECT 19 | S.name as NomeS 20 | ,P.name as NomeP 21 | ,DB_NAME() as Banco 22 | INTO 23 | #SchemasOwners 24 | FROM 25 | sys.schemas S 26 | JOIN 27 | sys.database_principals P 28 | ON P.principal_id = S.principal_id 29 | WHERE 30 | 1 = 2 31 | 32 | EXECUTE sp_MSforeachdb ' 33 | USE ?; 34 | 35 | INSERT INTO 36 | #SchemasOwners 37 | SELECT 38 | S.name 39 | ,P.name 40 | ,DB_NAME() 41 | FROM 42 | sys.schemas S 43 | JOIN 44 | sys.database_principals P 45 | ON P.principal_id = S.principal_id 46 | WHERE 47 | P.principal_id > 4 48 | AND 49 | P.is_fixed_role = 1 50 | ' 51 | 52 | SELECT 53 | 'USE '+SO.Banco+';ALTER AUTHORIZATION ON SCHEMA::'+QUOTENAME(SO.NomeS)+' TO '+QUOTENAME(SO.NomeS)+';' 54 | FROM 55 | #SchemasOwners SO 56 | WHERE 57 | SO.NomeP <> SO.NomeS; 58 | 59 | -------------------------------------------------------------------------------- /SqlLibEmbeddings/README.md: -------------------------------------------------------------------------------- 1 | # Embeddings do SQL Lib 2 | 3 | Aproveitando o boom e estudos de AI, resolvi indexar todos esses scripts para que seja fáceis de ser procurados por AI. 4 | Todos os scripts pertinentes a esse projeto, irei colocar aqui nesse diretório, o que servirá como guia! 5 | 6 | Como é um projeto público, eu optei por tentar usar o máxio de recursos free ou barato possível. 
7 | Portanto, a estrutura do projeto é a seguinte: 8 | 9 | - Irei usar o Azure SQL database, que já tem um suporte de vector (tentei usar o FREE Offer mas já nos testes expirou, então peguei por DTU). 10 | - Sempre que este repositório for modificado, ele irá disparar um github action para atualizar o banco 11 | - Um Space no Hugging Face vai me permitir consultar os scripts, conforme texto do usuário, usando algum serviço de LLM. 12 | 13 | Com isso, eu consigo usar 100% de tecnologias com muito baixo custo e mantenho todo o código público! 14 | 15 | Este diretório contém todos os scripts SQL que irei uar no Azure SQL Database 16 | 17 | # Estrutura do Banco 18 | 19 | O banco terá uma tabela chamada Scripts, que irá conter todo o conteúdo dos scrips gerados. 20 | Junto com o conteúdo, irei armazenar o caminho relativo ao root do projeto no GitHub. 21 | E, para finalizar, uma coluna com os embeddings será usada para calcular os embeddings dos scripts. 22 | 23 | Com isso, conseguiremos pesquisar usando os recuross de vector do sql! 24 | Como ó esperado é menos de 1000 linhas, o que é relativamente pouco, então, o sql deve atender bem! 25 | 26 | 27 | Todo o código e ferramenta da pesquisa estão nesse space do Hugging Face: https://huggingface.co/spaces/rrg92/sqlserver-lib-assistant 28 | 29 | 30 | 31 | 32 | 33 | 34 | -------------------------------------------------------------------------------- /SqlLibEmbeddings/SqlOperations.ps1: -------------------------------------------------------------------------------- 1 | <# 2 | Author: Rodrigo Ribeiro Gomes (github @rrg92) 3 | Free to use, but MUST keep this header and reference. 4 | #> 5 | 6 | 7 | function SqlClient { 8 | param( 9 | $SQL 10 | ,$ServerInstance = $Server 11 | ,$Database = $Database 12 | ,$AppName = $DefaultSqlAppName 13 | ,$User = $DefaultSqlUser 14 | ,$Password = $DefaultSqlPassword 15 | ,$connection = $null 16 | ,[switch]$AllResults 17 | ,[switch]$KeepConnection 18 | ,[switch]$NoPooling 19 | ) 20 | 21 | $ErrorActionPreference = "Stop"; 22 | 23 | 24 | 25 | if($connection){ 26 | if($connection -isnot [Data.SqlClient.SqlConnection]){ 27 | throw "value informed in -Connection parameter is not a sqlconnection object" 28 | } 29 | 30 | $NewConex = $connection 31 | } else { 32 | $AuthString = "Integrated Security=True"; 33 | 34 | if($User){ 35 | 36 | if($User -is [pscredential]){ 37 | $User = $User.GetNetworkCredential().UserName 38 | $Password = $User.GetNetworkCredential().Password 39 | } 40 | 41 | $AuthString = @( 42 | "User Id=$User" 43 | "Password=$Password" 44 | ) 45 | } 46 | 47 | $ConnectionStringParts = @( 48 | "Server=$ServerInstance" 49 | "Database=$database" 50 | $AuthString 51 | "APP=$DefaultSqlAppName" 52 | ) 53 | 54 | if($NoPooling){ 55 | $ConnectionStringParts += "Pooling=false"; 56 | } 57 | 58 | $NewConex = New-Object System.Data.SqlClient.SqlConnection 59 | $NewConex.ConnectionString = $ConnectionStringParts -Join ";" 60 | $NewConex.Open(); 61 | } 62 | 63 | $DataSet = New-Object System.Data.DataSet; 64 | 65 | try { 66 | $commandTSQL = $NewConex.CreateCommand() 67 | $commandTSQL.CommandTimeout = 0; 68 | $ReaderWrapper = @{reader=$null} 69 | $commandTSQL.CommandText = $SQL; 70 | $ReaderWrapper.reader = $commandTSQL.ExecuteReader(); 71 | 72 | while(!$ReaderWrapper.reader.IsClosed){ 73 | $DataSet.Tables.Add().Load($ReaderWrapper.reader); 74 | } 75 | 76 | } finally { 77 | if(!$KeepConnection){ 78 | $NewConex.Dispose(); 79 | } 80 | } 81 | 82 | 83 | 84 | if($KeepConnection){ 85 | return New-Object PSObject -Prop @{ 86 | 
connection = $NewConex 87 | results = $DataSet 88 | } 89 | } else { 90 | if(!$AllResults){ 91 | return @($DataSet.Tables[0].Rows); 92 | } 93 | 94 | return $DataSet; 95 | } 96 | 97 | 98 | } 99 | 100 | function SqlBulkInsert { 101 | [CmdletBinding()] 102 | param( 103 | $DataTable 104 | ,$SqlTable = $null 105 | ,$ServerInstance = $Server 106 | ,$Database = $Database 107 | ,$AppName = $DefaultSqlAppName 108 | ,$User = $DefaultSqlUser 109 | ,$Password = $DefaultSqlPassword 110 | ,$ColumnMapping = @{} 111 | ,$PreSQL 112 | ,$PostSQL 113 | ,[switch]$CatchErrors 114 | ) 115 | 116 | $ErrorActionPreference = "Stop"; 117 | $AuthString = "Integrated Security=True"; 118 | 119 | if($User){ 120 | 121 | if($User -is [pscredential]){ 122 | $User = $User.GetNetworkCredential().UserName 123 | $Password = $User.GetNetworkCredential().Password 124 | } 125 | 126 | $AuthString = @( 127 | "User Id=$User" 128 | "Password=$Password" 129 | ) 130 | } 131 | 132 | $ConnectionStringParts = @( 133 | "Server=$ServerInstance" 134 | "Database=$database" 135 | $AuthString 136 | "APP=$DefaultSqlAppName" 137 | ) 138 | 139 | 140 | $BulkInserts = @() 141 | 142 | $NewConex = New-Object System.Data.SqlClient.SqlConnection 143 | $NewConex.ConnectionString = $ConnectionStringParts -Join ";" 144 | 145 | 146 | if($DataTable -isnot [hashtable]){ 147 | $DataTable = @{ 148 | "$SqlTable" = $DataTable 149 | } 150 | } 151 | 152 | foreach($DestTable in $DataTable.GetEnumerator()){ 153 | $DestTableName = $DestTable.key 154 | $DataTableList = $DestTable.value; 155 | 156 | #hack for workaround some shit on powershell ... 157 | <# 158 | if($DestTable.value.getType().FullName -eq 'System.Data.DataTable'){ 159 | $DataTableList = $DestTable.value 160 | } else { 161 | $DataTableList = $DestTable.value 162 | } 163 | #> 164 | 165 | 166 | if($DataTableList.__wasfrom2table__ -ne $true){ 167 | throw "SQLBULKINSERT: Datatable must be generated from Object2Table" 168 | } 169 | 170 | write-verbose " Setting up $DestTableName (SrcTablesCount:$($DataTableList.tables.count))"; 171 | 172 | $BulkCopy = New-Object System.Data.Sqlclient.SqlBulkCopy($NewConex); 173 | $BulkCopy.DestinationTableName = $DestTableName; 174 | 175 | $MyColMapping = $ColumnMapping[$DestTableName]; 176 | 177 | if(!$MyColMapping){ 178 | $MyColMapping = @{}; 179 | } 180 | 181 | $FirstTable = $DataTableList.tables[0].datatable 182 | 183 | foreach($DataTableCol in $FirstTable.Columns){ 184 | if(!$MyColMapping.Contains($DataTableCol.ColumnName)){ 185 | $MyColMapping[$DataTableCol.ColumnName] = $DataTableCol.ColumnName 186 | } 187 | } 188 | 189 | if($MyColMapping.count -eq 0){ 190 | throw "BULKMAPPING_NOTFOUND: $DestTableName" 191 | } 192 | 193 | foreach($ColMap in $MyColMapping.GetEnumerator()){ 194 | 195 | $TableColName = $ColMap.key 196 | $DestColName = $ColMap.value 197 | 198 | write-verbose " Mapping col $TableColName -> $DestTableName.$DestColName"; 199 | $null = $BulkCopy.ColumnMappings.Add($TableColName,$DestColName) 200 | } 201 | 202 | $BulkInserts += New-Object PSObject -Prop @{ 203 | BulkCopy = $BulkCopy 204 | DataTables = $DataTableList 205 | } 206 | 207 | } 208 | 209 | 210 | 211 | $Results = NEw-Object PsObject -Prop @{ 212 | pre = $null 213 | post = $null 214 | errors = @{ 215 | pre = $null 216 | bulk = @() 217 | post = $null 218 | } 219 | } 220 | 221 | try { 222 | $NewConex.Open() 223 | 224 | if($PreSQL){ 225 | 226 | $commandTSQL = $NewConex.CreateCommand() 227 | $commandTSQL.CommandTimeout = 0; 228 | $ReaderWrapper = @{reader=$null} 229 | $commandTSQL.CommandText = $PreSQL; 230 | 
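# PreSQL runs on the same connection right before the bulk copy, typically to prepare the target (for example, embed.ps1 passes 'TRUNCATE TABLE Scripts' and SqlGenerateBulkUpdate uses it to create the #BulkTemp staging table).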
231 | try { 232 | $ReaderWrapper.reader = $commandTSQL.ExecuteReader(); 233 | $DataSet = New-Object System.Data.DataSet; 234 | 235 | while(!$ReaderWrapper.reader.IsClosed){ 236 | $DataSet.Tables.Add().Load($ReaderWrapper.reader); 237 | } 238 | 239 | $Results.pre = $DataSet.Tables 240 | } catch { 241 | if(!$CatchErrors){ 242 | throw; 243 | } 244 | 245 | $Results.errors.pre = $_; 246 | } 247 | } 248 | 249 | foreach($Bulk in $BulkInserts){ 250 | $BulkCopy = $Bulk.BulkCopy; 251 | #[object[]]$BulkTableList = $Bulk.DataTables; 252 | 253 | write-verbose " DestTableCount: $($BulkTableList.count)"; 254 | 255 | $tabNum = 0; 256 | foreach($RawDataTable in $Bulk.DataTables.tables){ 257 | $tabNum++; 258 | $SrcTable = $RawDataTable.datatable; 259 | write-verbose " Writing table $tabNum to $($BulkCopy.DestinationTableName)" 260 | 261 | try { 262 | $BulkCopy.WriteToServer($SrcTable); 263 | } catch { 264 | if(!$CatchErrors){ 265 | throw; 266 | } 267 | 268 | $Results.errors.bulk += New-Object PSObject -Prop @{ 269 | Bulk = $Bulk 270 | TabNum = $tabNum 271 | Error = $_ 272 | } 273 | } 274 | 275 | 276 | write-verbose " Done!"; 277 | } 278 | } 279 | 280 | if($PostSQL){ 281 | 282 | $commandTSQL = $NewConex.CreateCommand() 283 | $commandTSQL.CommandTimeout = 0; 284 | $ReaderWrapper = @{reader=$null} 285 | $commandTSQL.CommandText = $PostSQL; 286 | 287 | try { 288 | $ReaderWrapper.reader = $commandTSQL.ExecuteReader(); 289 | $DataSet = New-Object System.Data.DataSet; 290 | 291 | while(!$ReaderWrapper.reader.IsClosed){ 292 | $DataSet.Tables.Add().Load($ReaderWrapper.reader); 293 | } 294 | 295 | $Results.post = $DataSet.Tables 296 | } catch { 297 | if(!$CatchErrors){ 298 | throw; 299 | } 300 | 301 | 302 | $Results.errors.post = $_; 303 | } 304 | } 305 | } finally { 306 | $NewConex.Dispose(); 307 | } 308 | 309 | return $Results; 310 | } 311 | 312 | function SqlMerge { 313 | param($TableName,[object[]]$Data, $IdCol, [switch]$UseDelete) 314 | 315 | if($Data[0] -is [hashtable]){ 316 | $PropList = $Data[0].keys; 317 | } else { 318 | $PropList = $Data[0].psobject.properties | %{ $_.name }; 319 | } 320 | 321 | 322 | $Cols = $PropList | sort 323 | $ColsList = $Cols -Join "," 324 | 325 | $ValuesClause = @(); 326 | $AllDeleteIds = @(); 327 | 328 | if($IdCol -and $Cols -notContains $IdCol){ 329 | throw "INVALIDCOL: $IdCol"; 330 | } 331 | 332 | @($Data) | %{ 333 | $CurrData = $_; 334 | $ValueList = @(); 335 | 336 | if($IdCol){ 337 | $AllDeleteIds += Ps2SqlValue $CurrData.$IdCol 338 | } 339 | 340 | $Cols | %{ 341 | $CurrVal = $CurrData.$_ 342 | $ValueList += Ps2SqlValue $CurrVal 343 | } 344 | 345 | $ValuesClause += "(" + ($ValueList -Join ",") + ")" 346 | } 347 | 348 | if($UseDelete){ 349 | $DeleteSQL = "DELETE FROM $TableName WHERE $IdCol IN ("+($AllDeleteIds -Join ",")+")" 350 | $sql = "INSERT INTO $TableName($ColsList) VALUES " + ($ValuesClause -Join ","); 351 | } else { 352 | $SetClauses = @(); 353 | $InsertValuesClause = @() 354 | 355 | $Cols | %{ 356 | 357 | if($_ -ne $IdCol){ 358 | $SetClauses += "$_ = S.$_"; 359 | } 360 | 361 | $InsertValuesClause += "S.$_"; 362 | } 363 | 364 | $mergeSQL = @( 365 | "MERGE" 366 | "$TableName t" 367 | "USING" 368 | "(" 369 | ("VALUES "+($ValuesClause -Join ",")) 370 | ") S($ColsList)" 371 | "ON" 372 | "t.$IdCol = S.$IdCol" 373 | "WHEN MATCHED THEN" 374 | "UPDATE SET " + ($SetClauses -join ",") 375 | "WHEN NOT MATCHED THEN" 376 | "INSERT ($ColsList) VALUES("+($InsertValuesClause -Join ",")+")" 377 | "; --> Must end with semicolon" 378 | ) -Join "`r`n" 379 | } 380 | 381 | return New-Object 
PsObject -Prop @{ delete = $DeleteSQL; insert = $sql; merge = $mergeSQL }; 382 | } 383 | 384 | function Ps2SqlValue { 385 | param($val) 386 | 387 | $SQLVal = ""; 388 | 389 | if($val -is [int]){ 390 | $SQLVal = $val 391 | } 392 | elseif($val -is [datetime]){ 393 | $SQLVal = "'"+$val.toString("yyyyMMdd HH:mm:ss.fff")+"'" 394 | } 395 | elseif($val -eq $null){ 396 | $SQLVal = "NULL" 397 | } 398 | elseif($val -is [bool]){ 399 | $SQLVal = [int]$val 400 | } 401 | else { 402 | $SQLVal = "'"+$val.replace("'","''")+"'" 403 | } 404 | 405 | return $SQLVal 406 | } 407 | 408 | function SqlBuildUpdate { 409 | param($TableName,[hashtable[]]$Data, [string]$IdCol) 410 | 411 | $Cols = @($Data[0].keys) | sort 412 | 413 | $AllUpdate = @(); 414 | 415 | if($IdCol){ 416 | if($Cols -notContains $IdCol){ 417 | throw "INVALIDCOL: $IdCol"; 418 | } 419 | 420 | $Cols = $Cols | ? { $_ -ne $IdCol } 421 | } 422 | 423 | 424 | @($Data) | %{ 425 | $CurrData = $_; 426 | $SetClause = @(); 427 | 428 | if($IdDeleteCol){ 429 | $AllDeleteIds += $CurrData[$IdDeleteCol] 430 | } 431 | 432 | $Cols | %{ 433 | $CurrVal = $CurrData[$_] 434 | $SQlVal = Ps2SqlValue $CurrVal 435 | $SetClause += "$_ = $SQLVal" 436 | } 437 | 438 | $Where = @(); 439 | 440 | if($IdCol){ 441 | $IdFilter = $CurrData[$IdCol]; 442 | $Where += "$Idcol = $IdFilter" 443 | } 444 | 445 | $AllUpdate += "UPDATE $TableName SET " + ($SetClause -Join ","); 446 | 447 | if($Where){ 448 | $AllUpdate += " WHERE " + ( $Where -Join " AND " ); 449 | } 450 | } 451 | 452 | 453 | return $AllUpdate -Join "`r`n"; 454 | } 455 | 456 | function SqlGenerateBulkUpdate { 457 | param($TableName,[object[]]$Data, [string]$IdCol) 458 | 459 | 460 | $PropList = $Data[0].psobject.properties | %{$_.Name}; 461 | 462 | $Cols = $PropList | ? { $_ -ne $IdCol } 463 | $AllUpdate = @(); 464 | 465 | 466 | $ColList = @($Cols + $Idcol) -Join "," 467 | $PreScript = "IF OBJECT_ID('tempdb..#BulkTemp') IS NOT NULL DROP TABLE #BulkTemp; SELECT TOP 0 $ColList INTO #BulkTemp FROM $TableName UNION ALL SELECT TOP 0 $ColList FROM $TableName" 468 | 469 | $SetClause = @($Cols | %{"$_ = T.$_"}) 470 | 471 | $PostScript = " 472 | UPDATE 473 | S 474 | SET 475 | "+($SetClause -Join ",")+" 476 | FROM 477 | #BulkTemp T 478 | Join 479 | $TableName S 480 | ON S.$IdCol = T.$IdCol 481 | " 482 | 483 | $DataTable = Object2Table $Data; 484 | 485 | return @{ 486 | PreSql = $PreScript 487 | DataTable = $DataTable 488 | SqlTable = '#BulkTemp' 489 | PostSQL = $PostScript 490 | } 491 | } 492 | 493 | 494 | function SqlGetDic { 495 | param($TableName, $KeyCol, $ValueCol) 496 | 497 | $Sql = "SELECT $KeyCol,$ValueCol FROM $TableName" 498 | 499 | $ResultSQL = SqlClient $sql; 500 | 501 | $HashResult = @{}; 502 | 503 | if($ResultSQL){ 504 | $ResultSQL | %{ 505 | $HashResult[$_.$KeyCol] = $_.$ValueCol 506 | } 507 | } 508 | 509 | return $HashResult 510 | } 511 | 512 | #Convert a array of objects to an data table. 513 | function Object2Table { 514 | [object[]]$AllTables = @(); 515 | 516 | foreach($a in $Args){ 517 | $Object = $a 518 | $Tab = New-Object System.Data.DataTable 519 | 520 | $AllTables += @{datatable = $Tab} 521 | 522 | #First... 
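# The first object's properties define the DataTable columns (name and type); the remaining objects are assumed to have the same shape.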
523 | $Object[0].psobject.properties | %{ 524 | $PropName = $_.name; 525 | $PropType = $_.TypeNameOfValue; 526 | 527 | $null = $Tab.Columns.Add($PropName,$PropType) 528 | } 529 | 530 | 531 | $Object | %{ 532 | 533 | $NewRow = $Tab.NewRow(); 534 | $Obj = $_; 535 | 536 | $_.psobject.properties | %{ 537 | $PropName = $_.Name; 538 | $NewRow[$PropName] = $Obj.$PropName -as $_.TypeNameOfValue 539 | } 540 | 541 | $Tab.Rows.Add($NewRow); 542 | } 543 | 544 | 545 | 546 | } 547 | 548 | 549 | 550 | #Because DataTable is not enumerable data type, is must return as array. 551 | return New-Object PSObject -Prop @{ 552 | tables = $AllTables 553 | __wasfrom2table__ = $true 554 | } 555 | } 556 | -------------------------------------------------------------------------------- /SqlLibEmbeddings/embed.ps1: -------------------------------------------------------------------------------- 1 | param( 2 | $ReuseEmbeddings = $null 3 | ,[switch]$AllFiles 4 | ) 5 | 6 | $ErrorActionPreference = "Stop" 7 | 8 | 9 | . "$PSScriptRoot/util.ps1" 10 | 11 | if($ReuseEmbeddings){ 12 | write-warning "reusing..."; 13 | $TableContent = $ReuseEmbeddings 14 | } else { 15 | [string]$CurrentDir = Get-Location; 16 | 17 | $Files = git ls-files --full-name | %{ 18 | @{ 19 | item = Get-Item "$CurrentDir/$_" 20 | RelPath = $_ 21 | } 22 | } | ? { $_.item.name -like "*.sql" } 23 | 24 | $ScriptData = @() 25 | 26 | # Index os scripts! 27 | foreach($file in $Files){ 28 | 29 | write-host "File: $($file.item)"; 30 | $FileContent = Get-Content -Raw $file.item; 31 | 32 | 33 | $EmbeddingContent = @( 34 | "Nome do Script: $($file.RelPath)" 35 | "Conteudo do script:" 36 | "$FileContent" 37 | ) -Join "`n" 38 | 39 | 40 | $Embeddings = GetEmbeddings $EmbeddingContent 41 | write-host "Embeddings: $($Embeddings.length)"; 42 | 43 | $ScriptData += [PsCustomObject]@{ 44 | RelPath = $file.RelPath 45 | ChunkNum = 0 46 | ChunkContent = $FileContent 47 | embeddings = ($Embeddings | ConvertTo-Json -Compress) 48 | } 49 | 50 | } 51 | 52 | $TableContent = $ScriptData; 53 | 54 | } 55 | 56 | if(!$TableContent){ 57 | write-warning "Nothing to index!"; 58 | return; 59 | } 60 | 61 | 62 | dbulk $TableContent "Scripts" -pre "TRUNCATE TABLE Scripts"; -------------------------------------------------------------------------------- /SqlLibEmbeddings/query.ps1: -------------------------------------------------------------------------------- 1 | param( 2 | $texto 3 | ,$top = 10 4 | ) 5 | 6 | $ErrorActionPreference = "Stop" 7 | 8 | . "$PSScriptRoot/util.ps1" 9 | 10 | write-host "Translating text" 11 | $TranslatedText = Get-AiChat "Translate that text to english:$texto" -ResponseFormat @{ 12 | name = "FormatResult" 13 | schema = @{ 14 | type = "object" 15 | properties = @{ 16 | text = @{ 17 | type = "string" 18 | description = "translated text" 19 | } 20 | } 21 | } 22 | } -ContentOnly 23 | 24 | $OriginalText = $texto 25 | $QueryText = @($TranslatedText | ConvertFrom-Json).text; 26 | write-host "text: $QueryText"; 27 | 28 | write-host "Getting embeddings..." 
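# GetEmbeddings (util.ps1) chama o Space do Hugging Face; o vetor retornado precisa bater com a coluna vector(1024) definida em tab.Scripts.sql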
29 | $embeddings = GetEmbeddings $QueryText 30 | 31 | $vector = $embeddings | ConvertTo-Json -Compress; 32 | 33 | 34 | $sql = " 35 | declare @search vector(1024) = '$vector' 36 | 37 | select top $top 38 | * 39 | from ( 40 | select 41 | RelPath 42 | ,CosDistance = vector_distance('cosine',embeddings,@search) 43 | ,ScriptContent = ChunkContent 44 | from 45 | Scripts 46 | ) v 47 | order by 48 | CosDistance 49 | 50 | " 51 | 52 | write-host "Getting data from sql..."; 53 | $results = sql $sql; 54 | 55 | 56 | 57 | $ResultJson = $results | select RelPath,ScriptContent | ConvertTo-Json -compress 58 | 59 | $SystemPrompt = " 60 | You are an assistant that helps users find the best T-SQL scripts for their specific needs. 61 | These scripts were created by Rodrigo Ribeiro Gomes and are publicly available for users to query and use. 62 | 63 | The user will provide a short description of what they are looking for, and your task is to present the most relevant scripts. 64 | 65 | To assist you, here is a JSON object with the top matches based on the current user query: 66 | $ResultJson 67 | 68 | --- 69 | This JSON contains all the scripts that matched the user's input. 70 | Analyze each script's name and content, and create a ranked summary of the best recommendations according to the user's need. 71 | 72 | Only use the information available in the provided JSON. Do not reference or mention anything outside of this list. 73 | You can include parts of the scripts in your answer to illustrate or give usage examples based on the user's request. 74 | 75 | Re-rank the results if necessary, presenting them from the most to the least relevant. 76 | You may filter out scripts that appear unrelated to the user query. 77 | 78 | --- 79 | ### Output Rules 80 | 81 | - Review each script and evaluate how well it matches the user’s request. 82 | - Summarize each script, ordering from the most relevant to the least relevant. 83 | - Write personalized and informative review text for each recommendation. 84 | - If applicable, explain how the user should run the script, including parameters or sections (like `WHERE` clauses) they might need to customize. 85 | - When referencing a script, include the link provided in the JSON all scripts are hosted on GitHub 86 | " 87 | 88 | write-host "Analyzing..." 89 | ait "s: $SystemPrompt",$OriginalText 90 | 91 | 92 | 93 | -------------------------------------------------------------------------------- /SqlLibEmbeddings/start-embedding.ps1: -------------------------------------------------------------------------------- 1 | <# 2 | Prepara para o ambiente para os embeddings do git! 3 | #> 4 | 5 | "powershai" | %{ 6 | $ModName = $_ 7 | 8 | if(-not(Get-Module -ListAvailable $ModName)){ 9 | write-host "Instaling $ModName module"; 10 | $m = Install-Module $ModName -force -PassThru 11 | write-host " Installed: $($m.name) $($m.Version)" 12 | } 13 | 14 | } 15 | 16 | 17 | import-module powershai; 18 | 19 | # Checa se tem acesso ao repo do hugging face! 
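# Assume que HF_API_TOKEN e as credenciais SQL_* já estão definidas como variáveis de ambiente (ver .github/workflows/embed.yml e docker-compose.yml)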
20 | Set-AiProvider huggingface; 21 | 22 | & "$PSScriptRoot/embed.ps1" -------------------------------------------------------------------------------- /SqlLibEmbeddings/tab.Scripts.sql: -------------------------------------------------------------------------------- 1 | -- For Azure SQL Database or SQL Server 2025+ 2 | 3 | -- drop table Scripts; 4 | CREATE TABLE Scripts ( 5 | id int IDENTITY PRIMARY KEY WITH(DATA_COMPRESSION = PAGE) 6 | ,RelPath varchar(1000) NOT NULL 7 | ,ChunkNum int NOT NULL 8 | ,ChunkContent nvarchar(max) NOT NULL 9 | ,embeddings vector(1024) 10 | ) 11 | 12 | 13 | -------------------------------------------------------------------------------- /SqlLibEmbeddings/util.ps1: -------------------------------------------------------------------------------- 1 | . "$PSScriptRoot/SqlOperations.ps1" 2 | 3 | import-module powershai; 4 | 5 | if(-not(Get-Command SqlLibEmbed -EA SilentlyContinue)){ 6 | Enter-AiProvider HuggingFace { 7 | Get-GradioSession | ? {$_.name -eq 'space:rrg92/sqlserver-lib-assistant'} | Remove-GradioSession 8 | $space = Get-HfSpace rrg92/sqlserver-lib-assistant 9 | New-GradioSessionApiProxyFunction -force -Prefix "SqlLib" 10 | } 11 | } 12 | 13 | function GetEmbeddings { 14 | param($text, $provider = "huggingface", $model = $null) 15 | 16 | 17 | $res = SqlLibEmbed $text 18 | $embeddings = $res.data | ConvertFrom-Json 19 | $embeddings 20 | } 21 | 22 | function sql { 23 | param($sql, $server = $Env:SQL_SERVER, $DB = $ENV:SQL_DB, $User = $ENV:SQL_USER, $Pass = $Env:SQL_PASS) 24 | 25 | 26 | SqlClient -SQL $SQL -Server $Server -Database $Db -User $User -Pass $Pass 27 | } 28 | 29 | function dbulk { 30 | [CmdletBinding()] 31 | param( 32 | $o 33 | ,$sqltab 34 | ,$pre = $null 35 | ,$post = $null 36 | ,$Database = $ENV:SQL_DB 37 | ,$Server = $Env:SQL_SERVER 38 | ,$User = $ENV:SQL_USER 39 | ,$Pass = $Env:SQL_PASS 40 | ) 41 | 42 | $DataTab = Object2Table $o; 43 | 44 | SqlBulkInsert $DataTab $SqlTab -Server $Server -Database $Database -PreSql $pre -PostSQL $post -User $User -Password $Pass; 45 | } -------------------------------------------------------------------------------- /Tables/TableSize2.sql: -------------------------------------------------------------------------------- 1 | /*#info 2 | 3 | # Autor 4 | Rodrigo Ribeiro Gomes 5 | 6 | # Detalhes 7 | Lista o espaço ocupado por tabelas, incluindo consumo de index, LOB e compressão. 8 | Útil para descobrir quais tabelas estão consumindo maior espaço. 9 | O script considera todas as partições de uma tabela. 
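Os tamanhos retornados estão em MB: o script divide total_pages por 128.0, já que cada página tem 8 KB (128 páginas = 1 MB).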
10 | */ 11 | 12 | SELECT 13 | ObjectName = schema_name(t.schema_id)+'.'+t.name 14 | ,AU.* 15 | ,'EXEC sp_spaceused '+QUOTENAME(schema_name(t.schema_id)+'.'+t.name,'''')+';' 16 | FROM 17 | sys.tables t 18 | CROSS APPLY 19 | ( 20 | SELECT 21 | p.object_id 22 | ,TotalRows = SUM(CASE WHEN p.index_id <= 1 AND au.type_desc = 'IN_ROW_DATA' THEN p.rows ELSE 0 END) 23 | ,TotalMB = SUM(au.total_pages)/128.0 24 | ,LobMB = ISNULL(SUM(CASE WHEN au.type_desc = 'LOB_DATA' THEN au.total_pages ELSE 0 END)/128.0,0) 25 | ,RowOverMB = ISNULL(SUM(CASE WHEN au.type_desc = 'ROW_OVERFLOW_DATA' THEN au.total_pages ELSE 0 END)/128.0,0) 26 | ,IndexSize = ISNULL(SUM(CASE WHEN p.index_id > 1 THEN au.total_pages ELSE 0 END)/128.0,0) 27 | ,PageCompressionMB = SUM(CASE WHEN p.data_compression_desc = 'PAGE' THEN au.total_pages ELSE 0 END)/128.0 28 | ,RowCompressionMB = SUM(CASE WHEN p.data_compression_desc = 'ROW' THEN au.total_pages ELSE 0 END)/128.0 29 | ,TableCompressionMB = SUM(CASE WHEN p.data_compression_desc != 'NONE' AND p.index_id <= 1 THEN au.total_pages ELSE 0 END)/128.0 30 | ,TableSize = SUM(CASE WHEN p.data_compression_desc != 'NONE' AND p.index_id <= 1 THEN au.total_pages ELSE 0 END)/128.0 31 | FROM 32 | sys.allocation_units au 33 | JOIN 34 | sys.partitions p ON p.partition_id = au.container_id 35 | WHERE 36 | p.object_id = t.object_id 37 | GROUP BY 38 | p.object_id 39 | ) AU 40 | ORDER BY 41 | TotalMB DESC 42 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | powershai-publish-test: 3 | build: . 4 | environment: 5 | HF_API_TOKEN: $TEST_HF_API_TOKEN 6 | SQL_SERVER: $SQL_SERVER 7 | SQL_DB: $SQL_DB 8 | SQL_USER: $SQL_USER 9 | SQL_PASS: $SQL_PASS 10 | command: pwsh -NonInteractive -File ./SqlLibEmbeddings/start-embedding.ps1 11 | 12 | 13 | --------------------------------------------------------------------------------