├── .gitattributes
├── .github
└── workflows
│ ├── build copy.yml
│ ├── build.yml
│ ├── codeql-analysis.yml
│ └── tag-create-release.yml
├── .gitignore
├── LICENSE
├── LogShippingService.sln
├── README.md
├── SECURITY.md
├── images
└── diagram.png
├── sql-log-shipping-service-tests
├── GlobalUsings.cs
├── LogShippingServiceTests.csproj
└── UnitTests.cs
├── sql-log-shipping-service
├── AppConfig.cs
├── AssemblyInfo.cs
├── BackupFile.cs
├── BackupFileListRow.cs
├── BackupHeader.cs
├── CommandLineOptions.cs
├── Config.cs
├── DataHelper.cs
├── DatabaseInfo.cs
├── DatabaseInitializerBase.cs
├── DatabaseInitializerFromDiskOrUrl.cs
├── DatabaseInitializerFromMSDB.cs
├── EmbededResourceReader.cs
├── EncryptionHelper.cs
├── ExtensionMethods.cs
├── FileHandling
│ ├── AzureBlobFileHandler.cs
│ ├── DiskFileHandler.cs
│ ├── FileHandler.cs
│ ├── FileHandlerBase.cs
│ └── S3FileHandler.cs
├── HeaderVerificationException.cs
├── LastBackup.cs
├── LogShipping.cs
├── LogShippingService.csproj
├── NamedLocker.cs
├── Program.cs
├── ReadOnlyBackupSet.cs
├── S3Uri.cs
├── SQL
│ ├── GetDatabases.sql
│ ├── GetFilesForLastBackup.sql
│ ├── GetRedoStartLSN.sql
│ └── GetUserDatabases.sql
├── Services_5724.ico
├── SqlStrings.cs
├── Waiter.cs
├── appsettings.json.azure.example
└── appsettings.json.unc.example
└── test
├── CI_Workflow-Restore-Copy.Tests.ps1
└── CI_Workflow.Tests.ps1
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 |
--------------------------------------------------------------------------------
/.github/workflows/build copy.yml:
--------------------------------------------------------------------------------
1 | name: SQL Log Shipping Service - Restore Copy
2 |
3 | on:
4 | push:
5 | workflow_dispatch:
6 |
7 | jobs:
8 | build:
9 | name: Build & Test
10 | runs-on: windows-latest
11 |
12 | steps:
13 | - name: Checkout repository
14 | uses: actions/checkout@v4
15 |
16 | - name: Setup
17 | uses: actions/setup-dotnet@v4
18 | with:
19 | dotnet-version: 8.0.x
20 |
21 | - name: Build solution
22 | run: dotnet build sql-log-shipping-service\LogShippingService.csproj -p:Configuration=Release -o Build
23 |
24 | - name: Install SQL
25 | uses: potatoqualitee/mssqlsuite@v1.7
26 | with:
27 | install: sqlengine
28 | collation: Latin1_General_BIN
29 |
30 | - name: Check SQL Install
31 | run: |
32 | sqlcmd -S localhost -U sa -P dbatools.I0 -d tempdb -Q "SELECT @@version as Version;"
33 | sqlcmd -S localhost -U sa -P dbatools.I0 -d tempdb -Q "SELECT SERVERPROPERTY('Collation') AS Collation;"
34 |
35 | - name: DBATools
36 | run: |
37 | Install-Module dbatools -Force
38 | Set-DbatoolsConfig -FullName sql.connection.trustcert -Value $true -Register
39 |
40 | - name: Create Database
41 | run: |
42 | New-DbaDatabase -SqlInstance localhost -Name LogShipping1,LogShipping2,LogShipping3 -RecoveryModel FULL
43 | Get-DbaDatabase -SqlInstance localhost -ExcludeSystem | Select-Object {$_.Name}
44 | # Test striped backup
45 | Backup-DbaDatabase -SqlInstance localhost -Path C:\backup\FULL1\,C:\backup\FULL2\ -Database LogShipping1,LogShipping2,LogShipping3 -Type Full -CreateFolder
46 | # Alternating log folders
47 | Backup-DbaDatabase -SqlInstance localhost -Path C:\backup\LOG1\ -Database LogShipping2,LogShipping3 -Type LOG -CreateFolder
48 | Backup-DbaDatabase -SqlInstance localhost -Path C:\backup\LOG2\ -Database LogShipping2,LogShipping3 -Type LOG -CreateFolder
49 | Backup-DbaDatabase -SqlInstance localhost -Path C:\backup\LOG1\ -Database LogShipping1 -Type LOG -CreateFolder -FilePath LogShipping1_1.trn
50 | Backup-DbaDatabase -SqlInstance localhost -Path C:\backup\LOG1\ -Database LogShipping1 -Type LOG -CreateFolder -FilePath LogShipping1_1.trn
51 | Backup-DbaDatabase -SqlInstance localhost -Path C:\backup\LOG2\ -Database LogShipping1 -Type LOG -CreateFolder -FilePath LogShipping1_2.trn
52 | Backup-DbaDatabase -SqlInstance localhost -Path C:\backup\LOG1\ -Database LogShipping1 -Type LOG -CreateFolder -FilePath LogShipping1_3.trn
53 | Get-DbaDatabase -SqlInstance localhost -ExcludeSystem | Select-Object {$_.Name}
54 |
55 | - name: Deploy App
56 | run: |
57 | New-Item -Path C:\sql-log-shipping-service -ItemType Directory
58 | New-Item -Path C:\Standby -ItemType Directory
59 | Copy-Item -Path .\Build\* -Destination C:\sql-log-shipping-service -Recurse
60 |
61 | - name: Configure 1
62 | shell: cmd
63 | run: |
64 | "C:\sql-log-shipping-service\LogShippingService.exe" --Destination "Data Source=LOCALHOST;Integrated Security=True;Encrypt=True;Trust Server Certificate=True" --LogFilePath "C:\Backup\LOG1\{DatabaseName},C:\Backup\LOG2\{DatabaseName}" --FullFilePath "C:\Backup\FULL1\{DatabaseName},C:\Backup\FULL2\{DatabaseName}" --StandbyFileName "C:\Standby\{DatabaseName}_Standby.BAK" --RestoreDatabaseNameSuffix "_Copy"
65 |
66 | - name: Run service
67 | run: |
68 | sc.exe create "LogShippingService" binpath="C:\sql-log-shipping-service\LogShippingService.exe"
69 | net start LogShippingService
70 |
71 | - name: Wait & Output Logs
72 | run: |
73 | $LoopCount=0
74 | $MaxLoopCount=30
75 | while((Get-DbaDatabase -SqlInstance "LOCALHOST" -Status "Standby").Count -lt 3 -and $LoopCount -lt $MaxLoopCount) {
76 | Start-Sleep -s 2
77 | Write-Output "Waiting for databases to be in Standby mode"
78 | $LoopCount++
79 | }
80 | if($LoopCount -eq $MaxLoopCount) {
81 | Write-Warning "Timeout waiting for databases to be in Standby mode"
82 | }
83 | Get-ChildItem -Path C:\sql-log-shipping-service\Logs | Get-Content
84 |
85 | - name: Run Pester Tests for Restore Copy
86 | run: |
87 | Install-Module Pester -Force -SkipPublisherCheck
88 | Import-Module Pester -PassThru
89 | Invoke-Pester -Output Detailed test\CI_Workflow-Restore-Copy.Tests.ps1
90 |
91 | - name: Unit Test
92 | run: dotnet test sql-log-shipping-service-tests\LogShippingServiceTests.csproj --verbosity normal
93 |
--------------------------------------------------------------------------------
/.github/workflows/build.yml:
--------------------------------------------------------------------------------
1 | name: SQL Log Shipping Service - Build
2 |
3 | on:
4 | push:
5 | workflow_dispatch:
6 |
7 | jobs:
8 | build:
9 | name: Build & Test
10 | runs-on: windows-latest
11 |
12 | steps:
13 | - name: Checkout repository
14 | uses: actions/checkout@v4
15 |
16 | - name: Setup
17 | uses: actions/setup-dotnet@v4
18 | with:
19 | dotnet-version: 8.0.x
20 |
21 | - name: Build solution
22 | run: dotnet build sql-log-shipping-service\LogShippingService.csproj -p:Configuration=Release -o Build
23 |
24 | - name: Install SQL
25 | uses: potatoqualitee/mssqlsuite@v1.7
26 | with:
27 | install: sqlengine
28 | collation: Latin1_General_BIN
29 |
30 | - name: Check SQL Install
31 | run: |
32 | sqlcmd -S localhost -U sa -P dbatools.I0 -d tempdb -Q "SELECT @@version as Version;"
33 | sqlcmd -S localhost -U sa -P dbatools.I0 -d tempdb -Q "SELECT SERVERPROPERTY('Collation') AS Collation;"
34 |
35 | - name: DBATools
36 | run: |
37 | Install-Module dbatools -Force
38 | Set-DbatoolsConfig -FullName sql.connection.trustcert -Value $true -Register
39 |
40 | - name: Create Database
41 | run: |
42 | New-DbaDatabase -SqlInstance localhost -Name LogShipping1,LogShipping2,LogShipping3 -RecoveryModel FULL
43 | Get-DbaDatabase -SqlInstance localhost -ExcludeSystem | Select-Object {$_.Name}
44 | Backup-DbaDatabase -SqlInstance localhost -Path C:\backup\FULL\ -Database LogShipping1,LogShipping2,LogShipping3 -Type Full -CreateFolder
45 | Backup-DbaDatabase -SqlInstance localhost -Path C:\backup\LOG\ -Database LogShipping2,LogShipping3 -Type LOG -CreateFolder
46 | Backup-DbaDatabase -SqlInstance localhost -Path C:\backup\LOG\ -Database LogShipping1 -Type LOG -CreateFolder -FilePath LogShipping1_1.trn
47 | Backup-DbaDatabase -SqlInstance localhost -Path C:\backup\LOG\ -Database LogShipping1 -Type LOG -CreateFolder -FilePath LogShipping1_1.trn
48 | Backup-DbaDatabase -SqlInstance localhost -Path C:\backup\LOG\ -Database LogShipping1 -Type LOG -CreateFolder -FilePath LogShipping1_2.trn
49 | Backup-DbaDatabase -SqlInstance localhost -Path C:\backup\LOG\ -Database LogShipping1 -Type LOG -CreateFolder -FilePath LogShipping1_3.trn
50 | Remove-DbaDatabase -SqlInstance localhost -Database LogShipping1,LogShipping2,LogShipping3 -Confirm:$false
51 | Get-DbaDatabase -SqlInstance localhost -ExcludeSystem | Select-Object {$_.Name}
52 |
53 | - name: Deploy App
54 | run: |
55 | New-Item -Path C:\sql-log-shipping-service -ItemType Directory
56 | New-Item -Path C:\Standby -ItemType Directory
57 | Copy-Item -Path .\Build\* -Destination C:\sql-log-shipping-service -Recurse
58 |
59 | - name: Configure
60 | shell: cmd
61 | run: |
62 |         "C:\sql-log-shipping-service\LogShippingService.exe" --Destination "Data Source=LOCALHOST;Integrated Security=True;Encrypt=True;Trust Server Certificate=True" --LogFilePath "C:\Backup\LOG\{DatabaseName}" --FullFilePath "C:\Backup\FULL\{DatabaseName}" --StandbyFileName "C:\Standby\{DatabaseName}_Standby.BAK"
63 |
64 | - name: Run service
65 | run: |
66 | sc.exe create "LogShippingService" binpath="C:\sql-log-shipping-service\LogShippingService.exe"
67 | net start LogShippingService
68 |
69 | - name: Wait & Output Logs
70 | run: |
71 | $LoopCount=0
72 | $MaxLoopCount=30
73 | while((Get-DbaDatabase -SqlInstance "LOCALHOST" -Status "Standby").Count -lt 3 -and $LoopCount -lt $MaxLoopCount) {
74 | Start-Sleep -s 2
75 | Write-Output "Waiting for databases to be in Standby mode"
76 | $LoopCount++
77 | }
78 | if($LoopCount -eq $MaxLoopCount) {
79 | Write-Warning "Timeout waiting for databases to be in Standby mode"
80 | }
81 | Get-ChildItem -Path C:\sql-log-shipping-service\Logs | Get-Content
82 |
83 | - name: Run Pester Tests
84 | run: |
85 | Install-Module Pester -Force -SkipPublisherCheck
86 | Import-Module Pester -PassThru
87 | Invoke-Pester -Output Detailed test\CI_Workflow.Tests.ps1
88 |
89 | - name: Unit Test
90 | run: dotnet test sql-log-shipping-service-tests\LogShippingServiceTests.csproj --verbosity normal
91 |
--------------------------------------------------------------------------------
/.github/workflows/codeql-analysis.yml:
--------------------------------------------------------------------------------
1 | name: "CodeQL"
2 |
3 | on:
4 | workflow_dispatch:
5 | schedule:
6 | - cron: '0 0 * * *' # once in a day at 00:00
7 |
8 | jobs:
9 | analyze:
10 | name: Analyze
11 | runs-on: windows-latest
12 |
13 | strategy:
14 | fail-fast: false
15 | matrix:
16 | language: ['csharp']
17 |
18 | steps:
19 |
20 | - name: Checkout repository
21 | uses: actions/checkout@v4
22 |
23 | - name: Initialize CodeQL
24 | uses: github/codeql-action/init@v3
25 | with:
26 | languages: ${{ matrix.language }}
27 |
28 | - name: Setup
29 |         uses: actions/setup-dotnet@v4
30 | with:
31 | dotnet-version: 8.0.x
32 |
33 | - name: Build solution
34 | run: dotnet build LogShippingService.sln -p:Configuration=Release
35 |
36 | - name: Perform CodeQL Analysis
37 | uses: github/codeql-action/analyze@v3
--------------------------------------------------------------------------------
/.github/workflows/tag-create-release.yml:
--------------------------------------------------------------------------------
1 | name: SQL Log Shipping Service - Tag & Create Release
2 |
3 | on:
4 | workflow_dispatch:
5 |
6 | jobs:
7 | build:
8 | name: Build
9 | runs-on: windows-latest
10 |
11 | steps:
12 | - name: Checkout repository
13 | uses: actions/checkout@v4
14 |
15 | - name: Setup
16 | uses: actions/setup-dotnet@v4
17 | with:
18 | dotnet-version: 8.0.x
19 |
20 | - name: Build
21 | run: dotnet build sql-log-shipping-service\LogShippingService.csproj -p:Configuration=Release -o Build
22 |
23 | - name: Get Version
24 | id: GetVersion
25 | shell: powershell
26 | run: |
27 | $path = [System.IO.Path]::Combine((Get-Location),"Build\LogShippingService.dll")
28 | $version = [System.Reflection.Assembly]::LoadFrom($path).GetName().Version
29 | $version.ToString(3)
30 | Write-Output "BUILD_NUMBER=$($version.ToString(3))" >> $env:GITHUB_OUTPUT
31 |
32 | - name: Zip
33 | shell: powershell
34 | run: |
35 | $zipPath = "LogShippingService_${{steps.GetVersion.outputs.BUILD_NUMBER}}.zip"
36 | Compress-Archive -Path "Build\*" -DestinationPath $zipPath
37 |
38 | - name: Publish - GitHub CLI
39 | run: gh release create ${{steps.GetVersion.outputs.BUILD_NUMBER}} "LogShippingService_${{steps.GetVersion.outputs.BUILD_NUMBER}}.zip" --generate-notes --draft
40 | env:
41 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | ## Ignore Visual Studio temporary files, build results, and
2 | ## files generated by popular Visual Studio add-ons.
3 | ##
4 | ## Get latest from https://github.com/github/gitignore/blob/main/VisualStudio.gitignore
5 |
6 | # User-specific files
7 | *.rsuser
8 | *.suo
9 | *.user
10 | *.userosscache
11 | *.sln.docstates
12 |
13 | # User-specific files (MonoDevelop/Xamarin Studio)
14 | *.userprefs
15 |
16 | # Mono auto generated files
17 | mono_crash.*
18 |
19 | # Build results
20 | [Dd]ebug/
21 | [Dd]ebugPublic/
22 | [Rr]elease/
23 | [Rr]eleases/
24 | x64/
25 | x86/
26 | [Ww][Ii][Nn]32/
27 | [Aa][Rr][Mm]/
28 | [Aa][Rr][Mm]64/
29 | bld/
30 | [Bb]in/
31 | [Oo]bj/
32 | [Ll]og/
33 | [Ll]ogs/
34 |
35 | # Visual Studio 2015/2017 cache/options directory
36 | .vs/
37 | # Uncomment if you have tasks that create the project's static files in wwwroot
38 | #wwwroot/
39 |
40 | # Visual Studio 2017 auto generated files
41 | Generated\ Files/
42 |
43 | # MSTest test Results
44 | [Tt]est[Rr]esult*/
45 | [Bb]uild[Ll]og.*
46 |
47 | # NUnit
48 | *.VisualState.xml
49 | TestResult.xml
50 | nunit-*.xml
51 |
52 | # Build Results of an ATL Project
53 | [Dd]ebugPS/
54 | [Rr]eleasePS/
55 | dlldata.c
56 |
57 | # Benchmark Results
58 | BenchmarkDotNet.Artifacts/
59 |
60 | # .NET Core
61 | project.lock.json
62 | project.fragment.lock.json
63 | artifacts/
64 |
65 | # ASP.NET Scaffolding
66 | ScaffoldingReadMe.txt
67 |
68 | # StyleCop
69 | StyleCopReport.xml
70 |
71 | # Files built by Visual Studio
72 | *_i.c
73 | *_p.c
74 | *_h.h
75 | *.ilk
76 | *.meta
77 | *.obj
78 | *.iobj
79 | *.pch
80 | *.pdb
81 | *.ipdb
82 | *.pgc
83 | *.pgd
84 | *.rsp
85 | *.sbr
86 | *.tlb
87 | *.tli
88 | *.tlh
89 | *.tmp
90 | *.tmp_proj
91 | *_wpftmp.csproj
92 | *.log
93 | *.tlog
94 | *.vspscc
95 | *.vssscc
96 | .builds
97 | *.pidb
98 | *.svclog
99 | *.scc
100 |
101 | # Chutzpah Test files
102 | _Chutzpah*
103 |
104 | # Visual C++ cache files
105 | ipch/
106 | *.aps
107 | *.ncb
108 | *.opendb
109 | *.opensdf
110 | *.sdf
111 | *.cachefile
112 | *.VC.db
113 | *.VC.VC.opendb
114 |
115 | # Visual Studio profiler
116 | *.psess
117 | *.vsp
118 | *.vspx
119 | *.sap
120 |
121 | # Visual Studio Trace Files
122 | *.e2e
123 |
124 | # TFS 2012 Local Workspace
125 | $tf/
126 |
127 | # Guidance Automation Toolkit
128 | *.gpState
129 |
130 | # ReSharper is a .NET coding add-in
131 | _ReSharper*/
132 | *.[Rr]e[Ss]harper
133 | *.DotSettings.user
134 |
135 | # TeamCity is a build add-in
136 | _TeamCity*
137 |
138 | # DotCover is a Code Coverage Tool
139 | *.dotCover
140 |
141 | # AxoCover is a Code Coverage Tool
142 | .axoCover/*
143 | !.axoCover/settings.json
144 |
145 | # Coverlet is a free, cross platform Code Coverage Tool
146 | coverage*.json
147 | coverage*.xml
148 | coverage*.info
149 |
150 | # Visual Studio code coverage results
151 | *.coverage
152 | *.coveragexml
153 |
154 | # NCrunch
155 | _NCrunch_*
156 | .*crunch*.local.xml
157 | nCrunchTemp_*
158 |
159 | # MightyMoose
160 | *.mm.*
161 | AutoTest.Net/
162 |
163 | # Web workbench (sass)
164 | .sass-cache/
165 |
166 | # Installshield output folder
167 | [Ee]xpress/
168 |
169 | # DocProject is a documentation generator add-in
170 | DocProject/buildhelp/
171 | DocProject/Help/*.HxT
172 | DocProject/Help/*.HxC
173 | DocProject/Help/*.hhc
174 | DocProject/Help/*.hhk
175 | DocProject/Help/*.hhp
176 | DocProject/Help/Html2
177 | DocProject/Help/html
178 |
179 | # Click-Once directory
180 | publish/
181 |
182 | # Publish Web Output
183 | *.[Pp]ublish.xml
184 | *.azurePubxml
185 | # Note: Comment the next line if you want to checkin your web deploy settings,
186 | # but database connection strings (with potential passwords) will be unencrypted
187 | *.pubxml
188 | *.publishproj
189 |
190 | # Microsoft Azure Web App publish settings. Comment the next line if you want to
191 | # checkin your Azure Web App publish settings, but sensitive information contained
192 | # in these scripts will be unencrypted
193 | PublishScripts/
194 |
195 | # NuGet Packages
196 | *.nupkg
197 | # NuGet Symbol Packages
198 | *.snupkg
199 | # The packages folder can be ignored because of Package Restore
200 | **/[Pp]ackages/*
201 | # except build/, which is used as an MSBuild target.
202 | !**/[Pp]ackages/build/
203 | # Uncomment if necessary however generally it will be regenerated when needed
204 | #!**/[Pp]ackages/repositories.config
205 | # NuGet v3's project.json files produces more ignorable files
206 | *.nuget.props
207 | *.nuget.targets
208 |
209 | # Microsoft Azure Build Output
210 | csx/
211 | *.build.csdef
212 |
213 | # Microsoft Azure Emulator
214 | ecf/
215 | rcf/
216 |
217 | # Windows Store app package directories and files
218 | AppPackages/
219 | BundleArtifacts/
220 | Package.StoreAssociation.xml
221 | _pkginfo.txt
222 | *.appx
223 | *.appxbundle
224 | *.appxupload
225 |
226 | # Visual Studio cache files
227 | # files ending in .cache can be ignored
228 | *.[Cc]ache
229 | # but keep track of directories ending in .cache
230 | !?*.[Cc]ache/
231 |
232 | # Others
233 | ClientBin/
234 | ~$*
235 | *~
236 | *.dbmdl
237 | *.dbproj.schemaview
238 | *.jfm
239 | *.pfx
240 | *.publishsettings
241 | orleans.codegen.cs
242 |
243 | # Including strong name files can present a security risk
244 | # (https://github.com/github/gitignore/pull/2483#issue-259490424)
245 | #*.snk
246 |
247 | # Since there are multiple workflows, uncomment next line to ignore bower_components
248 | # (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
249 | #bower_components/
250 |
251 | # RIA/Silverlight projects
252 | Generated_Code/
253 |
254 | # Backup & report files from converting an old project file
255 | # to a newer Visual Studio version. Backup files are not needed,
256 | # because we have git ;-)
257 | _UpgradeReport_Files/
258 | Backup*/
259 | UpgradeLog*.XML
260 | UpgradeLog*.htm
261 | ServiceFabricBackup/
262 | *.rptproj.bak
263 |
264 | # SQL Server files
265 | *.mdf
266 | *.ldf
267 | *.ndf
268 |
269 | # Business Intelligence projects
270 | *.rdl.data
271 | *.bim.layout
272 | *.bim_*.settings
273 | *.rptproj.rsuser
274 | *- [Bb]ackup.rdl
275 | *- [Bb]ackup ([0-9]).rdl
276 | *- [Bb]ackup ([0-9][0-9]).rdl
277 |
278 | # Microsoft Fakes
279 | FakesAssemblies/
280 |
281 | # GhostDoc plugin setting file
282 | *.GhostDoc.xml
283 |
284 | # Node.js Tools for Visual Studio
285 | .ntvs_analysis.dat
286 | node_modules/
287 |
288 | # Visual Studio 6 build log
289 | *.plg
290 |
291 | # Visual Studio 6 workspace options file
292 | *.opt
293 |
294 | # Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
295 | *.vbw
296 |
297 | # Visual Studio 6 auto-generated project file (contains which files were open etc.)
298 | *.vbp
299 |
300 | # Visual Studio 6 workspace and project file (working project files containing files to include in project)
301 | *.dsw
302 | *.dsp
303 |
304 | # Visual Studio 6 technical files
305 | *.ncb
306 | *.aps
307 |
308 | # Visual Studio LightSwitch build output
309 | **/*.HTMLClient/GeneratedArtifacts
310 | **/*.DesktopClient/GeneratedArtifacts
311 | **/*.DesktopClient/ModelManifest.xml
312 | **/*.Server/GeneratedArtifacts
313 | **/*.Server/ModelManifest.xml
314 | _Pvt_Extensions
315 |
316 | # Paket dependency manager
317 | .paket/paket.exe
318 | paket-files/
319 |
320 | # FAKE - F# Make
321 | .fake/
322 |
323 | # CodeRush personal settings
324 | .cr/personal
325 |
326 | # Python Tools for Visual Studio (PTVS)
327 | __pycache__/
328 | *.pyc
329 |
330 | # Cake - Uncomment if you are using it
331 | # tools/**
332 | # !tools/packages.config
333 |
334 | # Tabs Studio
335 | *.tss
336 |
337 | # Telerik's JustMock configuration file
338 | *.jmconfig
339 |
340 | # BizTalk build output
341 | *.btp.cs
342 | *.btm.cs
343 | *.odx.cs
344 | *.xsd.cs
345 |
346 | # OpenCover UI analysis results
347 | OpenCover/
348 |
349 | # Azure Stream Analytics local run output
350 | ASALocalRun/
351 |
352 | # MSBuild Binary and Structured Log
353 | *.binlog
354 |
355 | # NVidia Nsight GPU debugger configuration file
356 | *.nvuser
357 |
358 | # MFractors (Xamarin productivity tool) working folder
359 | .mfractor/
360 |
361 | # Local History for Visual Studio
362 | .localhistory/
363 |
364 | # Visual Studio History (VSHistory) files
365 | .vshistory/
366 |
367 | # BeatPulse healthcheck temp database
368 | healthchecksdb
369 |
370 | # Backup folder for Package Reference Convert tool in Visual Studio 2017
371 | MigrationBackup/
372 |
373 | # Ionide (cross platform F# VS Code tools) working folder
374 | .ionide/
375 |
376 | # Fody - auto-generated XML schema
377 | FodyWeavers.xsd
378 |
379 | # VS Code files for those working on multiple tools
380 | .vscode/*
381 | !.vscode/settings.json
382 | !.vscode/tasks.json
383 | !.vscode/launch.json
384 | !.vscode/extensions.json
385 | *.code-workspace
386 |
387 | # Local History for Visual Studio Code
388 | .history/
389 |
390 | # Windows Installer files from build outputs
391 | *.cab
392 | *.msi
393 | *.msix
394 | *.msm
395 | *.msp
396 |
397 | # JetBrains Rider
398 | *.sln.iml
399 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 Trimble, Inc.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/LogShippingService.sln:
--------------------------------------------------------------------------------
1 |
2 | Microsoft Visual Studio Solution File, Format Version 12.00
3 | # Visual Studio Version 17
4 | VisualStudioVersion = 17.5.33414.496
5 | MinimumVisualStudioVersion = 10.0.40219.1
6 | Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "LogShippingService", "sql-log-shipping-service\LogShippingService.csproj", "{AD9649BD-B93E-46C7-8359-2050A17EE834}"
7 | EndProject
8 | Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "LogShippingServiceTests", "sql-log-shipping-service-tests\LogShippingServiceTests.csproj", "{A7F2B9C2-8FD7-407E-AE22-A4874A4BEA49}"
9 | EndProject
10 | Global
11 | GlobalSection(SolutionConfigurationPlatforms) = preSolution
12 | Debug|Any CPU = Debug|Any CPU
13 | Release|Any CPU = Release|Any CPU
14 | EndGlobalSection
15 | GlobalSection(ProjectConfigurationPlatforms) = postSolution
16 | {AD9649BD-B93E-46C7-8359-2050A17EE834}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
17 | {AD9649BD-B93E-46C7-8359-2050A17EE834}.Debug|Any CPU.Build.0 = Debug|Any CPU
18 | {AD9649BD-B93E-46C7-8359-2050A17EE834}.Release|Any CPU.ActiveCfg = Release|Any CPU
19 | {AD9649BD-B93E-46C7-8359-2050A17EE834}.Release|Any CPU.Build.0 = Release|Any CPU
20 | {A7F2B9C2-8FD7-407E-AE22-A4874A4BEA49}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
21 | {A7F2B9C2-8FD7-407E-AE22-A4874A4BEA49}.Debug|Any CPU.Build.0 = Debug|Any CPU
22 | {A7F2B9C2-8FD7-407E-AE22-A4874A4BEA49}.Release|Any CPU.ActiveCfg = Release|Any CPU
23 | {A7F2B9C2-8FD7-407E-AE22-A4874A4BEA49}.Release|Any CPU.Build.0 = Release|Any CPU
24 | EndGlobalSection
25 | GlobalSection(SolutionProperties) = preSolution
26 | HideSolutionNode = FALSE
27 | EndGlobalSection
28 | GlobalSection(ExtensibilityGlobals) = postSolution
29 | SolutionGuid = {7FBBF089-D357-47F3-85D9-FE62D3964D4E}
30 | EndGlobalSection
31 | EndGlobal
32 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # SQL Log Shipping Service
2 |
3 | This project provides a solution for automatically restoring SQL Server transaction log backups. Implemented as a .NET application that runs as a Windows service, it provides the following features:
4 |
5 | * **Simple config based setup.** *Eliminates the need for per-database basis configuration. Very effective for handling a large number of databases*
6 | * **Automatic initialization of new databases**. *Incorporate new databases without any manual intervention*
7 | * **UNC path or URL (Azure Blob or S3).** *Additional flexibility to work directly with Azure blob containers, S3 buckets or standard UNC paths. e.g. `\\server\share`*
8 | * **Scalable.** *The log shipping service allows for a configurable number of threads so your SQL instance isn't overloaded with a job per database or constrained by a single job. The service is efficient and can scale to a large number (thousands) of databases*
9 | * **Standby mode support.** *Allow users to query your log shipped databases. Prevent open connections from blocking log restores with options to kill sessions after a period of time. Keep your databases available for querying during certain hours. Standby option is only applied after the last log is restored (more efficient than built-in log shipping)*
10 | * **A disaster recovery tool**. *Beyond the tools primary capability as a log shipping tool, it can also be used as part of your disaster recovery strategy to restore your databases from backup.*
11 |
12 | ## Intended Audience
13 |
14 | Ideal for anyone looking to implement log shipping or needing a reliable process for database recovery from backups. Especially beneficial for environments with a large number of databases or utilizing BACKUP TO URL, where alternative solutions are limited. Suitable for both small and large-scale database environments.
15 |
16 | ## Getting Started
17 |
18 | The [Quick Start](../../wiki/Quick-Start) guide will help you get up and running quickly. The [wiki](../../wiki) also provides documentation on all the features and functionality of the tool.
19 |
20 | 👉 [Get Started Here](../../wiki/Quick-Start)
21 |
22 | ## Architecture
23 |
24 | 
25 |
26 | The tool runs as a Windows service, configured using an *appsettings.json* config file. Log restores are run on a specified [schedule](../../wiki/Schedule) with a configurable number of threads processing the restores. It can restore logs from a file share or from Azure blob. For each database in a restoring or standby state, it enumerates the files for the database, checks the restore headers and restores the appropriate log files.
27 |
28 | New databases can be [initialized](../../wiki/Initialization) by specifying the Full/Diff backup paths OR by specifying a connection string to the primary server.
29 |
30 | Designed solely for restoration, this service complements backup solutions like [Ola Hallengren's](https://ola.hallengren.com/) maintenance solution, which offers parallel backup capabilities and [support for BACKUP TO URL](https://gist.github.com/scheffler/7edd40f430235aab651fadcc7d191a89).
31 |
32 |
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Security Policy
2 |
3 | ## Reporting a Vulnerability
4 |
5 | Security issues and bugs should be reported privately, via email, to [cybersecurity@trimble.com](mailto:cybersecurity@trimble.com).
6 |
7 | If you do not receive a response within 24 hours, please follow up via email to ensure we received your original message.
8 |
9 | Please do not open issues for anything you think might have a security implication.
10 |
--------------------------------------------------------------------------------
/images/diagram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/trimble-oss/sql-log-shipping-service/a6d8f78e06e71649e2bd97feaa520a4ac3e1b4c4/images/diagram.png
--------------------------------------------------------------------------------
/sql-log-shipping-service-tests/GlobalUsings.cs:
--------------------------------------------------------------------------------
1 | global using Microsoft.VisualStudio.TestTools.UnitTesting;
--------------------------------------------------------------------------------
/sql-log-shipping-service-tests/LogShippingServiceTests.csproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | net8.0-windows
5 | enable
6 | enable
7 |
8 | false
9 | true
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 | all
18 | runtime; build; native; contentfiles; analyzers; buildtransitive
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
--------------------------------------------------------------------------------
/sql-log-shipping-service-tests/UnitTests.cs:
--------------------------------------------------------------------------------
1 | using LogShippingService;
2 | using Microsoft.Extensions.Configuration;
3 | using System.Diagnostics;
4 |
5 | namespace LogShippingServiceTests
6 | {
7 | [TestClass]
8 | public class UnitTests
9 | {
10 | [TestMethod]
11 | public void TestMethod1()
12 | {
13 | // Start with a blank config file
14 | File.Delete(Config.ConfigFile);
15 | File.WriteAllText(Config.ConfigFile, "{}");
16 | var options = new Config()
17 | {
18 | ContainerUrl = "https://myaccount.blob.core.windows.net/mycontainer",
19 | Destination = "Server=.;Database=master;Integrated Security=true;",
20 | DiffFilePath = @"\\BACKUPSERVER\Backups\SERVERNAME\{DatabaseName}\DIFF",
21 | FullFilePath = @"\\BACKUPSERVER\Backups\SERVERNAME\{DatabaseName}\FULL",
22 |                 LogFilePath = @"\\BACKUPSERVER\Backups\SERVERNAME\{DatabaseName}\LOG",
23 | MaxBackupAgeForInitialization = 7,
24 | MoveDataFolder = "D:\\Data",
25 | MoveFileStreamFolder = "F:\\FileStream",
26 | MoveLogFolder = "L:\\Log",
27 | MSDBPathFind = "C:\\Backup",
28 | MSDBPathReplace = "\\\\BackupServer\\Backup",
29 | PollForNewDatabasesCron = "*/5 * * * *",
30 | PollForNewDatabasesFrequency = 5,
31 | ReadOnlyFilePath = @"\\BACKUPSERVER\Backups\SERVERNAME\{DatabaseName}\READONLY",
32 | RecoverPartialBackupWithoutReadOnly = true,
33 | SASToken = "mySASToken",
34 | SourceConnectionString = "Server=.;Database=master;Integrated Security=true;",
35 |                 Hours = new HashSet<int> { 1, 2, 3, 4, 5 },
36 | StandbyFileName = "D:\\Data\\{DatabaseName}_standby.bak",
37 | KillUserConnectionsWithRollbackAfter = 5,
38 | KillUserConnections = false,
39 | MaxProcessingTimeMins = 20,
40 | AccessKey = "myAccessKey",
41 | SecretKey = "mySecretKey"
42 | };
43 | // Pass values to LogShippingService.exe command line
44 | var commandLine = $"--ContainerUrl {options.ContainerUrl} --Destination \"{options.Destination}\" --DiffFilePath \"{options.DiffFilePath}\" --FullFilePath \"{options.FullFilePath}\" --LogFilePath \"{options.LogFilePath}\" --MaxBackupAgeForInitialization {options.MaxBackupAgeForInitialization} --MoveDataFolder \"{options.MoveDataFolder}\" --MoveFileStreamFolder \"{options.MoveFileStreamFolder}\" --MoveLogFolder \"{options.MoveLogFolder}\" --MSDBPathFind \"{options.MSDBPathFind}\" --MSDBPathReplace \"{options.MSDBPathReplace}\" --PollForNewDatabasesCron \"{options.PollForNewDatabasesCron}\" --PollForNewDatabasesFrequency {options.PollForNewDatabasesFrequency} --ReadOnlyFilePath \"{options.ReadOnlyFilePath}\" --RecoverPartialBackupWithoutReadOnly {options.RecoverPartialBackupWithoutReadOnly} --SASToken \"{options.SASToken}\" --SourceConnectionString \"{options.SourceConnectionString}\" --Hours {string.Join(' ', options.Hours)} --StandbyFileName \"{options.StandbyFileName}\" --KillUserConnectionsWithRollbackAfter {options.KillUserConnectionsWithRollbackAfter} --KillUserConnections {options.KillUserConnections} --MaxProcessingTimeMins {options.MaxProcessingTimeMins} --AccessKey \"{options.AccessKey}\" --SecretKey \"{options.SecretKey}\"";
45 |
46 | // Call LogShippingService.exe with the command line arguments
47 | var p = new Process()
48 | {
49 | StartInfo = new ProcessStartInfo
50 | {
51 | FileName = "LogShippingService.exe",
52 | Arguments = commandLine,
53 | RedirectStandardOutput = true,
54 | UseShellExecute = false,
55 | CreateNoWindow = true
56 | }
57 | };
58 | p.Start();
59 |
60 | // Wait for the process to exit
61 | p.WaitForExit();
62 | Assert.AreEqual(0, p.ExitCode);
63 |
64 | // Read the process output
65 | string output = p.StandardOutput.ReadToEnd();
66 | Assert.AreNotSame(output, "");
67 |
68 | var configuration = new ConfigurationBuilder()
69 | .SetBasePath(Directory.GetCurrentDirectory())
70 | .AddJsonFile("appsettings.json")
71 | .Build();
72 |
73 | var config = configuration.GetSection("Config").Get() ?? new Config();
74 |
75 | Assert.AreEqual(options.ContainerUrl, config.ContainerUrl);
76 | Assert.AreEqual(options.Destination, config.Destination);
77 | Assert.AreEqual(options.DiffFilePath, config.DiffFilePath);
78 | Assert.AreEqual(options.FullFilePath, config.FullFilePath);
79 | Assert.AreEqual(options.LogFilePath, config.LogFilePath);
80 | Assert.AreEqual(options.MaxBackupAgeForInitialization, config.MaxBackupAgeForInitialization);
81 | Assert.AreEqual(options.MoveDataFolder, config.MoveDataFolder);
82 | Assert.AreEqual(options.MoveFileStreamFolder, config.MoveFileStreamFolder);
83 | Assert.AreEqual(options.MoveLogFolder, config.MoveLogFolder);
84 | Assert.AreEqual(options.MSDBPathFind, config.MSDBPathFind);
85 | Assert.AreEqual(options.MSDBPathReplace, config.MSDBPathReplace);
86 | Assert.AreEqual(options.PollForNewDatabasesCron, config.PollForNewDatabasesCron);
87 | Assert.AreEqual(options.PollForNewDatabasesFrequency, config.PollForNewDatabasesFrequency);
88 | Assert.AreEqual(options.ReadOnlyFilePath, config.ReadOnlyFilePath);
89 | Assert.AreEqual(options.RecoverPartialBackupWithoutReadOnly, config.RecoverPartialBackupWithoutReadOnly);
90 | Assert.AreEqual(options.SASToken, config.SASToken);
91 | Assert.AreEqual(options.SourceConnectionString, config.SourceConnectionString);
92 | Assert.AreEqual(options.StandbyFileName, config.StandbyFileName);
93 | Assert.AreEqual(options.KillUserConnectionsWithRollbackAfter, config.KillUserConnectionsWithRollbackAfter);
94 | Assert.AreEqual(options.KillUserConnections, config.KillUserConnections);
95 | Assert.AreEqual(options.MaxProcessingTimeMins, config.MaxProcessingTimeMins);
96 | Assert.AreEqual(options.AccessKey, config.AccessKey);
97 | Assert.AreEqual(options.SecretKey, config.SecretKey); //Should be encrypted
98 |
99 | var json = File.ReadAllText(Config.ConfigFile);
100 | // Check that the secret key and access key are not stored plaintext
101 | Assert.IsFalse(json.Contains(options.SecretKey));
102 | Assert.IsFalse(json.Contains(options.SASToken));
103 | Assert.IsTrue(json.Contains("encrypted:"));
104 |
105 | CollectionAssert.AreEquivalent(options.Hours.ToList(), config.Hours.ToList());
106 | }
107 | }
108 | }
--------------------------------------------------------------------------------
/sql-log-shipping-service/AppConfig.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Linq;
4 | using System.Text;
5 | using System.Threading.Tasks;
6 |
7 | namespace LogShippingService
8 | {
    /// <summary>
    /// Process-wide holder for the active <see cref="Config"/> instance.
    /// Set once at startup and read by the rest of the service (e.g. BackupFile reads
    /// Config.Destination through this property).
    /// </summary>
    internal static class AppConfig
    {
        // Mutable so startup code can replace the default instance after the
        // configuration file / command line has been parsed.
        public static Config Config { get; set; } = new Config();
    }
13 | }
--------------------------------------------------------------------------------
/sql-log-shipping-service/AssemblyInfo.cs:
--------------------------------------------------------------------------------
1 | using System.Reflection;
2 | using System.Runtime.InteropServices;
3 |
4 | // In SDK-style projects such as this one, several assembly attributes that were historically
5 | // defined in this file are now automatically added during build and populated with
6 | // values defined in project properties. For details of which attributes are included
7 | // and how to customise this process see: https://aka.ms/assembly-info-properties
8 |
9 |
10 | // Setting ComVisible to false makes the types in this assembly not visible to COM
11 | // components. If you need to access a type in this assembly from COM, set the ComVisible
12 | // attribute to true on that type.
13 |
[assembly: ComVisible(false)]

// The following GUID is for the ID of the typelib if this project is exposed to COM.

[assembly: Guid("484dbbf7-cb55-48a0-8f84-2c8e09669a77")]

// Assembly is Windows-only (runs as a Windows service).
[assembly: System.Runtime.Versioning.SupportedOSPlatform("windows7.0")]

// Product/company metadata shown in the file properties of the built executable.
[assembly: AssemblyConfiguration("")]
[assembly: AssemblyCompany("Trimble, Inc")]
[assembly: AssemblyProduct("SQL Log Shipping Service")]
[assembly: AssemblyCopyright("Copyright © 2022 Trimble, Inc.")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]
[assembly: AssemblyVersion("1.7.3")]
27 |
--------------------------------------------------------------------------------
/sql-log-shipping-service/BackupFile.cs:
--------------------------------------------------------------------------------
1 | namespace LogShippingService
2 | {
3 | public class BackupFile
4 | {
5 | private List? _backupFiles;
6 | private List? _headers;
7 | private Config Config => AppConfig.Config;
8 |
9 | public BackupFile(string filePath, BackupHeader.DeviceTypes deviceType, DateTime lastModUtc)
10 | {
11 | FilePath = filePath;
12 | DeviceType = deviceType;
13 | LastModifiedUtc = lastModUtc;
14 | }
15 |
16 | public BackupHeader.DeviceTypes DeviceType { get; set; }
17 |
18 | public string FilePath { get; set; }
19 |
20 | public DateTime LastModifiedUtc { get; set; }
21 |
22 | public BackupHeader FirstHeader => Headers[0];
23 |
24 | public List Headers
25 | {
26 | get
27 | {
28 | _headers ??= BackupHeader.GetHeaders(FilePath, Config.Destination, DeviceType);
29 | return _headers;
30 | }
31 | }
32 |
33 | public List BackupFileList
34 | {
35 | get
36 | {
37 | _backupFiles ??= BackupFileListRow.GetFileList(FilePath, Config.Destination, DeviceType);
38 | return _backupFiles;
39 | }
40 | }
41 | }
42 | }
--------------------------------------------------------------------------------
/sql-log-shipping-service/BackupFileListRow.cs:
--------------------------------------------------------------------------------
1 | using System.Data;
2 | using System.Numerics;
3 |
4 | namespace LogShippingService
5 | {
6 | public class BackupFileListRow
7 | {
8 | public string LogicalName { get; set; }
9 | public string PhysicalName { get; set; }
10 |
11 | public string FileName => Path.GetFileName(PhysicalName);
12 |
13 | public char Type { get; set; }
14 |
15 | public string? FileGroupName { get; set; }
16 |
17 | public BigInteger Size { get; set; }
18 |
19 | public BigInteger MaxSize { get; set; }
20 |
21 | public long FileID { get; set; }
22 | public BigInteger CreateLSN { get; set; }
23 |
24 | public BigInteger? DropLSN { get; set; }
25 |
26 | public Guid UniqueId { get; set; }
27 |
28 | public BigInteger? ReadOnlyLSN { get; set; }
29 |
30 | public BigInteger? ReadWriteLSN { get; set; }
31 |
32 | public long BackupSizeInBytes { get; set; }
33 |
34 | public int SourceBlockSize { get; set; }
35 |
36 | public int FileGroupID { get; set; }
37 |
38 | public Guid? LogGroupGUID { get; set; }
39 |
40 | public BigInteger? DifferentialBaseLSN { get; set; }
41 |
42 | public Guid? DifferentialBaseGUID { get; set; }
43 |
44 | public bool IsReadOnly { get; set; }
45 |
46 | public bool IsPresent { get; set; }
47 |
48 | public byte[]? TDEThumbprint { get; set; }
49 |
50 | public string? SnapshotURL { get; set; }
51 |
52 | public BackupFileListRow(DataRow row)
53 | {
54 | LogicalName = (string)row["LogicalName"];
55 | PhysicalName = (string)row["PhysicalName"];
56 | Type = Convert.ToChar(row["Type"]);
57 | FileGroupName = row["FileGroupName"] as string;
58 | Size = BackupHeader.GetBigInteger(row, "Size");
59 | MaxSize = BackupHeader.GetBigInteger(row, "MaxSize");
60 | FileID = (long)row["FileID"];
61 | CreateLSN = BackupHeader.GetBigInteger(row, "CreateLSN");
62 | DropLSN = BackupHeader.GetNullableBigInteger(row, "DropLSN");
63 | UniqueId = (Guid)row["UniqueId"];
64 | ReadOnlyLSN = BackupHeader.GetNullableBigInteger(row, "ReadOnlyLSN");
65 | ReadWriteLSN = BackupHeader.GetNullableBigInteger(row, "ReadWriteLSN");
66 | BackupSizeInBytes = (long)row["BackupSizeInBytes"];
67 | SourceBlockSize = (int)row["SourceBlockSize"];
68 | FileGroupID = (int)row["FileGroupID"];
69 | LogGroupGUID = row["LogGroupGUID"] == DBNull.Value ? null : (Guid)row["LogGroupGUID"];
70 | DifferentialBaseLSN = BackupHeader.GetNullableBigInteger(row, "DifferentialBaseLSN");
71 | DifferentialBaseGUID = row["DifferentialBaseGUID"] == DBNull.Value ? null : (Guid)row["DifferentialBaseGUID"];
72 | IsReadOnly = (bool)row["IsReadOnly"];
73 | IsPresent = (bool)row["IsPresent"];
74 | if (row.Table.Columns.Contains("TDEThumbprint"))
75 | {
76 | TDEThumbprint = row["TDEThumbprint"] == DBNull.Value ? null : (byte[])row["TDEThumbprint"];
77 | }
78 | if (row.Table.Columns.Contains("SnapshotURL"))
79 | {
80 | SnapshotURL = row["SnapshotURL"] as string;
81 | }
82 | }
83 |
84 | public static List GetFileList(string backupFile, string connectionString,
85 | BackupHeader.DeviceTypes deviceType) =>
86 | GetFileList(new List() { backupFile }, connectionString, deviceType);
87 |
88 | public static List GetFileList(List backupFiles, string connectionString, BackupHeader.DeviceTypes deviceType)
89 | {
90 | List fileList = new();
91 | var sql = DataHelper.GetFileListOnlyScript(backupFiles, deviceType);
92 | var dt = DataHelper.GetDataTable(sql, connectionString);
93 | foreach (DataRow row in dt.Rows)
94 | {
95 | fileList.Add(new BackupFileListRow(row));
96 | }
97 | return fileList;
98 | }
99 | }
100 | }
--------------------------------------------------------------------------------
/sql-log-shipping-service/BackupHeader.cs:
--------------------------------------------------------------------------------
1 | using System.Data;
2 | using System.Numerics;
3 | using Serilog;
4 | using Serilog.Events;
5 | using SerilogTimings;
6 |
7 | namespace LogShippingService
8 | {
9 | public class BackupHeader
10 | {
11 | public enum BackupTypes
12 | {
13 | DatabaseFull = 1,
14 | TransactionLog = 2,
15 | File = 4,
16 | DatabaseDiff = 5,
17 | FileDiff = 6,
18 | Partial = 7,
19 | PartialDiff = 8
20 | }
21 |
22 | public enum DeviceTypes
23 | {
24 | Unknown = -1,
25 | Disk = 2,
26 | Diskette = 3,
27 | Tape = 5,
28 | Pipe = 6,
29 | Virtual = 7,
30 | Url = 9,
31 | UrlPhysical = 109,
32 | TapePhysical = 105,
33 | DiskPhysical = 102
34 | }
35 |
36 | public enum HeaderVerificationStatus
37 | {
38 | Verified,
39 | TooRecent,
40 | TooEarly,
41 | WrongDatabase,
42 | Other
43 | }
44 |
45 | public string? BackupName { get; set; }
46 | public string? BackupDescription { get; set; }
47 | public BackupTypes BackupType { get; set; }
48 | public DateTime? ExpirationDate { get; set; }
49 | public bool Compressed { get; set; }
50 | public int Position { get; set; }
51 | public DeviceTypes DeviceType { get; set; }
52 |
53 | public string UserName { get; set; } = null!;
54 | public string ServerName { get; set; } = null!;
55 | public string DatabaseName { get; set; } = null!;
56 |
57 | public int DatabaseVersion { get; set; }
58 |
59 | public DateTime DatabaseCreationDate { get; set; }
60 |
61 | public BigInteger FirstLSN { get; set; }
62 | public BigInteger LastLSN { get; set; }
63 | public BigInteger CheckpointLSN { get; set; }
64 |
65 | public BigInteger DatabaseBackupLSN { get; set; }
66 |
67 | public BigInteger? ForkPointLSN { get; set; }
68 |
69 | public BigInteger? DifferentialBaseLSN { get; set; }
70 | public BigInteger BackupSize { get; set; }
71 |
72 | public DateTime BackupStartDate { get; set; }
73 |
74 | public DateTime BackupFinishDate { get; set; }
75 | public short SortOrder { get; set; }
76 |
77 | public short CodePage { get; set; }
78 |
79 | public int UnicodeLocaleId { get; set; }
80 |
81 | public int UnicodeComparisonStyle { get; set; }
82 |
83 | public short CompatibilityLevel { get; set; }
84 |
85 | public int SoftwareVendorId { get; set; }
86 |
87 | public int SoftwareVersionMajor { get; set; }
88 | public int SoftwareVersionMinor { get; set; }
89 | public int SoftwareVersionBuild { get; set; }
90 |
91 | public string MachineName { get; set; } = null!;
92 |
93 | public int Flags { get; set; }
94 |
95 | public Guid BindingID { get; set; }
96 |
97 | public Guid RecoveryForkID { get; set; }
98 |
99 | public string Collation { get; set; } = null!;
100 |
101 | public Guid FamilyGUID { get; set; }
102 |
103 | public bool HasBulkLoggedData { get; set; }
104 | public bool IsSnapshot { get; set; }
105 | public bool IsReadOnly { get; set; }
106 | public bool IsSingleUser { get; set; }
107 |
108 | public bool HasBackupChecksums { get; set; }
109 |
110 | public bool IsDamaged { get; set; }
111 | public bool BeginsLogChain { get; set; }
112 | public bool HasIncompleteMetaData { get; set; }
113 |
114 | public bool IsForceOffline { get; set; }
115 | public bool IsCopyOnly { get; set; }
116 |
117 | public Guid FirstRecoveryForkID { get; set; }
118 |
119 | public string RecoveryModel { get; set; } = null!;
120 | public Guid? DifferentialBaseGUID { get; set; }
121 |
122 | public string BackupTypeDescription { get; set; } = null!;
123 |
124 | public Guid BackupSetGUID { get; set; }
125 |
126 | public long CompressedBackupSize { get; set; }
127 |
128 | public short? Containment { get; set; }
129 |
130 | public string? KeyAlgorithm { get; set; }
131 |
132 | public string? EncryptorThumbprint { get; set; }
133 |
134 | public string? EncryptorType { get; set; }
135 |
136 | public DateTime? LastValidRestoreTime { get; set; }
137 |
138 | public string? TimeZone { get; set; }
139 |
140 | public string? CompressionAlgorithm { get; set; }
141 |
142 | public BackupHeader(DataRow row)
143 | {
144 | SetFromRow(row);
145 | }
146 |
147 | private void SetFromRow(DataRow row)
148 | {
149 | BackupName = row["BackupName"] as string;
150 | BackupDescription = row["BackupDescription"] as string;
151 | BackupType = (BackupTypes)Convert.ToInt32(row["BackupType"]);
152 | ExpirationDate = row["ExpirationDate"] == DBNull.Value ? null : Convert.ToDateTime(row["ExpirationDate"]);
153 | Compressed = Convert.ToBoolean(row["Compressed"]);
154 | Position = Convert.ToInt32(row["Position"]);
155 | DeviceType = (DeviceTypes)Convert.ToInt32(row["DeviceType"]);
156 | UserName = (string)row["UserName"];
157 | ServerName = (string)row["ServerName"];
158 | DatabaseName = (string)row["DatabaseName"];
159 | DatabaseVersion = (int)row["DatabaseVersion"];
160 | DatabaseCreationDate = (DateTime)row["DatabaseCreationDate"];
161 | BackupSize = GetBigInteger(row, "BackupSize");
162 | FirstLSN = GetBigInteger(row, "FirstLSN");
163 | LastLSN = GetBigInteger(row, "LastLSN");
164 | CheckpointLSN = GetBigInteger(row, "CheckpointLSN");
165 | DatabaseBackupLSN = GetBigInteger(row, "DatabaseBackupLSN");
166 | BackupStartDate = (DateTime)row["BackupStartDate"];
167 | BackupFinishDate = (DateTime)row["BackupFinishDate"];
168 | SortOrder = Convert.ToInt16(row["SortOrder"]);
169 | CodePage = Convert.ToInt16(row["CodePage"]);
170 | UnicodeLocaleId = Convert.ToInt32(row["UnicodeLocaleId"]);
171 | UnicodeComparisonStyle = Convert.ToInt32(row["UnicodeComparisonStyle"]);
172 | CompatibilityLevel = Convert.ToInt16(row["CompatibilityLevel"]);
173 | SoftwareVendorId = Convert.ToInt32(row["SoftwareVendorId"]);
174 | SoftwareVersionMajor = Convert.ToInt32(row["SoftwareVersionMajor"]);
175 | SoftwareVersionMinor = Convert.ToInt32(row["SoftwareVersionMinor"]);
176 | SoftwareVersionBuild = Convert.ToInt32(row["SoftwareVersionBuild"]);
177 | MachineName = (string)row["MachineName"];
178 | Flags = Convert.ToInt32(row["Flags"]);
179 | BindingID = (Guid)row["BindingID"];
180 | RecoveryForkID = (Guid)row["RecoveryForkID"];
181 | Collation = (string)row["Collation"];
182 | FamilyGUID = (Guid)row["FamilyGUID"];
183 | HasBulkLoggedData = (bool)row["HasBulkLoggedData"];
184 | IsSnapshot = (bool)row["IsSnapshot"];
185 | IsReadOnly = (bool)row["IsReadOnly"];
186 | IsSingleUser = (bool)row["IsSingleUser"];
187 | HasBackupChecksums = (bool)row["HasBackupChecksums"];
188 | IsDamaged = (bool)row["IsDamaged"];
189 | BeginsLogChain = (bool)row["BeginsLogChain"];
190 | HasIncompleteMetaData = (bool)row["HasIncompleteMetaData"];
191 | IsForceOffline = (bool)row["IsForceOffline"];
192 | IsCopyOnly = (bool)row["IsCopyOnly"];
193 | FirstRecoveryForkID = (Guid)row["FirstRecoveryForkID"];
194 | ForkPointLSN = GetNullableBigInteger(row, "ForkPointLSN");
195 | RecoveryModel = (string)row["RecoveryModel"];
196 | DifferentialBaseLSN = GetNullableBigInteger(row, "DifferentialBaseLSN");
197 | DifferentialBaseGUID = row["DifferentialBaseGUID"] == DBNull.Value ? null : (Guid)row["DifferentialBaseGUID"];
198 | BackupTypeDescription = (string)row["BackupTypeDescription"];
199 | BackupSetGUID = (Guid)row["BackupSetGUID"];
200 | CompressedBackupSize = (long)row["CompressedBackupSize"];
201 |
202 | if (row.Table.Columns.Contains("containment"))
203 | {
204 | Containment = Convert.ToInt16(row["containment"]);
205 | }
206 | if (row.Table.Columns.Contains("KeyAlgorithm"))
207 | {
208 | KeyAlgorithm = row["KeyAlgorithm"] as string;
209 | }
210 | if (row.Table.Columns.Contains("EncryptorThumbprint"))
211 | {
212 | EncryptorThumbprint = row["EncryptorThumbprint"] as string;
213 | }
214 | if (row.Table.Columns.Contains("EncryptorType"))
215 | {
216 | EncryptorType = row["EncryptorType"] as string;
217 | }
218 | if (row.Table.Columns.Contains("LastValidRestoreTime"))
219 | {
220 | LastValidRestoreTime = row["LastValidRestoreTime"] == DBNull.Value ? null : (DateTime)row["LastValidRestoreTime"];
221 | }
222 | if (row.Table.Columns.Contains("TimeZone"))
223 | {
224 | TimeZone = row["TimeZone"] as string;
225 | }
226 | if (row.Table.Columns.Contains("CompressionAlgorithm"))
227 | {
228 | CompressionAlgorithm = row["CompressionAlgorithm"] as string;
229 | }
230 | }
231 |
232 | public static List GetHeaders(string backupFile, string connectionString, DeviceTypes deviceType)
233 | {
234 | return GetHeaders(new List(){backupFile}, connectionString, deviceType);
235 | }
236 |
237 | public static List GetHeaders(List backupFiles, string connectionString, DeviceTypes deviceType)
238 | {
239 | if (backupFiles == null || backupFiles.Count == 0)
240 | {
241 | throw new Exception("No backup files");
242 | }
243 | List headers = new();
244 | var headerSQL = DataHelper.GetHeaderOnlyScript(backupFiles, deviceType);
245 | using var op = Operation.Begin(headerSQL.Replace(Environment.NewLine," "));
246 | var dt = DataHelper.GetDataTable(headerSQL, connectionString);
247 | op.Complete();
248 |
249 | foreach (DataRow row in dt.Rows)
250 | {
251 | headers.Add(new BackupHeader(row));
252 | }
253 | return headers;
254 | }
255 |
256 | public static BigInteger? GetNullableBigInteger(DataRow row, string columnName)
257 | {
258 | return row[columnName] == DBNull.Value ? null : BigInteger.Parse(row[columnName].ToString()!);
259 | }
260 |
261 | public static BigInteger GetBigInteger(DataRow row, string columnName)
262 | {
263 | return BigInteger.Parse(row[columnName].ToString()!);
264 | }
265 | }
266 | }
--------------------------------------------------------------------------------
/sql-log-shipping-service/CommandLineOptions.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Linq;
4 | using System.Reflection;
5 | using System.Text;
6 | using System.Threading.Tasks;
7 | using CommandLine;
8 |
9 | namespace LogShippingService
10 | {
11 | internal class CommandLineOptions
12 | {
13 | [Option("Destination", Required = false, HelpText = "Target server connection string. SQL Instance to restore transaction logs to.")]
14 | public string? Destination { get; set; }
15 |
16 | [Option("LogFilePath", Required = false, HelpText = @"Path for transaction logs. Most include {DatabaseName} token. Don't include trailing '\'. e.g. \\BACKUPSERVER\Backups\SERVERNAME\{DatabaseName}\FULL")]
17 | public string? LogFilePath { get; set; }
18 |
19 | [Option("SASToken", Required = false, HelpText = "SASToken for Azure blob. Allows app to query for files in blob container.")]
20 | public string? SASToken { get; set; }
21 |
22 | [Option("ContainerUrl", Required = false, HelpText = "Azure blob container Url")]
23 | public string? ContainerUrl { get; set; }
24 |
25 | [Option("SourceConnectionString", Required = false, HelpText = "Source server connection string for database initialization (using msdb backup history). Or use FullFilePath/DiffFilePath")]
26 | public string? SourceConnectionString { get; set; }
27 |
28 | [Option("MSDBPathFind", Required = false, HelpText = "Use MSDBPathFind/MSDBPathReplace to do a find/replace on the backup paths returned from msdb history. e.g. Convert a local path to a UNC path ")]
29 | public string? MSDBPathFind { get; set; }
30 |
31 | [Option("MSDBPathReplace", Required = false, HelpText = "Use MSDBPathFind/MSDBPathReplace to do a find/replace on the backup paths returned from msdb history. e.g. Convert a local path to a UNC path ")]
32 | public string? MSDBPathReplace { get; set; }
33 |
34 | [Option("FullFilePath", Required = false, HelpText = @"Full backup file path. Used to initialize new databases. Include {DatabaseName} token in the path. Don't include trailing '\'. e.g. \\BACKUPSERVER\Backups\SERVERNAME\{DatabaseName}\FULL")]
35 | public string? FullFilePath { get; set; }
36 |
37 | [Option("DiffFilePath", Required = false, HelpText = @"Diff backup file path. Use with FullFilePath to initialize new databases. Include {DatabaseName} token in the path. Don't include trailing '\'. e.g. \\BACKUPSERVER\Backups\SERVERNAME\{DatabaseName}\DIFF")]
38 | public string? DiffFilePath { get; set; }
39 |
40 | [Option("ReadOnlyFilePath", Required = false, HelpText = @"Read only backup file path. Used to initialize new databases for databases with readonly filegroups & partial backups. Include {DatabaseName} token in the path. e.g. \\BACKUPSERVER\Backups\SERVERNAME\{DatabaseName}\READONLY")]
41 | public string? ReadOnlyFilePath { get; set; }
42 |
43 | [Option("RecoverPartialBackupWithoutReadOnly", Required = false, HelpText = @"Restore operation using partial backups will continue without readonly filegroups with this set to true.")]
44 | public bool? RecoverPartialBackupWithoutReadOnly { get; set; }
45 |
46 | [Option("PollForNewDatabasesFrequency", Required = false, HelpText = "Frequency in minutes to poll for new databases.")]
47 | public int? PollForNewDatabasesFrequency { get; set; }
48 |
49 | [Option("PollForNewDatabasesCron", Required = false, HelpText = "Cron expression. Frequency to poll for new databases.")]
50 | public string? PollForNewDatabasesCron { get; set; }
51 |
52 | [Option("MaxBackupAgeForInitialization", Required = false, HelpText = "Max age (days) of backups to use for database initialization")]
53 | public int? MaxBackupAgeForInitialization { get; set; }
54 |
55 | [Option("MoveDataFolder", Required = false, HelpText = @"Option to move data files to a new location. e.g. D:\Data")]
56 | public string? MoveDataFolder { get; set; }
57 |
58 | [Option("MoveLogFolder", Required = false, HelpText = @"Option to move log files to a new location. e.g. L:\Log")]
59 | public string? MoveLogFolder { get; set; }
60 |
61 | [Option("MoveFileStreamFolder", Required = false, HelpText = @"Option to move FILESTREAM to a new location. e.g. F:\FileStream")]
62 | public string? MoveFileStreamFolder { get; set; }
63 |
64 | [Option("InitializeSimple", Required = false, HelpText = @"Databases in SIMPLE recovery model are excluded by default.")]
65 | public bool? InitializeSimple { get; set; }
66 |
67 | [Option("IncludedDatabases", Required = false, HelpText = @"List of databases to include. All other databases are excluded. e.g. --IncludedDatabases ""DB1"" ""DB2""")]
68 | public IEnumerable? IncludedDatabases { get; set; }
69 |
70 | [Option("IncludeDatabase", Required = false, HelpText = @"Add a database to the list of included databases. e.g. --IncludeDatabase ""DB1""")]
71 | public string? IncludeDatabase { get; set; }
72 |
73 | [Option("ExcludedDatabases", Required = false, HelpText = @"List of databases to exclude. e.g. --ExcludedDatabases ""DB1"" ""DB2""")]
74 | public IEnumerable? ExcludedDatabases { get; set; }
75 |
76 | [Option("ExcludeDatabase", Required = false, HelpText = @"Add a database to the list of excluded databases. e.g. --ExcludeDatabase ""DB1""")]
77 | public string? ExcludeDatabase { get; set; }
78 |
79 | [Option("OffsetMins", Required = false, HelpText = @"Offset to deal with timezone differences.")]
80 | public int? OffsetMins { get; set; }
81 |
82 | [Option("CheckHeaders", Required = false, HelpText = @"Check headers of backup files before restore.")]
83 | public bool? CheckHeaders { get; set; }
84 |
85 | [Option("RestoreDelayMins", Required = false, HelpText = @"Minimum age of backup file before it is restored. A delay can be useful for recovering from application/user errors.")]
86 | public int? RestoreDelayMins { get; set; }
87 |
88 | [Option("StopAt", Required = false, HelpText = @"Point in time restore to the specified date/time. ")]
89 | public DateTime? StopAt { get; set; }
90 |
91 | [Option("DelayBetweenIterationsMs", Required = false, HelpText = @"Log restore operations will repeat on this schedule. Or use LogRestoreScheduleCron")]
92 | public int? DelayBetweenIterationsMs { get; set; }
93 |
94 | [Option("LogRestoreScheduleCron", Required = false, HelpText = @"Cron expression. Log restore operations will repeat on this schedule. Or use DelayBetweenIterationsMs.")]
95 | public string? LogRestoreScheduleCron { get; set; }
96 |
97 | [Option("MaxThreads", Required = false, HelpText = @"Max number of threads to use for restore operations.")]
98 | public int? MaxThreads { get; set; }
99 |
100 | [Option("Hours", Required = false, HelpText = @"Hours that log restores are allowed to run. Set to -1 to include ALL hours (default). e.g. --Hours 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23")]
101 | public IEnumerable? Hours { get; set; }
102 |
103 | [Option("StandbyFileName", Required = false, HelpText = @"Option to bring database online in STANDBY mode. Set path to file including {DatabaseName} token. e.g. --StandbyFileName ""D:\Standby\{DatabaseName}_Standby.BAK""")]
104 | public string? StandbyFileName { get; set; }
105 |
106 | [Option("KillUserConnections", Required = false, HelpText = @"Kill user connections before restore. For use with STANDBY so open connections don't prevent restore operations. Default: true.")]
107 | public bool? KillUserConnections { get; set; }
108 |
109 | [Option("KillUserConnectionsWithRollbackAfter", Required = false, HelpText = @"'WITH ROLLBACK AFTER' option for killing user connections. Default 60 seconds.")]
110 | public int? KillUserConnectionsWithRollbackAfter { get; set; }
111 |
112 | [Option("MaxProcessingTimeMins", Required = false, HelpText = @"Max time in minutes to spend processing an individual database each iteration.")]
113 | public int? MaxProcessingTimeMins { get; set; }
114 |
115 | [Option("AccessKey", Required = false, HelpText = @"S3 Access Key - use when log shipping from a S3 bucket or leave blank to use instance profile credentials")]
116 | public string? AccessKey { get; set; }
117 |
118 | [Option("SecretKey", Required = false, HelpText = @"S3 Secret Key - use when log shipping from a S3 bucket or leave blank to use instance profile credentials")]
119 | public string? SecretKey { get; set; }
120 |
121 | [Option("Run", Required = false, HelpText = "Run without saving changes to the config")]
122 | public bool Run { get; set; }
123 |
124 | [Option("RestoreDatabaseNamePrefix", Required = false, HelpText = "Change the name of the database on the destination by adding a prefix")]
125 | public string? RestoreDatabaseNamePrefix { get; set; }
126 |
127 | [Option("RestoreDatabaseNameSuffix", Required = false, HelpText = "Change the name of the database on the destination by adding a suffix")]
128 | public string? RestoreDatabaseNameSuffix { get; set; }
129 |
130 | [Option("OldName", Required = false, HelpText = "Option to restore a database with a new name. e.g. --OldName OriginalName --NewName DestinationName. Omit --NewName to remove a mapping. Repeat as required for multiple databases. Has priority over RestoreDatabaseNamePrefix/RestoreDatabaseNameSuffix")]
131 | public string? OldName { get; set; }
132 |
133 | [Option("NewName", Required = false, HelpText = "Option to restore a database with a new name. e.g. --OldName OriginalName --NewName DestinationName. Omit --NewName to remove a mapping. Repeat as required for multiple databases. Has priority over RestoreDatabaseNamePrefix/RestoreDatabaseNameSuffix")]
134 | public string? NewName { get; set; }
135 | }
136 | }
--------------------------------------------------------------------------------
/sql-log-shipping-service/Config.cs:
--------------------------------------------------------------------------------
1 | using Cronos;
2 | using Microsoft.Data.SqlClient;
3 | using Microsoft.Extensions.Configuration;
4 | using Newtonsoft.Json;
5 | using Newtonsoft.Json.Linq;
6 | using Serilog;
7 | using System.ComponentModel.DataAnnotations;
8 | using System.Drawing.Drawing2D;
9 | using System.Globalization;
10 | using System.Net.Mime;
11 | using System.Runtime.CompilerServices;
12 | using System.Text;
13 | using CommandLine;
14 | using Microsoft.Extensions.Azure;
15 | using Microsoft.Extensions.Options;
16 | using static System.Collections.Specialized.BitVector32;
17 | using static LogShippingService.FileHandling.FileHandler;
18 |
19 | namespace LogShippingService
20 | {
21 | [JsonObject()]
22 | public class Config
23 | {
24 | #region "Constants"
25 |
26 | private const int DelayBetweenIterationsMsDefault = 60000;
27 | private const int MaxThreadsDefault = 5;
28 | private const int MaxProcessingTimeMinsDefault = 60;
29 | private const int MaxBackupAgeForInitializationDefault = 14;
30 | private const int PollForNewDatabasesFrequencyDefault = 10;
31 | private const int KillUserConnectionsWithRollbackAfterDefault = 60;
32 | private bool _encryptionRequired = false;
33 | private char[] invalidPathChars => Path.GetInvalidPathChars().Concat(new[] { '"' }).ToArray();
34 |
35 | [JsonIgnore]
36 | public bool EncryptionRequired => _encryptionRequired;
37 |
38 | /// Database token to be used
39 | [JsonIgnore]
40 | public const string DatabaseToken = "{DatabaseName}";
41 |
42 | /// Config file path
43 | public static string ConfigFile => System.IO.Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "appsettings.json");
44 |
45 | #endregion "Constants"
46 |
47 | #region "File Handling"
48 |
49 | public enum FileHandlerTypes
50 | {
51 | Disk,
52 | AzureBlob,
53 | S3
54 | }
55 |
56 | [JsonIgnore]
57 | public FileHandlerTypes FileHandlerType
58 | {
59 | get
60 | {
61 | if (LogFilePath != null && LogFilePath.StartsWith("s3://"))
62 | {
63 | return FileHandlerTypes.S3;
64 | }
65 | else if (!string.IsNullOrEmpty(ContainerUrl) && !string.IsNullOrEmpty(SASToken))
66 | {
67 | return FileHandlerTypes.AzureBlob;
68 | }
69 | else
70 | {
71 | return FileHandlerTypes.Disk;
72 | }
73 | }
74 | }
75 |
76 | [JsonIgnore]
77 | public BackupHeader.DeviceTypes DeviceType => FileHandlerType == FileHandlerTypes.Disk ? BackupHeader.DeviceTypes.Disk : BackupHeader.DeviceTypes.Url;
78 |
79 | #endregion "File Handling"
80 |
81 | #region Azure
82 |
83 | /// Container URL to be used when restoring directly from Azure blob containers
84 | [JsonProperty(DefaultValueHandling = DefaultValueHandling.Ignore)]
85 | public string? ContainerUrl { get; set; }
86 |
87 | private string? _sasToken;
88 |
89 | /// SAS Token be used to allow access to Azure blob container when restoring directly from Azure blob.
90 | [JsonIgnore]
91 | public string? SASToken
92 | {
93 | get => _sasToken;
94 | set => SetSASToken(value);
95 | }
96 |
97 | private void SetSASToken(string? value)
98 | {
99 | if (string.IsNullOrEmpty(value))
100 | {
101 | _sasToken = null;
102 | }
103 | else if (EncryptionHelper.IsEncrypted(value))
104 | {
105 | try
106 | {
107 | _sasToken = EncryptionHelper.DecryptWithMachineKey(value);
108 | }
109 | catch (Exception ex)
110 | {
111 | Log.Error(ex, "Error decrypting SAS token. If the config file has been copied from another computer,please input a new SAS token from the portal");
112 | }
113 | }
114 | else
115 | {
116 | _sasToken = value.StartsWith("?") ? value : "?" + value;
117 | _encryptionRequired = true;
118 | }
119 | }
120 |
121 | [JsonProperty("SASToken", DefaultValueHandling = DefaultValueHandling.Ignore)]
122 | public string? SASTokenEncrypted
123 | {
124 | get => !string.IsNullOrEmpty(SASToken) ? EncryptionHelper.EncryptWithMachineKey(SASToken) : null;
125 | set => SetSASToken(value);
126 | }
127 |
128 | #endregion Azure
129 |
130 | #region S3
131 |
132 | [JsonProperty(DefaultValueHandling = DefaultValueHandling.Ignore)]
133 | public string? AccessKey { get; set; }
134 |
135 | [JsonIgnore]
136 | public string? SecretKey
137 | {
138 | get => _secretKey;
139 | set => SetSecretKey(value);
140 | }
141 |
142 | private string? _secretKey;
143 |
144 | [JsonProperty("SecretKey", DefaultValueHandling = DefaultValueHandling.Ignore)]
145 | public string? SecretKeyEncrypted
146 | {
147 | get => !string.IsNullOrEmpty(SecretKey) ? EncryptionHelper.EncryptWithMachineKey(SecretKey) : null;
148 | set => SetSecretKey(value);
149 | }
150 |
151 | private void SetSecretKey(string? value)
152 | {
153 | if (string.IsNullOrEmpty(value))
154 | {
155 | _secretKey = null;
156 | }
157 | else if (EncryptionHelper.IsEncrypted(value))
158 | {
159 | _secretKey = EncryptionHelper.DecryptWithMachineKey(value);
160 | }
161 | else
162 | {
163 | _secretKey = value;
164 | _encryptionRequired = true;
165 | }
166 | }
167 |
168 | #endregion S3
169 |
170 | #region BasicConfig
171 |
172 | private string _logFilePath = string.Empty;
173 |
174 | [JsonProperty(DefaultValueHandling = DefaultValueHandling.Ignore)]
175 | public string Destination { get; set; } = string.Empty;
176 |
177 | [JsonProperty(NullValueHandling = NullValueHandling.Ignore)]
178 | public string? LogFilePath
179 | {
180 | get => _logFilePath;
181 | set
182 | {
183 | if (!string.IsNullOrEmpty(value) && !value.Contains(DatabaseToken))
184 | {
185 | throw new ArgumentException($"Missing {DatabaseToken} token from LogFilePath");
186 | }
187 |
188 | if (value != null && value.IndexOfAny(invalidPathChars) >= 0)
189 | {
190 | throw new ArgumentException($"LogFilePath contains invalid characters: {value}");
191 | }
192 | _logFilePath = value ?? string.Empty;
193 | }
194 | }
195 |
196 | #endregion BasicConfig
197 |
198 | #region Schedule
199 |
200 | /// Delay between processing log restores in milliseconds
201 | public int DelayBetweenIterationsMs { get; set; } = DelayBetweenIterationsMsDefault;
202 |
203 | /// Cron schedule for log restores. Overrides DelayBetweenIterationsMs if specified
204 | [JsonProperty(NullValueHandling = NullValueHandling.Ignore)]
205 | public string? LogRestoreScheduleCron
206 | {
207 | get => LogRestoreCron?.ToString();
208 | set => LogRestoreCron = value != null ? CronExpression.Parse(value) : null;
209 | }
210 |
211 | /// Return if cron schedule should be used for log restores
212 | [JsonIgnore]
213 | public bool UseLogRestoreScheduleCron => LogRestoreCron != null;
214 |
215 | /// Cron expression for generating next log restore time
216 | [JsonIgnore]
217 | public CronExpression? LogRestoreCron;
218 |
219 | /// Timezone offset to handle timezone differences if needed
220 | [JsonProperty(DefaultValueHandling = DefaultValueHandling.Ignore)]
221 | public int OffsetMins { get; set; }
222 |
223 | /// Maximum amount of time to spend processing log restores for a single database in minutes.
224 | public int MaxProcessingTimeMins { get; set; } = MaxProcessingTimeMinsDefault;
225 |
226 | private HashSet _hours = new();
227 |
228 | /// Hours where log restores will run. Default is all hours. 0..23
229 | public HashSet Hours
230 | {
231 | get => _hours;
232 | set
233 | {
234 | if (value is { Count: > 24 })
235 | {
236 | throw new ArgumentException("Too many arguments specified for Hours");
237 | }
238 | else if (value is { Count: 1 } && value.First() == -1)
239 | {
240 | _hours = DefaultHours;
241 | }
242 | else if (value != null && value.Any(h => h is < 0 or > 23))
243 | {
244 | throw new ArgumentException("Hours should have the values 0-23");
245 | }
246 | else
247 | {
248 | _hours = value ?? DefaultHours;
249 | }
250 | }
251 | }
252 |
253 | public static readonly HashSet DefaultHours = new()
254 | {
255 | 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
256 | 14, 15, 16, 17, 18, 19, 20, 21, 22, 23
257 | };
258 |
        // How often to poll for new databases to initialize. Default 10.
        // NOTE(review): unit (presumably minutes) is not visible here — confirm against the poller.
        public int PollForNewDatabasesFrequency { get; set; } = PollForNewDatabasesFrequencyDefault;

        /// Cron schedule for initializing new databases. Overrides PollForNewDatabasesFrequency if specified
        [JsonProperty(NullValueHandling = NullValueHandling.Ignore)]
        public string? PollForNewDatabasesCron
        {
            // Round-trips the parsed expression back to string form for serialization.
            get => PollForNewDatabasesCronExpression?.ToString();
            // CronExpression.Parse throws on an invalid cron string.
            set => PollForNewDatabasesCronExpression = value != null ? CronExpression.Parse(value) : null;
        }

        /// Cron expression for generating next database initialization time
        [JsonIgnore]
        public CronExpression? PollForNewDatabasesCronExpression;

        /// Return if cron schedule should be used for database initialization
        [JsonIgnore]
        public bool UsePollForNewDatabasesCron => PollForNewDatabasesCronExpression != null;
276 |
277 | #endregion Schedule
278 |
279 | #region Standby
280 |
281 | private string? _standbyFileName;
282 |
283 | /// Path to standby file which should contain {DatabaseName} token to be replaced with database name. If null, standby will not be used.
284 | [JsonProperty(DefaultValueHandling = DefaultValueHandling.Ignore)]
285 | public string? StandbyFileName
286 | {
287 | get => _standbyFileName;
288 | set
289 | {
290 | if (!string.IsNullOrEmpty(value) && !value.Contains(DatabaseToken))
291 | {
292 | throw new ArgumentException($"Missing {DatabaseToken} token from StandbyFileName");
293 | }
294 | _standbyFileName = value;
295 | }
296 | }
297 |
298 | /// Kill user connections to the databases to allow restores to proceed
299 | public bool KillUserConnections { get; set; } = true;
300 |
301 | /// Killed user connections will be rolled back after the specified number of seconds. Defaults to 60 seconds.
302 | public int KillUserConnectionsWithRollbackAfter { get; set; } = KillUserConnectionsWithRollbackAfterDefault;
303 |
304 | #endregion Standby
305 |
306 | #region Initialization
307 |
308 | private string? _fullFilePath;
309 | private string? _diffFilePath;
310 | private string? _readOnlyFilePath;
311 |
312 | /// Full backup path for initialization of new databases. If null, initialization from disk will not be performed. e.g. \BACKUPSERVER\Backups\SERVERNAME\{DatabaseName}\FULL
313 | [JsonProperty(DefaultValueHandling = DefaultValueHandling.Ignore)]
314 | public string? FullFilePath
315 | {
316 | get => _fullFilePath;
317 | set
318 | {
319 | if (!string.IsNullOrEmpty(value) && !value.Contains(DatabaseToken))
320 | {
321 | throw new ArgumentException($"Missing {DatabaseToken} token from FullFilePath");
322 | }
323 | // Check if path contains invalid characters
324 | if (value != null && value.IndexOfAny(invalidPathChars) >= 0)
325 | {
326 | throw new ArgumentException($"FullFilePath contains invalid characters: {value}");
327 | }
328 | _fullFilePath = value;
329 | }
330 | }
331 |
332 | /// Diff backup path for initialization of new databases. If null, initialization will not use diff backups. e.g. \BACKUPSERVER\Backups\SERVERNAME\{DatabaseName}\DIFF
333 | [JsonProperty(DefaultValueHandling = DefaultValueHandling.Ignore)]
334 | public string? DiffFilePath
335 | {
336 | get => _diffFilePath;
337 | set
338 | {
339 | if (!string.IsNullOrEmpty(value) && !value.Contains(DatabaseToken))
340 | {
341 | throw new ArgumentException($"Missing {DatabaseToken} token from DiffFilePath");
342 | }
343 | if (value != null && value.IndexOfAny(invalidPathChars) >= 0)
344 | {
345 | throw new ArgumentException($"DiffFilePath contains invalid characters: {value}");
346 | }
347 | _diffFilePath = value;
348 | }
349 | }
350 |
351 | /// List of databases to include in log shipping. If empty, all databases will be included.
352 | public HashSet IncludedDatabases { get; set; } = new();
353 |
354 | /// List of databases to exclude from log shipping. If empty, all databases will be included.
355 | public HashSet ExcludedDatabases { get; set; } = new();
356 |
357 | /// Source connection string for initialization of new databases from msdb. Overrides FullFilePath and DiffFilePath if specified.
358 | [JsonProperty(DefaultValueHandling = DefaultValueHandling.Ignore)]
359 | public string? SourceConnectionString { get; set; }
360 |
361 | /// Option to initialize databases using simple recovery model. These databases can't be used for log shipping but we might want to restore in case of disaster recovery.
362 | [JsonProperty(DefaultValueHandling = DefaultValueHandling.Ignore)]
363 | public bool InitializeSimple { get; set; }
364 |
365 | /// Max age of backups to use for initialization in days. Defaults to 14 days. Prevents old backups been used to initialize.
366 | public int MaxBackupAgeForInitialization { get; set; } = MaxBackupAgeForInitializationDefault;
367 |
368 | /// Path to move data files to after initialization. If null, files will be restored to their original location
369 | [JsonProperty(DefaultValueHandling = DefaultValueHandling.Ignore)]
370 | public string? MoveDataFolder { get; set; }
371 |
372 | /// Path to move log files to after initialization. If null, files will be restored to their original location
373 | [JsonProperty(DefaultValueHandling = DefaultValueHandling.Ignore)]
374 | public string? MoveLogFolder { get; set; }
375 |
376 | /// Path to move filestream folders to after initialization. If null, folders will be restored to their original location
377 | [JsonProperty(DefaultValueHandling = DefaultValueHandling.Ignore)]
378 | public string? MoveFileStreamFolder { get; set; }
379 |
380 | /// ReadOnly partial backup path for initialization of new databases.
381 | [JsonProperty(DefaultValueHandling = DefaultValueHandling.Ignore)]
382 | public string? ReadOnlyFilePath
383 | {
384 | get => _readOnlyFilePath;
385 | set
386 | {
387 | if (!string.IsNullOrEmpty(value) && !value.Contains(DatabaseToken))
388 | {
389 | throw new ArgumentException($"Missing {DatabaseToken} from ReadOnlyFilePath");
390 | }
391 | _readOnlyFilePath = value;
392 | }
393 | }
394 |
395 | /// Option to recover partial backups without readonly
396 | [JsonProperty(DefaultValueHandling = DefaultValueHandling.Ignore)]
397 | public bool RecoverPartialBackupWithoutReadOnly { get; set; }
398 |
399 | /// Find part of find/replace for backup paths from msdb history. e.g. Convert local paths to UNC paths
400 | [JsonProperty(DefaultValueHandling = DefaultValueHandling.Ignore)]
401 | public string? MSDBPathFind { get; set; }
402 |
403 | /// Replace part of find/replace for backup paths from msdb history. e.g. Convert local paths to UNC paths
404 | [JsonProperty(DefaultValueHandling = DefaultValueHandling.Ignore)]
405 | public string? MSDBPathReplace { get; set; }
406 |
407 | [JsonProperty(DefaultValueHandling = DefaultValueHandling.Ignore)]
408 | public string? RestoreDatabaseNamePrefix { get; set; }
409 |
410 | [JsonProperty(DefaultValueHandling = DefaultValueHandling.Ignore)]
411 | public string? RestoreDatabaseNameSuffix { get; set; }
412 |
413 | [JsonProperty(DefaultValueHandling = DefaultValueHandling.Ignore)]
414 | public Dictionary? DatabaseNameMapping { get; set; }
415 |
416 | [JsonIgnore]
417 | public Dictionary SourceToDestinationMapping => _sourceToDestinationMapping ??= DatabaseNameMapping?.ToDictionary(kvp => kvp.Key.ToLower(), kvp => kvp.Value) ?? new();
418 |
419 | private Dictionary? _destinationToSourceMapping;
420 | private Dictionary? _sourceToDestinationMapping;
421 |
422 | [JsonIgnore]
423 | public Dictionary DestinationToSourceMapping => _destinationToSourceMapping ??= DatabaseNameMapping?.ToDictionary(kvp => kvp.Value.ToLower(), kvp => kvp.Key) ?? new();
424 |
425 | #endregion Initialization
426 |
427 | #region OtherOptions
428 |
        /// Option to check headers. Defaults to true
        public bool CheckHeaders { get; set; } = true;

        /// Max number of threads to use for log restores and database initialization (each can use up to MaxThreads)
        public int MaxThreads { get; set; } = MaxThreadsDefault;

        // Delay before restoring backups, in minutes.
        // NOTE(review): presumably keeps the secondary intentionally behind the primary — confirm in LogShipping.cs.
        [JsonProperty(DefaultValueHandling = DefaultValueHandling.Ignore)]
        public int RestoreDelayMins { get; set; }

        // Point in time to stop restores at.
        // NOTE(review): exact semantics (RESTORE ... STOPAT vs. ceasing restores) not visible here — confirm at usage site.
        [JsonProperty(DefaultValueHandling = DefaultValueHandling.Ignore)]
        public DateTime StopAt { get; set; }
440 |
441 | #endregion OtherOptions
442 |
443 | #region Serialization
444 |
445 | public bool ShouldSerializeDelayBetweenIterationsMs()
446 | {
447 | return DelayBetweenIterationsMs != DelayBetweenIterationsMsDefault;
448 | }
449 |
450 | public bool ShouldSerializeCheckHeaders()
451 | {
452 | return CheckHeaders == false;
453 | }
454 |
455 | public bool ShouldSerializeIncludedDatabases()
456 | {
457 | return IncludedDatabases.Count != 0;
458 | }
459 |
460 | public bool ShouldSerializeExcludedDatabases()
461 | {
462 | return ExcludedDatabases.Count != 0;
463 | }
464 |
465 | public bool ShouldSerializeKillUserConnections()
466 | {
467 | return KillUserConnections == false;
468 | }
469 |
470 | public bool ShouldSerializeHours()
471 | {
472 | return Hours != DefaultHours && Hours.Count > 0;
473 | }
474 |
475 | public bool ShouldSerializeMaxThreads()
476 | {
477 | return MaxThreads != MaxThreadsDefault;
478 | }
479 |
480 | public bool ShouldSerializeMaxProcessingTimeMins()
481 | {
482 | return MaxProcessingTimeMins != MaxProcessingTimeMinsDefault;
483 | }
484 |
485 | public bool ShouldSerializeMaxBackupAgeForInitialization()
486 | {
487 | return MaxBackupAgeForInitialization != MaxBackupAgeForInitializationDefault;
488 | }
489 |
490 | public bool ShouldSerializePollForNewDatabasesFrequency()
491 | {
492 | return PollForNewDatabasesFrequency != PollForNewDatabasesFrequencyDefault;
493 | }
494 |
495 | public bool ShouldSerializeKillUserConnectionsWithRollbackAfter()
496 | {
497 | return KillUserConnectionsWithRollbackAfter != KillUserConnectionsWithRollbackAfterDefault;
498 | }
499 |
500 | #endregion Serialization
501 |
502 | public void ValidateConfig()
503 | {
504 | if (!string.IsNullOrEmpty(ContainerUrl) && string.IsNullOrEmpty(SASToken) && ContainerUrl.StartsWith("https://", StringComparison.InvariantCultureIgnoreCase))
505 | {
506 | var message = "SASToken is required with ContainerUrl";
507 | Log.Error(message);
508 | throw new ArgumentException(message);
509 | }
510 | if (string.IsNullOrEmpty(Destination))
511 | {
512 | throw new ValidationException("Destination connection string should be configured");
513 | }
514 |
515 | if (string.IsNullOrEmpty(LogFilePath))
516 | {
517 | throw new ValidationException("LogFilePath should be configured");
518 | }
519 | if (AppConfig.Config.EncryptionRequired)
520 | {
521 | Log.Information("Saving config with encryption.");
522 | AppConfig.Config.Save();
523 | }
524 | if (_hours.Count == 0)
525 | {
526 | _hours = DefaultHours;
527 | }
528 | }
529 |
530 | #region CommandLine
531 |
532 | public bool ApplyCommandLineOptions(string[] args)
533 | {
534 | var serviceArgs = new[] { "-displayname", "-servicename" };
535 | if (args.Length == 0) return false;
536 | // We might have -displayname or -servicename as arguments when running as a service. In this case we don't want to process the command line arguments
537 | if (args.Any(arg => serviceArgs.Contains(arg, StringComparer.OrdinalIgnoreCase)))
538 | {
539 | return false;
540 | }
541 | var cfg = AppConfig.Config;
542 |
543 | var errorCount = 0;
544 | var run = false;
545 | var result = Parser.Default.ParseArguments(args)
546 | .WithParsed(opts =>
547 | {
548 | try
549 | {
550 | if (opts.Destination != null)
551 | {
552 | Destination = opts.Destination;
553 | }
554 |
555 | if (opts.LogFilePath != null)
556 | {
557 | LogFilePath = opts.LogFilePath;
558 | }
559 |
560 | if (opts.SASToken != null)
561 | {
562 | SASToken = opts.SASToken;
563 | }
564 |
565 | if (opts.SourceConnectionString != null)
566 | {
567 | SourceConnectionString = opts.SourceConnectionString;
568 | }
569 |
570 | if (opts.MSDBPathFind != null)
571 | {
572 | MSDBPathFind = opts.MSDBPathFind;
573 | }
574 |
575 | if (opts.MSDBPathReplace != null)
576 | {
577 | MSDBPathReplace = opts.MSDBPathReplace;
578 | }
579 |
580 | if (opts.FullFilePath != null)
581 | {
582 | FullFilePath = opts.FullFilePath;
583 | }
584 |
585 | if (opts.DiffFilePath != null)
586 | {
587 | DiffFilePath = opts.DiffFilePath;
588 | }
589 |
590 | if (opts.ReadOnlyFilePath != null)
591 | {
592 | ReadOnlyFilePath = opts.ReadOnlyFilePath;
593 | }
594 |
595 | if (opts.RecoverPartialBackupWithoutReadOnly != null)
596 | {
597 | RecoverPartialBackupWithoutReadOnly = (bool)opts.RecoverPartialBackupWithoutReadOnly;
598 | }
599 |
600 | if (opts.PollForNewDatabasesFrequency != null)
601 | {
602 | PollForNewDatabasesFrequency = (int)opts.PollForNewDatabasesFrequency;
603 | }
604 |
605 | if (opts.MaxBackupAgeForInitialization != null)
606 | {
607 | MaxBackupAgeForInitialization = (int)opts.MaxBackupAgeForInitialization;
608 | }
609 |
610 | if (opts.MoveDataFolder != null)
611 | {
612 | MoveDataFolder = opts.MoveDataFolder;
613 | }
614 |
615 | if (opts.MoveLogFolder != null)
616 | {
617 | MoveLogFolder = opts.MoveLogFolder;
618 | }
619 |
620 | if (opts.MoveFileStreamFolder != null)
621 | {
622 | MoveFileStreamFolder = opts.MoveFileStreamFolder;
623 | }
624 |
625 | if (opts.InitializeSimple != null)
626 | {
627 | InitializeSimple = (bool)opts.InitializeSimple;
628 | }
629 |
630 | if (opts.IncludedDatabases != null && args.Contains("--IncludedDatabases"))
631 | {
632 | IncludedDatabases = opts.IncludedDatabases.Where(dbs => !string.IsNullOrEmpty(dbs))
633 | .ToHashSet();
634 | }
635 |
636 | if (opts.ExcludedDatabases != null && args.Contains("--ExcludedDatabases"))
637 | {
638 | ExcludedDatabases = opts.ExcludedDatabases.Where(dbs => !string.IsNullOrEmpty(dbs))
639 | .ToHashSet();
640 | }
641 |
642 | if (opts.OffsetMins != null)
643 | {
644 | OffsetMins = (int)opts.OffsetMins;
645 | }
646 |
647 | if (opts.CheckHeaders != null)
648 | {
649 | CheckHeaders = (bool)opts.CheckHeaders;
650 | }
651 |
652 | if (opts.RestoreDelayMins != null)
653 | {
654 | RestoreDelayMins = (int)opts.RestoreDelayMins;
655 | }
656 |
657 | if (opts.StopAt != null)
658 | {
659 | StopAt = (DateTime)opts.StopAt;
660 | }
661 |
662 | if (opts.LogRestoreScheduleCron != null)
663 | {
664 | try
665 | {
666 | LogRestoreScheduleCron = opts.LogRestoreScheduleCron;
667 | }
668 | catch (Exception ex)
669 | {
670 | Log.Error(ex, "Error parsing LogRestoreScheduleCron");
671 | errorCount++;
672 | }
673 |
674 | DelayBetweenIterationsMs = DelayBetweenIterationsMsDefault;
675 | }
676 |
677 | if (opts.DelayBetweenIterationsMs != null)
678 | {
679 | DelayBetweenIterationsMs = (int)opts.DelayBetweenIterationsMs;
680 | LogRestoreScheduleCron = null;
681 | }
682 |
683 | if (opts.MaxThreads != null)
684 | {
685 | MaxThreads = (int)opts.MaxThreads;
686 | }
687 |
688 | if (opts.ContainerUrl != null)
689 | {
690 | ContainerUrl = opts.ContainerUrl;
691 | }
692 |
693 | if (opts.PollForNewDatabasesCron != null)
694 | {
695 | try
696 | {
697 | PollForNewDatabasesCron = opts.PollForNewDatabasesCron;
698 | }
699 | catch (Exception ex)
700 | {
701 | Log.Error(ex, "Error parsing PollForNewDatabasesCron");
702 | errorCount++;
703 | }
704 | }
705 |
706 | if (opts.IncludeDatabase != null)
707 | {
708 | IncludedDatabases.Add(opts.IncludeDatabase);
709 | }
710 |
711 | if (opts.ExcludeDatabase != null)
712 | {
713 | ExcludedDatabases.Add(opts.ExcludeDatabase);
714 | }
715 |
716 | if (opts.Hours != null && opts.Hours.Any())
717 | {
718 | Hours = opts.Hours.ToHashSet();
719 | }
720 |
721 | if (opts.StandbyFileName != null)
722 | {
723 | StandbyFileName = opts.StandbyFileName;
724 | }
725 |
726 | if (opts.KillUserConnections != null)
727 | {
728 | KillUserConnections = (bool)opts.KillUserConnections;
729 | }
730 |
731 | if (opts.KillUserConnectionsWithRollbackAfter != null)
732 | {
733 | KillUserConnectionsWithRollbackAfter = (int)opts.KillUserConnectionsWithRollbackAfter;
734 | }
735 |
736 | if (opts.MaxProcessingTimeMins != null)
737 | {
738 | MaxProcessingTimeMins = (int)opts.MaxProcessingTimeMins;
739 | }
740 |
741 | if (opts.AccessKey != null)
742 | {
743 | AccessKey = opts.AccessKey;
744 | }
745 |
746 | if (opts.SecretKey != null)
747 | {
748 | SecretKey = opts.SecretKey;
749 | }
750 |
751 | if (opts.RestoreDatabaseNamePrefix != null)
752 | {
753 | RestoreDatabaseNamePrefix = opts.RestoreDatabaseNamePrefix == string.Empty ? null : opts.RestoreDatabaseNamePrefix;
754 | }
755 |
756 | if (opts.RestoreDatabaseNameSuffix != null)
757 | {
758 | RestoreDatabaseNameSuffix = opts.RestoreDatabaseNameSuffix == string.Empty ? null : opts.RestoreDatabaseNameSuffix;
759 | }
760 | if (opts.OldName != null)
761 | {
762 | DatabaseNameMapping ??= new Dictionary();
763 |
764 | // Remove existing key with a case insensitive comparison
765 | DatabaseNameMapping = DatabaseNameMapping.Where(kvp => !string.Equals(kvp.Key, opts.OldName, StringComparison.OrdinalIgnoreCase)).ToDictionary();
766 |
767 | if (opts.NewName != null)
768 | {
769 | // Check if value already exists with a case insensitive comparison
770 | if (DatabaseNameMapping.Any(kvp => string.Equals(kvp.Value, opts.NewName, StringComparison.OrdinalIgnoreCase)))
771 | {
772 | Log.Error("A mapping already exists for {NewName}", opts.NewName);
773 | Console.WriteLine(JsonConvert.SerializeObject(DatabaseNameMapping, Formatting.Indented));
774 | errorCount++;
775 | }
776 | else
777 | {
778 | Log.Information("Database {OldName} will be restored as {NewName}",
779 | opts.OldName, opts.NewName);
780 | DatabaseNameMapping.Add(opts.OldName, opts.NewName);
781 | }
782 | }
783 | else
784 | {
785 | Log.Information("DatabaseNameMapping removed for {OldName}", opts.OldName);
786 | }
787 | if (DatabaseNameMapping.Count == 0)
788 | {
789 | DatabaseNameMapping = null;
790 | }
791 | }
792 | else if (opts.NewName != null)
793 | {
794 | Log.Error("NewName specified without OldName");
795 | errorCount++;
796 | }
797 | run = opts.Run;
798 | }
799 | catch (Exception ex)
800 | {
801 | Log.Error(ex, "Error parsing command line options");
802 | errorCount++;
803 | }
804 | }
805 | );
806 |
807 | if (run && errorCount == 0 && result.Tag == ParserResultType.Parsed)
808 | {
809 | Log.Information("Running without saving changes to the config.");
810 | return true;
811 | }
812 | else if (errorCount == 0 && result.Tag == ParserResultType.Parsed)
813 | {
814 | Save();
815 | Console.WriteLine("Configuration updated:");
816 | Console.WriteLine(File.ReadAllText(ConfigFile));
817 | Log.Information("Configuration updated. Restart the service.");
818 | Environment.Exit(0);
819 | }
820 | else if (result.Errors.Any(ex => ex is HelpRequestedError))
821 | {
822 | if (File.Exists(ConfigFile))
823 | {
824 | Console.WriteLine("Current Config:");
825 | Console.WriteLine(File.ReadAllText(ConfigFile));
826 | }
827 | else
828 | {
829 | Console.WriteLine("No config file found. Use the command line options to configure the service.");
830 | }
831 |
832 | Environment.Exit(0);
833 | }
834 | else if (result.Errors.Any(ex => ex is VersionRequestedError))
835 | {
836 | Environment.Exit(0);
837 | }
838 | else
839 | {
840 | Log.Error("Configuration not updated. Please check the command line options.{args}", args);
841 | Environment.Exit(1);
842 | }
843 |
844 | return true;
845 | }
846 |
847 | #endregion CommandLine
848 |
849 | public void Save()
850 | {
851 | // Read the existing appsettings.json content
852 | var configFileContent = File.Exists(ConfigFile) ? File.ReadAllText(ConfigFile) : "{}";
853 | var configJson = JObject.Parse(configFileContent);
854 |
855 | // Serialize the current instance of Config to JSON
856 | var serializedConfig = JsonConvert.SerializeObject(this, Formatting.Indented);
857 |
858 | // Parse the serialized config as a JObject
859 | var updatedConfigSection = JObject.Parse(serializedConfig);
860 |
861 | // Directly set the "Config" section to the new configuration
862 | // This replaces the entire "Config" section with your updated configuration
863 | configJson["Config"] = updatedConfigSection;
864 |
865 | // Write the updated JSON back to the appsettings.json file
866 | File.WriteAllText(ConfigFile, configJson.ToString(Formatting.Indented));
867 | }
868 |
869 | public override string ToString()
870 | {
871 | var properties = GetType().GetProperties().Where(p => !Attribute.IsDefined(p, typeof(JsonIgnoreAttribute))).OrderBy(p => p.Name);
872 | var stringBuilder = new StringBuilder();
873 | foreach (var property in properties)
874 | {
875 | var value = property.GetValue(this);
876 | if (value is HashSet hashSet)
877 | {
878 | stringBuilder.AppendLine($"{property.Name}: {string.Join(", ", hashSet)}");
879 | }
880 | else if (value is HashSet stringHashSet)
881 | {
882 | stringBuilder.AppendLine($"{property.Name}: {string.Join(", ", stringHashSet)}");
883 | }
884 | else
885 | {
886 | stringBuilder.AppendLine($"{property.Name}: {value}");
887 | }
888 | }
889 | return stringBuilder.ToString();
890 | }
891 | }
892 | }
--------------------------------------------------------------------------------
/sql-log-shipping-service/DataHelper.cs:
--------------------------------------------------------------------------------
1 | using Microsoft.Data.SqlClient;
2 | using SerilogTimings;
3 | using System.Data;
4 | using System.Numerics;
5 | using System.Text;
6 |
7 | namespace LogShippingService
8 | {
9 | public class DataHelper
10 | {
11 | private static Config Config => AppConfig.Config;
12 |
13 | public static DataTable GetDataTable(string sql, string connectionString)
14 | {
15 | using var cn = new SqlConnection(connectionString);
16 | using var cmd = new SqlCommand(sql, cn) { CommandTimeout = 0 };
17 | using var da = new SqlDataAdapter(cmd);
18 | var dt = new DataTable();
19 | da.Fill(dt);
20 | return dt;
21 | }
22 |
23 | public static void Execute(string sql, string connectionString)
24 | {
25 | using var cn = new SqlConnection(connectionString);
26 | using var cmd = new SqlCommand(sql, cn) { CommandTimeout = 0 };
27 | cn.Open();
28 | cmd.ExecuteNonQuery();
29 | }
30 |
31 | public static void ExecuteWithTiming(string sql, string connectionString)
32 | {
33 | using (var op = Operation.Begin(sql))
34 | {
35 | Execute(sql, connectionString);
36 | op.Complete();
37 | }
38 | }
39 |
40 | public static string GetHeaderOnlyScript(List files, BackupHeader.DeviceTypes type)
41 | {
42 | var from = GetFromDisk(files, type);
43 | if (string.IsNullOrEmpty(from)) { return string.Empty; }
44 | StringBuilder builder = new();
45 | builder.AppendLine($"RESTORE HEADERONLY ");
46 | builder.AppendLine(from);
47 | return builder.ToString();
48 | }
49 |
50 | public static string GetFileListOnlyScript(List files, BackupHeader.DeviceTypes type)
51 | {
52 | var from = GetFromDisk(files, type);
53 | if (string.IsNullOrEmpty(from)) { return string.Empty; }
54 | StringBuilder builder = new();
55 | builder.AppendLine($"RESTORE FILELISTONLY ");
56 | builder.AppendLine(from);
57 | return builder.ToString();
58 | }
59 |
60 | public static string GetRestoreDbScript(List files, string db, BackupHeader.DeviceTypes type,
61 | bool withThrowErrorIfExists, Dictionary? fileMoves = null)
62 | {
63 | var from = GetFromDisk(files, type);
64 | if (string.IsNullOrEmpty(from)) { return string.Empty; }
65 | StringBuilder builder = new();
66 | if (withThrowErrorIfExists)
67 | {
68 | builder.AppendLine($"IF DB_ID(" + db.SqlSingleQuote() + ") IS NOT NULL");
69 | builder.AppendLine("BEGIN");
70 | builder.AppendLine("\tRAISERROR('Database already exists',11,1)");
71 | builder.AppendLine("\tRETURN");
72 | builder.AppendLine("END");
73 | builder.AppendLine();
74 | }
75 | builder.AppendLine($"RESTORE DATABASE {db.SqlQuote()} ");
76 | builder.AppendLine(from);
77 | builder.AppendLine("WITH NORECOVERY");
78 | if (fileMoves is { Count: > 0 })
79 | {
80 | foreach (var fileMove in fileMoves)
81 | {
82 | builder.AppendLine(",MOVE " + fileMove.Key.SqlSingleQuote() + " TO " + fileMove.Value.SqlSingleQuote());
83 | }
84 | }
85 | return builder.ToString();
86 | }
87 |
88 | public static Dictionary GetFileMoves(List files, BackupHeader.DeviceTypes type, string connectionString, string? dataFolder, string? logFolder, string? fileStreamFolder, string sourceDb, string targetDb)
89 | {
90 | Dictionary fileMoves = new();
91 | // Check if we need to move the files
92 | if (string.IsNullOrEmpty(dataFolder) && string.IsNullOrEmpty(logFolder) && string.IsNullOrEmpty(fileStreamFolder) && string.Equals(sourceDb, targetDb, StringComparison.OrdinalIgnoreCase)) { return fileMoves; }
93 | var list = BackupFileListRow.GetFileList(files, connectionString, type);
94 | foreach (var file in list)
95 | {
96 | var fileName = file.FileName;
97 | var movePath = file.Type switch
98 | {
99 | 'L' => logFolder,
100 | 'S' => fileStreamFolder,
101 | _ => dataFolder
102 | };
103 | // Create a new filename if the target database is different from the source database name. This should avoid filename conflicts
104 | if (!string.Equals(sourceDb, targetDb, StringComparison.OrdinalIgnoreCase))
105 | {
106 | fileName = (targetDb + "_" + file.LogicalName + Path.GetExtension(file.PhysicalName)).RemoveInvalidFileNameChars();
107 | }
108 | // Set movePath to the source folder if we don't have a location specified
109 | if (string.IsNullOrEmpty(movePath))
110 | {
111 | movePath = Path.GetDirectoryName(file.PhysicalName) ?? throw new InvalidOperationException();
112 | }
113 | fileMoves.Add(file.LogicalName, Path.Combine(movePath, fileName));
114 | }
115 | return fileMoves;
116 | }
117 |
118 | public static Dictionary GetFileMoves(List files, BackupHeader.DeviceTypes type, string sourceDb, string targetDb)
119 | {
120 | return GetFileMoves(files, type, Config.Destination, Config.MoveDataFolder, Config.MoveLogFolder,
121 | Config.MoveFileStreamFolder, sourceDb, targetDb);
122 | }
123 |
124 | public static string GetFromDisk(List files, BackupHeader.DeviceTypes type)
125 | {
126 | StringBuilder builder = new();
127 | var i = 0;
128 | builder.AppendLine("FROM");
129 | foreach (var file in files)
130 | {
131 | if (i > 0)
132 | {
133 | builder.AppendLine(",");
134 | }
135 | switch (type)
136 | {
137 | case BackupHeader.DeviceTypes.Disk:
138 | builder.Append($"DISK = N{file.SqlSingleQuote()}");
139 | break;
140 |
141 | case BackupHeader.DeviceTypes.Url:
142 | builder.Append($"URL = N{file.SqlSingleQuote()}");
143 | break;
144 |
145 | default:
146 | throw new ArgumentException("Invalid DeviceType");
147 | }
148 |
149 | i++;
150 | }
151 | return builder.ToString();
152 | }
153 |
154 | public static BigInteger GetRedoStartLSNForDB(string db, string connectionString)
155 | {
156 | using var cn = new SqlConnection(connectionString);
157 | using var cmd = new SqlCommand(SqlStrings.GetRedoStartLSN, cn) { CommandTimeout = 0 };
158 | cmd.Parameters.AddWithValue("@db", db);
159 | cn.Open();
160 | return BigInteger.Parse(cmd.ExecuteScalar().ToString() ?? throw new InvalidOperationException());
161 | }
162 | }
163 | }
--------------------------------------------------------------------------------
/sql-log-shipping-service/DatabaseInfo.cs:
--------------------------------------------------------------------------------
1 | using Microsoft.Data.SqlClient;
2 |
3 | namespace LogShippingService
4 | {
5 | public class DatabaseInfo
6 | {
7 | public string Name { get; set; } = null!;
8 | public short RecoveryModel { get; set; }
9 | public short State { get; set; }
10 | public bool IsInStandby { get; set; }
11 |
12 | public static List GetDatabaseInfo(string connectionString)
13 | {
14 | List databaseInfos = new();
15 |
16 | using var cn = new SqlConnection(connectionString);
17 |
18 | cn.Open();
19 |
20 | using SqlCommand command = new(SqlStrings.GetUserDatabases, cn);
21 |
22 | using var reader = command.ExecuteReader();
23 |
24 | while (reader.Read())
25 | {
26 | DatabaseInfo info = new()
27 | {
28 | Name = reader.GetString(0),
29 | RecoveryModel = reader.GetByte(1),
30 | State = reader.GetByte(2),
31 | IsInStandby = reader.GetBoolean(3)
32 | };
33 |
34 | // Add the object to the list.
35 | databaseInfos.Add(info);
36 | }
37 |
38 | // Return the list.
39 | return databaseInfos;
40 | }
41 | }
42 | }
--------------------------------------------------------------------------------
/sql-log-shipping-service/DatabaseInitializerBase.cs:
--------------------------------------------------------------------------------
1 | using Serilog;
2 | using static LogShippingService.BackupHeader;
3 | using System.Reflection.PortableExecutable;
4 | using LogShippingService.FileHandling;
5 | using SerilogTimings;
6 |
7 | namespace LogShippingService
8 | {
9 | public abstract class DatabaseInitializerBase
10 | {
11 | protected abstract void PollForNewDBs(CancellationToken stoppingToken);
12 |
13 | protected abstract void DoProcessDB(string sourceDb, string targetDb);
14 |
15 | private static Config Config => AppConfig.Config;
16 |
17 | protected void ProcessDB(string sourceDb, CancellationToken stoppingToken)
18 | {
19 | var targetDb = GetDestinationDatabaseName(sourceDb);
20 | if (!IsValidForInitialization(sourceDb, targetDb)) return;
21 | if (stoppingToken.IsCancellationRequested) return;
22 | try
23 | {
24 | if (LogShipping.InitializingDBs.TryAdd(sourceDb.ToLower(), sourceDb)) // To prevent log restores until initialization is complete
25 | {
26 | DoProcessDB(sourceDb, targetDb);
27 | }
28 | else
29 | {
30 | Log.Error("{sourceDb} is already initializing", sourceDb);
31 | }
32 | }
33 | catch (Exception ex)
34 | {
35 | Log.Error(ex, "Error initializing new database from backup {sourceDb}", sourceDb);
36 | }
37 | finally
38 | {
39 | LogShipping.InitializingDBs.TryRemove(sourceDb.ToLower(), out _); // Log restores can start after restore operations have completed
40 | }
41 | }
42 |
43 | protected List? DestinationDBs;
44 |
45 | public bool IsStopped { get; private set; }
46 |
47 | public abstract bool IsValidated { get; }
48 |
49 | public bool IsValidForInitialization(string sourceDb, string targetDb)
50 | {
51 | if (DestinationDBs == null || DestinationDBs.Exists(d => string.Equals(d.Name, targetDb, StringComparison.CurrentCultureIgnoreCase))) return false;
52 | var systemDbs = new[] { "master", "model", "msdb" };
53 | if (systemDbs.Any(s => s.Equals(sourceDb, StringComparison.OrdinalIgnoreCase))) return false;
54 | if (systemDbs.Any(s => s.Equals(targetDb, StringComparison.OrdinalIgnoreCase))) return false;
55 | return LogShipping.IsIncludedDatabase(sourceDb) || LogShipping.IsIncludedDatabase(targetDb);
56 | }
57 |
58 | public async Task RunPollForNewDBs(CancellationToken stoppingToken)
59 | {
60 | if (!IsValidated)
61 | {
62 | IsStopped = true;
63 | return;
64 | }
65 |
66 | long i = 0;
67 | while (!stoppingToken.IsCancellationRequested)
68 | {
69 | await WaitForNextInitialization(i, stoppingToken);
70 | i++;
71 | if (stoppingToken.IsCancellationRequested) return;
72 | try
73 | {
74 | DestinationDBs = DatabaseInfo.GetDatabaseInfo(Config.Destination);
75 | }
76 | catch (Exception ex)
77 | {
78 | Log.Error(ex, "Error getting destination databases.");
79 | break;
80 | }
81 | try
82 | {
83 | using (var op = Operation.Begin($"Initialize new databases iteration {i}"))
84 | {
85 | PollForNewDBs(stoppingToken);
86 | op.Complete();
87 | }
88 | }
89 | catch (Exception ex)
90 | {
91 | Log.Error(ex, "Error running poll for new DBs");
92 | }
93 | }
94 | Log.Information("Poll for new DBs is shutdown");
95 | IsStopped = true;
96 | }
97 |
98 | ///
99 | /// Wait for the required time before starting the next iteration. Either a delay in milliseconds or a cron schedule can be used. Also waits until active hours if configured.
100 | ///
101 | private static async Task WaitForNextInitialization(long count, CancellationToken stoppingToken)
102 | {
103 | var nextIterationStart = DateTime.Now.AddMinutes(Config.PollForNewDatabasesFrequency);
104 | if (Config.UsePollForNewDatabasesCron)
105 | {
106 | var next = Config.PollForNewDatabasesCronExpression?.GetNextOccurrence(DateTimeOffset.Now, TimeZoneInfo.Local);
107 | if (next.HasValue) // null can be returned if the value is unreachable. e.g. 30th Feb. It's not expected, but log a warning and fall back to default delay if it happens.
108 | {
109 | nextIterationStart = next.Value.DateTime;
110 | }
111 | else
112 | {
113 | Log.Warning("No next occurrence found for PollForNewDatabasesCron. Using default delay. {Delay}mins", Config.PollForNewDatabasesFrequency);
114 | }
115 | }
116 |
117 | if (Config.UsePollForNewDatabasesCron ||
118 | count > 0) // Only apply delay on first iteration if using a cron schedule
119 | {
120 | Log.Information("Next new database initialization will start at {nextIterationStart}", nextIterationStart);
121 | await Waiter.WaitUntilTime(nextIterationStart, stoppingToken);
122 | }
123 | // If active hours are configured, wait until the next active period
124 | await Waiter.WaitUntilActiveHours(stoppingToken);
125 | }
126 |
127 | protected static void ProcessRestore(string sourceDb, string targetDb, List fullFiles, List diffFiles, BackupHeader.DeviceTypes deviceType)
128 | {
129 | var fullHeader = BackupHeader.GetHeaders(fullFiles, Config.Destination, deviceType);
130 |
131 | if (fullHeader.Count > 1)
132 | {
133 | Log.Error("Backup header returned multiple rows");
134 | return;
135 | }
136 | else if (fullHeader.Count == 0)
137 | {
138 | Log.Error("Error reading backup header. 0 rows returned.");
139 | return;
140 | }
141 | else if (!string.Equals(fullHeader[0].DatabaseName, sourceDb, StringComparison.CurrentCultureIgnoreCase))
142 | {
143 | Log.Error("Backup is for {sourceDb}. Expected {expectedDB}. {fullFiles}", fullHeader[0].DatabaseName, sourceDb, fullFiles);
144 | return;
145 | }
146 | else if (fullHeader[0].RecoveryModel == "SIMPLE" && !Config.InitializeSimple)
147 | {
148 | Log.Warning("Skipping initialization of {sourceDb} due to SIMPLE recovery model. InitializeSimple can be set to alter this behaviour for disaster recovery purposes.", sourceDb);
149 | return;
150 | }
151 | else if (fullHeader[0].BackupType is not (BackupHeader.BackupTypes.DatabaseFull or BackupHeader.BackupTypes.Partial))
152 | {
153 | Log.Error("Unexpected backup type {type}. {fullFiles}", fullHeader[0].BackupType, fullFiles);
154 | }
155 | if (fullHeader[0].BackupType == BackupHeader.BackupTypes.Partial)
156 | {
157 | Log.Warning("Warning. Initializing {targetDb} from a PARTIAL backup. Additional steps might be required to restore READONLY filegroups. Check sys.master_files to ensure no files are in RECOVERY_PENDING state.", targetDb);
158 | }
159 |
160 | var moves = DataHelper.GetFileMoves(fullFiles, deviceType, Config.Destination, Config.MoveDataFolder, Config.MoveLogFolder,
161 | Config.MoveFileStreamFolder, sourceDb, targetDb);
162 | var restoreScript = DataHelper.GetRestoreDbScript(fullFiles, targetDb, deviceType, true, moves);
163 | // Restore FULL
164 | DataHelper.ExecuteWithTiming(restoreScript, Config.Destination);
165 |
166 | if (diffFiles.Count <= 0) return;
167 |
168 | // Check header for DIFF
169 | var diffHeader =
170 | BackupHeader.GetHeaders(diffFiles, Config.Destination, deviceType);
171 |
172 | if (IsDiffApplicable(fullHeader, diffHeader))
173 | {
174 | // Restore DIFF is applicable
175 | restoreScript = DataHelper.GetRestoreDbScript(diffFiles, targetDb, deviceType, false);
176 | DataHelper.ExecuteWithTiming(restoreScript, Config.Destination);
177 | }
178 | }
179 |
180 | public static bool IsDiffApplicable(List fullHeaders, List diffHeaders)
181 | {
182 | if (fullHeaders.Count == 1 && diffHeaders.Count == 1)
183 | {
184 | return IsDiffApplicable(fullHeaders[0], diffHeaders[0]);
185 | }
186 | return false;
187 | }
188 |
189 | public static bool IsDiffApplicable(BackupHeader full, BackupHeader diff) => full.CheckpointLSN == diff.DifferentialBaseLSN && full.BackupSetGUID == diff.DifferentialBaseGUID && diff.BackupType is BackupHeader.BackupTypes.DatabaseDiff or BackupHeader.BackupTypes.PartialDiff;
190 |
191 | protected static bool ValidateHeader(BackupFile file, string db, ref Guid backupSetGuid, BackupTypes backupType)
192 | {
193 | if (file.Headers is { Count: 1 })
194 | {
195 | var header = file.FirstHeader;
196 | if (!string.Equals(header.DatabaseName, db, StringComparison.OrdinalIgnoreCase))
197 | {
198 | Log.Warning("Skipping {file}. Backup is for {HeaderDB}. Expected {ExpectedDB}", file.FilePath, header.DatabaseName, db);
199 | return false;
200 | }
201 |
202 | if (header.BackupType != backupType)
203 | {
204 | Log.Warning("Skipping {file} for {sourceDb}. Backup type is {BackupType}. Expected {ExpectedBackupType}", file.FilePath, db, header.BackupType, backupType);
205 | return false;
206 | }
207 | var thisGUID = header.BackupSetGUID;
208 | if (backupSetGuid == Guid.Empty)
209 | {
210 | backupSetGuid = thisGUID; // First file in backup set
211 | }
212 | else if (backupSetGuid != thisGUID)
213 | {
214 | return false; // Belongs to a different backup set
215 | }
216 | return true;
217 | }
218 | else
219 | {
220 | Log.Warning($"Backup file contains multiple backups and will be skipped. {file.FilePath}");
221 | return false;
222 | }
223 | }
224 |
225 | public static string GetDestinationDatabaseName(string sourceDB)
226 | {
227 | if (Config.SourceToDestinationMapping.TryGetValue(sourceDB.ToLower(), out var targetDB))
228 | {
229 | return targetDB;
230 | }
231 | return Config.RestoreDatabaseNamePrefix + sourceDB + Config.RestoreDatabaseNameSuffix;
232 | }
233 |
234 | public static string GetSourceDatabaseName(string destinationDB)
235 | {
236 | if (Config.DestinationToSourceMapping.TryGetValue(destinationDB.ToLower(), out var _sourceDB))
237 | {
238 | return _sourceDB;
239 | }
240 | var prefix = Config.RestoreDatabaseNamePrefix ?? string.Empty;
241 | var suffix = Config.RestoreDatabaseNameSuffix ?? string.Empty;
242 |
243 | // remove the prefix
244 | var sourceDB = destinationDB.StartsWith(prefix) ? destinationDB[prefix.Length..] : destinationDB;
245 |
246 | // remove the suffix
247 | sourceDB = sourceDB.EndsWith(suffix) ? sourceDB[..^suffix.Length] : sourceDB;
248 |
249 | return sourceDB;
250 | }
251 |
252 | public static string GetDatabaseIdentifier(string sourceDb, string targetDb) =>
253 | string.Equals(sourceDb, targetDb, StringComparison.OrdinalIgnoreCase) ? targetDb : $"{targetDb} [From: {sourceDb}]";
254 | }
255 | }
--------------------------------------------------------------------------------
/sql-log-shipping-service/DatabaseInitializerFromDiskOrUrl.cs:
--------------------------------------------------------------------------------
1 | using System.Text;
2 | using LogShippingService.FileHandling;
3 | using Serilog;
4 |
5 | namespace LogShippingService
6 | {
7 | public class DatabaseInitializerFromDiskOrUrl : DatabaseInitializerBase
8 | {
9 | public BackupHeader.DeviceTypes DeviceType => Config.DeviceType;
10 | private static Config Config => AppConfig.Config;
11 |
12 | public override bool IsValidated
13 | {
14 | get
15 | {
16 | if (string.IsNullOrEmpty(Config.FullFilePath)) return false;
17 | if (!Config.FullFilePath.Contains(Config.DatabaseToken)) return false;
18 | if (Config.UsePollForNewDatabasesCron)
19 | {
20 | Log.Information("New DBs initialized from {type} on cron schedule: {cron}", DeviceType, Config.PollForNewDatabasesCron);
21 | }
22 | else
23 | {
24 | Log.Information("New DBs initialized from {type} every {interval} mins.", DeviceType, Config.PollForNewDatabasesFrequency);
25 | }
26 | return true;
27 | }
28 | }
29 |
30 | protected override void PollForNewDBs(CancellationToken stoppingToken)
31 | {
32 | if (string.IsNullOrEmpty(Config.FullFilePath)) return;
33 |
34 | Parallel.ForEach(FileHandler.FileHandlerInstance.GetDatabases(),
35 | new ParallelOptions { MaxDegreeOfParallelism = Config.MaxThreads },
36 | (database, state) =>
37 | {
38 | if (stoppingToken.IsCancellationRequested)
39 | {
40 | state.Stop(); // Stop the loop if cancellation is requested
41 | }
42 |
43 | ProcessDB(database, stoppingToken);
44 | });
45 | }
46 |
47 | protected override void DoProcessDB(string sourceDb, string targetDb)
48 | {
49 | var fullFolder = Config.FullFilePath?.Replace(Config.DatabaseToken, sourceDb);
50 | var diffFolder = Config.DiffFilePath?.Replace(Config.DatabaseToken, sourceDb);
51 | var readOnlyFolder = Config.ReadOnlyFilePath?.Replace(Config.DatabaseToken, sourceDb);
52 | if (fullFolder == null) { return; }
53 |
54 | var isPartial = false;
55 | List fullFiles;
56 | List diffFiles = new();
57 | string? readOnlySQL = null;
58 | var dbIdentifier = GetDatabaseIdentifier(sourceDb,targetDb);
59 | try
60 | {
61 | fullFiles = GetFilesForLastBackup(fullFolder, sourceDb, BackupHeader.BackupTypes.DatabaseFull);
62 | if (fullFiles.Count == 0 && (!string.IsNullOrEmpty(readOnlyFolder) || Config.RecoverPartialBackupWithoutReadOnly))
63 | {
64 | Log.Information("No full backups found for {db}. Checking for partial backups.", dbIdentifier);
65 | fullFiles = GetFilesForLastBackup(fullFolder, sourceDb, BackupHeader.BackupTypes.Partial);
66 | if (fullFiles.Count == 0)
67 | {
68 | throw new Exception($"No backup files for {sourceDb} found in {fullFolder}");
69 | }
70 | else
71 | {
72 | isPartial = true;
73 | var files = fullFiles[0].BackupFileList;
74 | if (files.All(f => f.IsPresent)) // We did a partial backup without any readonly filegroups
75 | {
76 | Log.Warning("Partial backup was used for {db} but backup includes all files.", dbIdentifier);
77 | }
78 | else if (!string.IsNullOrEmpty(readOnlyFolder))
79 | {
80 | readOnlySQL = GetReadOnlyRestoreCommand(fullFiles, readOnlyFolder, sourceDb,targetDb, dbIdentifier, DeviceType);
81 | Log.Debug("Restore command for readonly: {ReadOnlySQL}", readOnlySQL);
82 | if (string.IsNullOrEmpty(readOnlySQL) & !Config.RecoverPartialBackupWithoutReadOnly)
83 | {
84 | throw new Exception($"Unable to find readonly backups for {sourceDb}. To recover databases anyway use 'RecoverPartialBackupWithoutReadOnly'");
85 | }
86 | else if (string.IsNullOrEmpty(readOnlySQL))
87 | {
88 | Log.Warning("Unable to find readonly backups for {db}. Restore of partial backup will proceed. Restore READONLY filegroups manually.", dbIdentifier);
89 | }
90 | }
91 | Log.Warning("Restoring {db} from PARTIAL backup.", dbIdentifier);
92 | }
93 | }
94 | else if (fullFiles.Count == 0)
95 | {
96 | throw new Exception($"No full backups found for {sourceDb}");
97 | }
98 | }
99 | catch (Exception ex)
100 | {
101 | Log.Error(ex, "Error getting files for last FULL backup for {db} in {fullFolder}", dbIdentifier, fullFolder);
102 | return;
103 | }
104 |
105 | try
106 | {
107 | if (!string.IsNullOrEmpty(diffFolder))
108 | {
109 | diffFiles = GetFilesForLastBackup(diffFolder, sourceDb, isPartial ? BackupHeader.BackupTypes.PartialDiff : BackupHeader.BackupTypes.DatabaseDiff);
110 | if (diffFiles.Count == 0)
111 | {
112 | Log.Warning("No DIFF backups files for {db} found in {diffFolder}", dbIdentifier, diffFolder);
113 | }
114 | }
115 | else
116 | {
117 | Log.Warning("Diff backup folder {folder} does not exist.", diffFolder);
118 | }
119 | }
120 | catch (Exception ex)
121 | {
122 | Log.Warning(ex, "Error getting files for last DIFF backup for {db} in {diffFolder}. Restore will continue with FULL backup", dbIdentifier, diffFolder);
123 | }
124 |
125 | ProcessRestore(sourceDb, targetDb, fullFiles.GetFileList(), diffFiles.GetFileList(), DeviceType);
126 | if (!string.IsNullOrEmpty(readOnlySQL))
127 | {
128 | Log.Information("Restoring READONLY backups for {db}", dbIdentifier);
129 | DataHelper.ExecuteWithTiming(readOnlySQL, Config.Destination);
130 | }
131 | }
132 |
133 | public static string GetReadOnlyRestoreCommand(List fullFiles, string path, string sourceDb,string targetDb,string dbIdentifier, BackupHeader.DeviceTypes deviceType)
134 | {
135 | List fileSets = new();
136 | // Get files that are don't exist in our partial backup
137 | var missing = fullFiles[0].BackupFileList.Where(f => !f.IsPresent).ToList();
138 |
139 | if (missing.Count == 0)
140 | {
141 | Log.Warning("All files are present in the backup. Nothing to restore");
142 | return string.Empty;
143 | }
144 | Log.Information("Looking for readonly backups for {db}. Missing files: {files}", dbIdentifier, missing.Select(f => f.LogicalName).ToList());
145 | var familyGUID = fullFiles[0].FirstHeader.FamilyGUID;
146 |
147 | ReadOnlyBackupSet? backupSet = null;
148 | foreach (var backupFile in FileHandler.FileHandlerInstance.GetFiles(path, "*.BAK", DateTime.MinValue, false))
149 | {
150 | if (backupSet != null && backupSet.BackupFiles[0].FirstHeader.BackupSetGUID == backupFile.FirstHeader.BackupSetGUID) // File is part of the same set as previous. Add to previous backupset and continue
151 | {
152 | backupSet.BackupFiles.Add(backupFile);
153 | continue;
154 | }
155 | else if (missing.Count == 0) // We can restore all out
156 | {
157 | break;
158 | }
159 | if (backupFile.FirstHeader.FamilyGUID == familyGUID) // Backup belongs to the DB we are restoring
160 | {
161 | backupSet = new ReadOnlyBackupSet
162 | {
163 | ToRestore = missing.Where(mf => backupFile.BackupFileList.Any(f =>
164 | mf.UniqueId == f.UniqueId & mf.ReadOnlyLSN == f.ReadOnlyLSN)).ToList()
165 | };
166 | backupSet.BackupFiles.Add(backupFile);
167 |
168 | if (backupSet.ToRestore.Any())
169 | {
170 | fileSets.Add(backupSet);
171 | missing = missing.Where(m => backupSet.ToRestore.All(f => f.UniqueId != m.UniqueId)).ToList();
172 | }
173 | }
174 | }
175 |
176 | if (missing.Count != 0)
177 | {
178 | Log.Error("File restore error for {db}. Missing backup for files {files}", sourceDb, dbIdentifier, missing.Select(f => f.LogicalName).ToList());
179 | return string.Empty;
180 | }
181 | StringBuilder builder = new();
182 | foreach (var fileSet in fileSets)
183 | {
184 | builder.AppendLine($"RESTORE DATABASE {targetDb.SqlQuote()}");
185 | builder.AppendLine(string.Join(",\n", fileSet.ToRestore.Select(f => "FILE = " + f.LogicalName.SqlSingleQuote())));
186 | builder.AppendLine(DataHelper.GetFromDisk(fileSet.BackupFiles.GetFileList(), deviceType));
187 | builder.AppendLine("WITH NORECOVERY");
188 | builder.AppendLine();
189 | }
190 | return builder.ToString();
191 | }
192 |
193 | public static List GetFilesForLastBackup(string folder, string db, BackupHeader.BackupTypes backupType)
194 | {
195 | List fileList = new();
196 | var directory = new DirectoryInfo(folder);
197 |
198 | var files = FileHandler.FileHandlerInstance.GetFiles(folder, "*.bak",
199 | DateTime.Now.AddDays(-Config.MaxBackupAgeForInitialization), false);
200 |
201 | BackupFile? previousFile = null;
202 | var backupSetGuid = Guid.Empty;
203 | foreach (var file in files.TakeWhile(file => previousFile == null || file.LastModifiedUtc >= previousFile.LastModifiedUtc.AddMinutes(-60))) // Backups that are part of the same set should have similar last write time
204 | {
205 | try
206 | {
207 | if (!ValidateHeader(file, db, ref backupSetGuid, backupType)) continue;
208 | fileList.Add(file);
209 | previousFile = file;
210 | }
211 | catch (Exception ex)
212 | {
213 | Log.Error(ex, "Error reading backup header for {file}", file.FilePath);
214 | continue;
215 | }
216 | }
217 | return fileList;
218 | }
219 | }
220 | }
--------------------------------------------------------------------------------
/sql-log-shipping-service/DatabaseInitializerFromMSDB.cs:
--------------------------------------------------------------------------------
1 | using Serilog;
2 | using System;
3 | using System.Collections.Generic;
4 | using System.Data;
5 | using System.Linq;
6 | using System.Numerics;
7 | using System.Runtime.CompilerServices;
8 | using System.Text;
9 | using System.Threading.Tasks;
10 | using Microsoft.Identity.Client;
11 | using SerilogTimings;
12 | using System.ServiceProcess;
13 | using Microsoft.Data.SqlClient;
14 | using Microsoft.Extensions.FileProviders.Physical;
15 | using static LogShippingService.BackupHeader;
16 |
17 | namespace LogShippingService
18 | {
19 | public class DatabaseInitializerFromMSDB : DatabaseInitializerBase
20 | {
21 | private static Config Config => AppConfig.Config;
22 |
23 | public override bool IsValidated
24 | {
25 | get
26 | {
27 | if (string.IsNullOrEmpty(Config.SourceConnectionString))
28 | {
29 | return false;
30 | }
31 | if (Config.UsePollForNewDatabasesCron)
32 | {
33 | Log.Information("New DBs initialized from msdb history on cron schedule: {cron}", Config.PollForNewDatabasesCron);
34 | }
35 | else
36 | {
37 | Log.Information("New DBs initialized from msdb history every {interval} mins.", Config.PollForNewDatabasesFrequency);
38 | }
39 | return true;
40 | }
41 | }
42 |
43 | ///
44 | /// Check for new DBs in the source connection that don't exist in the destination.
45 | ///
46 | protected override void PollForNewDBs(CancellationToken stoppingToken)
47 | {
48 | List newDBs;
49 | using (Operation.Time("Polling for new databases using msdb history"))
50 | {
51 | newDBs = GetNewDatabases();
52 | }
53 |
54 | Log.Information("NewDBs:{Count}", newDBs.Count);
55 | Parallel.ForEach(newDBs.AsEnumerable(),
56 | new ParallelOptions() { MaxDegreeOfParallelism = Config.MaxThreads },
57 | newDb =>
58 | {
59 | ProcessDB(newDb.Name, stoppingToken);
60 | });
61 | }
62 |
63 | ///
64 | /// Get the last FULL/DIFF backup for the database from msdb history & restore
65 | ///
66 | /// Database name
67 | protected override void DoProcessDB(string sourceDb, string targetDb)
68 | {
69 | if (Config.SourceConnectionString == null) return;
70 | var dbIdentifier = GetDatabaseIdentifier(sourceDb, targetDb);
71 | Log.Information("Initializing new database: {db}", dbIdentifier);
72 | var lastFull = new LastBackup(sourceDb, Config.SourceConnectionString, BackupHeader.BackupTypes.DatabaseFull);
73 | var lastDiff = new LastBackup(sourceDb, Config.SourceConnectionString, BackupHeader.BackupTypes.DatabaseDiff);
74 | ReplacePaths(ref lastFull); // Replace paths if necessary. e.g. Convert local path to UNC path
75 | ReplacePaths(ref lastDiff); // Replace paths if necessary. e.g. Convert local path to UNC path
76 | if (lastFull.FileList.Count == 0)
77 | {
78 | Log.Error("No backups available to initialize {db}", dbIdentifier);
79 | return;
80 | }
81 |
82 | Log.Debug("Last full for {db}: {lastFull}", dbIdentifier, lastFull.BackupFinishDate);
83 | Log.Debug("Last diff for {db}: {lastDiff}", dbIdentifier, lastDiff.BackupFinishDate);
84 |
85 | var fullHeader = lastFull.GetHeader(Config.Destination);
86 |
87 | lastFull.Restore(targetDb);
88 |
89 | // Check if diff backup should be applied
90 | if (lastDiff.BackupFinishDate <= lastFull.BackupFinishDate) return;
91 |
92 | var diffHeader = lastDiff.GetHeader(Config.Destination);
93 | if (IsDiffApplicable(fullHeader, diffHeader))
94 | {
95 | lastDiff.Restore(targetDb);
96 | }
97 | }
98 |
99 | ///
100 | /// Replace paths if path replacement is configured. e.g. Convert local path to UNC path
101 | ///
102 | ///
103 | private static void ReplacePaths(ref LastBackup backup)
104 | {
105 | if (Config.MSDBPathFind == null || Config.MSDBPathReplace == null) return;
106 | for (var i = 0; i < backup.FileList.Count; i++)
107 | {
108 | backup.FileList[i] = backup.FileList[i].Replace(Config.MSDBPathFind, Config.MSDBPathReplace);
109 | }
110 | }
111 |
112 | ///
113 | /// Get a list of databases that exist in the source connection that don't exist in the destination. Only include ONLINE databases with FULL or BULK LOGGED recovery model
114 | ///
115 | ///
116 | private List GetNewDatabases()
117 | {
118 | if (Config.SourceConnectionString == null) return new List();
119 | if (DestinationDBs == null) return new List();
120 |
121 | var sourceDBs = DatabaseInfo.GetDatabaseInfo(Config.SourceConnectionString);
122 |
123 | sourceDBs = sourceDBs.Where(db => (db.RecoveryModel is 1 or 2 || Config.InitializeSimple) && db.State == 0).ToList();
124 |
125 | var newDBs = sourceDBs.Where(db =>
126 | !DestinationDBs.Any(destDb => destDb.Name.Equals(db.Name, StringComparison.OrdinalIgnoreCase))).ToList();
127 |
128 | return newDBs;
129 | }
130 | }
131 | }
--------------------------------------------------------------------------------
/sql-log-shipping-service/EmbededResourceReader.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Linq;
4 | using System.Reflection;
5 | using System.Text;
6 | using System.Threading.Tasks;
7 |
8 | namespace LogShippingService
9 | {
10 |
11 | public class EmbeddedResourceReader
12 | {
13 | public static string? ReadResourceText(string resourceFileName)
14 | {
15 | // Get the current assembly that contains the embedded resource
16 | var assembly = Assembly.GetExecutingAssembly();
17 | // Create the resource path
18 | var resourcePath = assembly.GetName().Name + "." + resourceFileName.Replace(" ", "_").Replace("\\", ".").Replace("/", ".");
19 |
20 | // Use a stream to read the embedded resource
21 | using var stream = assembly.GetManifestResourceStream(resourcePath);
22 | if (stream == null) return null; // Resource not found
23 |
24 | using var reader = new StreamReader(stream);
25 | return reader.ReadToEnd(); // Read the content as string
26 | }
27 | }
28 |
29 | }
30 |
--------------------------------------------------------------------------------
/sql-log-shipping-service/EncryptionHelper.cs:
--------------------------------------------------------------------------------
1 | using System.Security.Cryptography;
2 | using System.Text;
3 |
4 | namespace LogShippingService
5 | {
6 | internal class EncryptionHelper
7 | {
8 | private static readonly string EncryptionPrefix = "encrypted:";
9 |
10 | public static string EncryptWithMachineKey(string value)
11 | {
12 | byte[] valueBytes = Encoding.UTF8.GetBytes(value);
13 | byte[] encryptedBytes = ProtectedData.Protect(valueBytes, null, DataProtectionScope.LocalMachine);
14 | return EncryptionPrefix + Convert.ToBase64String(encryptedBytes);
15 | }
16 |
17 | public static string? DecryptWithMachineKey(string? encryptedValue)
18 | {
19 | if (encryptedValue == null) return encryptedValue;
20 | encryptedValue = encryptedValue.RemovePrefix(EncryptionPrefix);
21 | byte[] encryptedBytes = Convert.FromBase64String(encryptedValue);
22 | byte[] decryptedBytes = ProtectedData.Unprotect(encryptedBytes, null, DataProtectionScope.LocalMachine);
23 | return Encoding.UTF8.GetString(decryptedBytes);
24 | }
25 |
26 | public static bool IsEncrypted(string? value)
27 | {
28 | return value != null && value.StartsWith(EncryptionPrefix);
29 | }
30 | }
31 | }
32 |
--------------------------------------------------------------------------------
/sql-log-shipping-service/ExtensionMethods.cs:
--------------------------------------------------------------------------------
1 | namespace LogShippingService
2 | {
3 | public static class ExtensionMethods
4 | {
5 | public static string SqlSingleQuote(this string str)
6 | {
7 | return "'" + str.Replace("'", "''") + "'";
8 | }
9 |
10 | public static string SqlQuote(this string str)
11 | {
12 | return "[" + str.Replace("]", "]]") + "]";
13 | }
14 |
15 | public static string RemovePrefix(this string str, string prefix)
16 | {
17 | return str.StartsWith(prefix) ? str[prefix.Length..] : str;
18 | }
19 |
20 | ///
21 | /// Returns the char associated with the backup type (consistent with msdb.dbo.backupset)
22 | ///
23 | ///
24 | /// Char associated with the backup type in msdb.dbo.backupset
25 | ///
26 | public static char ToBackupTypeChar(this BackupHeader.BackupTypes backupType)
27 | {
28 | return backupType switch
29 | {
30 | BackupHeader.BackupTypes.DatabaseFull => 'D',
31 | BackupHeader.BackupTypes.DatabaseDiff => 'I',
32 | BackupHeader.BackupTypes.File => 'F',
33 | BackupHeader.BackupTypes.FileDiff => 'G',
34 | BackupHeader.BackupTypes.Partial => 'P',
35 | BackupHeader.BackupTypes.PartialDiff => 'Q',
36 | _ => throw new ArgumentOutOfRangeException(nameof(backupType), backupType, null)
37 | };
38 | }
39 |
40 | public static List GetFileList(this List backupFiles)
41 | {
42 | return backupFiles.Select(f => f.FilePath).ToList();
43 | }
44 |
45 | public static string RemoveInvalidFileNameChars(this string filename)
46 | {
47 | return string.Concat(filename.Split(Path.GetInvalidFileNameChars()));
48 | }
49 | }
50 | }
--------------------------------------------------------------------------------
/sql-log-shipping-service/FileHandling/AzureBlobFileHandler.cs:
--------------------------------------------------------------------------------
1 | using Azure.Storage.Blobs;
2 | using Azure.Storage.Blobs.Models;
3 | using System;
4 | using System.Collections.Generic;
5 | using System.Linq;
6 | using System.Text;
7 | using System.Threading.Tasks;
8 | using Serilog;
9 | using System.Collections.Concurrent;
10 |
11 | namespace LogShippingService.FileHandling
12 | {
    // File handler that enumerates backup files and "database" folders stored in an
    // Azure Blob Storage container, using Config.ContainerUrl + Config.SASToken for access.
    internal class AzureBlobFileHandler : FileHandlerBase
    {
        // Path delimiter used by the blob hierarchy ('/'-separated virtual folders).
        internal static readonly char[] separator = { '/' };

        // Returns the distinct first-level "folder" names under each of the given prefixes.
        // NOTE(review): prefixes appear to be container-relative path strings — confirm element type,
        // the dump has elided the generic type arguments.
        public static List GetFoldersForAzBlob(List prefixes)
        {
            var containerUri = new Uri(Config.ContainerUrl);
            var containerClient = new BlobContainerClient(new Uri($"{containerUri}{Config.SASToken}"));

            // Use a thread-safe collection to store folders from multiple threads
            var folders = new ConcurrentBag();

            // Parallelize the processing of each prefix
            Parallel.ForEach(prefixes, prefix =>
            {
                // Hierarchical listing with "/" delimiter returns virtual directories as prefixes.
                var results = containerClient.GetBlobsByHierarchy(prefix: prefix, delimiter: "/").AsPages();

                // Keep only the last path segment of each virtual directory, e.g. "backups/db1/" -> "db1".
                results.SelectMany(blobPage => blobPage.Values)
                    .Where(item => item.IsPrefix)
                    .Select(item => item.Prefix.TrimEnd(separator).Split(separator).LastOrDefault())
                    .Where(folderName => !string.IsNullOrEmpty(folderName))
                    .Distinct()
                    .ToList()
                    .ForEach(folderName => folders.Add(folderName!));
            });

            // Distinct() again because the same folder may appear under multiple prefixes.
            return folders.Distinct().ToList();
        }

        // Lists blobs under each path whose name matches the pattern and whose LastModified
        // is on/after maxAge.  NOTE(review): despite the name, maxAge is a cutoff timestamp
        // (oldest acceptable LastModified), not an age/duration — confirm against callers.
        public IEnumerable GetFiles(List paths, string pattern, DateTime maxAge, bool ascending)
        {
            var containerUri = new Uri(Config.ContainerUrl + Config.SASToken);
            var containerClient = new BlobContainerClient(containerUri);

            // Temporarily store the filtered blobs from each path
            var allFilteredBlobs = new ConcurrentBag();

            Parallel.ForEach(paths, path =>
            {
                // Blobs with no LastModified are treated as DateTimeOffset.MinValue and filtered out.
                var blobItems = containerClient.GetBlobs(BlobTraits.Metadata, BlobStates.None, path)
                    .Where(blobItem => IsFileNameMatchingPattern(blobItem.Name, pattern) &&
                                       blobItem.Properties.LastModified.GetValueOrDefault(DateTimeOffset.MinValue).UtcDateTime>= maxAge);

                foreach (var blobItem in blobItems)
                {
                    allFilteredBlobs.Add(blobItem);
                }
            });

            // Sort the blobs based on the ascending flag.  Collection order from the parallel
            // loop is nondeterministic; ordering comes solely from this sort on LastModified.
            var sortedBlobs = ascending
                ? allFilteredBlobs.OrderBy(blobItem => blobItem.Properties.LastModified.GetValueOrDefault(DateTimeOffset.MinValue).UtcDateTime)
                : allFilteredBlobs.OrderByDescending(blobItem => blobItem.Properties.LastModified.GetValueOrDefault(DateTimeOffset.MinValue).UtcDateTime);

            // Yield return each BackupFile
            foreach (var blobItem in sortedBlobs)
            {
                yield return new BackupFile(
                    $"{Config.ContainerUrl}/{blobItem.Name}",
                    BackupHeader.DeviceTypes.Url,
                    blobItem.Properties.LastModified!.Value.UtcDateTime);
            }
        }

        // Overload taking a comma-separated list of paths; delegates to the list-based overload.
        public override IEnumerable GetFiles(string path, string pattern, DateTime maxAge, bool ascending)
        {
            var paths = path.Split(new[] { ',' }, StringSplitOptions.RemoveEmptyEntries).ToList();
            return GetFiles(paths, pattern, maxAge, ascending);
        }

        // Derives database names by listing the blob "folders" below the configured full-backup
        // path root(s) — everything before the database token in each configured path.
        protected override IEnumerable GetDatabasesSpecific()
        {
            if (Config.FullFilePath == null) { return new List(); }
            var paths = Config.FullFilePath.Split(new[] { ',' }, StringSplitOptions.RemoveEmptyEntries);
            var dbRoots = paths.Select(path =>
            {
                var tokenIndex = path.IndexOf(Config.DatabaseToken, StringComparison.OrdinalIgnoreCase);
                return tokenIndex != -1 ? path[..tokenIndex] : path;
            }).ToList();

            Log.Information("Polling for new databases from Azure Blob. Folders in path(s): {path}", dbRoots);
            return GetFoldersForAzBlob(dbRoots);
        }
    }
97 | }
--------------------------------------------------------------------------------
/sql-log-shipping-service/FileHandling/DiskFileHandler.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Linq;
4 | using System.Text;
5 | using System.Threading.Tasks;
6 | using Serilog;
7 |
8 | namespace LogShippingService.FileHandling
9 | {
10 | internal class DiskFileHandler : FileHandlerBase
11 | {
12 | public override IEnumerable GetFiles(string pathsCSV, string pattern, DateTime maxAge, bool ascending)
13 | {
14 | var paths = pathsCSV.Split(',')
15 | .Select(path => path.Trim())
16 | .Where(path => !string.IsNullOrEmpty(path))
17 | .ToList();
18 |
19 | return GetFiles(paths, pattern, maxAge, ascending);
20 | }
21 |
22 | public IEnumerable GetFiles(List paths, string pattern, DateTime maxAge, bool ascending)
23 | {
24 | if (!paths.Any(Directory.Exists))
25 | {
26 | throw new DirectoryNotFoundException($"GetFilesFromDisk: None of the provided folders exist. {string.Join(",",paths)}");
27 | }
28 |
29 | // Use EnumerateFiles for better performance with large directories
30 | var allFiles = paths.AsParallel()
31 | .Where(Directory.Exists)
32 | .SelectMany(path => new DirectoryInfo(path).EnumerateFiles(pattern))
33 | .Where(f => f.LastWriteTimeUtc >= maxAge);
34 |
35 | // Apply sorting only after filtering
36 | var sortedFiles = ascending ? allFiles.OrderBy(f => f.LastWriteTimeUtc) : allFiles.OrderByDescending(f => f.LastWriteTimeUtc);
37 |
38 | // Delay materialization and map to BackupFile objects
39 | foreach (var file in sortedFiles)
40 | {
41 | yield return new BackupFile(file.FullName, BackupHeader.DeviceTypes.Disk, file.LastWriteTimeUtc);
42 | }
43 | }
44 |
45 |
46 |
47 | protected override IEnumerable GetDatabasesSpecific()
48 | {
49 | if (string.IsNullOrEmpty(Config.FullFilePath)) return Enumerable.Empty();
50 |
51 | // Config.FullFilePath might be comma-separated list of paths, split and process each
52 | var databases = Config.FullFilePath.Split(',')
53 | .Select(path => path.Trim())
54 | .Where(path => !string.IsNullOrEmpty(path))
55 | .SelectMany(path =>
56 | {
57 | // Find where the database token is in the path, or return an empty sequence if not found
58 | var dbRootIndex = path.IndexOf(Config.DatabaseToken, StringComparison.OrdinalIgnoreCase);
59 | if (dbRootIndex == -1) return Enumerable.Empty();
60 | // Get the root path up to the database token. Folders within this path gives us the database names
61 | var dbRoot = path[..dbRootIndex];
62 | Log.Information("Polling for new databases from disk. Folders in path: {path}", dbRoot);
63 |
64 | // Get names of subdirectories in the root path (database names)
65 | try
66 | {
67 | return Directory.EnumerateDirectories(dbRoot).Select(dir => Path.GetFileName(dir)!);
68 | }
69 | catch (Exception ex)
70 | {
71 | Log.Error(ex, "Failed to enumerate directories in path: {path}", dbRoot);
72 | return Enumerable.Empty();
73 | }
74 | })
75 | .Distinct(); // Ensure unique database names
76 |
77 | return databases;
78 | }
79 |
80 | }
81 | }
--------------------------------------------------------------------------------
/sql-log-shipping-service/FileHandling/FileHandler.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Linq;
4 | using System.Text;
5 | using System.Threading.Tasks;
6 |
7 | namespace LogShippingService.FileHandling
8 | {
9 | internal class FileHandler
10 | {
11 | private static FileHandlerBase? _fileHandler;
12 |
13 | public static FileHandlerBase FileHandlerInstance => _fileHandler ??= GetFileHandler();
14 |
15 | public static FileHandlerBase GetFileHandler()
16 | {
17 | return AppConfig.Config.FileHandlerType switch
18 | {
19 | Config.FileHandlerTypes.Disk => new DiskFileHandler(),
20 | Config.FileHandlerTypes.AzureBlob => new AzureBlobFileHandler(),
21 | Config.FileHandlerTypes.S3 => new S3FileHandler(),
22 | _ => throw new NotImplementedException($"FileHandlerType '{AppConfig.Config.FileHandlerType}' not implemented.")
23 | };
24 | }
25 | }
26 | }
--------------------------------------------------------------------------------
/sql-log-shipping-service/FileHandling/FileHandlerBase.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Configuration;
4 | using System.Linq;
5 | using System.Text;
6 | using System.Text.RegularExpressions;
7 | using System.Threading.Tasks;
8 | using Serilog;
9 |
10 | namespace LogShippingService.FileHandling
11 | {
12 | public abstract class FileHandlerBase
13 | {
14 | public static Config.FileHandlerTypes FileHandlerType => AppConfig.Config.FileHandlerType;
15 |
16 | public static Config Config => AppConfig.Config;
17 |
18 | public abstract IEnumerable GetFiles(string path, string pattern, DateTime maxAge, bool ascending);
19 |
20 | public virtual IEnumerable GetDatabases()
21 | {
22 | if (string.IsNullOrEmpty(Config.FullFilePath)) return new List();
23 | if (Config.IncludedDatabases.Count > 0)
24 | {
25 | Log.Information("Polling for new databases. Using IncludedDatabases list. {Included}", Config.IncludedDatabases);
26 | return Config.IncludedDatabases;
27 | }
28 |
29 | // Let derived classes handle specific behavior
30 | return GetDatabasesSpecific();
31 | }
32 |
33 | protected abstract IEnumerable GetDatabasesSpecific();
34 |
35 | // Common methods or properties can be defined here
36 | public static bool IsFileNameMatchingPattern(string fileName, string searchPattern)
37 | {
38 | var pattern = "^" + Regex.Escape(searchPattern)
39 | .Replace(@"\*", ".*")
40 | .Replace(@"\?", ".") + "$";
41 |
42 | return Regex.IsMatch(fileName, pattern, RegexOptions.IgnoreCase);
43 | }
44 | }
45 | }
--------------------------------------------------------------------------------
/sql-log-shipping-service/FileHandling/S3FileHandler.cs:
--------------------------------------------------------------------------------
1 | using Amazon.Runtime;
2 | using Amazon.S3.Model;
3 | using Amazon.S3;
4 | using Amazon;
5 | using System.Web;
6 | using Serilog;
7 |
8 | namespace LogShippingService.FileHandling
9 | {
10 | internal class S3FileHandler : FileHandlerBase
11 | {
12 | public override IEnumerable GetFiles(string path, string pattern, DateTime maxAge, bool ascending)
13 | {
14 | var paths = path.Split(new[] { ',' }, StringSplitOptions.RemoveEmptyEntries).ToList();
15 | return GetFilesFromUrlsS3(paths, pattern, maxAge, ascending).Result;
16 | }
17 |
18 | protected override IEnumerable GetDatabasesSpecific()
19 | {
20 | if (Config.FullFilePath == null) { return new List(); }
21 | // Split the full file path on comma to get individual S3 paths
22 | var s3Paths = Config.FullFilePath.Split(new[] { ',' }, StringSplitOptions.RemoveEmptyEntries);
23 |
24 | // Process each S3 path to generate dbRoot values
25 | var dbRootList = s3Paths.Select(s3Path =>
26 | {
27 | var s3Uri = new S3Uri(s3Path.Trim());
28 | var key = s3Uri.Key[..s3Uri.Key.IndexOf(HttpUtility.UrlEncode(Config.DatabaseToken), StringComparison.OrdinalIgnoreCase)];
29 | return $"s3://{s3Uri.Uri.Host}/{key}";
30 | }).ToList();
31 |
32 | Log.Information("Polling for new databases from S3. Prefix: {prefix}", dbRootList);
33 | return ListFoldersFromS3Paths(dbRootList).Result;
34 | }
35 |
36 | public static async Task> GetFilesFromUrlsS3(List paths, string pattern, DateTime MaxAge, bool ascending)
37 | {
38 | // Use Task.WhenAll to await all the tasks initiated for each path
39 | var tasks = paths.Select(path => GetFilesFromUrlS3(path, pattern, MaxAge)).ToList();
40 | var filesFromAllPaths = await Task.WhenAll(tasks);
41 |
42 | // Flatten the results and order them
43 | var allFiles = filesFromAllPaths.SelectMany(files => files);
44 | return ascending ? allFiles.OrderBy(file => file.LastModifiedUtc) : allFiles.OrderByDescending(file => file.LastModifiedUtc);
45 | }
46 |
47 | private static async Task> GetFilesFromUrlS3(string path, string pattern, DateTime MaxAge)
48 | {
49 | var s3Uri = new S3Uri(path);
50 | var request = new ListObjectsV2Request
51 | {
52 | BucketName = s3Uri.Bucket,
53 | Prefix = s3Uri.Key
54 | };
55 |
56 | var s3Client = GetS3Client(s3Uri.Region);
57 | var files = new List();
58 | ListObjectsV2Response response;
59 |
60 | do
61 | {
62 | response = await s3Client.ListObjectsV2Async(request);
63 | if (response.S3Objects is { Count: > 0 })
64 | {
65 | var matchingFiles = response.S3Objects
66 | .Where(s3Object =>
67 | IsFileNameMatchingPattern(s3Object.Key, pattern) &&
68 | s3Object.LastModified?.ToUniversalTime() >= MaxAge)
69 | .Select(s3Object => new BackupFile($"s3://{s3Uri.Uri.Host}/{s3Object.Key}",
70 | BackupHeader.DeviceTypes.Url,
71 | s3Object.LastModified?.ToUniversalTime() ?? DateTime.MinValue));
72 |
73 | files.AddRange(matchingFiles);
74 | }
75 |
76 | request.ContinuationToken = response.NextContinuationToken;
77 | } while (response.IsTruncated==true);
78 |
79 | return files;
80 | }
81 |
82 | private static AmazonS3Client GetS3Client(RegionEndpoint region)
83 | {
84 | AWSCredentials cred;
85 | if (Config.AccessKey == null || Config.SecretKey == null)
86 | {
87 | cred = new InstanceProfileAWSCredentials();
88 | }
89 | else
90 | {
91 | cred = new BasicAWSCredentials(Config.AccessKey, Config.SecretKey);
92 | }
93 |
94 | var config = new AmazonS3Config
95 | {
96 | RegionEndpoint = region
97 | };
98 |
99 | return new AmazonS3Client(cred, config);
100 | }
101 |
102 | public static async Task> ListFoldersFromS3Paths(List paths)
103 | {
104 | // Initiate folder listing tasks for each path in parallel
105 | var tasks = paths.Select(ListFoldersFromS3SinglePath).ToList();
106 | var foldersFromAllPaths = await Task.WhenAll(tasks);
107 |
108 | // Flatten results and remove duplicates
109 | var allFolders = foldersFromAllPaths.SelectMany(folders => folders).Distinct().ToList();
110 |
111 | return allFolders;
112 | }
113 |
114 | public static async Task> ListFoldersFromS3SinglePath(string path)
115 | {
116 | var s3Uri = new S3Uri(path);
117 | var s3Client = GetS3Client(s3Uri.Region);
118 | var request = new ListObjectsV2Request
119 | {
120 | BucketName = s3Uri.Bucket,
121 | Prefix = s3Uri.Key,
122 | Delimiter = "/" // Using slash as delimiter to simulate folders
123 | };
124 |
125 | var folders = new List();
126 | ListObjectsV2Response response;
127 |
128 | do
129 | {
130 | response = await s3Client.ListObjectsV2Async(request);
131 | // Add all common prefixes (folders) to the list
132 | if (response.CommonPrefixes != null)
133 | {
134 | folders.AddRange(response.CommonPrefixes.Select(f => Path.GetFileName(f.TrimEnd('/'))));
135 | }
136 | request.ContinuationToken = response.NextContinuationToken;
137 | } while (response.IsTruncated == true);
138 |
139 | return folders;
140 | }
141 | }
142 | }
--------------------------------------------------------------------------------
/sql-log-shipping-service/HeaderVerificationException.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Linq;
4 | using System.Text;
5 | using System.Threading.Tasks;
6 |
7 | namespace LogShippingService
8 | {
9 | public class HeaderVerificationException : Exception
10 | {
11 |
12 | public BackupHeader.HeaderVerificationStatus VerificationStatus { get; }
13 |
14 | public HeaderVerificationException(string message, BackupHeader.HeaderVerificationStatus status)
15 | : base(message)
16 | {
17 | VerificationStatus = status;
18 | }
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/sql-log-shipping-service/LastBackup.cs:
--------------------------------------------------------------------------------
1 | using Microsoft.Data.SqlClient;
2 | using System.Data;
3 |
4 | namespace LogShippingService
5 | {
6 | ///
7 | /// Get info for the last backup for a specified database from msdb
8 | ///
9 | public class LastBackup
10 | {
11 | public bool HasBackup => FileList.Count > 0;
12 | public List FileList = new();
13 | public BackupHeader.DeviceTypes DeviceType = BackupHeader.DeviceTypes.Unknown;
14 | public string DatabaseName;
15 | public DateTime BackupFinishDate = DateTime.MinValue;
16 | public BackupHeader.BackupTypes BackupType { get; private set; }
17 |
18 | private static Config Config => AppConfig.Config;
19 |
20 | public LastBackup(string databaseName, string connectionString, BackupHeader.BackupTypes type)
21 | {
22 | BackupType = type;
23 | DatabaseName = databaseName;
24 | var lastBackup = GetFilesForLastBackup(databaseName, type.ToBackupTypeChar(), connectionString);
25 | foreach (DataRow row in lastBackup.Rows)
26 | {
27 | if (row["device_type"] is not DBNull && DeviceType == BackupHeader.DeviceTypes.Unknown)
28 | {
29 | var deviceTypeInt = Convert.ToInt32(row["device_type"]);
30 | BackupFinishDate = (DateTime)row["backup_finish_date"];
31 | if (Enum.IsDefined(typeof(BackupHeader.DeviceTypes), deviceTypeInt))
32 | {
33 | DeviceType = (BackupHeader.DeviceTypes)deviceTypeInt;
34 | }
35 | }
36 | FileList.Add((string)row["physical_device_name"]);
37 | }
38 | }
39 |
40 | public void Restore(string? targetDb = null)
41 | {
42 | targetDb ??= DatabaseName;
43 | Dictionary? moves = null;
44 | if (BackupType == BackupHeader.BackupTypes.DatabaseFull)
45 | {
46 | moves = DataHelper.GetFileMoves(FileList, DeviceType, Config.Destination,
47 | Config.MoveDataFolder,
48 | Config.MoveLogFolder, Config.MoveFileStreamFolder, DatabaseName, targetDb);
49 | }
50 |
51 | var sql = GetRestoreDbScript(moves, targetDb);
52 | DataHelper.ExecuteWithTiming(sql, Config.Destination);
53 | }
54 |
55 | ///
56 | /// Returns the backup header. If backup file has multiple backups and exception will be thrown. Use GetHeaders to return all the header info to support multiple backups in same file
57 | ///
58 | /// Database connection string
59 | ///
60 | ///
61 | public BackupHeader GetHeader(string connectionString)
62 | {
63 | var headers = GetHeaders(connectionString);
64 | return headers.Count switch
65 | {
66 | 1 => headers[0],
67 | > 1 => throw new Exception(
68 | "RESTORE HEADERONLY returned multiple rows. Multiple backups have been written to the same file."),
69 | _ => throw new Exception("RESTORE HEADERONLY returned no rows")
70 | };
71 | }
72 |
73 | ///
74 | /// Return the backup headers associated with the backup
75 | ///
76 | /// Database connection string
77 | ///
78 | public List GetHeaders(string connectionString)
79 | {
80 | return BackupHeader.GetHeaders(FileList, connectionString, DeviceType);
81 | }
82 |
83 | public string GetHeaderOnlyScript() => DataHelper.GetHeaderOnlyScript(FileList, DeviceType);
84 |
85 | public string GetFileListOnlyScript() => DataHelper.GetFileListOnlyScript(FileList, DeviceType);
86 |
87 | public string GetRestoreDbScript(Dictionary? moves, string targetDb) => DataHelper.GetRestoreDbScript(FileList, targetDb, DeviceType, BackupType == BackupHeader.BackupTypes.DatabaseFull, moves);
88 |
89 | internal static DataTable GetFilesForLastBackup(string db, char backupType, string connectionString)
90 | {
91 | using var cn = new SqlConnection(connectionString);
92 | using var cmd = new SqlCommand(SqlStrings.GetFilesForLastBackup, cn) { CommandTimeout = 0 };
93 | using var da = new SqlDataAdapter(cmd);
94 | cmd.Parameters.AddWithValue("@db", db);
95 | cmd.Parameters.AddWithValue("@backup_type", backupType);
96 | cmd.Parameters.AddWithValue("@MaxBackupAgeForInitialization", Config.MaxBackupAgeForInitialization);
97 | var dt = new DataTable();
98 | da.Fill(dt);
99 | return dt;
100 | }
101 |
102 | internal static DataTable GetFilesForLastFullBackup(string db, string connectionString) =>
103 | GetFilesForLastBackup(db, 'D', connectionString);
104 |
105 | internal static DataTable GetFilesForLastDiffBackup(string db, string connectionString) =>
106 | GetFilesForLastBackup(db, 'I', connectionString);
107 | }
108 | }
--------------------------------------------------------------------------------
/sql-log-shipping-service/LogShipping.cs:
--------------------------------------------------------------------------------
1 | using LogShippingService.FileHandling;
2 | using Microsoft.Data.SqlClient;
3 | using Microsoft.Extensions.Hosting;
4 | using Serilog;
5 | using SerilogTimings;
6 | using System.Collections.Concurrent;
7 | using System.Data;
8 | using System.Numerics;
9 |
10 | namespace LogShippingService
11 | {
12 | internal class LogShipping : BackgroundService
13 | {
14 | public static ConcurrentDictionary InitializingDBs = new();
15 |
16 | private readonly DatabaseInitializerBase? _initializer;
17 |
18 | private static readonly object locker = new();
19 |
20 | private static Config Config => AppConfig.Config;
21 |
22 | public LogShipping()
23 | {
24 | if (string.IsNullOrEmpty(Config.LogFilePath))
25 | {
26 | var message = "LogFilePath was not specified";
27 | Log.Error(message); throw new Exception(message);
28 | }
29 | if (!string.IsNullOrEmpty(Config.SourceConnectionString))
30 | {
31 | if (Config.UsePollForNewDatabasesCron)
32 | {
33 | Log.Information("New DBs initialized from msdb history on cron schedule: {cron}", Config.PollForNewDatabasesCron);
34 | }
35 | else
36 | {
37 | Log.Information("New DBs initialized from msdb history every {interval} mins.", Config.PollForNewDatabasesFrequency);
38 | }
39 | _initializer = new DatabaseInitializerFromMSDB();
40 | }
41 | else
42 | {
43 | _initializer = new DatabaseInitializerFromDiskOrUrl();
44 | }
45 | }
46 |
47 | protected override async Task ExecuteAsync(CancellationToken stoppingToken)
48 | {
49 | stoppingToken.Register(Stop);
50 | var logRestoreTask = StartProcessing(stoppingToken);
51 |
52 | try
53 | {
54 | if (_initializer != null)
55 | {
56 | await Task.WhenAll(logRestoreTask, _initializer.RunPollForNewDBs(stoppingToken));
57 | }
58 | else
59 | {
60 | await logRestoreTask;
61 | }
62 | }
63 | catch (TaskCanceledException)
64 | {
65 | Log.Information("Processing stopped due to cancellation request");
66 | await Log.CloseAndFlushAsync();
67 | }
68 | catch (Exception ex)
69 | {
70 | Log.Error(ex, "Processing stopped due to unexpected error");
71 | await Log.CloseAndFlushAsync();
72 | Environment.Exit(1);
73 | }
74 | }
75 |
76 | private async Task StartProcessing(CancellationToken stoppingToken)
77 | {
78 | long i = 0;
79 | while (!stoppingToken.IsCancellationRequested)
80 | {
81 | await WaitForNextIteration(i, stoppingToken);
82 | i++;
83 | using (Operation.Time($"Log restore iteration {i}"))
84 | {
85 | Log.Information("Starting log restore iteration {0}", i);
86 | try
87 | {
88 | await Process(stoppingToken);
89 | }
90 | catch (Exception ex)
91 | {
92 | Log.Error(ex, "Unexpected error processing log restores");
93 | }
94 | }
95 | }
96 | Log.Information("Finished processing LOG restores");
97 | }
98 |
99 | ///
100 | /// Wait for the required time before starting the next iteration. Either a delay in milliseconds or a cron schedule can be used. Also waits until active hours if configured.
101 | ///
102 | private static async Task WaitForNextIteration(long count, CancellationToken stoppingToken)
103 | {
104 | var nextIterationStart = DateTime.Now.AddMilliseconds(Config.DelayBetweenIterationsMs);
105 | if (Config.UseLogRestoreScheduleCron)
106 | {
107 | var next = Config.LogRestoreCron?.GetNextOccurrence(DateTimeOffset.Now, TimeZoneInfo.Local);
108 | if (next.HasValue) // null can be returned if the value is unreachable. e.g. 30th Feb. It's not expected, but log a warning and fall back to default delay if it happens.
109 | {
110 | nextIterationStart = next.Value.DateTime;
111 | }
112 | else
113 | {
114 | Log.Warning("No next occurrence found for LogRestoreScheduleCron. Using default delay.");
115 | }
116 | }
117 |
118 | if (Config.UseLogRestoreScheduleCron ||
119 | count > 0) // Only apply delay on first iteration if using a cron schedule
120 | {
121 | Log.Information("Next log restore iteration will start at {nextIterationStart}", nextIterationStart);
122 | await Waiter.WaitUntilTime(nextIterationStart, stoppingToken);
123 | }
124 | // If active hours are configured, wait until the next active period
125 | await Waiter.WaitUntilActiveHours(stoppingToken);
126 | }
127 |
128 | public void Stop()
129 | {
130 | Log.Information("Initiating shutdown...");
131 | }
132 |
133 | private Task Process(CancellationToken stoppingToken)
134 | {
135 | DataTable dt;
136 | using (Operation.Time("GetDatabases"))
137 | {
138 | try
139 | {
140 | dt = GetDatabases();
141 | }
142 | catch (Exception ex)
143 | {
144 | Log.Error(ex, "Error getting databases");
145 | return Task.CompletedTask;
146 | }
147 | }
148 |
149 | Parallel.ForEach(dt.AsEnumerable(), new ParallelOptions() { MaxDegreeOfParallelism = Config.MaxThreads }, row =>
150 | {
151 | if (stoppingToken.IsCancellationRequested || !Waiter.CanRestoreLogsNow) return;
152 | var targetDb = (string)row["Name"];
153 | var sourceDb = DatabaseInitializerBase.GetSourceDatabaseName(targetDb);
154 | if (InitializingDBs.ContainsKey(targetDb.ToLower()))
155 | {
156 | Log.Information("Skipping log restores for {targetDb} due to initialization", targetDb);
157 | return;
158 | }
159 | var fromDate = row["backup_finish_date"] as DateTime? ?? DateTime.MinValue;
160 | fromDate = fromDate.AddMinutes(Config.OffsetMins);
161 | try
162 | {
163 | ProcessDatabase(sourceDb, targetDb, fromDate, stoppingToken);
164 | }
165 | catch (Exception ex)
166 | {
167 | Log.Error(ex, "Error processing database {targetDb}", targetDb);
168 | }
169 | });
170 | return Task.CompletedTask;
171 | }
172 |
173 | public static bool IsIncludedDatabase(string db)
174 | {
175 | var isExcluded = Config.ExcludedDatabases.Count > 0 && Config.ExcludedDatabases.Any(e => e.Equals(db, StringComparison.OrdinalIgnoreCase));
176 | var isIncluded = Config.IncludedDatabases.Count == 0 || Config.IncludedDatabases.Any(e => e.Equals(db, StringComparison.OrdinalIgnoreCase));
177 |
178 | return !isExcluded && isIncluded;
179 | }
180 |
181 | private void ProcessDatabase(string sourceDb, string targetDb, DateTime fromDate, CancellationToken stoppingToken, int processCount = 1, bool reProcess = false)
182 | {
183 | var expectedTarget = DatabaseInitializerBase.GetDestinationDatabaseName(sourceDb);
184 | if (!string.Equals(expectedTarget, targetDb, StringComparison.OrdinalIgnoreCase))
185 | {
186 | Log.Debug("Skipping {targetDb}. Expected target to be {expectedName}", targetDb, expectedTarget);
187 | return;
188 | }
189 | if (!IsIncludedDatabase(targetDb) && !IsIncludedDatabase(sourceDb))
190 | {
191 | Log.Debug("Skipping {targetDb}. Database is excluded.", targetDb);
192 | return;
193 | }
194 | var logFiles = GetFilesForDb(sourceDb, fromDate);
195 | using (var op = Operation.Begin("Restore Logs for {DatabaseName}", targetDb))
196 | {
197 | try
198 | {
199 | RestoreLogs(logFiles, sourceDb, targetDb, reProcess, stoppingToken);
200 | op.Complete();
201 | }
202 | catch (TimeoutException ex) when (ex.Message == "Max processing time exceeded")
203 | {
204 | Log.Warning(
205 | "Max processing time exceeded. Log processing will continue for {targetDb} on the next iteration.",
206 | targetDb);
207 | op.SetException(ex);
208 | }
209 | catch (SqlException ex) when (ex.Number == 4305)
210 | {
211 | HandleTooRecent(ex, sourceDb, targetDb, fromDate, processCount, stoppingToken);
212 | }
213 | catch (HeaderVerificationException ex) when (ex.VerificationStatus ==
214 | BackupHeader.HeaderVerificationStatus.TooRecent)
215 | {
216 | HandleTooRecent(ex, sourceDb, targetDb, fromDate, processCount, stoppingToken);
217 | }
218 | catch (Exception ex)
219 | {
220 | Log.Error(ex, "Error restoring logs for {targetDb}", targetDb);
221 | }
222 | }
223 | }
224 |
225 | private void HandleTooRecent(Exception ex, string sourceDb, string targetDb, DateTime fromDate, int processCount, CancellationToken stoppingToken)
226 | {
227 | switch (processCount)
228 | {
229 | // Too recent
230 | case 1:
231 | Log.Warning(ex, "Log file to recent to apply. Adjusting fromDate by 60min.");
232 | ProcessDatabase(sourceDb, targetDb, fromDate.AddMinutes(-60), stoppingToken, processCount + 1, true);
233 | break;
234 |
235 | case 2:
236 | Log.Warning(ex, "Log file to recent to apply. Adjusting fromDate by 1 day.");
237 | ProcessDatabase(sourceDb, targetDb, fromDate.AddMinutes(-1440), stoppingToken, processCount + 1, true);
238 | break;
239 |
240 | default:
241 | Log.Error(ex, "Log file too recent to apply. Manual intervention might be required.");
242 | break;
243 | }
244 | }
245 |
246 | private static Task RestoreLogs(IEnumerable logFiles, string sourceDb, string targetDb, bool reProcess, CancellationToken stoppingToken)
247 | {
248 | BigInteger? redoStartOrPreviousLastLSN = null;
249 | if (Config.CheckHeaders)
250 | {
251 | redoStartOrPreviousLastLSN = DataHelper.GetRedoStartLSNForDB(targetDb, Config.Destination);
252 | Log.Debug("{targetDb} Redo Start LSN: {RedoStartLSN}", targetDb, redoStartOrPreviousLastLSN);
253 | }
254 |
255 | var maxTime = DateTime.Now.AddMinutes(Config.MaxProcessingTimeMins);
256 | bool breakProcessingFlag = false;
257 | var stopAt = Config.StopAt > DateTime.MinValue && Config.StopAt < DateTime.MaxValue ? ", STOPAT=" + Config.StopAt.ToString("yyyy-MM-ddTHH:mm:ss.fff").SqlSingleQuote() : "";
258 | var earlierLogFound = false;
259 | foreach (var logBackup in logFiles)
260 | {
261 | if (DateTime.Now > maxTime)
262 | {
263 | RestoreWithStandby(targetDb); // Return database to standby mode
264 | // Stop processing logs if max processing time is exceeded. Prevents a single DatabaseName that has fallen behind from impacting other DBs
265 | throw new TimeoutException("Max processing time exceeded");
266 | }
267 | if (stoppingToken.IsCancellationRequested)
268 | {
269 | RestoreWithStandby(targetDb); // Return database to standby mode
270 | Log.Information("Halt log restores for {targetDb} due to stop request", targetDb);
271 | break;
272 | }
273 | if (!Waiter.CanRestoreLogsNow)
274 | {
275 | RestoreWithStandby(targetDb); // Return database to standby mode
276 | Log.Information("Halt log restores for {targetDb} due to Hours configuration", targetDb);
277 | break;
278 | }
279 | if (breakProcessingFlag)
280 | {
281 | break;
282 | }
283 |
284 | var file = logBackup.FilePath.SqlSingleQuote();
285 | var urlOrDisk = Config.DeviceType == BackupHeader.DeviceTypes.Disk ? "DISK" : "URL";
286 | var sql = $"RESTORE LOG {targetDb.SqlQuote()} FROM {urlOrDisk} = {file} WITH NORECOVERY{stopAt}";
287 |
288 | if (Config.CheckHeaders)
289 | {
290 | List headers;
291 | try
292 | {
293 | headers = logBackup.Headers;
294 | }
295 | catch (SqlException ex)
296 | {
297 | Log.Error(ex, "Error reading backup header for {logPath}. Skipping file.", logBackup.FilePath);
298 | continue;
299 | }
300 |
301 | if (headers.Count > 1) // Multiple logical backups in single file. This is now handled, but log a warning as it's unexpected.
302 | {
303 | Log.Warning("Log File {logPath} contains {count} backups. Expected 1, but each will be processed.", logBackup.FilePath, headers.Count);
304 | }
305 |
306 | foreach (var header in headers)
307 | {
308 | sql = $"RESTORE LOG {targetDb.SqlQuote()} FROM {urlOrDisk} = {file} WITH NORECOVERY, FILE = {header.Position}{stopAt}";
309 | if (!string.Equals(header.DatabaseName, sourceDb, StringComparison.OrdinalIgnoreCase))
310 | {
311 | throw new HeaderVerificationException(
312 | $"Header verification failed for {logBackup}. Database: {header.DatabaseName}. Expected a backup for {targetDb}", BackupHeader.HeaderVerificationStatus.WrongDatabase);
313 | }
314 |
315 | if (Config.RestoreDelayMins > 0 && DateTime.Now.Subtract(header.BackupFinishDate).TotalMinutes < Config.RestoreDelayMins)
316 | {
317 | Log.Information("Waiting to restore {logPath} & subsequent files. Backup Finish Date: {BackupFinishDate}. Eligible for restore after {RestoreAfter}, RestoreDelayMins:{RestoreDelay}", logBackup.FilePath, header.BackupFinishDate, header.BackupFinishDate.AddMinutes(Config.RestoreDelayMins), Config.RestoreDelayMins);
318 | breakProcessingFlag = true;
319 | break;
320 | }
321 | else if (header.FirstLSN <= redoStartOrPreviousLastLSN && header.LastLSN == redoStartOrPreviousLastLSN)
322 | {
323 | if (reProcess) // Reprocess previous file if we got a too recent error, otherwise skip it
324 | {
325 | Log.Information("Re-processing {logPath}, FILE={Position}. FirstLSN: {FirstLSN}, LastLSN: {LastLSN}", logBackup.FilePath, header.Position, header.FirstLSN, header.LastLSN);
326 | continue;
327 | }
328 | else
329 | {
330 | Log.Information("Skipping {logPath}, FILE={Position}. Found last log file restored. FirstLSN: {FirstLSN}, LastLSN: {LastLSN}", logBackup.FilePath, header.Position, header.FirstLSN, header.LastLSN);
331 | continue;
332 | }
333 | }
334 | else if (header.FirstLSN <= redoStartOrPreviousLastLSN && header.LastLSN > redoStartOrPreviousLastLSN)
335 | {
336 | Log.Information("Header verification successful for {logPath}, FILE={Position}. FirstLSN: {FirstLSN}, LastLSN: {LastLSN}", logBackup.FilePath, header.Position, header.FirstLSN, header.LastLSN);
337 | }
338 | else if (header.FirstLSN < redoStartOrPreviousLastLSN)
339 | {
340 | earlierLogFound = true;
341 | Log.Information("Skipping {logPath}. A later LSN is required: {RequiredLSN}, FirstLSN: {FirstLSN}, LastLSN: {LastLSN}", logBackup.FilePath, redoStartOrPreviousLastLSN, header.FirstLSN, header.LastLSN);
342 | continue;
343 | }
344 | else if (header.FirstLSN > redoStartOrPreviousLastLSN)
345 | {
346 | if (earlierLogFound && reProcess)
347 | {
348 | // The current log is too recent. We previously adjusted the search date looking for an earlier log. Now we have found log files that are too early to apply, then this log that is too recent
349 | // The log chain appears to be broken, but log an error and continue processing in case the file has a later modified date than expected.
350 | Log.Error("Header verification failed for {FilePath}. An earlier LSN is required: {redoStartOrPreviousLastLSN}, FirstLSN: {FirstLSN}, LastLSN: {LastLSN}. NOTE: We previously found a log that was too early to apply. Log chain might be broken, requiring manual intervention. Continuing to check log files just in case the file has a later modified date than expected.",
351 | logBackup.FilePath,redoStartOrPreviousLastLSN,header.FirstLSN,header.LastLSN);
352 | continue;
353 | }
354 | throw new HeaderVerificationException($"Header verification failed for {logBackup.FilePath}. An earlier LSN is required: {redoStartOrPreviousLastLSN}, FirstLSN: {header.FirstLSN}, LastLSN: {header.LastLSN}", BackupHeader.HeaderVerificationStatus.TooRecent);
355 | }
356 |
357 | var completed = ProcessRestoreCommand(sql, targetDb, file);
358 | if (completed && Config.StopAt != DateTime.MinValue && header.BackupFinishDate >= Config.StopAt)
359 | {
360 | Log.Information("StopAt target reached for {targetDb}. Last log: {logPath}. Backup Finish Date: {BackupFinishDate}. StopAt: {StopAt}", targetDb, logBackup.FilePath, header.BackupFinishDate, Config.StopAt);
361 | lock (locker) // Prevent future processing of this DB
362 | {
363 | Config.ExcludedDatabases.Add(targetDb); // Exclude this DB from future processing
364 | }
365 | breakProcessingFlag = true;
366 | break;
367 | }
368 | redoStartOrPreviousLastLSN = header.LastLSN;
369 | }
370 | }
371 | else
372 | {
373 | ProcessRestoreCommand(sql, targetDb, file);
374 | }
375 | }
376 | RestoreWithStandby(targetDb);
377 | return Task.CompletedTask;
378 | }
379 |
380 | private static bool ProcessRestoreCommand(string sql, string db, string file)
381 | {
382 | try
383 | {
384 | Execute(sql);
385 | return true;
386 | }
387 | catch (SqlException ex) when
388 | (ex.Number == 4326) // Log file is too early to apply, Log error and continue
389 | {
390 | Log.Warning(ex, "Log file is too early to apply. Processing will continue with next file.");
391 | }
392 | catch (SqlException ex) when
393 | (ex.Number == 3203) // Read error. Damaged backup? Log error and continue processing.
394 | {
395 | Log.Error(ex, "Error reading backup file {file} - possible damaged or incomplete backup. Processing will continue with next file.", file);
396 | }
397 | catch (SqlException ex) when
398 | (ex.Number == 3101) // Exclusive access could not be obtained because the database is in use. Kill user connections and retry.
399 | {
400 | if (!KillUserConnections(db)) return false;
401 | Execute(sql);
402 | return true;
403 | }
404 | catch (SqlException ex) when (ex.Number == 4319)
405 | {
406 | Log.Warning(ex,
407 | "A previous restore operation was interrupted for {targetDb}. Attempting to fix automatically with RESTART option", db);
408 | sql += ",RESTART";
409 | try
410 | {
411 | Execute(sql);
412 | return true;
413 | }
414 | catch (Exception ex2)
415 | {
416 | Log.Error(ex2, "Error running RESTORE with RESTART option. {sql}. Skipping file and trying next in sequence.", sql);
417 | }
418 | }
419 |
420 | return false;
421 | }
422 |
423 | private static bool KillUserConnections(string db)
424 | {
425 | if (Config.KillUserConnections)
426 | {
427 | var sql = $"IF DATABASEPROPERTYEX({db.SqlSingleQuote()},'IsInStandBy')=1\n";
428 | sql += "BEGIN\n";
429 | sql += $"\tALTER DATABASE {db.SqlQuote()} SET SINGLE_USER WITH ROLLBACK AFTER {Config.KillUserConnectionsWithRollbackAfter}\n";
430 | sql += $"\tRESTORE DATABASE {db.SqlQuote()} WITH NORECOVERY\n";
431 | sql += "END\n";
432 | Log.Warning("User connections to {targetDb} are preventing restore operations. Sessions will be killed after {seconds}. {sql}", db, Config.KillUserConnectionsWithRollbackAfter, sql);
433 | try
434 | {
435 | Execute(sql);
436 | return true;
437 | }
438 | catch (Exception ex)
439 | {
440 | Log.Error(ex, "Error killing user connections for {targetDb}. {sql}", db, sql);
441 | return false;
442 | }
443 | }
444 | else
445 | {
446 | Log.Error("User connections to {targetDb} are preventing restore operations. Consider enabling KillUserConnections in config");
447 | return false;
448 | }
449 | }
450 |
451 | private static void RestoreWithStandby(string db)
452 | {
453 | if (string.IsNullOrEmpty(Config.StandbyFileName)) return;
454 | var standby = Config.StandbyFileName.Replace(Config.DatabaseToken, db);
455 | var sql = $"IF DATABASEPROPERTYEX({db.SqlSingleQuote()},'IsInStandBy') = 0 RESTORE DATABASE {db.SqlQuote()} WITH STANDBY = {standby.SqlSingleQuote()}";
456 | try
457 | {
458 | Execute(sql);
459 | }
460 | catch (Exception ex)
461 | {
462 | Log.Error(ex, "Error running {sql}", sql);
463 | }
464 | }
465 |
466 | private static void Execute(string sql)
467 | {
468 | DataHelper.ExecuteWithTiming(sql, Config.Destination);
469 | }
470 |
471 | public static DataTable GetDatabases()
472 | {
473 | using var cn = new SqlConnection(Config.Destination);
474 | using var cmd = new SqlCommand(SqlStrings.GetDatabases, cn) { CommandTimeout = 0 };
475 | using var da = new SqlDataAdapter(cmd);
476 | var dt = new DataTable();
477 | da.Fill(dt);
478 | return dt;
479 | }
480 |
481 | private static IEnumerable GetFilesForDb(string db, DateTime fromDate)
482 | {
483 | var path = Config.LogFilePath!.Replace(Config.DatabaseToken, db);
484 | IEnumerable logFiles;
485 |
486 | using (var op = Operation.Begin("Get logs for {DatabaseName} after {date} (Offset:{offset}) from {path}", db,
487 | fromDate, Config.OffsetMins, path))
488 | {
489 | logFiles = FileHandler.FileHandlerInstance.GetFiles(path, "*.trn", fromDate, true);
490 | op.Complete();
491 | }
492 |
493 | return logFiles;
494 | }
495 | }
496 | }
--------------------------------------------------------------------------------
/sql-log-shipping-service/LogShippingService.csproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | Exe
5 | net8.0-windows7.0
6 | enable
7 | enable
8 | Services_5724.ico
9 | false
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 | Always
51 |
52 |
53 | Always
54 |
55 |
56 |
57 |
58 |
--------------------------------------------------------------------------------
/sql-log-shipping-service/NamedLocker.cs:
--------------------------------------------------------------------------------
1 | using System.Collections.Concurrent;
2 |
3 | namespace LogShippingService
4 | {
5 | // http://johnculviner.com/achieving-named-lock-locker-functionality-in-c-4-0/
6 | public class NamedLocker
7 | {
8 | private readonly ConcurrentDictionary _lockDict = new();
9 |
10 | //get a lock for use with a lock(){} block
11 | public object GetLock(string name)
12 | {
13 | return _lockDict.GetOrAdd(name, s => new object());
14 | }
15 |
16 | //run a short lock inline using a lambda
17 | public TResult RunWithLock(string name, Func body)
18 | {
19 | lock (_lockDict.GetOrAdd(name, s => new object()))
20 | return body();
21 | }
22 |
23 | //run a short lock inline using a lambda
24 | public void RunWithLock(string name, Action body)
25 | {
26 | lock (_lockDict.GetOrAdd(name, s => new object()))
27 | body();
28 | }
29 |
30 | //remove an old lock object that is no longer needed
31 | public void RemoveLock(string name)
32 | {
33 | _lockDict.TryRemove(name, out _);
34 | }
35 | }
36 | }
--------------------------------------------------------------------------------
/sql-log-shipping-service/Program.cs:
--------------------------------------------------------------------------------
1 | using Microsoft.Extensions.Configuration;
2 | using Microsoft.Extensions.DependencyInjection;
3 | using Microsoft.Extensions.Hosting;
4 | using Serilog;
5 |
6 | namespace LogShippingService
7 | {
8 | internal class Program
9 | {
10 | public static readonly NamedLocker Locker = new();
11 |
12 | private static void Main(string[] args)
13 | {
14 | Log.Logger = new LoggerConfiguration()
15 | .WriteTo.Console()
16 | .CreateLogger();
17 |
18 | Directory.SetCurrentDirectory(AppContext.BaseDirectory);
19 | var configuration = File.Exists(Config.ConfigFile) ? new ConfigurationBuilder()
20 | .SetBasePath(Directory.GetCurrentDirectory())
21 | .AddJsonFile("appsettings.json")
22 | .Build() : null;
23 |
24 | SetupLogging(configuration);
25 | AppConfig.Config = configuration?.GetSection("Config").Get() ?? new Config();
26 |
27 | AppConfig.Config.ApplyCommandLineOptions(args);
28 |
29 | Log.Information("Configuration:\n" + AppConfig.Config.ToString());
30 | try
31 | {
32 | AppConfig.Config.ValidateConfig();
33 | }
34 | catch (Exception ex)
35 | {
36 | Log.Error(ex, "Error validating config.");
37 | return;
38 | }
39 |
40 | var builder = Host.CreateApplicationBuilder();
41 |
42 | // Configure the ShutdownTimeout to infinite
43 | builder.Services.Configure(options =>
44 | options.ShutdownTimeout = Timeout.InfiniteTimeSpan);
45 | builder.Services.AddWindowsService(options =>
46 | {
47 | options.ServiceName = "LogShippingService";
48 | });
49 | builder.Services.AddHostedService();
50 |
51 | var host = builder.Build();
52 | host.Run();
53 | }
54 |
55 | private static void SetupLogging(IConfigurationRoot? configuration)
56 | {
57 | // Check if the Serilog section exists and has content
58 | var serilogSection = configuration?.GetSection("Serilog");
59 | if (configuration != null && serilogSection.Exists() && serilogSection.GetChildren().Any())
60 | {
61 | // Configure Serilog from the configuration file
62 | Log.Logger = new LoggerConfiguration()
63 | .ReadFrom.Configuration(configuration)
64 | .CreateLogger();
65 | }
66 | else
67 | {
68 | // Configure Serilog with default settings programmatically
69 | Log.Logger = new LoggerConfiguration()
70 | .MinimumLevel.Debug()
71 | .WriteTo.Console(outputTemplate: "{Timestamp:yyyy-MM-dd HH:mm:ss.fff zzz} [{Level:u3}] {Message:lj} <{ThreadId}>{NewLine}{Exception}")
72 | .WriteTo.File(path: "Logs/log-.txt",
73 | rollingInterval: RollingInterval.Hour,
74 | retainedFileCountLimit: 24,
75 | outputTemplate: "{Timestamp:yyyy-MM-dd HH:mm:ss.fff zzz} [{Level:u3}] {Message:lj} <{ThreadId}>{NewLine}{Exception}")
76 | .Enrich.FromLogContext()
77 | .Enrich.WithMachineName()
78 | .Enrich.WithThreadId()
79 | .Enrich.WithProperty("Application", "LogShippingService")
80 | .CreateLogger();
81 | }
82 | }
83 | }
84 | }
--------------------------------------------------------------------------------
/sql-log-shipping-service/ReadOnlyBackupSet.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Linq;
4 | using System.Text;
5 | using System.Threading.Tasks;
6 |
7 | namespace LogShippingService
8 | {
    // Groups a set of backup files with the subset selected for restore.
    // NOTE(review): generic type arguments appear to have been stripped from this file
    // during extraction ("List BackupFiles" is not valid C#); the element types cannot be
    // determined from this view - restore them from source control before building.
    internal class ReadOnlyBackupSet
    {
        // Backup files belonging to this set.
        public List BackupFiles=new();
        // Files chosen to be restored; initialized by the caller (hence null!).
        public List ToRestore = null!;
    }
14 | }
15 |
--------------------------------------------------------------------------------
/sql-log-shipping-service/S3Uri.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Linq;
4 | using System.Text;
5 | using System.Text.RegularExpressions;
6 | using System.Threading.Tasks;
7 | using Amazon.S3.Util;
8 |
9 | namespace LogShippingService
10 | {
11 | internal class S3Uri
12 | {
13 | public string Bucket { get; private set; }
14 | public string Key { get; private set; }
15 | public string RegionSystemName { get; private set; }
16 | public Uri Uri { get; private set; }
17 |
18 | public Amazon.RegionEndpoint Region => Amazon.RegionEndpoint.GetBySystemName(RegionSystemName);
19 |
20 | public S3Uri(string s3Uri)
21 | {
22 | Uri = new Uri(s3Uri);
23 | RegionSystemName = ExtractRegionFromHost(Uri.Host);
24 | Bucket = Uri.Host.Split('.')[0];
25 | Key = Uri.AbsolutePath.TrimStart('/');
26 | }
27 |
28 | private string ExtractRegionFromHost(string host)
29 | {
30 | // Regular expression to extract the region from a standard S3 or a virtual-hosted style S3 URI
31 | var regex = new Regex(@"s3[.-](?[a-z0-9-]+)\.amazonaws\.com$", RegexOptions.IgnoreCase);
32 | var match = regex.Match(host);
33 | if (match.Success)
34 | {
35 | return match.Groups["region"].Value;
36 | }
37 | throw new ArgumentException("Region not found in URI.");
38 | }
39 | }
40 | }
--------------------------------------------------------------------------------
/sql-log-shipping-service/SQL/GetDatabases.sql:
--------------------------------------------------------------------------------
/* Returns databases on this instance that are candidates for log shipping restores.
   LR = most recent restore per destination database within the last 14 days.
   backupset types: D=Full, I=Differential, L=Log, P=Partial, Q=Differential partial. */
WITH LR AS (
SELECT rh.destination_database_name,
bs.backup_finish_date,
rh.restore_date,
ROW_NUMBER() OVER(PARTITION BY rh.destination_database_name ORDER BY rh.restore_date DESC) rnum
FROM msdb.dbo.restorehistory rh
JOIN msdb.dbo.backupset bs on rh.backup_set_id = bs.backup_set_id
WHERE rh.restore_date > DATEADD(d,-14,GETUTCDATE())
AND bs.type IN('D','I','L','P','Q')

)
/* msdb stores dates in server local time here adjusted from UTC by the current offset */
SELECT d.name,
DATEADD(mi,DATEDIFF(mi,GETDATE(),GETUTCDATE()),LR.backup_finish_date) AS backup_finish_date,
DATEADD(mi,DATEDIFF(mi,GETDATE(),GETUTCDATE()),LR.restore_date) AS restore_date
FROM sys.databases d
LEFT OUTER JOIN LR ON d.name = LR.destination_database_name AND LR.rnum=1
WHERE (d.state = 1 OR d.is_in_standby=1) /* state 1 = RESTORING, or readable standby */
AND d.recovery_model <> 3 /* Exclude Simple */
/* Exclude databases with a restore in progress */
AND NOT EXISTS( SELECT 1
FROM sys.dm_exec_requests R
JOIN sys.dm_tran_locks L ON R.session_id = L.request_session_id
WHERE R.command='RESTORE DATABASE'
AND L.request_mode='X'
AND L.resource_type='DATABASE'
AND L.resource_database_id = d.database_id
)
ORDER BY LR.backup_finish_date
--------------------------------------------------------------------------------
/sql-log-shipping-service/SQL/GetFilesForLastBackup.sql:
--------------------------------------------------------------------------------
/* Returns the physical device(s) of the most recent backup of type @backup_type for @db.
   Multiple rows are returned when the backup is striped across several media family members. */
SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED
SELECT BMF.physical_device_name,
LB.backup_finish_date,
BMF.device_type
FROM (
SELECT TOP(1) bs.media_set_id,
bs.backup_finish_date
FROM msdb.dbo.backupset bs
WHERE bs.database_name = @db
AND bs.type = @backup_type
AND bs.backup_finish_date >= DATEADD(d,-@MaxBackupAgeForInitialization,GETDATE()) /* For performance & it probably doesn't make sense to initialize from an old backup */
AND bs.is_snapshot=0
ORDER BY backup_finish_date DESC
) AS LB
JOIN msdb.dbo.backupmediafamily BMF ON LB.media_set_id = BMF.media_set_id
--------------------------------------------------------------------------------
/sql-log-shipping-service/SQL/GetRedoStartLSN.sql:
--------------------------------------------------------------------------------
/* Returns the LSN at which redo must resume for a restoring database,
   read from the primary data file (type=0, file_id=1). Used to decide
   which log backup can be applied next. */
SELECT redo_start_lsn
FROM sys.master_files
WHERE database_id = DB_ID(@db)
AND type=0
AND file_id = 1
6 |
--------------------------------------------------------------------------------
/sql-log-shipping-service/SQL/GetUserDatabases.sql:
--------------------------------------------------------------------------------
/* Returns all user databases (database_id > 4 excludes master, tempdb, model, msdb)
   with their recovery model, state and standby flag. */
SELECT name,
recovery_model,
state,
is_in_standby
FROM sys.databases
WHERE database_id>4
--------------------------------------------------------------------------------
/sql-log-shipping-service/Services_5724.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/trimble-oss/sql-log-shipping-service/a6d8f78e06e71649e2bd97feaa520a4ac3e1b4c4/sql-log-shipping-service/Services_5724.ico
--------------------------------------------------------------------------------
/sql-log-shipping-service/SqlStrings.cs:
--------------------------------------------------------------------------------
1 | using System.Reflection;
2 |
3 | namespace LogShippingService
4 | {
5 | internal class SqlStrings
6 | {
7 | public static string GetSqlString(string name)
8 | {
9 | var resourcePath = "LogShippingService.SQL." + name + ".sql";
10 | var assembly = Assembly.GetExecutingAssembly();
11 |
12 | using var stream = assembly.GetManifestResourceStream(resourcePath);
13 | if (stream != null)
14 | {
15 | using StreamReader reader = new(stream);
16 | return reader.ReadToEnd();
17 | }
18 | else
19 | {
20 | throw new ArgumentException($"GetSqlString did not find {name}");
21 | }
22 | }
23 |
24 | public static string GetDatabases => GetSqlString("GetDatabases");
25 |
26 | public static string GetUserDatabases => GetSqlString("GetUserDatabases");
27 |
28 | public static string GetFilesForLastBackup => GetSqlString("GetFilesForLastBackup");
29 |
30 | public static string GetRedoStartLSN => GetSqlString("GetRedoStartLSN");
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/sql-log-shipping-service/Waiter.cs:
--------------------------------------------------------------------------------
1 | using Serilog;
2 | using System;
3 | using System.Collections.Generic;
4 | using System.Linq;
5 | using System.Text;
6 | using System.Threading.Tasks;
7 |
8 | namespace LogShippingService
9 | {
    /// <summary>
    /// Helpers for pausing the service until configured active hours or a specific time.
    /// </summary>
    internal class Waiter
    {
        // Shortcut to the application configuration (Config.Hours = hours when restores may run).
        private static Config Config => AppConfig.Config;

        /// <summary>
        /// Waits (polling once per second) until the current local hour is one of the
        /// configured active hours, or until cancellation is requested.
        /// </summary>
        public static async Task WaitUntilActiveHours(CancellationToken stoppingToken)
        {
            if (CanRestoreLogsNow) return;

            Log.Information("Waiting for active hours to run {Hours}", Config.Hours);

            while (!CanRestoreLogsNow && !stoppingToken.IsCancellationRequested)
            {
                await Task.Delay(1000, stoppingToken);
            }

            if (!stoppingToken.IsCancellationRequested)
            {
                Log.Information("Wait for active hours is complete");
            }
        }

        /// <summary>
        /// Waits until the specified local time. Task.Delay takes an int number of
        /// milliseconds, so waits longer than int.MaxValue ms (~24.8 days) loop.
        /// Throws TaskCanceledException if the token is cancelled during the delay.
        /// </summary>
        public static async Task WaitUntilTime(DateTime waitUntil, CancellationToken stoppingToken)
        {
            int delayMilliseconds;
            do
            {
                // Calculate how long to wait based on when we want the next iteration to start. If we need to wait longer than int.MaxValue (24.8 days), the process will loop.
                delayMilliseconds =
                    (int)Math.Min((waitUntil - DateTime.Now).TotalMilliseconds, int.MaxValue);
                if (delayMilliseconds <= 0) break;
                await Task.Delay(delayMilliseconds, stoppingToken);
            } while (delayMilliseconds == int.MaxValue && waitUntil > DateTime.Now && !stoppingToken.IsCancellationRequested); // Not expected to loop - only if we overflowed the int.MaxValue (24.8 days)
        }

        // True when the current local hour is one of the configured active hours.
        public static bool CanRestoreLogsNow => Config.Hours.Contains(DateTime.Now.Hour);
    }
46 | }
--------------------------------------------------------------------------------
/sql-log-shipping-service/appsettings.json.azure.example:
--------------------------------------------------------------------------------
1 | {
2 | "Config": {
"ContainerUrl": "https://your_storage_account.blob.core.windows.net/your_container_name",
4 | "SASToken": "?sp=...",
5 | "LogFilePath": "LOG/SERVERNAME/{DatabaseName}/",
6 | "MaxThreads": 10,
7 | "Destination": "Data Source=LOCALHOST;Integrated Security=True;Encrypt=True;Trust Server Certificate=True",
8 | "DelayBetweenIterationsMs": 10000,
9 | "OffsetMins": 0,
10 | "MaxProcessingTimeMins": 60
11 | },
12 | "Serilog": {
13 | "Using": [ "Serilog.Sinks.Console", "Serilog.Sinks.File" ],
14 | "MinimumLevel": "Debug",
15 | "WriteTo": [
16 | {
17 | "Name": "Console",
18 | "Args": {
19 | "outputTemplate": "{Timestamp:yyyy-MM-dd HH:mm:ss.fff zzz} [{Level:u3}] {Message:lj} <{ThreadId}>{NewLine}{Exception}"
20 | }
21 | },
22 | {
23 | "Name": "File",
24 | "Args": {
25 | "path": "Logs/log-.txt",
26 | "rollingInterval": "Hour",
27 | "retainedFileCountLimit": 24,
28 | "outputTemplate": "{Timestamp:yyyy-MM-dd HH:mm:ss.fff zzz} [{Level:u3}] {Message:lj} <{ThreadId}>{NewLine}{Exception}"
29 | }
30 | }
31 | ],
32 | "Enrich": [ "FromLogContext", "WithMachineName", "WithThreadId" ],
33 | "Properties": {
34 | "Application": "LogShippingService"
35 | }
36 | }
37 |
38 | }
--------------------------------------------------------------------------------
/sql-log-shipping-service/appsettings.json.unc.example:
--------------------------------------------------------------------------------
1 | {
2 | "Config": {
3 | "LogFilePath": "\\\\BACKUPSERVER\\Backups\\SERVERNAME\\{DatabaseName}\\LOG",
4 | "MaxThreads": 10,
5 | "Destination": "Data Source=LOCALHOST;Integrated Security=True;Encrypt=True;Trust Server Certificate=True",
6 | "DelayBetweenIterationsMs": 10000,
7 | "OffsetMins": 0,
8 | "MaxProcessingTimeMins": 60
9 | },
10 | "Serilog": {
11 | "Using": [ "Serilog.Sinks.Console", "Serilog.Sinks.File" ],
12 | "MinimumLevel": "Debug",
13 | "WriteTo": [
14 | {
15 | "Name": "Console",
16 | "Args": {
17 | "outputTemplate": "{Timestamp:yyyy-MM-dd HH:mm:ss.fff zzz} [{Level:u3}] {Message:lj} <{ThreadId}>{NewLine}{Exception}"
18 | }
19 | },
20 | {
21 | "Name": "File",
22 | "Args": {
23 | "path": "Logs/log-.txt",
24 | "rollingInterval": "Hour",
25 | "retainedFileCountLimit": 24,
26 | "outputTemplate": "{Timestamp:yyyy-MM-dd HH:mm:ss.fff zzz} [{Level:u3}] {Message:lj} <{ThreadId}>{NewLine}{Exception}"
27 | }
28 | }
29 | ],
30 | "Enrich": [ "FromLogContext", "WithMachineName", "WithThreadId" ],
31 | "Properties": {
32 | "Application": "LogShippingService"
33 | }
34 | }
35 |
36 | }
--------------------------------------------------------------------------------
/test/CI_Workflow-Restore-Copy.Tests.ps1:
--------------------------------------------------------------------------------
# Pester tests validating CI workflow results for the "restore copy" scenario:
# the original databases are restored normally while standby copies are log shipped.
Describe 'CI Workflow checks - Restore Copy' {

    It 'Test Standby Count' {
        # Expect 3 DBs to be in standby state
        @(Get-DbaDatabase -SqlInstance "LOCALHOST" -Status "Standby").Count | Should -Be 3
    }
    It 'Test Normal Count' {
        # Expect 3 DBs to be in normal state - we are restoring a copy of these databases
        @(Get-DbaDatabase -SqlInstance "LOCALHOST" -Status "Normal" -Database "LogShipping1","LogShipping2","LogShipping3").Count | Should -Be 3
    }
    It '2 logical backups in same physical file check' {
        # LogShipping1_2.trn has 2 logical backups. If LogShipping1_3.trn is restored, then we handled it OK
        $results= Invoke-DbaQuery -SqlInstance "LOCALHOST" -As PSObject -Query "SELECT TOP(1) bmf.physical_device_name AS LastLogFile
FROM msdb.dbo.restorehistory rsh
INNER JOIN msdb.dbo.backupset bs ON rsh.backup_set_id = bs.backup_set_id
INNER JOIN msdb.dbo.restorefile rf ON rsh.restore_history_id = rf.restore_history_id
INNER JOIN msdb.dbo.backupmediafamily bmf ON bmf.media_set_id = bs.media_set_id
WHERE rsh.restore_type = 'L'
AND rsh.destination_database_name='LogShipping1_Copy'
ORDER BY rsh.restore_history_id DESC;"

        # The last restored log for the copy DB should be file 3, proving both logical backups in file 2 were applied
        $results.LastLogFile | Should -Be "C:\Backup\LOG1\LogShipping1\LogShipping1_3.trn"
    }

}
--------------------------------------------------------------------------------
/test/CI_Workflow.Tests.ps1:
--------------------------------------------------------------------------------
# Pester tests validating the standard CI workflow: all log shipped databases end up in standby
# and multi-logical-backup files are handled correctly.
Describe 'CI Workflow checks' {

    It 'Test Standby Count' {
        # Expect 3 DBs to be in standby state
        @(Get-DbaDatabase -SqlInstance "LOCALHOST" -Status "Standby").Count | Should -Be 3
    }
    It '2 logical backups in same physical file check' {
        # LogShipping1_2.trn has 2 logical backups. If LogShipping1_3.trn is restored, then we handled it OK
        $results= Invoke-DbaQuery -SqlInstance "LOCALHOST" -As PSObject -Query "SELECT TOP(1) bmf.physical_device_name AS LastLogFile
FROM msdb.dbo.restorehistory rsh
INNER JOIN msdb.dbo.backupset bs ON rsh.backup_set_id = bs.backup_set_id
INNER JOIN msdb.dbo.restorefile rf ON rsh.restore_history_id = rf.restore_history_id
INNER JOIN msdb.dbo.backupmediafamily bmf ON bmf.media_set_id = bs.media_set_id
WHERE rsh.restore_type = 'L'
AND rsh.destination_database_name='LogShipping1'
ORDER BY rsh.restore_history_id DESC;"

        # The last restored log should be file 3, proving both logical backups in file 2 were applied
        $results.LastLogFile | Should -Be "C:\Backup\LOG\LogShipping1\LogShipping1_3.trn"
    }

}
--------------------------------------------------------------------------------