├── AKS └── aks.ps1 ├── Arc ├── Arc - SQL Bits 2023.ps1 ├── Arc Demo Data Scotland.ps1 ├── Arc with kubectl │ ├── Arc with Kubectl.ps1 │ ├── bootstrapper-unified.dist.yaml │ ├── bootstrapper-unified.yaml │ ├── controller.yaml │ └── sql-mi.yaml ├── ArcDemos.ipynb ├── Full Arc Deployment.ps1 ├── Global Azure │ └── Global Azure 2021.pdf ├── PASS Summit 2022 - Arc-enabled Data Services.ps1 ├── SQL Sat Vienna 2021 │ └── AzureArc - SQL Sat Vienna.pdf ├── SQLBits 2023 - Azure Arc in 50 Minutes.pdf └── The Future of SQL is Hybrid.pdf ├── BDC ├── Covid │ ├── 01. Covid_SQL_Prepare.ipynb │ ├── 02. Covid_Spark_Population.ipynb │ ├── 03. Covid_Spark_Cases.ipynb │ ├── 04. Covid_SQL.ipynb │ └── README.md ├── Garmin │ ├── 01. Garmin_Spark.ipynb │ ├── 02. Garmin_SQL.ipynb │ └── README.md ├── README.md └── Slides │ ├── BDC_Usergroups.pdf │ ├── README.md │ └── SQLFriday.pdf ├── Containers - You better get on Board ├── 01-Containers.ps1 ├── 02-Kubernetes.ps1 ├── Containers - You better get on board.pdf └── SQL.yaml ├── MVP ├── MVPCheck.ps1 └── README.md ├── PASS Essential 07-2019 ├── Code │ ├── Essential_072019.bimlproj │ ├── Essential_072019.mst │ ├── Essential_072019.mst.pkglyt │ ├── Essential_072019.v50.muo │ └── addedBiml │ │ ├── BimlScripts │ │ ├── 010_Connections.biml │ │ ├── 020_Tables.biml │ │ ├── 030_CreateTables.biml │ │ ├── 040_LoadTables.biml │ │ ├── 050_MasterPackage.biml │ │ └── 090_FullLoad.biml │ │ └── Globals │ │ └── Global1.biml └── Pass_Essential.pdf ├── PASS Summit 2021 └── 1-Arc.ps1 ├── Pass Summit 2020 └── BDC Deployment │ ├── 01_Windows_Tools.ps1 │ ├── 02_single-node-kubeadm.sh │ ├── 03_PrePull.sh │ ├── 04_AKS Deployment.ps1 │ └── Demos.ps1 ├── PassCamp 2022 ├── PVC.yaml ├── SQL.yaml └── preparek8s ├── README.md ├── Repo └── repo.ps1 ├── SQL 2025 └── Anthony and Ben │ ├── Episode 03 - REST APIs.ipynb │ └── README.md ├── SQL Server on Linux, Containers and Kubernetes ├── 01 SQL on Linux.ps1 ├── 02 SQL on Containers & Kubernetes.ps1 ├── PVC.yaml └── SQL.yaml ├── SQLBits_2019 ├── SSIS Performance.sql ├── SSIS_Performance.bak └── SSIS_Performance │ ├── .vs │ └── SSIS_Performance │ │ ├── v14 │ │ └── .suo │ │ └── v15 │ │ └── .suo │ ├── SSIS_Performance.sln │ └── SSIS_Performance │ ├── 00-00_WarmUp.biml │ ├── 00_Warmup.dtsx │ ├── 01-01_Create.dtsx │ ├── 01-01_Environment.biml │ ├── 01-02_BuildMeta.biml │ ├── 01-02_Load_Linear.dtsx │ ├── 01-03_CreateTables.biml │ ├── 01-03_Load_Parallel.dtsx │ ├── 01-04a_Load_Tables_Linear.biml │ ├── 01-04b_Load_Tables_Parallel.biml │ ├── 01-05_Recursion.biml │ ├── 01-06_Dataflow.biml │ ├── 01-TopologySort.vb │ ├── 02-01_CreateStaging.dtsx │ ├── 02-01_Environment.biml │ ├── 02-02_BuildTSTMeta.biml │ ├── 02-02_Evaluate.dtsx │ ├── 02-03_BuildProdMeta.biml │ ├── 02-03_Load.dtsx │ ├── 02-04_CreateTables.biml │ ├── 02-05_CreateEvaluation.biml │ ├── 02-06_CreateLoad.biml │ ├── 02-07_Dataflow.biml │ ├── 02-BimlFunctions.vb │ ├── 03-01_CreateLoad.biml │ ├── 03-01_Load_Containers.dtsx │ ├── Project.params │ ├── SSIS_Performance.database │ ├── SSIS_Performance.dtproj │ ├── SSIS_Performance.dtproj.user │ ├── bin │ └── Development │ │ └── SSIS_Performance.ispac │ └── obj │ └── Development │ ├── 00_Warmup.dtsx │ ├── 01-01_Create.dtsx │ ├── 01-02_Load_Linear.dtsx │ ├── 01-03_Load_Parallel.dtsx │ ├── 01_Create.dtsx │ ├── 02-01_CreateStaging.dtsx │ ├── 02-02_Evaluate.dtsx │ ├── 02-03_Load.dtsx │ ├── 02_Load_Linear.dtsx │ ├── 03-01_Load_Containers.dtsx │ ├── 03_Load_Parallel.dtsx │ ├── BuildLog.xml │ ├── Project.params │ └── SSIS_Performance.dtproj ├── SQLSatRheinlandPrecon └── 
Slides.pdf ├── SSIS and ADF with Biml └── SSIS_Live_Master │ ├── .vs │ └── SSIS_Live_Master │ │ └── v15 │ │ └── .suo │ ├── SSIS_Live_Master.sln │ └── SSIS_Live_Master │ ├── 02_Populate Tables.dtsx │ ├── A-01-Environment.biml │ ├── A-02-TableMeta.biml │ ├── A-03-Create_Staging.biml │ ├── A-03-Create_Staging_CS.biml │ ├── B-01-Environment.biml │ ├── B-02-TableMeta.biml │ ├── B-03-Create_Staging.biml │ ├── C-01-Environment.biml │ ├── C-02-TableMeta.biml │ ├── C-03-Create_Staging.biml │ ├── C-04-Populate_Staging.biml │ ├── D-01-Environment.biml │ ├── D-02-TableMeta.biml │ ├── D-03-Create_Staging.biml │ ├── D-04-Populate_Staging.biml │ ├── E-01-Environment.biml │ ├── E-02-TableMeta.biml │ ├── E-03a-Adf-Preview.biml │ ├── E-03b-Adf-Writer.biml │ ├── E-04a-OnPrem_DataSets.biml │ ├── E-04b-Blob_DataSets.biml │ ├── E-04c-SqlAzure_DataSets.biml │ ├── E-05-Pipeline.biml │ ├── Project.params │ ├── SSIS_Live_Master.database │ ├── SSIS_Live_Master.dtproj │ ├── SSIS_Live_Master.dtproj.user │ ├── bin │ └── Development │ │ └── SSIS_Live_Master.ispac │ └── obj │ └── Development │ ├── 01_CreateStaging.dtsx │ ├── 02_Populate Tables.dtsx │ ├── BuildLog.xml │ ├── Project.params │ └── SSIS_Live_Master.dtproj ├── Synapse ├── SQLBits 2023 - Azure Synapse Link.pdf └── Synapse - SQL Bits 2023.ps1 ├── VSCode ├── keybindings.json └── settings.json └── misc └── solisyon.png /AKS/aks.ps1: -------------------------------------------------------------------------------- 1 | Set-Location ~/desktop/AKS 2 | #choco upgrade kubernetes-cli -y 3 | #az bicep upgrade 4 | Clear-Host 5 | # Parameters 6 | $RG="AKSRG" 7 | $Region="eastus" 8 | $ClusterName="" 9 | $tenant="" 10 | $winpw="" 11 | 12 | # Login and create RG 13 | az login --tenant $Tenant 14 | az account set -s "Azure Data Demos" 15 | az group create -l $Region -n $RG 16 | 17 | # Azure CLI - I ran this before 18 | az aks create -g $RG -n $ClusterName --generate-ssh-keys ` 19 | --network-plugin azure ` 20 | --windows-admin-username azure --windows-admin-password $winpw 21 | 22 | # Add a Windows Node 23 | az aks nodepool add --resource-group $RG --cluster-name $ClusterName ` 24 | --os-type Windows --name npwind --node-count 1 25 | 26 | #az aks create -g $RG -n AKS-AZCLI-SMALL --node-count 2 27 | 28 | #az aks create --generate-ssh-keys -g $RG -n AKS-AZCLI-LARGE ` 29 | # --node-count 4 --node-vm-size Standard_d16_v4 30 | 31 | az aks list -o table 32 | 33 | # And look in Portal 34 | Start-Process https://portal.azure.com/#view/HubsExtension/BrowseResource/resourceType/Microsoft.ContainerService%2FmanagedClusters 35 | 36 | # PowerShell and az module 37 | # Connect-AzAccount -Subscription (az account show --query id -o tsv) -Tenant $tenant 38 | 39 | Get-AzAksVersion -Location $Region | Select-Object OrchestratorVersion 40 | 41 | #New-AzAksCluster -ResourceGroupName $RG ` 42 | # -Name AKS-PS ` 43 | # -NodeCount 4 ` 44 | # -NodeVmSize Standard_d16_v4 ` 45 | # -KubernetesVersion 1.26.6 46 | 47 | Get-AzAksCluster -ResourceGroupName $RG | Select-Object Name 48 | 49 | # ARM Templates 50 | code aks-arm.json 51 | 52 | az bicep decompile --file aks-arm.json --force 53 | code aks-arm.bicep 54 | 55 | #az Deployment group create -f aks-arm.bicep ` 56 | # -g $RG ` 57 | # --parameters sshRSAPublicKey=$SSH clusterName=AKS-ARM-BICEP agentCount=1 58 | 59 | # Communicating with our cluster 60 | az aks get-credentials -g $RG -n $ClusterName 61 | 62 | kubectl cluster-info 63 | kubectl get nodes -o wide 64 | 65 | kubectl create deployment nginx --image=nginx 66 | 67 | code nginx.yaml 68 | kubectl 
apply -f nginx.yaml 69 | 70 | kubectl get service nginx -w 71 | $SERVICEIP=(kubectl get service nginx -o jsonpath='{ .status.loadBalancer.ingress[0].ip }') 72 | kubectl get pods 73 | Start-Process http://$SERVICEIP 74 | 75 | code windows-app.yaml 76 | kubectl apply -f windows-app.yaml 77 | 78 | kubectl get pods -o wide 79 | 80 | # Move imperative nginx to linux node: 81 | # nodeSelector: 82 | # "kubernetes.io/os": linux 83 | kubectl edit deployment nginx 84 | 85 | kubectl get pods -o wide -w 86 | 87 | kubectl describe pod -l app=sample 88 | 89 | # az group delete --resource-group $RG --yes -------------------------------------------------------------------------------- /Arc/Arc - SQL Bits 2023.ps1: -------------------------------------------------------------------------------- 1 | # Making sure we're on the correct cluster 2 | kubectl config use-context kubeadm 3 | 4 | # Set some variables 5 | $Region = "eastus" 6 | $RG = "BitsArc" 7 | az account set -s "Azure Data Demos" 8 | $Subscription=(az account show --query id -o tsv) 9 | $k8sNamespace="arc" 10 | kubectl config set-context --current --namespace=$k8sNamespace 11 | 12 | # And credentials 13 | $admincredentials = New-Object System.Management.Automation.PSCredential ('arcadmin', (ConvertTo-SecureString -String 'P@ssw0rd' -AsPlainText -Force)) 14 | $ENV:AZDATA_USERNAME="$($admincredentials.UserName)" 15 | $ENV:AZDATA_PASSWORD="$($admincredentials.GetNetworkCredential().Password)" 16 | $ENV:ACCEPT_EULA='yes' 17 | 18 | # We could deploy direct from Portal (requires arc connected k8s!) 19 | # https://learn.microsoft.com/en-us/azure/azure-arc/kubernetes/quickstart-connect-cluster?tabs=azure-cli 20 | Start-Process https://portal.azure.com/#create/Microsoft.DataController 21 | 22 | # Let's stick to indirect for today 23 | # Deploy DC from Command Line 24 | az arcdata dc create --connectivity-mode Indirect --name arc-dc-kubeadm --k8s-namespace $k8sNamespace ` 25 | --subscription $Subscription ` 26 | -g $RG -l $Region --storage-class nfs-storage ` 27 | --profile-name azure-arc-kubeadm --infrastructure onpremises --use-k8s 28 | 29 | # Check Status of the DC 30 | az arcdata dc status show --k8s-namespace $k8sNamespace --use-k8s 31 | 32 | kubectl edit arcdc 33 | 34 | kubectl get pods -l plane=control 35 | 36 | # We could also do this in ADS... 37 | 38 | 39 | # We could deploy Postgres... 40 | # But we'll deploy a Managed Instance 41 | 42 | # Business Critical 43 | $bcinstance="mi-bc" 44 | az sql mi-arc create --name $bcinstance --k8s-namespace $k8sNamespace ` 45 | --tier BusinessCritical --dev --replicas 3 ` 46 | --cores-limit 8 --cores-request 2 --memory-limit 32Gi --memory-request 8Gi ` 47 | --volume-size-data 20Gi --volume-size-logs 5Gi --volume-size-backups 20Gi ` 48 | --storage-class-data local-storage --storage-class-datalogs local-storage --storage-class-logs local-storage --storage-class-backups nfs-storage ` 49 | --collation Turkish_CI_AS --agent-enabled true --use-k8s 50 | 51 | # To delete (takes ~ 2 seconds): 52 | # az sql mi-arc delete --name $bcinstance --use-k8s 53 | 54 | # Check the pods that got created 55 | kubectl get pods -l app.kubernetes.io/instance=mi-bc 56 | 57 | # Everything in Arc-enabled Data Services is also Kubernetes native! 58 | kubectl edit sqlmi $bcinstance 59 | 60 | # Let's have AdventureWorks restored quickly... 61 | copy-item E:\backup\AdventureWorks2019.bak . 
-Force 62 | kubectl cp AdventureWorks2019.bak mi-bc-0:/var/opt/mssql/data/AdventureWorks2019.bak -c arc-sqlmi 63 | 64 | 65 | # Let us also grab the new sqlcmd 66 | # http://aka.ms/sqlcmd 67 | $URL=(((Invoke-WebRequest https://api.github.com/repos/microsoft/go-sqlcmd/releases/latest).Content | ConvertFrom-Json).assets ` 68 | | Where-Object {$_.content_type -eq 'application/zip'} |Where-Object { $_.name -like '*windows-x64*'}).browser_download_url 69 | $URL 70 | curl.exe -o sqlcmd.zip $URL -L 71 | Expand-Archive .\sqlcmd.zip -Force 72 | 73 | # Grab endpoint 74 | $Endpoint=(kubectl get sqlmi $bcinstance -o jsonpath='{ .status.endpoints.primary }').split(',') 75 | $env:sqlcmdpassword = $ENV:AZDATA_PASSWORD 76 | 77 | # Create a new context for sqlcmd 78 | .\sqlcmd\sqlcmd config add-user --username $ENV:AZDATA_USERNAME --name arcmi --password-encryption none 79 | .\sqlcmd\sqlcmd config add-endpoint --address $Endpoint[0] --port $Endpoint[1] --name arcmi 80 | .\sqlcmd\sqlcmd config add-context --endpoint arcmi --user arcmi --name arcmi 81 | 82 | # We have a new context (which is the current) 83 | .\sqlcmd\sqlcmd config get-contexts 84 | 85 | # We can run a restore 86 | .\sqlcmd\sqlcmd query "RESTORE DATABASE AdventureWorks2019 FROM DISK = N'/var/opt/mssql/data/AdventureWorks2019.bak' WITH MOVE 'AdventureWorks2017' TO '/var/opt/mssql/data/AdventureWorks2019.mdf', MOVE 'AdventureWorks2017_Log' TO '/var/opt/mssql/data/AdventureWorks2019_Log.ldf'" 87 | .\sqlcmd\sqlcmd query "SELECT name from sys.databases" 88 | 89 | # We can also check that in ADS 90 | .\sqlcmd\sqlcmd open ads 91 | 92 | # We could have added (local!) AD Auth 93 | # https://learn.microsoft.com/en-us/azure/azure-arc/data/deploy-active-directory-sql-managed-instance 94 | 95 | # We can see our endpoint and state 96 | az sql mi-arc list --k8s-namespace $k8sNamespace --use-k8s -o table 97 | 98 | # We can scale our Instances - here or in ADS 99 | # az sql mi-arc update --name $gpinstance --cores-limit 8 --cores-request 4 ` 100 | # --memory-limit 16Gi --memory-request 8Gi --k8s-namespace $k8sNamespace --use-k8s 101 | 102 | # All this has full built-in HA through k8s and also MI when in BC tier 103 | # General purpose - HA is provided by k8s 104 | # Business criticial - HA is an AG 105 | # Determine which Pod is primary 106 | for ($i=0; $i -le 2; $i++){ 107 | kubectl get pod ("$($bcinstance)-$i") -o jsonpath="{.metadata.labels}" | ConvertFrom-Json | grep -v controller | grep -v app | grep -v arc-resource | grep -v -e '^$' 108 | } 109 | 110 | # Delete a Pod 111 | kubectl delete pod mi-bc-0 112 | kubectl get pods -l app.kubernetes.io/instance=mi-bc 113 | 114 | # Determine which is primary now 115 | for ($i=0; $i -le 2; $i++){ 116 | kubectl get pod ("$($bcinstance)-$i") -o jsonpath="{.metadata.labels}" | ConvertFrom-Json | grep -v controller | grep -v app | grep -v arc-resource | grep -v -e '^$' 117 | } 118 | 119 | # And we can query this immediately! 120 | .\sqlcmd\sqlcmd query "SELECT Name FROM sys.Databases" 121 | 122 | # If things go wrong, you can re-provision individual replicas: 123 | # az sql mi-arc reprovision-replica -n -k --use-k8s 124 | 125 | # Upgrades 126 | # Check versions 127 | az arcdata dc list-upgrades -k $k8sNamespace 128 | # az arcdata dc upgrade -k $k8sNamespace --use-k8s --desired-version "v1.17.0_2023-03-14" 129 | # Or check ADS! 
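# Sketch (assumption, not part of the original demo): after the data controller upgrade,
# each managed instance is upgraded separately - the version string below is only illustrative.
# az sql mi-arc upgrade --name $bcinstance --k8s-namespace $k8sNamespace --use-k8s `
#   --desired-version "v1.17.0_2023-03-14"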
130 | $ENV:AZDATA_PASSWORD | Set-Clipboard 131 | # where: 132 | # We can add, managed, monitor and query those through Grafana/Kibana 133 | # We could also stream to Kafka 134 | # Or we use a TelemetryRouter 135 | # https://learn.microsoft.com/en-us/azure/azure-arc/data/deploy-telemetry-router 136 | 137 | # Backup / Restore 138 | # Lets modify some data... 139 | .\sqlcmd\sqlcmd query "Update adventureworks2019.person.person set Lastname = 'Weissman',Firstname='Ben'" 140 | 141 | # ooops - that was dumb 142 | # Let's fix it 143 | $PointInTime=(Get-Date).AddSeconds(-120).ToString("yyyy-MM-ddTHH:mm:ssZ") 144 | $PointInTime 145 | az sql midb-arc restore --managed-instance $bcinstance --name AdventureWorks2019 --dest-name AdventureWorks2019_Restore ` 146 | --k8s-namespace arc --time $PointInTime --use-k8s 147 | 148 | # And: 149 | .\sqlcmd\sqlcmd query "SELECT TOP 3 Firstname,lastname from adventureworks2019.person.person" 150 | .\sqlcmd\sqlcmd query "SELECT TOP 3 Firstname,lastname from adventureworks2019_restore.person.person" 151 | 152 | # No Differential, Log or any other manual backups 153 | 154 | 155 | # Connect to Azure Monitor: 156 | az group create --name $RG --location $Region 157 | # Create Service Principal 158 | $SP=(az ad sp create-for-rbac --name http://BitsArcDemoSP --role Contributor --scope subscriptions/$Subscription| ConvertFrom-Json) 159 | 160 | # Add Role 161 | az role assignment create --assignee $SP.appId --role "Monitoring Metrics Publisher" --scope subscriptions/$Subscription 162 | 163 | # Grab our LAWS ID and credentials again 164 | $LAWS=(az monitor log-analytics workspace create -g $RG -n ArcLAWS| ConvertFrom-Json) 165 | $LAWSKEYS=(az monitor log-analytics workspace get-shared-keys -g $RG -n ArcLAWS | ConvertFrom-Json) 166 | 167 | # For Direct connected mode: 168 | # Connect the Kubernetes Cluster to Azure (Arc-enabled Kubernetes) 169 | # Enable the Cluster for Custom Locations 170 | # Deploy Custom Location and DC from Portal 171 | 172 | # In indirect connected mode: 173 | 174 | # Store keys 175 | $Env:SPN_AUTHORITY='https://login.microsoftonline.com' 176 | $Env:WORKSPACE_ID=$LAWS.customerId 177 | $Env:WORKSPACE_SHARED_KEY=$LAWSKEYS.primarySharedKey 178 | $Env:SPN_CLIENT_ID=$SP.appId 179 | $Env:SPN_CLIENT_SECRET=$SP.password 180 | $Env:SPN_TENANT_ID=$SP.tenant 181 | $Env:AZDATA_VERIFY_SSL='no' 182 | 183 | # Export our logs and metrics (and usage) 184 | # az arcdata dc export -t usage --path usage.json -k $k8sNamespace --force --use-k8s 185 | az arcdata dc export -t metrics --path metrics.json -k $k8sNamespace --force --use-k8s 186 | az arcdata dc export -t logs --path logs.json -k $k8sNamespace --force --use-k8s 187 | 188 | # Upload the data to Azure - this should be a scheduled job. 
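# Sketch (assumption): one way to run the export/upload pair below on a schedule from a
# Windows box is a scheduled task pointing at a small wrapper script - all names are illustrative.
# $Action  = New-ScheduledTaskAction -Execute 'pwsh.exe' -Argument '-File C:\Scripts\arc-dc-upload.ps1'
# $Trigger = New-ScheduledTaskTrigger -Daily -At 02:00
# Register-ScheduledTask -TaskName 'ArcDcUpload' -Action $Action -Trigger $Trigger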
189 | # az arcdata dc upload --path usage.json 190 | az arcdata dc upload --path metrics.json 191 | az arcdata dc upload --path logs.json 192 | 193 | remove-item *.json 194 | 195 | # Check in portal 196 | Start-Process ("https://portal.azure.com/#@"+ (az account show --query tenantId -o tsv) + "/resource" + (az group show -n $RG --query id -o tsv)) 197 | 198 | # Cleanup when done 199 | az group delete -g $RG --yes 200 | az ad sp delete --id $SP.appId 201 | kubectl delete namespace $k8sNamespace 202 | .\sqlcmd\sqlcmd config delete-context --name arcmi 203 | # az logout -------------------------------------------------------------------------------- /Arc/Arc Demo Data Scotland.ps1: -------------------------------------------------------------------------------- 1 | # Let's check out our environment 2 | # We run a few VMs on Hyper-V 3 | Get-VM 4 | 5 | # What does that look like in k8s? 6 | kubectl get nodes 7 | kubectl get nodes -o wide 8 | 9 | kubectl delete ns squil 10 | kubectl get pods 11 | 12 | # A big time factor is image download - so we've pre-pulled them 13 | kubectl get nodes (kubectl get nodes -o jsonpath="{.items[1].metadata.name}" ) -o jsonpath="{range .status.images[*]}{.names[1]}{'\n'}{end}" | grep arcdata 14 | 15 | # It's ~ 30 GB (for current and previous version) - PER WORKER! 16 | $TotalSize = 0 17 | ((kubectl get nodes (kubectl get nodes -o jsonpath="{.items[1].metadata.name}" ) -o jsonpath="{range .status.images[*]}{.sizeBytes}{'\t'}{.names[1]}{'\n'}{end}" | grep arcdata).Split("`t") | grep -v mcr).Split("`n") | ForEach-Object { $TotalSize += $_} 18 | [Math]::Round(($TotalSize/1024/1024),2) 19 | 20 | # OK, let's login to Azure 21 | $subscriptionName = "Azure Data Demos" 22 | # az login --only-show-errors -o table --query Dummy 23 | az account set -s $SubscriptionName 24 | 25 | # Set some variables 26 | $RG="ArcDataRG" 27 | $Region="eastus" 28 | $Subscription=(az account show --query id -o tsv) 29 | $k8sNamespace="arc" 30 | 31 | # And credentials 32 | $admincredentials = New-Object System.Management.Automation.PSCredential ('arcadmin', (ConvertTo-SecureString -String 'P@ssw0rd' -AsPlainText -Force)) 33 | $ENV:AZDATA_USERNAME="$($admincredentials.UserName)" 34 | $ENV:AZDATA_PASSWORD="$($admincredentials.GetNetworkCredential().Password)" 35 | $ENV:SQLCMDPASSWORD="$($admincredentials.GetNetworkCredential().Password)" 36 | $ENV:ACCEPT_EULA='yes' 37 | $ENV:SQLCMDPASSWORD=$ENV:AZDATA_PASSWORD 38 | 39 | # Create an RG 40 | az group create -l $Region -n $RG 41 | 42 | # We could deploy direct from Portal (requires arc connected k8s!) 
- Jes 43 | Start-Process https://portal.azure.com/#create/Microsoft.DataController 44 | 45 | # Let's stick to indirect for today 46 | # Deploy DC from Command Line - Ben 47 | az arcdata dc create --connectivity-mode Indirect --name arc-dc-kubeadm --k8s-namespace $k8sNamespace ` 48 | --subscription $Subscription ` 49 | -g $RG -l eastus --storage-class local-storage ` 50 | --profile-name azure-arc-kubeadm --infrastructure onpremises --use-k8s 51 | 52 | # Check ADS while running 53 | 54 | # This created a new Namespace for us 55 | kubectl get namespace 56 | 57 | # Check the pods that got created 58 | kubectl get pods -n $k8sNamespace 59 | 60 | # Check Status of the DC 61 | az arcdata dc status show --k8s-namespace arc --use-k8s 62 | 63 | # Add Controller in ADS 64 | 65 | # Create MIs 66 | $gpinstance = "mi-gp" 67 | # $bcinstance = "mi-bc" 68 | 69 | # General Purpose 70 | az sql mi-arc create -n $gpinstance --k8s-namespace $k8sNamespace --use-k8s ` 71 | --storage-class-data local-storage ` 72 | --storage-class-datalogs local-storage ` 73 | --storage-class-logs local-storage ` 74 | --storage-class-backups nfs-storage ` 75 | --cores-limit 4 --cores-request 2 ` 76 | --memory-limit 8Gi --memory-request 4Gi ` 77 | --tier GeneralPurpose --dev 78 | 79 | # Check the pods that got created 80 | kubectl get pods -n $k8sNamespace 81 | 82 | # Function to reliably get VM IPs 83 | function GetIP { 84 | param ( 85 | $VMName 86 | ) 87 | $MacAddr=(Get-VMNetworkAdapter -VMName $VMName | Select -ExpandProperty MacAddress).Insert(2,"-").Insert(5,"-").Insert(8,"-").Insert(11,"-").Insert(14,"-") 88 | $IP=(Get-NetNeighbor | where LinkLayerAddress -eq $MacAddr | Select -ExpandProperty IPAddress) 89 | $IP 90 | } 91 | 92 | ssh -t ("demo@" + (GetIP('k8s-nfs'))) 'ls -l /srv/exports/volumes/dynamic && ls -l /srv/exports/volumes/dynamic/*' 93 | 94 | # Everything in Arc-enabled Data Services is also Kubernetes native! 95 | kubectl edit sqlmi $gpinstance -n $k8sNamespace 96 | 97 | # Business Critical 98 | #az sql mi-arc create --name $bcinstance --k8s-namespace $k8sNamespace ` 99 | #--tier BusinessCritical --dev --replicas 3 ` 100 | #--cores-limit 8 --cores-request 2 --memory-limit 32Gi --memory-request 8Gi ` 101 | #--volume-size-data 20Gi --volume-size-logs 5Gi --volume-size-backups 20Gi ` 102 | #--collation Turkish_CI_AS --agent-enabled true --use-k8s 103 | 104 | # We can scale our Instances 105 | # az sql mi-arc update --name $gpinstance --cores-limit 8 --cores-request 4 ` 106 | # --memory-limit 16Gi --memory-request 8Gi --k8s-namespace $k8sNamespace --use-k8s 107 | 108 | # Let's restore AdventureWorks to our GP Instance - Ben 109 | copy-item e:\Backup\AdventureWorks2019.bak . 
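# kubectl cp copies the local .bak into the arc-sqlmi container of pod mi-gp-0 (syntax: <local file> <pod>:<path> -c <container>)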
110 | kubectl cp AdventureWorks2019.bak mi-gp-0:/var/opt/mssql/data/AdventureWorks2019.bak -n $k8sNamespace -c arc-sqlmi 111 | Remove-Item AdventureWorks2019.bak 112 | 113 | # We can see the file 114 | kubectl exec mi-gp-0 -n $k8sNamespace -c arc-sqlmi -- ls -l /var/opt/mssql/data/AdventureWorks2019.bak 115 | 116 | # We could restore in the Pod - or using the Instance's Endpoint 117 | kubectl get sqlmi $gpinstance -n $k8sNamespace 118 | $SQLEndpoint=(kubectl get sqlmi $gpinstance -n $k8sNamespace -o jsonpath='{ .status.primaryEndpoint }') 119 | 120 | # No AdventureWorks 121 | sqlcmd -S $SQLEndpoint -U $ENV:AZDATA_USERNAME -Q "SELECT Name FROM sys.Databases" 122 | 123 | # Restore 124 | sqlcmd -S $SQLEndpoint -U $ENV:AZDATA_USERNAME -Q "RESTORE DATABASE AdventureWorks2019 FROM DISK = N'/var/opt/mssql/data/AdventureWorks2019.bak' WITH MOVE 'AdventureWorks2017' TO '/var/opt/mssql/data/AdventureWorks2019.mdf', MOVE 'AdventureWorks2017_Log' TO '/var/opt/mssql/data/AdventureWorks2019_Log.ldf'" 125 | 126 | # Tadaaaaaa 127 | sqlcmd -S $SQLEndpoint -U $ENV:AZDATA_USERNAME -Q "SELECT Name FROM sys.Databases" 128 | 129 | # We can add, managed, monitor and query those from ADS! 130 | 131 | # All this has full built-in HA through k8s and also MI when in BC tier 132 | 133 | # Upgrades 134 | # Check versions 135 | az arcdata dc list-upgrades -k $k8sNamespace 136 | 137 | $SQLEndpoint=(kubectl get sqlmi $gpinstance -n $k8sNamespace -o jsonpath='{ .status.primaryEndpoint }') 138 | sqlcmd -S $SQLEndpoint -U $ENV:AZDATA_USERNAME -Q "SELECT @@version" 139 | 140 | az sql mi-arc show -n $gpinstance --use-k8s --k8s-namespace $k8sNamespace | ConvertFrom-Json 141 | # az sql mi-arc upgrade -n $gpinstance --use-k8s --k8s-namespace $k8sNamespace 142 | 143 | # Backup / Restore 144 | 145 | $PointInTime=(Get-Date).AddSeconds(-120).ToString("yyyy-MM-ddTHH:mm:ssZ") 146 | $PointInTime 147 | 148 | az sql midb-arc restore --managed-instance $gpinstance --name AdventureWorks2019 --dest-name AdventureWorks2019_Restore ` 149 | --k8s-namespace arc --time $PointInTime --use-k8s --dry-run 150 | 151 | # Connect to Azure Monitor: 152 | # Create Service Principal 153 | $SP=(az ad sp create-for-rbac --name http://ArcDemoSP --role Contributor --scope subscriptions/$Subscription| ConvertFrom-Json) 154 | 155 | # Add Role 156 | az role assignment create --assignee $SP.appId --role "Monitoring Metrics Publisher" --scope subscriptions/$Subscription 157 | 158 | # Create Log Analytics Workspace and retrieve it's credentials 159 | $LAWS=(az monitor log-analytics workspace create -g $RG -n ArcLAWS| ConvertFrom-Json) 160 | $LAWSKEYS=(az monitor log-analytics workspace get-shared-keys -g $RG -n ArcLAWS | ConvertFrom-Json) 161 | 162 | # For Direct connected mode: 163 | # Connect the Kubernetes Cluster to Azure (Arc-enabled Kubernetes) 164 | # Enable the Cluster for Custom Locations 165 | # Deploy Custom Location and DC from Portal 166 | 167 | # In indirect connected mode: 168 | 169 | # Store keys 170 | $Env:SPN_AUTHORITY='https://login.microsoftonline.com' 171 | $Env:WORKSPACE_ID=$LAWS.customerId 172 | $Env:WORKSPACE_SHARED_KEY=$LAWSKEYS.primarySharedKey 173 | $Env:SPN_CLIENT_ID=$SP.appId 174 | $Env:SPN_CLIENT_SECRET=$SP.password 175 | $Env:SPN_TENANT_ID=$SP.tenant 176 | $Env:AZDATA_VERIFY_SSL='no' 177 | 178 | # Export our logs and metrics (and usage) 179 | # az arcdata dc export -t usage --path usage.json -k $k8sNamespace --force --use-k8s 180 | az arcdata dc export -t metrics --path metrics.json -k $k8sNamespace --force --use-k8s 181 | az arcdata 
dc export -t logs --path logs.json -k $k8sNamespace --force --use-k8s 182 | 183 | # Upload the data to Azure - this should be a scheduled job. 184 | # az arcdata dc upload --path usage.json 185 | az arcdata dc upload --path metrics.json 186 | az arcdata dc upload --path logs.json 187 | 188 | remove-item *.json 189 | 190 | # Check in portal 191 | Start-Process ("https://portal.azure.com/#@"+ (az account show --query tenantId -o tsv) + "/resource" + (az group show -n $RG --query id -o tsv)) 192 | 193 | # Cleanup when done 194 | kubectl delete namespace arc 195 | az group delete -g $RG --yes 196 | az ad sp delete --id $SP.appId 197 | # az logout -------------------------------------------------------------------------------- /Arc/Arc with kubectl/Arc with Kubectl.ps1: -------------------------------------------------------------------------------- 1 | set-location C:\Users\demo\Desktop\Code\Arc 2 | 3 | kubectl get nodes -o wide 4 | 5 | kubectl get sc 6 | 7 | kubectl get ns 8 | 9 | kubectl get crd 10 | 11 | kubectl get crd | grep -v metallb 12 | 13 | grep ^kind: bootstrapper-unified.yaml 14 | code -d bootstrapper-unified.yaml bootstrapper-unified.dist.yaml 15 | 16 | kubectl config set-context --current --namespace=arc 17 | 18 | kubectl apply -f bootstrapper-unified.yaml 19 | 20 | kubectl get pod -l app=bootstrapper -w 21 | 22 | kubectl get crd | grep -v metallb 23 | 24 | code controller.yaml 25 | 26 | kubectl create secret generic metricsui-admin-secret --from-literal=username=arcadmin --from-literal=password=SuperSecretP@ssw0rd 27 | kubectl create secret generic logsui-admin-secret --from-literal=username=arcadmin --from-literal=password=SuperSecretP@ssw0rd 28 | 29 | kubectl get secret logsui-admin-secret -o jsonpath='{ .data }' | ConvertFrom-Json 30 | 31 | kubectl apply -f controller.yaml 32 | 33 | kubectl get datacontroller 34 | 35 | kubectl get pods 36 | 37 | kubectl get pvc 38 | 39 | kubectl get pods -w 40 | 41 | kubectl get datacontroller 42 | 43 | kubectl describe datacontroller 44 | 45 | kubectl get svc 46 | 47 | Start-Process ("https://" + (kubectl get svc metricsui-external-svc -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + ":3000") 48 | 49 | code sql-mi.yaml 50 | 51 | kubectl create secret generic mi-login-secret --from-literal=username=arcadmin --from-literal=password=SuperSecretP@ssw0rd 52 | 53 | kubectl apply -f sql-mi.yaml 54 | 55 | kubectl get sqlmi 56 | 57 | kubectl get pvc -l controller=sql-mi-1 58 | 59 | kubectl get pods -w 60 | 61 | kubectl get sqlmi 62 | 63 | kubectl get svc -l controller=sql-mi-1 64 | 65 | $Env:SQLCMDSERVER=(kubectl get svc sql-mi-1-external-svc -o jsonpath='{.status.loadBalancer.ingress[0].ip}') 66 | $env:sqlcmduser='arcadmin' 67 | $env:sqlcmdpassword='SuperSecretP@ssw0rd' 68 | sqlcmd -Q "SELECT @@Version" 69 | 70 | kubectl get sqlmi -o jsonpath='{.items[0].spec.settings}' | ConvertFrom-Json -------------------------------------------------------------------------------- /Arc/Arc with kubectl/bootstrapper-unified.dist.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | annotations: 5 | openshift.io/sa.scc.supplemental-groups: 1000700001/10000 #required for OpenShift 6 | openshift.io/sa.scc.uid-range: 1000700001/10000 #required for OpenShift 7 | labels: 8 | arcdata.microsoft.com/namespace: arc 9 | kubernetes.io/metadata.name: arc 10 | name: {{NAMESPACE}} 11 | --- 12 | apiVersion: rbac.authorization.k8s.io/v1 13 | kind: ClusterRoleBinding 14 | metadata: 15 | 
name: {{NAMESPACE}}:crb-deployer 16 | subjects: 17 | - kind: ServiceAccount 18 | name: sa-arcdata-deployer 19 | namespace: {{NAMESPACE}} 20 | roleRef: 21 | kind: ClusterRole 22 | name: {{NAMESPACE}}:cr-deployer 23 | apiGroup: rbac.authorization.k8s.io 24 | --- 25 | apiVersion: rbac.authorization.k8s.io/v1 26 | kind: Role 27 | metadata: 28 | name: arcdata-deployer-role 29 | rules: 30 | - apiGroups: 31 | - "" 32 | resources: 33 | - pods/log 34 | verbs: 35 | - get 36 | - apiGroups: 37 | - "" 38 | resources: 39 | - secrets 40 | verbs: 41 | - get 42 | - delete 43 | - apiGroups: 44 | - "" 45 | resourceNames: 46 | - sa-arc-webhook-job 47 | - sa-arc-bootstrapper 48 | resources: 49 | - serviceaccounts 50 | verbs: 51 | - delete 52 | - apiGroups: 53 | - apps 54 | resources: 55 | - deployments 56 | verbs: 57 | - delete 58 | - apiGroups: 59 | - rbac.authorization.k8s.io 60 | resources: 61 | - roles 62 | - rolebindings 63 | verbs: 64 | - delete 65 | - apiGroups: 66 | - batch 67 | resources: 68 | - jobs 69 | verbs: 70 | - create 71 | - get 72 | - list 73 | - delete 74 | - apiGroups: 75 | - arcdata.microsoft.com 76 | resources: 77 | - datacontrollers 78 | verbs: 79 | - delete 80 | --- 81 | apiVersion: rbac.authorization.k8s.io/v1 82 | kind: Role 83 | metadata: 84 | name: bootstrapper-grantor-role 85 | rules: 86 | - apiGroups: 87 | - "" 88 | resources: 89 | - pods 90 | verbs: 91 | - delete 92 | - get 93 | - list 94 | - patch 95 | - update 96 | - watch 97 | - apiGroups: 98 | - "" 99 | resources: 100 | - configmaps 101 | - persistentvolumeclaims 102 | - secrets 103 | verbs: 104 | - delete 105 | - apiGroups: 106 | - "" 107 | resources: 108 | - configmaps 109 | - events 110 | - persistentvolumeclaims 111 | - secrets 112 | - serviceaccounts 113 | - services 114 | verbs: 115 | - create 116 | - get 117 | - list 118 | - patch 119 | - update 120 | - watch 121 | - apiGroups: 122 | - apps 123 | resources: 124 | - daemonsets 125 | - deployments 126 | - replicasets 127 | - statefulsets 128 | verbs: 129 | - create 130 | - get 131 | - list 132 | - patch 133 | - update 134 | - watch 135 | - apiGroups: 136 | - rbac.authorization.k8s.io 137 | resources: 138 | - roles 139 | - rolebindings 140 | verbs: 141 | - create 142 | - get 143 | - list 144 | - patch 145 | - update 146 | - apiGroups: 147 | - sql.arcdata.microsoft.com 148 | - tasks.sql.arcdata.microsoft.com 149 | - tasks.arcdata.microsoft.com 150 | - arcdata.microsoft.com 151 | resources: 152 | - "*" 153 | verbs: 154 | - create 155 | - get 156 | - list 157 | - watch 158 | - patch 159 | - update 160 | - apiGroups: 161 | - clusterconfig.azure.com 162 | resources: 163 | - azureclusteridentityrequests 164 | verbs: 165 | - create 166 | - delete 167 | - get 168 | - apiGroups: 169 | - clusterconfig.azure.com 170 | resources: 171 | - azureclusteridentityrequests/status 172 | verbs: 173 | - patch 174 | - update 175 | --- 176 | apiVersion: rbac.authorization.k8s.io/v1 177 | kind: ClusterRole 178 | metadata: 179 | name: {{NAMESPACE}}:cr-deployer 180 | rules: 181 | - apiGroups: 182 | - apiextensions.k8s.io 183 | resources: 184 | - customresourcedefinitions 185 | verbs: 186 | - create 187 | - list 188 | - get 189 | - watch 190 | - apiGroups: 191 | - apiextensions.k8s.io 192 | resourceNames: 193 | - activedirectoryconnectors.arcdata.microsoft.com 194 | - sqlmanagedinstancemonitoringprofiles.arcdata.microsoft.com 195 | - datacontrollers.arcdata.microsoft.com 196 | - exporttasks.tasks.arcdata.microsoft.com 197 | - failovergroups.sql.arcdata.microsoft.com 198 | - 
kafkas.arcdata.microsoft.com 199 | - monitors.arcdata.microsoft.com 200 | - telemetrycollectors.arcdata.microsoft.com 201 | - postgresqls.arcdata.microsoft.com 202 | - sqlmanagedinstancerestoretasks.tasks.sql.arcdata.microsoft.com 203 | - sqlmanagedinstances.sql.arcdata.microsoft.com 204 | - telemetryrouters.arcdata.microsoft.com 205 | resources: 206 | - customresourcedefinitions 207 | verbs: 208 | - update 209 | - patch 210 | - apiGroups: 211 | - admissionregistration.k8s.io 212 | resources: 213 | - mutatingwebhookconfigurations 214 | verbs: 215 | - create 216 | - apiGroups: 217 | - admissionregistration.k8s.io 218 | resources: 219 | - mutatingwebhookconfigurations 220 | verbs: 221 | - delete 222 | - get 223 | - patch 224 | - apiGroups: 225 | - rbac.authorization.k8s.io 226 | resources: 227 | - clusterroles 228 | verbs: 229 | - create 230 | - patch 231 | - get 232 | - apiGroups: 233 | - rbac.authorization.k8s.io 234 | resources: 235 | - clusterroles 236 | verbs: 237 | - delete 238 | - apiGroups: 239 | - rbac.authorization.k8s.io 240 | resources: 241 | - clusterrolebindings 242 | verbs: 243 | - create 244 | - patch 245 | - get 246 | - apiGroups: 247 | - rbac.authorization.k8s.io 248 | resources: 249 | - clusterrolebindings 250 | verbs: 251 | - delete 252 | - apiGroups: 253 | - "" 254 | resources: 255 | - namespaces 256 | verbs: 257 | - create 258 | - list 259 | - apiGroups: 260 | - "" 261 | resources: 262 | - namespaces 263 | verbs: 264 | - get 265 | - patch 266 | - apiGroups: 267 | - "" 268 | resources: 269 | - nodes/stats 270 | - nodes/proxy 271 | - pods 272 | verbs: 273 | - list 274 | - get 275 | 276 | --- 277 | 278 | apiVersion: v1 279 | kind: ServiceAccount 280 | metadata: 281 | name: sa-arcdata-deployer 282 | --- 283 | apiVersion: rbac.authorization.k8s.io/v1 284 | kind: RoleBinding 285 | metadata: 286 | name: bootstrapper-grantor-role-binding 287 | subjects: 288 | - kind: ServiceAccount 289 | name: sa-arcdata-deployer 290 | roleRef: 291 | apiGroup: rbac.authorization.k8s.io 292 | kind: Role 293 | name: bootstrapper-grantor-role 294 | 295 | --- 296 | 297 | apiVersion: rbac.authorization.k8s.io/v1 298 | kind: RoleBinding 299 | metadata: 300 | name: arcdata-deployer-role-binding 301 | subjects: 302 | - kind: ServiceAccount 303 | name: sa-arcdata-deployer 304 | roleRef: 305 | apiGroup: rbac.authorization.k8s.io 306 | kind: Role 307 | name: arcdata-deployer-role 308 | --- 309 | apiVersion: batch/v1 310 | kind: Job 311 | metadata: 312 | name: arc-bootstrapper-job 313 | spec: 314 | template: 315 | spec: 316 | nodeSelector: 317 | kubernetes.io/os: linux 318 | containers: 319 | - name: bootstrapper 320 | image: mcr.microsoft.com/arcdata/arc-bootstrapper:v1.13.0_2022-11-08 321 | imagePullPolicy: Always 322 | args: 323 | - -image 324 | - mcr.microsoft.com/arcdata/arc-bootstrapper:v1.13.0_2022-11-08 325 | - -policy 326 | - Always 327 | - -chart 328 | - /opt/helm/arcdataservices 329 | - -bootstrap 330 | command: 331 | - /opt/bootstrapper/bin/bootstrapper 332 | imagePullSecrets: 333 | - name: arc-private-registry 334 | restartPolicy: Never 335 | serviceAccountName: sa-arcdata-deployer 336 | ttlSecondsAfterFinished: 86400 #24 hours 337 | backoffLimit: 0 -------------------------------------------------------------------------------- /Arc/Arc with kubectl/bootstrapper-unified.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | labels: 5 | arcdata.microsoft.com/namespace: arc 6 | kubernetes.io/metadata.name: 
arc 7 | name: arc 8 | --- 9 | apiVersion: rbac.authorization.k8s.io/v1 10 | kind: ClusterRoleBinding 11 | metadata: 12 | name: arc:crb-deployer 13 | subjects: 14 | - kind: ServiceAccount 15 | name: sa-arcdata-deployer 16 | namespace: arc 17 | roleRef: 18 | kind: ClusterRole 19 | name: arc:cr-deployer 20 | apiGroup: rbac.authorization.k8s.io 21 | --- 22 | apiVersion: rbac.authorization.k8s.io/v1 23 | kind: Role 24 | metadata: 25 | name: arcdata-deployer-role 26 | rules: 27 | - apiGroups: 28 | - "" 29 | resources: 30 | - pods/log 31 | verbs: 32 | - get 33 | - apiGroups: 34 | - "" 35 | resources: 36 | - secrets 37 | verbs: 38 | - get 39 | - delete 40 | - apiGroups: 41 | - "" 42 | resourceNames: 43 | - sa-arc-webhook-job 44 | - sa-arc-bootstrapper 45 | resources: 46 | - serviceaccounts 47 | verbs: 48 | - delete 49 | - apiGroups: 50 | - apps 51 | resources: 52 | - deployments 53 | verbs: 54 | - delete 55 | - apiGroups: 56 | - rbac.authorization.k8s.io 57 | resources: 58 | - roles 59 | - rolebindings 60 | verbs: 61 | - delete 62 | - apiGroups: 63 | - batch 64 | resources: 65 | - jobs 66 | verbs: 67 | - create 68 | - get 69 | - list 70 | - delete 71 | - apiGroups: 72 | - arcdata.microsoft.com 73 | resources: 74 | - datacontrollers 75 | verbs: 76 | - delete 77 | --- 78 | apiVersion: rbac.authorization.k8s.io/v1 79 | kind: Role 80 | metadata: 81 | name: bootstrapper-grantor-role 82 | rules: 83 | - apiGroups: 84 | - "" 85 | resources: 86 | - pods 87 | verbs: 88 | - delete 89 | - get 90 | - list 91 | - patch 92 | - update 93 | - watch 94 | - apiGroups: 95 | - "" 96 | resources: 97 | - configmaps 98 | - persistentvolumeclaims 99 | - secrets 100 | verbs: 101 | - delete 102 | - apiGroups: 103 | - "" 104 | resources: 105 | - configmaps 106 | - events 107 | - persistentvolumeclaims 108 | - secrets 109 | - serviceaccounts 110 | - services 111 | verbs: 112 | - create 113 | - get 114 | - list 115 | - patch 116 | - update 117 | - watch 118 | - apiGroups: 119 | - apps 120 | resources: 121 | - daemonsets 122 | - deployments 123 | - replicasets 124 | - statefulsets 125 | verbs: 126 | - create 127 | - get 128 | - list 129 | - patch 130 | - update 131 | - watch 132 | - apiGroups: 133 | - rbac.authorization.k8s.io 134 | resources: 135 | - roles 136 | - rolebindings 137 | verbs: 138 | - create 139 | - get 140 | - list 141 | - patch 142 | - update 143 | - apiGroups: 144 | - sql.arcdata.microsoft.com 145 | - tasks.sql.arcdata.microsoft.com 146 | - tasks.arcdata.microsoft.com 147 | - arcdata.microsoft.com 148 | resources: 149 | - "*" 150 | verbs: 151 | - create 152 | - get 153 | - list 154 | - watch 155 | - patch 156 | - update 157 | - apiGroups: 158 | - clusterconfig.azure.com 159 | resources: 160 | - azureclusteridentityrequests 161 | verbs: 162 | - create 163 | - delete 164 | - get 165 | - apiGroups: 166 | - clusterconfig.azure.com 167 | resources: 168 | - azureclusteridentityrequests/status 169 | verbs: 170 | - patch 171 | - update 172 | --- 173 | apiVersion: rbac.authorization.k8s.io/v1 174 | kind: ClusterRole 175 | metadata: 176 | name: arc:cr-deployer 177 | rules: 178 | - apiGroups: 179 | - apiextensions.k8s.io 180 | resources: 181 | - customresourcedefinitions 182 | verbs: 183 | - create 184 | - list 185 | - get 186 | - watch 187 | - apiGroups: 188 | - apiextensions.k8s.io 189 | resourceNames: 190 | - activedirectoryconnectors.arcdata.microsoft.com 191 | - sqlmanagedinstancemonitoringprofiles.arcdata.microsoft.com 192 | - datacontrollers.arcdata.microsoft.com 193 | - exporttasks.tasks.arcdata.microsoft.com 194 
| - failovergroups.sql.arcdata.microsoft.com 195 | - kafkas.arcdata.microsoft.com 196 | - monitors.arcdata.microsoft.com 197 | - telemetrycollectors.arcdata.microsoft.com 198 | - postgresqls.arcdata.microsoft.com 199 | - sqlmanagedinstancerestoretasks.tasks.sql.arcdata.microsoft.com 200 | - sqlmanagedinstances.sql.arcdata.microsoft.com 201 | - telemetryrouters.arcdata.microsoft.com 202 | resources: 203 | - customresourcedefinitions 204 | verbs: 205 | - update 206 | - patch 207 | - apiGroups: 208 | - admissionregistration.k8s.io 209 | resources: 210 | - mutatingwebhookconfigurations 211 | verbs: 212 | - create 213 | - apiGroups: 214 | - admissionregistration.k8s.io 215 | resources: 216 | - mutatingwebhookconfigurations 217 | verbs: 218 | - delete 219 | - get 220 | - patch 221 | - apiGroups: 222 | - rbac.authorization.k8s.io 223 | resources: 224 | - clusterroles 225 | verbs: 226 | - create 227 | - patch 228 | - get 229 | - apiGroups: 230 | - rbac.authorization.k8s.io 231 | resources: 232 | - clusterroles 233 | verbs: 234 | - delete 235 | - apiGroups: 236 | - rbac.authorization.k8s.io 237 | resources: 238 | - clusterrolebindings 239 | verbs: 240 | - create 241 | - patch 242 | - get 243 | - apiGroups: 244 | - rbac.authorization.k8s.io 245 | resources: 246 | - clusterrolebindings 247 | verbs: 248 | - delete 249 | - apiGroups: 250 | - "" 251 | resources: 252 | - namespaces 253 | verbs: 254 | - create 255 | - list 256 | - apiGroups: 257 | - "" 258 | resources: 259 | - namespaces 260 | verbs: 261 | - get 262 | - patch 263 | - apiGroups: 264 | - "" 265 | resources: 266 | - nodes/stats 267 | - nodes/proxy 268 | - pods 269 | verbs: 270 | - list 271 | - get 272 | 273 | --- 274 | 275 | apiVersion: v1 276 | kind: ServiceAccount 277 | metadata: 278 | name: sa-arcdata-deployer 279 | --- 280 | apiVersion: rbac.authorization.k8s.io/v1 281 | kind: RoleBinding 282 | metadata: 283 | name: bootstrapper-grantor-role-binding 284 | subjects: 285 | - kind: ServiceAccount 286 | name: sa-arcdata-deployer 287 | roleRef: 288 | apiGroup: rbac.authorization.k8s.io 289 | kind: Role 290 | name: bootstrapper-grantor-role 291 | 292 | --- 293 | 294 | apiVersion: rbac.authorization.k8s.io/v1 295 | kind: RoleBinding 296 | metadata: 297 | name: arcdata-deployer-role-binding 298 | subjects: 299 | - kind: ServiceAccount 300 | name: sa-arcdata-deployer 301 | roleRef: 302 | apiGroup: rbac.authorization.k8s.io 303 | kind: Role 304 | name: arcdata-deployer-role 305 | --- 306 | apiVersion: batch/v1 307 | kind: Job 308 | metadata: 309 | name: arc-bootstrapper-job 310 | spec: 311 | template: 312 | spec: 313 | nodeSelector: 314 | kubernetes.io/os: linux 315 | containers: 316 | - name: bootstrapper 317 | image: mcr.microsoft.com/arcdata/arc-bootstrapper:v1.13.0_2022-11-08 318 | imagePullPolicy: IfNotPresent 319 | args: 320 | - -image 321 | - mcr.microsoft.com/arcdata/arc-bootstrapper:v1.13.0_2022-11-08 322 | - -policy 323 | - IfNotPresent 324 | - -chart 325 | - /opt/helm/arcdataservices 326 | - -bootstrap 327 | command: 328 | - /opt/bootstrapper/bin/bootstrapper 329 | imagePullSecrets: 330 | - name: arc-private-registry 331 | restartPolicy: Never 332 | serviceAccountName: sa-arcdata-deployer 333 | ttlSecondsAfterFinished: 86400 #24 hours 334 | backoffLimit: 0 -------------------------------------------------------------------------------- /Arc/Arc with kubectl/controller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: arcdata.microsoft.com/v5 2 | kind: DataController 3 | metadata: 4 | 
name: arc-dc 5 | spec: 6 | credentials: 7 | dockerRegistry: arc-private-registry 8 | serviceAccount: sa-arc-controller 9 | docker: 10 | imagePullPolicy: IfNotPresent 11 | imageTag: v1.13.0_2022-11-08 12 | registry: mcr.microsoft.com 13 | repository: arcdata 14 | infrastructure: onpremises 15 | security: 16 | allowDumps: true 17 | allowNodeMetricsCollection: true 18 | allowPodMetricsCollection: true 19 | services: 20 | - name: controller 21 | port: 30080 22 | serviceType: LoadBalancer 23 | settings: 24 | ElasticSearch: 25 | vm.max_map_count: "-1" 26 | azure: 27 | connectionMode: indirect 28 | location: eastus 29 | resourceGroup: ArcDataResources 30 | subscription: 92cc49b9-95ca-4935-86e1-1545d29bc50b 31 | controller: 32 | displayName: arc-dc 33 | enableBilling: true 34 | logs.rotation.days: "7" 35 | logs.rotation.size: "5000" 36 | storage: 37 | data: 38 | accessMode: ReadWriteOnce 39 | className: nfs-storage 40 | size: 15Gi 41 | logs: 42 | accessMode: ReadWriteOnce 43 | className: nfs-storage 44 | size: 10Gi -------------------------------------------------------------------------------- /Arc/Arc with kubectl/sql-mi.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: sql.arcdata.microsoft.com/v7 2 | kind: SqlManagedInstance 3 | metadata: 4 | name: sql-mi-1 5 | spec: 6 | dev: true 7 | licenseType: LicenseIncluded 8 | tier: GeneralPurpose 9 | security: 10 | adminLoginSecret: mi-login-secret 11 | scheduling: 12 | default: 13 | resources: 14 | limits: 15 | cpu: "2" 16 | memory: 4Gi 17 | requests: 18 | cpu: "1" 19 | memory: 2Gi 20 | services: 21 | primary: 22 | type: LoadBalancer 23 | storage: 24 | backups: 25 | volumes: 26 | - className: nfs-storage 27 | size: 5Gi 28 | data: 29 | volumes: 30 | - className: local-storage 31 | size: 5Gi 32 | datalogs: 33 | volumes: 34 | - className: local-storage 35 | size: 5Gi 36 | logs: 37 | volumes: 38 | - className: local-storage 39 | size: 5Gi -------------------------------------------------------------------------------- /Arc/Full Arc Deployment.ps1: -------------------------------------------------------------------------------- 1 | Clear-Host 2 | cd 'C:\ArcDataServices' 3 | $RG="ArcDataRG" 4 | $Subscription=(az account show --query id -o tsv) 5 | $ENV:ACCEPT_EULA='yes' 6 | $ENV:AZDATA_USERNAME='admin' 7 | $ENV:AZDATA_PASSWORD='P@ssw0rdP@ssw0rd' 8 | 9 | az group create -l eastus -n $RG 10 | 11 | kubectl get nodes -o wide 12 | 13 | kubectl get nodes worker-3 -o jsonpath="{range .status.images[*]}{.names[1]}{'\n'}{end}" | grep arcdata 14 | 15 | $TotalSize = 0 16 | ((kubectl get nodes worker-3 -o jsonpath="{range .status.images[*]}{.sizeBytes}{'\t'}{.names[1]}{'\n'}{end}" | grep arcdata).Split("`t") | grep -v mcr).Split("`n") | Foreach { $TotalSize += $_} 17 | [Math]::Round(($TotalSize/1024/1024),2) 18 | 19 | 20 | # We could deploy direct from Portal (requires arc connected k8s!) 
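# Sketch (assumption, not run in this demo): Arc-connecting the cluster first would look
# roughly like this - the cluster name is illustrative.
# az connectedk8s connect --name kubeadm-cluster --resource-group $RG --location eastus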
21 | Start-Process https://portal.azure.com/#create/Microsoft.DataController 22 | 23 | # Deploy DC from Command Line 24 | az arcdata dc create --connectivity-mode Indirect --name arc-dc-kubeadm --k8s-namespace arc ` 25 | --subscription $Subscription ` 26 | -g $RG -l eastus --storage-class local-storage ` 27 | --profile-name azure-arc-kubeadm --infrastructure onpremises --use-k8s 28 | 29 | # Check ADS while running 30 | 31 | # Check the pods that got created 32 | kubectl get pods -n arc 33 | 34 | # Check Status 35 | az arcdata dc status show --k8s-namespace arc --use-k8s 36 | 37 | # Add Controller in ADS 38 | 39 | # Create MI 40 | az sql mi-arc create -n mi-1 --k8s-namespace arc --use-k8s ` 41 | --storage-class-data local-storage ` 42 | --storage-class-datalogs local-storage ` 43 | --storage-class-logs local-storage ` 44 | --cores-limit 1 --cores-request 1 ` 45 | --memory-limit 2Gi --memory-request 2Gi --dev 46 | 47 | az sql mi-arc list --k8s-namespace arc --use-k8s -o table 48 | 49 | kubectl get sqlmi -n arc 50 | 51 | kubectl get pods -n arc -o wide 52 | 53 | kubectl describe pod mi-1-0 -n arc 54 | 55 | # Could deploy as AG using --replicas 2/3 56 | # az sql mi-arc create -n mi-2 --k8s-namespace arc --use-k8s --replicas 3 57 | az sql mi-arc create -n mi-2 --k8s-namespace arc --use-k8s --replicas 2 --dev 58 | 59 | # Could also deploy / resize from ADS and access Grafana/Kibana 60 | $ENV:AZDATA_PASSWORD | Set-Clipboard 61 | 62 | # Backups 63 | # az sql midb-arc restore --managed-instance mi-1 --name BackupDemo --dest-name RestoreDemo --k8s-namespace arc --time $PointInTime --use-k8s 64 | 65 | # Updates: 66 | az arcdata dc list-upgrades -k arc 67 | 68 | # az arcdata dc update 69 | # az arcdata sql mi-arc update 70 | 71 | # Connect to Azure Monitor: 72 | # Create Service Principal 73 | $SP=(az ad sp create-for-rbac --name http://ArcDemoSP --role Contributor| ConvertFrom-Json) 74 | $SP | Out-String | grep -v password 75 | 76 | # Add Role 77 | az role assignment create --assignee $SP.appId --role "Monitoring Metrics Publisher" --scope subscriptions/$Subscription 78 | 79 | # Create Log Analytics Workspace and retrieve it's credentials 80 | $LAWS=(az monitor log-analytics workspace create -g $RG -n ArcLAWS| ConvertFrom-Json) 81 | $LAWSKEYS=(az monitor log-analytics workspace get-shared-keys -g $RG -n ArcLAWS | ConvertFrom-Json) 82 | 83 | # For Direct connected mode: 84 | # Connect the Kubernetes Cluster to Azure (Arc-enabled Kubernetes) 85 | # Enable the Cluster for Custom Locations 86 | # Deploy Custom Location and DC from Portal 87 | 88 | # In indirect connected mode: 89 | 90 | # Store keys 91 | $Env:SPN_AUTHORITY='https://login.microsoftonline.com' 92 | $Env:WORKSPACE_ID=$LAWS.customerId 93 | $Env:WORKSPACE_SHARED_KEY=$LAWSKEYS.primarySharedKey 94 | $Env:SPN_CLIENT_ID=$SP.appId 95 | $Env:SPN_CLIENT_SECRET=$SP.password 96 | $Env:SPN_TENANT_ID=$SP.tenant 97 | $Env:AZDATA_VERIFY_SSL='no' 98 | 99 | # Export our logs and metrics (and usage) 100 | # az arcdata dc export -t usage --path usage.json -k arc --force --use-k8s 101 | az arcdata dc export -t metrics --path metrics.json -k arc --force --use-k8s 102 | az arcdata dc export -t logs --path logs.json -k arc --force --use-k8s 103 | 104 | # Upload the data to Azure - this should be a scheduled job. 
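# Sketch (assumption): the export/upload pair could be wrapped in a small loop and called from a scheduler.
# foreach ($type in 'metrics','logs') {
#     az arcdata dc export -t $type --path "$type.json" -k arc --force --use-k8s
#     az arcdata dc upload --path "$type.json"
# }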
105 | az arcdata dc upload --path metrics.json 106 | az arcdata dc upload --path logs.json 107 | 108 | remove-item *.json 109 | 110 | # Check in portal 111 | Start-Process ("https://portal.azure.com/#@"+ (az account show --query tenantId -o tsv) + "/resource" + (az group show -n $RG --query id -o tsv)) 112 | 113 | # Cleanup when done 114 | kubectl delete namespace arc 115 | az group delete -g $RG --yes 116 | az ad sp delete --id $SP.appId -------------------------------------------------------------------------------- /Arc/Global Azure/Global Azure 2021.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bweissman/code/290f84e1f75bf92831cd845ce5bcb750fd3857eb/Arc/Global Azure/Global Azure 2021.pdf -------------------------------------------------------------------------------- /Arc/PASS Summit 2022 - Arc-enabled Data Services.ps1: -------------------------------------------------------------------------------- 1 | # Making sure we're on the correct cluster 2 | kubectl config use-context kubeadm 3 | 4 | # Set some variables 5 | $Subscription=(az account show --query id -o tsv) 6 | $k8sNamespace="arc" 7 | kubectl config set-context --current --namespace=$k8sNamespace 8 | 9 | # And credentials 10 | $admincredentials = New-Object System.Management.Automation.PSCredential ('arcadmin', (ConvertTo-SecureString -String 'P@ssw0rd' -AsPlainText -Force)) 11 | $ENV:AZDATA_USERNAME="$($admincredentials.UserName)" 12 | $ENV:AZDATA_PASSWORD="$($admincredentials.GetNetworkCredential().Password)" 13 | $ENV:SQLCMDPASSWORD="$($admincredentials.GetNetworkCredential().Password)" 14 | $ENV:ACCEPT_EULA='yes' 15 | $ENV:SQLCMDPASSWORD=$ENV:AZDATA_PASSWORD 16 | 17 | # Pre-Pulling images saves time! 18 | # It's almost 40 GB (for current and previous version) - PER WORKER! 19 | code C:\demo\pre-deploy\pre-pull.ps1 20 | 21 | # We could deploy direct from Portal (requires arc connected k8s!) 
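# Sketch (assumption, not run here): for direct mode the Arc-connected cluster would also need
# the custom locations feature enabled - the cluster name is illustrative.
# az connectedk8s enable-features --name kubeadm-cluster --resource-group PASSPreconRG `
#   --features cluster-connect custom-locations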
22 | # https://learn.microsoft.com/en-us/azure/azure-arc/kubernetes/quickstart-connect-cluster?tabs=azure-cli 23 | Start-Process https://portal.azure.com/#create/Microsoft.DataController 24 | 25 | # Let's stick to indirect for today 26 | # Deploy DC from Command Line 27 | az arcdata dc create --connectivity-mode Indirect --name arc-dc-kubeadm --k8s-namespace $k8sNamespace ` 28 | --subscription $Subscription ` 29 | -g PASSPreconRG -l eastus --storage-class nfs-storage ` 30 | --profile-name azure-arc-kubeadm --infrastructure onpremises --use-k8s 31 | 32 | # Check ADS while running 33 | 34 | # This created a new Namespace for us 35 | kubectl get namespace 36 | 37 | # Check the pods that got created 38 | kubectl get pods -n $k8sNamespace 39 | 40 | # Check Status of the DC 41 | az arcdata dc status show --k8s-namespace $k8sNamespace --use-k8s 42 | 43 | # We can also use kubectl 44 | kubectl get datacontroller -n arc 45 | 46 | # Or edit the controller's settings 47 | kubectl edit datacontroller -n arc 48 | 49 | # Or pre-create our own config before deployment (LoadBalancer, Ports etc.): 50 | az arcdata dc config init --path customarc --force --source azure-arc-kubeadm 51 | code customarc/control.json 52 | 53 | # Add Controller in ADS 54 | 55 | # Create MIs 56 | $gpinstance = "mi-gp" 57 | $bcinstance = "mi-bc" 58 | 59 | # General Purpose 60 | az sql mi-arc create -n $gpinstance --k8s-namespace $k8sNamespace --use-k8s ` 61 | --storage-class-data nfs-storage ` 62 | --storage-class-datalogs nfs-storage ` 63 | --storage-class-logs nfs-storage ` 64 | --storage-class-backups nfs-storage ` 65 | --cores-limit 4 --cores-request 2 ` 66 | --memory-limit 8Gi --memory-request 4Gi ` 67 | --tier GeneralPurpose --dev 68 | 69 | # Check the pods that got created 70 | kubectl get pods -n $k8sNamespace 71 | 72 | # This also provisioned all our PVCs etc 73 | ssh -t "demo@storage" 'ls /srv/exports/volumes/dynamic/' 74 | 75 | # Everything in Arc-enabled Data Services is also Kubernetes native! 76 | kubectl edit sqlmi $gpinstance -n $k8sNamespace 77 | 78 | # Business Critical 79 | az sql mi-arc create --name $bcinstance --k8s-namespace $k8sNamespace ` 80 | --tier BusinessCritical --dev --replicas 3 ` 81 | --cores-limit 8 --cores-request 2 --memory-limit 32Gi --memory-request 8Gi ` 82 | --volume-size-data 20Gi --volume-size-logs 5Gi --volume-size-backups 20Gi ` 83 | --storage-class-data nfs-storage --storage-class-datalogs nfs-storage --storage-class-logs nfs-storage --storage-class-backups nfs-storage ` 84 | --collation Turkish_CI_AS --agent-enabled true --use-k8s 85 | 86 | # We could have added (local!) AD Auth 87 | # https://learn.microsoft.com/en-us/azure/azure-arc/data/deploy-active-directory-sql-managed-instance 88 | 89 | # We now have 2 MIs! 
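# The instances also show up Kubernetes-natively
kubectl get sqlmi -n $k8sNamespace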
90 | az sql mi-arc list --k8s-namespace $k8sNamespace --use-k8s -o table 91 | 92 | # We can scale our Instances - here or in ADS 93 | # az sql mi-arc update --name $gpinstance --cores-limit 8 --cores-request 4 ` 94 | # --memory-limit 16Gi --memory-request 8Gi --k8s-namespace $k8sNamespace --use-k8s 95 | 96 | # Let's restore AdventureWorks to our GP Instance 97 | kubectl cp AdventureWorks2019.bak mi-gp-0:/var/opt/mssql/data/AdventureWorks2019.bak -n $k8sNamespace -c arc-sqlmi 98 | 99 | # We can see the file 100 | kubectl exec mi-gp-0 -n $k8sNamespace -c arc-sqlmi -- ls -l /var/opt/mssql/data/AdventureWorks2019.bak 101 | 102 | # We could restore in the Pod - or using the Instance's Endpoint 103 | kubectl get sqlmi $gpinstance -n $k8sNamespace 104 | $SQLEndpoint=(kubectl get sqlmi $gpinstance -n $k8sNamespace -o jsonpath='{ .status.primaryEndpoint }') 105 | 106 | # No AdventureWorks 107 | sqlcmd -S $SQLEndpoint -U $ENV:AZDATA_USERNAME -Q "SELECT Name FROM sys.Databases" 108 | 109 | # Restore 110 | sqlcmd -S $SQLEndpoint -U $ENV:AZDATA_USERNAME -Q "RESTORE DATABASE AdventureWorks2019 FROM DISK = N'/var/opt/mssql/data/AdventureWorks2019.bak' WITH MOVE 'AdventureWorks2017' TO '/var/opt/mssql/data/AdventureWorks2019.mdf', MOVE 'AdventureWorks2017_Log' TO '/var/opt/mssql/data/AdventureWorks2019_Log.ldf'" 111 | 112 | # Tadaaaaaa 113 | sqlcmd -S $SQLEndpoint -U $ENV:AZDATA_USERNAME -Q "SELECT Name FROM sys.Databases" 114 | 115 | # We can add, managed, monitor and query those from ADS or through Grafana/Kibana 116 | 117 | # Or we use a TelemetryRouter 118 | # https://learn.microsoft.com/en-us/azure/azure-arc/data/deploy-telemetry-router 119 | 120 | # All this has full built-in HA through k8s and also MI when in BC tier 121 | # General purpose - HA is provided by k8s 122 | # Verify HA 123 | kubectl get pods --namespace $k8sNamespace -l app.kubernetes.io/instance=mi-gp 124 | # Delete primary 125 | kubectl delete pod mi-gp-0 --namespace $k8sNamespace 126 | kubectl get pods --namespace $k8sNamespace -l app.kubernetes.io/instance=mi-gp -w 127 | sqlcmd -S $SQLEndpoint -U $ENV:AZDATA_USERNAME -Q "SELECT Name FROM sys.Databases" 128 | 129 | # Business criticial - HA is an AG 130 | # Determine which Pod is primary 131 | for ($i=0; $i -le 2; $i++){ 132 | kubectl get pod ("$($bcinstance)-$i") -n $k8sNamespace -o jsonpath="{.metadata.labels}" | ConvertFrom-Json | grep -v controller | grep -v app | grep -v arc-resource | grep -v -e '^$' 133 | } 134 | 135 | # Delete a Pod 136 | kubectl delete pod mi-bc-0 -n $k8sNamespace 137 | kubectl get pods -n $k8sNamespace -l app.kubernetes.io/instance=mi-bc 138 | 139 | # Determine which is primary now 140 | for ($i=0; $i -le 2; $i++){ 141 | kubectl get pod ("$($bcinstance)-$i") -n $k8sNamespace -o jsonpath="{.metadata.labels}" | ConvertFrom-Json | grep -v controller | grep -v app | grep -v arc-resource | grep -v -e '^$' 142 | } 143 | 144 | # And we can query this immediately! 
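# (The BC availability group should fail over to a surviving replica; the external endpoint stays the same,
# so re-reading it below is mainly for convenience.)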
145 | $SQLEndpoint_BC=(kubectl get sqlmi $bcinstance -n $k8sNamespace -o jsonpath='{ .status.primaryEndpoint }') 146 | sqlcmd -S $SQLEndpoint_BC -U $ENV:AZDATA_USERNAME -Q "SELECT Name FROM sys.Databases" 147 | 148 | # If things go wrong, you can re-provision individual replicas: 149 | # az sql mi-arc reprovision-replica -n -k --use-k8s 150 | 151 | # Upgrades 152 | # Check versions 153 | az arcdata dc list-upgrades -k $k8sNamespace 154 | 155 | $SQLEndpoint=(kubectl get sqlmi $gpinstance -n $k8sNamespace -o jsonpath='{ .status.primaryEndpoint }') 156 | sqlcmd -S $SQLEndpoint -U $ENV:AZDATA_USERNAME -Q "SELECT @@version" 157 | 158 | az sql mi-arc show -n $gpinstance --use-k8s --k8s-namespace $k8sNamespace | ConvertFrom-Json 159 | # az sql mi-arc upgrade -n $gpinstance --use-k8s --k8s-namespace $k8sNamespace 160 | 161 | # Backup / Restore 162 | 163 | $PointInTime=(Get-Date).AddSeconds(-120).ToString("yyyy-MM-ddTHH:mm:ssZ") 164 | $PointInTime 165 | 166 | # Lets modify some data... 167 | sqlcmd -S $SQLEndpoint -U $ENV:AZDATA_USERNAME -Q "Update adventureworks2019.person.person set Lastname = 'Weissman',Firstname='Ben'" 168 | 169 | # ooops - that was dumb 170 | # Let's fix it 171 | $PointInTime=(Get-Date).AddSeconds(-120).ToString("yyyy-MM-ddTHH:mm:ssZ") 172 | $PointInTime 173 | az sql midb-arc restore --managed-instance $gpinstance --name AdventureWorks2019 --dest-name AdventureWorks2019_Restore ` 174 | --k8s-namespace arc --time $PointInTime --use-k8s 175 | 176 | # And: 177 | sqlcmd -S $SQLEndpoint -U $ENV:AZDATA_USERNAME -Q "SELECT TOP 3 Firstname,lastname from adventureworks2019.person.person" 178 | sqlcmd -S $SQLEndpoint -U $ENV:AZDATA_USERNAME -Q "SELECT TOP 3 Firstname,lastname from adventureworks2019_restore.person.person" 179 | 180 | # No Differential, Log or any other manual backups 181 | 182 | 183 | # Connect to Azure Monitor: 184 | # Create Service Principal 185 | $SP=(az ad sp create-for-rbac --name http://PASSArcDemoSP --role Contributor --scope subscriptions/$Subscription| ConvertFrom-Json) 186 | 187 | # Add Role 188 | az role assignment create --assignee $SP.appId --role "Monitoring Metrics Publisher" --scope subscriptions/$Subscription 189 | 190 | # Grab our LAWS ID and credentials again 191 | $LAWS=(az monitor log-analytics workspace create -g PASSPreconRG -n ArcLAWS| ConvertFrom-Json) 192 | $LAWSKEYS=(az monitor log-analytics workspace get-shared-keys -g PASSPreconRG -n ArcLAWS | ConvertFrom-Json) 193 | 194 | # For Direct connected mode: 195 | # Connect the Kubernetes Cluster to Azure (Arc-enabled Kubernetes) 196 | # Enable the Cluster for Custom Locations 197 | # Deploy Custom Location and DC from Portal 198 | 199 | # In indirect connected mode: 200 | 201 | # Store keys 202 | $Env:SPN_AUTHORITY='https://login.microsoftonline.com' 203 | $Env:WORKSPACE_ID=$LAWS.customerId 204 | $Env:WORKSPACE_SHARED_KEY=$LAWSKEYS.primarySharedKey 205 | $Env:SPN_CLIENT_ID=$SP.appId 206 | $Env:SPN_CLIENT_SECRET=$SP.password 207 | $Env:SPN_TENANT_ID=$SP.tenant 208 | $Env:AZDATA_VERIFY_SSL='no' 209 | 210 | # Export our logs and metrics (and usage) 211 | # az arcdata dc export -t usage --path usage.json -k $k8sNamespace --force --use-k8s 212 | az arcdata dc export -t metrics --path metrics.json -k $k8sNamespace --force --use-k8s 213 | az arcdata dc export -t logs --path logs.json -k $k8sNamespace --force --use-k8s 214 | 215 | # Upload the data to Azure - this should be a scheduled job. 
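# (One way to schedule that on the jump box - a sketch only: task name, time and script path are placeholders,
#  and the export/upload commands from this demo would live in that script.)
# $Action  = New-ScheduledTaskAction -Execute 'pwsh.exe' -Argument '-File C:\Scripts\Export-ArcDcTelemetry.ps1'
# $Trigger = New-ScheduledTaskTrigger -Daily -At 2am
# Register-ScheduledTask -TaskName 'ArcDcTelemetryUpload' -Action $Action -Trigger $Trigger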
216 | # az arcdata dc upload --path usage.json 217 | az arcdata dc upload --path metrics.json 218 | az arcdata dc upload --path logs.json 219 | 220 | remove-item *.json 221 | 222 | # Check in portal 223 | Start-Process ("https://portal.azure.com/#@"+ (az account show --query tenantId -o tsv) + "/resource" + (az group show -n PASSPreconRG --query id -o tsv)) 224 | -------------------------------------------------------------------------------- /Arc/SQL Sat Vienna 2021/AzureArc - SQL Sat Vienna.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bweissman/code/290f84e1f75bf92831cd845ce5bcb750fd3857eb/Arc/SQL Sat Vienna 2021/AzureArc - SQL Sat Vienna.pdf -------------------------------------------------------------------------------- /Arc/SQLBits 2023 - Azure Arc in 50 Minutes.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bweissman/code/290f84e1f75bf92831cd845ce5bcb750fd3857eb/Arc/SQLBits 2023 - Azure Arc in 50 Minutes.pdf -------------------------------------------------------------------------------- /Arc/The Future of SQL is Hybrid.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bweissman/code/290f84e1f75bf92831cd845ce5bcb750fd3857eb/Arc/The Future of SQL is Hybrid.pdf -------------------------------------------------------------------------------- /BDC/Covid/02. Covid_Spark_Population.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "kernelspec": { 4 | "name": "pysparkkernel", 5 | "display_name": "PySpark" 6 | }, 7 | "language_info": { 8 | "name": "pyspark", 9 | "mimetype": "text/x-python", 10 | "codemirror_mode": { 11 | "name": "python", 12 | "version": 2 13 | }, 14 | "pygments_lexer": "python2" 15 | } 16 | }, 17 | "nbformat_minor": 2, 18 | "nbformat": 4, 19 | "cells": [ 20 | { 21 | "cell_type": "markdown", 22 | "source": [ 23 | "https://github.com/datasets/population/tree/master/data\r\n", 24 | "" 25 | ], 26 | "metadata": { 27 | "azdata_cell_guid": "49f3664f-fca3-407f-8011-fbde655e1a72" 28 | } 29 | }, 30 | { 31 | "cell_type": "code", 32 | "source": [ 33 | "from pyspark.sql.types import *\r\n", 34 | "import pandas as pd\r\n", 35 | "df = pd.read_csv(\"https://raw.githubusercontent.com/datasets/population/master/data/population.csv\")\r\n", 36 | "df = df.drop(columns=['Country Code'])\r\n", 37 | "df_spark = spark.createDataFrame(df).filter(\"Year == 2018\")\r\n", 38 | "df_spark = df_spark.drop(\"Year\")\r\n", 39 | "df_spark.write.format('csv').option('header',True).mode('overwrite').save('/covid/csv/population')\r\n", 40 | "df_spark.sort([\"Value\"], ascending=False).show(5)" 41 | ], 42 | "metadata": { 43 | "azdata_cell_guid": "18768c22-9a44-4656-b42f-7e5666cb4495", 44 | "tags": [] 45 | }, 46 | "outputs": [], 47 | "execution_count": null 48 | } 49 | ] 50 | } -------------------------------------------------------------------------------- /BDC/Covid/03. 
Covid_Spark_Cases.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "kernelspec": { 4 | "name": "pysparkkernel", 5 | "display_name": "PySpark" 6 | }, 7 | "language_info": { 8 | "name": "pyspark", 9 | "mimetype": "text/x-python", 10 | "codemirror_mode": { 11 | "name": "python", 12 | "version": 2 13 | }, 14 | "pygments_lexer": "python2" 15 | } 16 | }, 17 | "nbformat_minor": 2, 18 | "nbformat": 4, 19 | "cells": [ 20 | { 21 | "cell_type": "markdown", 22 | "source": [ 23 | "https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_time_series\r\n", 24 | "" 25 | ], 26 | "metadata": { 27 | "azdata_cell_guid": "49f3664f-fca3-407f-8011-fbde655e1a72" 28 | } 29 | }, 30 | { 31 | "cell_type": "code", 32 | "source": [ 33 | "baseurl = \"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_\"\r\n", 34 | "from pyspark.sql.types import *\r\n", 35 | "import pandas as pd" 36 | ], 37 | "metadata": { 38 | "azdata_cell_guid": "05d49e03-8290-4890-9c0b-ab12cc10959e", 39 | "tags": [] 40 | }, 41 | "outputs": [], 42 | "execution_count": null 43 | }, 44 | { 45 | "cell_type": "code", 46 | "source": [ 47 | "def saveFile(filetype):\r\n", 48 | " df = pd.read_csv(baseurl + filetype + \"_global.csv\")\r\n", 49 | " df = df.drop(columns=['Lat','Long'])\r\n", 50 | " df_unpivoted = df.melt(id_vars=['Province/State','Country/Region'], var_name='Date', value_name=filetype)\r\n", 51 | " df_unpivoted['Date'] = pd.to_datetime(df_unpivoted['Date'], format=\"%m/%d/%y\")\r\n", 52 | " df_schema = StructType([ StructField(\"Province\", StringType(), True),StructField(\"Country\", StringType(), True),StructField(\"Date\", DateType(), True),StructField(filetype, IntegerType(), True)])\r\n", 53 | " df_spark = spark.createDataFrame(df_unpivoted,df_schema)\r\n", 54 | " df_spark.createOrReplaceTempView(\"tmpView\")\r\n", 55 | " df_spark = spark.sql(\"SELECT Province,Country,Date(Date),\" + filetype + \" FROM tmpView\")\r\n", 56 | " df_spark.write.format('csv').option('header',True).mode('overwrite').save('/covid/csv/' + filetype)\r\n", 57 | " df_spark.write.format('parquet').mode('overwrite').saveAsTable(filetype,path='/covid/parquet/' + filetype)\r\n", 58 | " df_spark.sort([\"Date\",filetype], ascending=False).show(5)" 59 | ], 60 | "metadata": { 61 | "azdata_cell_guid": "a8175f60-31bc-49cf-9b8e-6688bf45ad6c", 62 | "tags": [] 63 | }, 64 | "outputs": [], 65 | "execution_count": null 66 | }, 67 | { 68 | "cell_type": "code", 69 | "source": [ 70 | "saveFile(\"recovered\")\r\n", 71 | "saveFile(\"deaths\")\r\n", 72 | "saveFile(\"confirmed\")" 73 | ], 74 | "metadata": { 75 | "azdata_cell_guid": "fbfe6a88-eee6-48e6-a4bf-8e6b00da5272", 76 | "tags": [] 77 | }, 78 | "outputs": [], 79 | "execution_count": null 80 | } 81 | ] 82 | } -------------------------------------------------------------------------------- /BDC/Covid/04. 
Covid_SQL.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "kernelspec": { 4 | "name": "SQL", 5 | "display_name": "SQL", 6 | "language": "sql" 7 | }, 8 | "language_info": { 9 | "name": "sql", 10 | "version": "" 11 | } 12 | }, 13 | "nbformat_minor": 2, 14 | "nbformat": 4, 15 | "cells": [ 16 | { 17 | "cell_type": "code", 18 | "source": [ 19 | "USE COVID" 20 | ], 21 | "metadata": { 22 | "azdata_cell_guid": "9741c6e4-b15b-4d3b-b37e-5d281f50ddaf" 23 | }, 24 | "outputs": [], 25 | "execution_count": null 26 | }, 27 | { 28 | "cell_type": "code", 29 | "source": [ 30 | "SELECT TOP 10 * FROM sp_parquet.covid_recovered ORDER BY DATE DESC" 31 | ], 32 | "metadata": { 33 | "azdata_cell_guid": "d4ee016d-a95d-41f4-8113-5dee1e7a937b" 34 | }, 35 | "outputs": [], 36 | "execution_count": null 37 | }, 38 | { 39 | "cell_type": "code", 40 | "source": [ 41 | "TRUNCATE TABLE dp.Covid_Development\r\n", 42 | "INSERT INTO dp.Covid_Development\r\n", 43 | "SELECT C.Country, c.[DATE] DTE, CONVERT(NVARCHAR(10), c.DATE, 103) DTE_Str, SUM(Confirmed) Confirmed,\r\n", 44 | " SUM(ISNULL(Recovered, 0)) Recovered, SUM(ISNULL(deaths, 0)) Deaths, SUM(Confirmed)-SUM(ISNULL(deaths, 0))-SUM(ISNULL(Recovered, 0)) Active\r\n", 45 | "FROM sp_parquet.covid_confirmed c\r\n", 46 | " LEFT JOIN sp_parquet.covid_recovered r ON C.DATE=r.DATE AND c.country=r.country AND c.province=r.province\r\n", 47 | " LEFT JOIN sp_parquet.covid_deaths d ON C.DATE=d.DATE AND c.country=d.country AND c.province=d.province\r\n", 48 | "GROUP BY C.Country, c.[DATE]" 49 | ], 50 | "metadata": { 51 | "azdata_cell_guid": "a35b8663-6047-4763-875d-7c68674f26a5", 52 | "tags": [] 53 | }, 54 | "outputs": [], 55 | "execution_count": null 56 | }, 57 | { 58 | "cell_type": "code", 59 | "source": [ 60 | "SELECT count(*) FROM [dp].[Covid_Development]" 61 | ], 62 | "metadata": { 63 | "azdata_cell_guid": "ff903de1-99a2-4e54-9b33-0dc848f806e4" 64 | }, 65 | "outputs": [], 66 | "execution_count": null 67 | }, 68 | { 69 | "cell_type": "code", 70 | "source": [ 71 | "EXEC ('USE [Covid]; SELECT count(*) FROM dp.Covid_Development') AT Data_Source SqlDataPool" 72 | ], 73 | "metadata": { 74 | "azdata_cell_guid": "664a14d5-bbef-431d-87d3-9508b25cd987" 75 | }, 76 | "outputs": [], 77 | "execution_count": null 78 | }, 79 | { 80 | "cell_type": "code", 81 | "source": [ 82 | "SELECT Top 10 * FROM dp.Covid_Development WHERE country = 'Germany' order by dte desc" 83 | ], 84 | "metadata": { 85 | "azdata_cell_guid": "c6b5e564-d1f1-4efc-9b4c-21ae63471663" 86 | }, 87 | "outputs": [], 88 | "execution_count": null 89 | }, 90 | { 91 | "cell_type": "code", 92 | "source": [ 93 | "SELECT TOP 10 * FROM sp_csv.population" 94 | ], 95 | "metadata": { 96 | "azdata_cell_guid": "37ac393f-02ed-4a58-8375-a8b531c92fb5" 97 | }, 98 | "outputs": [], 99 | "execution_count": null 100 | }, 101 | { 102 | "cell_type": "code", 103 | "source": [ 104 | "SELECT DISTINCT Country\r\n", 105 | "FROM dp.Covid_Development\r\n", 106 | "WHERE NOT country IN(SELECT country FROM sp_csv.population)" 107 | ], 108 | "metadata": { 109 | "azdata_cell_guid": "fb9c0d97-1e3e-43d1-8cc0-e13b8810b257" 110 | }, 111 | "outputs": [], 112 | "execution_count": null 113 | }, 114 | { 115 | "cell_type": "code", 116 | "source": [ 117 | "SELECT TOP 10 * FROM mi.Mapping_Country" 118 | ], 119 | "metadata": { 120 | "azdata_cell_guid": "95748743-7d41-4d4d-8174-9a3b81fea5c8" 121 | }, 122 | "outputs": [], 123 | "execution_count": null 124 | }, 125 | { 126 | "cell_type": "code", 127 | "source": [ 128 | 
"SELECT TOP 3 *\r\n", 129 | "FROM mi.Mapping_Country\r\n", 130 | "WHERE Ctry_Covid<>Ctry_Population\r\n", 131 | "ORDER BY ctry_covid" 132 | ], 133 | "metadata": { 134 | "azdata_cell_guid": "b978b5f2-a7b7-47c6-9370-057a691b7e6a" 135 | }, 136 | "outputs": [], 137 | "execution_count": null 138 | }, 139 | { 140 | "cell_type": "code", 141 | "source": [ 142 | "SELECT *\r\n", 143 | "FROM(SELECT Dte, a.country, population, confirmed, active, active * 1000000 / population Active_PerMillion\r\n", 144 | " FROM dp.Covid_Development a\r\n", 145 | " INNER JOIN mi.Mapping_Country b ON a.country=b.ctry_covid\r\n", 146 | " INNER JOIN sp_csv.population c ON c.country=b.Ctry_Population\r\n", 147 | " WHERE dte IN (SELECT MAX(Date)FROM sp_parquet.covid_confirmed) and active > 0) a\r\n", 148 | "ORDER BY Active_PerMillion DESC" 149 | ], 150 | "metadata": { 151 | "azdata_cell_guid": "ad804737-44da-49cd-a7bc-ed1986ef062e" 152 | }, 153 | "outputs": [], 154 | "execution_count": null 155 | } 156 | ] 157 | } -------------------------------------------------------------------------------- /BDC/Covid/README.md: -------------------------------------------------------------------------------- 1 | # Big Data Clusters 2 | ## Demo Notebooks 3 | ### COVID-19 4 | 5 | Download Population Data from GitHub 6 | Download current COVID cases from GitHub 7 | Store both in storage pool, push transformed COVID Data to data pool 8 | run combined query across all pools -------------------------------------------------------------------------------- /BDC/Garmin/01. Garmin_Spark.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "kernelspec": { 4 | "name": "pysparkkernel", 5 | "display_name": "PySpark" 6 | }, 7 | "language_info": { 8 | "name": "pyspark", 9 | "mimetype": "text/x-python", 10 | "codemirror_mode": { 11 | "name": "python", 12 | "version": 2 13 | }, 14 | "pygments_lexer": "python2" 15 | } 16 | }, 17 | "nbformat_minor": 2, 18 | "nbformat": 4, 19 | "cells": [ 20 | { 21 | "cell_type": "markdown", 22 | "source": [ 23 | "https://pypi.org/project/garminconnect/" 24 | ], 25 | "metadata": { 26 | "azdata_cell_guid": "a94f3f90-960c-45f8-a743-95d27be0031f" 27 | } 28 | }, 29 | { 30 | "cell_type": "code", 31 | "source": [ 32 | "import pandas as pd\r\n", 33 | "import json" 34 | ], 35 | "metadata": { 36 | "azdata_cell_guid": "912a94fb-5366-48c2-92a5-ed210b8cb7bf", 37 | "tags": [ 38 | "hide_input" 39 | ] 40 | }, 41 | "outputs": [], 42 | "execution_count": null 43 | }, 44 | { 45 | "cell_type": "code", 46 | "source": [ 47 | "import subprocess\r\n", 48 | "stdout = subprocess.check_output(\r\n", 49 | " \"pip3 install garminconnect==0.1.13\",\r\n", 50 | " stderr=subprocess.STDOUT,\r\n", 51 | " shell=True).decode(\"utf-8\")\r\n", 52 | "print(stdout)" 53 | ], 54 | "metadata": { 55 | "azdata_cell_guid": "19b1c5e7-8904-4b95-8efe-bd36d5594a04", 56 | "tags": [] 57 | }, 58 | "outputs": [], 59 | "execution_count": null 60 | }, 61 | { 62 | "cell_type": "code", 63 | "source": [ 64 | "from garminconnect import Garmin" 65 | ], 66 | "metadata": { 67 | "azdata_cell_guid": "f026c2de-e816-4b15-9ae2-1ea87f8794b7", 68 | "tags": [] 69 | }, 70 | "outputs": [], 71 | "execution_count": null 72 | }, 73 | { 74 | "cell_type": "code", 75 | "source": [ 76 | "# Set Username and PW here!\r\n", 77 | "Garmin_User = ''\r\n", 78 | "Garmin_PW = ''" 79 | ], 80 | "metadata": { 81 | "azdata_cell_guid": "5df203a4-c8ab-4d9f-ab98-ea281cd223ac", 82 | "tags": [] 83 | }, 84 | "outputs": [], 85 | "execution_count": null 86 | }, 
87 | { 88 | "cell_type": "code", 89 | "source": [ 90 | "client = Garmin(Garmin_User, Garmin_PW)\r\n", 91 | "client.login()" 92 | ], 93 | "metadata": { 94 | "azdata_cell_guid": "343b94cb-455e-4725-8cc8-c63569fd18c9", 95 | "tags": [] 96 | }, 97 | "outputs": [], 98 | "execution_count": null 99 | }, 100 | { 101 | "cell_type": "code", 102 | "source": [ 103 | "activities = json.dumps(client.get_activities(0,10000))\r\n", 104 | "df = spark.read.json(sc.parallelize([activities]))\r\n", 105 | "df.createOrReplaceTempView(\"tmpView\")\r\n", 106 | "df = spark.sql(\"SELECT ownerFullName Runner,Date(startTimeGMT) DTE,String(activityName) Activity,round(distance,2) Distance,round(movingDuration,2) Duration FROM tmpView where String(activityType) like '%running%'\")\r\n", 107 | "df.show(5)\r\n", 108 | "df.write.format('parquet').mode('overwrite').saveAsTable('garmin',path='/running/parquet')\r\n", 109 | "df.write.format('csv').mode('overwrite').saveAsTable('garmin',path='/running/csv')" 110 | ], 111 | "metadata": { 112 | "azdata_cell_guid": "4035fd01-2dcb-436c-8be5-270857ff241a", 113 | "tags": [] 114 | }, 115 | "outputs": [], 116 | "execution_count": null 117 | } 118 | ] 119 | } -------------------------------------------------------------------------------- /BDC/Garmin/02. Garmin_SQL.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "kernelspec": { 4 | "name": "SQL", 5 | "display_name": "SQL", 6 | "language": "sql" 7 | }, 8 | "language_info": { 9 | "name": "sql", 10 | "version": "" 11 | } 12 | }, 13 | "nbformat_minor": 2, 14 | "nbformat": 4, 15 | "cells": [ 16 | { 17 | "cell_type": "code", 18 | "source": [ 19 | "USE master\r\n", 20 | "GO\r\n", 21 | "IF NOT EXISTS(SELECT * FROM sys.databases WHERE name = 'running')\r\n", 22 | " CREATE DATABASE running" 23 | ], 24 | "metadata": { 25 | "azdata_cell_guid": "834e9873-fbe4-4ace-a8b1-601f2f84f09d" 26 | }, 27 | "outputs": [], 28 | "execution_count": null 29 | }, 30 | { 31 | "cell_type": "code", 32 | "source": [ 33 | "USE running" 34 | ], 35 | "metadata": { 36 | "azdata_cell_guid": "a8a9c9d8-d1b2-41f3-a346-19075568a6ac" 37 | }, 38 | "outputs": [], 39 | "execution_count": null 40 | }, 41 | { 42 | "cell_type": "code", 43 | "source": [ 44 | "IF NOT EXISTS(SELECT * FROM sys.external_data_sources WHERE name = 'SqlStoragePool')\r\n", 45 | " CREATE EXTERNAL DATA SOURCE SqlStoragePool\r\n", 46 | " WITH (LOCATION = 'sqlhdfs://controller-svc/default');" 47 | ], 48 | "metadata": { 49 | "azdata_cell_guid": "34469737-1431-4a5b-ad1e-44be6c4f9e0b" 50 | }, 51 | "outputs": [], 52 | "execution_count": null 53 | }, 54 | { 55 | "cell_type": "code", 56 | "source": [ 57 | "IF NOT EXISTS(SELECT * FROM sys.external_file_formats WHERE name = 'parquet_file')\r\n", 58 | " CREATE EXTERNAL FILE FORMAT parquet_file\r\n", 59 | " WITH (\r\n", 60 | " FORMAT_TYPE = PARQUET\r\n", 61 | " );" 62 | ], 63 | "metadata": { 64 | "azdata_cell_guid": "5a58e574-da6a-4411-87ca-9272fe6186a6" 65 | }, 66 | "outputs": [], 67 | "execution_count": null 68 | }, 69 | { 70 | "cell_type": "code", 71 | "source": [ 72 | "IF EXISTS(SELECT * FROM sys.external_tables WHERE name = 'activities')\r\n", 73 | " DROP EXTERNAL TABLE activities" 74 | ], 75 | "metadata": { 76 | "azdata_cell_guid": "2ddc0ddc-be7f-49bd-91a0-8ca82d6fcd8a" 77 | }, 78 | "outputs": [], 79 | "execution_count": null 80 | }, 81 | { 82 | "cell_type": "code", 83 | "source": [ 84 | "IF NOT EXISTS(SELECT * FROM sys.external_tables WHERE name = 'activities')\r\n", 85 | " CREATE EXTERNAL TABLE 
[activities]\r\n", 86 | " (\"Runner\" VARCHAR(50) ,\r\n", 87 | " \"DTE\" Date ,\r\n", 88 | " \"Activity\" VARCHAR(50) COLLATE LATIN1_GENERAL_100_CI_AS_SC_UTF8,\r\n", 89 | " \"Distance\" float ,\r\n", 90 | " \"Duration\" float)\r\n", 91 | " WITH\r\n", 92 | " (\r\n", 93 | " DATA_SOURCE = SqlStoragePool,\r\n", 94 | " LOCATION = '/running/parquet',\r\n", 95 | " FILE_FORMAT = parquet_file\r\n", 96 | " );" 97 | ], 98 | "metadata": { 99 | "azdata_cell_guid": "500d7c09-38c3-49f4-b20f-0702353cd42d" 100 | }, 101 | "outputs": [], 102 | "execution_count": null 103 | }, 104 | { 105 | "cell_type": "code", 106 | "source": [ 107 | "SELECT TOP 5 * from activities" 108 | ], 109 | "metadata": { 110 | "azdata_cell_guid": "548b8601-54ed-4b30-934a-75d5b692e471" 111 | }, 112 | "outputs": [], 113 | "execution_count": null 114 | }, 115 | { 116 | "cell_type": "code", 117 | "source": [ 118 | "SELECT DATEPART(year, DTE)* 100+DATEPART(month, DTE) MT, round(SUM(Distance / 1000),1) KM\r\n", 119 | "FROM activities\r\n", 120 | "WHERE DTE>='01.01.2019'\r\n", 121 | "GROUP BY DATEPART(year, DTE)* 100+DATEPART(month, DTE)\r\n", 122 | "ORDER BY DATEPART(year, DTE)* 100+DATEPART(month, DTE)" 123 | ], 124 | "metadata": { 125 | "azdata_cell_guid": "d4b74545-a599-498c-bf78-f9ebacc8a413", 126 | "tags": [] 127 | }, 128 | "outputs": [], 129 | "execution_count": null 130 | } 131 | ] 132 | } -------------------------------------------------------------------------------- /BDC/Garmin/README.md: -------------------------------------------------------------------------------- 1 | # Big Data Clusters 2 | ## Demo Notebooks 3 | ### Garmin 4 | 5 | Log into your Garmin Account, read your activities, store them in a parquet file and query it using SQL. -------------------------------------------------------------------------------- /BDC/README.md: -------------------------------------------------------------------------------- 1 | # Big Data Clusters 2 | ## Demo Notebooks -------------------------------------------------------------------------------- /BDC/Slides/BDC_Usergroups.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bweissman/code/290f84e1f75bf92831cd845ce5bcb750fd3857eb/BDC/Slides/BDC_Usergroups.pdf -------------------------------------------------------------------------------- /BDC/Slides/README.md: -------------------------------------------------------------------------------- 1 | # Big Data Clusters 2 | ## Slides 3 | 4 | -------------------------------------------------------------------------------- /BDC/Slides/SQLFriday.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bweissman/code/290f84e1f75bf92831cd845ce5bcb750fd3857eb/BDC/Slides/SQLFriday.pdf -------------------------------------------------------------------------------- /Containers - You better get on Board/01-Containers.ps1: -------------------------------------------------------------------------------- 1 | ####################################################################################################################################### 2 | # mostly borrowed from Anthony E. Nocentino :) 3 | ####################################################################################################################################### 4 | 5 | #Set password variable used for sa password for SQL Server 6 | $PASSWORD='S0methingS@Str0ng!' 7 | 8 | #Pull a container, examine layers. 
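#(For example, once an image is local, its layers can be listed - tag below is just one of the images pulled in this demo:)
# docker history mcr.microsoft.com/mssql/server:2022-latest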
9 | docker pull mcr.microsoft.com/mssql/server:2019-latest 10 | docker pull mcr.microsoft.com/mssql/server:2019-CU13-ubuntu-18.04 11 | docker pull mcr.microsoft.com/mssql/server:2019-CU16-ubuntu-20.04 12 | docker pull mcr.microsoft.com/mssql/server:2019-CU17-ubuntu-20.04 13 | docker pull mcr.microsoft.com/mssql/server:2022-latest 14 | 15 | #List all available images in a registry... 16 | curl.exe -sL https://mcr.microsoft.com/v2/mssql/server/tags/list 17 | 18 | 19 | #list of images on this system 20 | docker images | grep sql 21 | 22 | 23 | #Check out the docker image details 24 | docker image inspect mcr.microsoft.com/mssql/server:2019-CU16-ubuntu-20.04 | more 25 | 26 | #Run a container 27 | docker run ` 28 | --env 'ACCEPT_EULA=Y' ` 29 | --env ('MSSQL_SA_PASSWORD=' + $Password) ` 30 | --name 'sql1' ` 31 | --publish 1433:1433 ` 32 | --detach mcr.microsoft.com/mssql/server:2019-CU16-ubuntu-20.04 33 | 34 | #Finding help in docker 35 | docker help run | more 36 | 37 | #Let's read the logs, useful if the container doesn't start up for some reason. 38 | #Most common reason I see, not a complex enough sa password 39 | docker logs sql1 | more 40 | 41 | #List running containers (or use VSCode extension!) 42 | docker ps 43 | 44 | #Access our application 45 | $env:SQLCMDUSER="sa" 46 | $env:SQLCMDPASSWORD=$PASSWORD 47 | 48 | sqlcmd -S localhost -Q 'SELECT @@SERVERNAME' 49 | sqlcmd -S localhost -Q 'SELECT @@VERSION' 50 | 51 | 52 | #Run a second container, new name, new port, same source image 53 | docker run ` 54 | -e 'ACCEPT_EULA=Y' ` 55 | -e ('MSSQL_SA_PASSWORD=' + $Password) ` 56 | --name 'sql2' ` 57 | --hostname 'sql2' ` 58 | -p 1434:1433 ` 59 | -d mcr.microsoft.com/mssql/server:2019-CU16-ubuntu-20.04 60 | 61 | 62 | #List running containers 63 | docker ps 64 | 65 | 66 | #Access our second application, discuss servername, connect to specific port 67 | sqlcmd -S localhost,1434 -Q 'SELECT @@SERVERNAME' 68 | 69 | 70 | #Copy a backup file into the container and set the permissions 71 | Set-Location 'C:\Users\demo\Desktop\Code\Containers - You better get on Board' 72 | docker cp TestDB1.bak sql2:/var/opt/mssql/data 73 | docker exec -u root sql2 chown mssql /var/opt/mssql/data/TestDB1.bak 74 | 75 | 76 | #Restore a database to our container 77 | Get-Content('restore_testdb1.sql') 78 | sqlcmd -S localhost,1434 -i restore_testdb1.sql 79 | 80 | #Connect to the container, start an interactive bash session 81 | docker exec -it sql2 /bin/bash 82 | 83 | #Inside container, check out the uploaded and process listing 84 | ps -aux 85 | ls -la /var/opt/mssql/data 86 | exit 87 | 88 | #Stopping a container 89 | docker stop sql2 90 | 91 | 92 | #List running containers 93 | docker ps 94 | 95 | #List all containers, including stopped containers. Examine the status and the exit code 96 | docker ps -a 97 | 98 | 99 | #Starting a container that's already local. All the parameters from the docker run command persist. 100 | docker start sql2 101 | docker ps 102 | 103 | 104 | #Stop them containers... 105 | docker stop sql1 106 | docker stop sql2 107 | docker ps -a 108 | 109 | #Removing THE Container...THIS WILL DELETE YOUR DATA IN THE CONTAINER 110 | docker rm sql1 111 | docker rm sql2 112 | 113 | 114 | #Even though the containers are gone, we still have the image! 
115 | docker image ls | grep sql 116 | docker ps -a 117 | 118 | 119 | #Persisting data with a Container 120 | #Start up a container with a Data Volume 121 | docker run ` 122 | --env 'ACCEPT_EULA=Y' ` 123 | --env ('MSSQL_SA_PASSWORD=' + $Password) ` 124 | --name 'sql1' ` 125 | --publish 1433:1433 ` 126 | -v sqldata1:/var/opt/mssql ` 127 | --detach mcr.microsoft.com/mssql/server:2019-CU16-ubuntu-20.04 128 | 129 | 130 | #Copy the database into the Container, set the permissions on the backup file and restore it 131 | docker cp TestDB1.bak sql1:/var/opt/mssql/data 132 | docker exec -u root sql1 chown mssql /var/opt/mssql/data/TestDB1.bak 133 | sqlcmd -S localhost -i restore_testdb1.sql 134 | 135 | 136 | #Check out our list of databases 137 | sqlcmd -S localhost -Q 'SELECT name from sys.databases' 138 | sqlcmd -S localhost -Q 'SELECT name, physical_name from sys.master_files' -W 139 | 140 | 141 | #Stop the container then remove it. Which normally would destroy our data..but we're using a volume now. 142 | docker stop sql1 143 | docker rm sql1 144 | 145 | 146 | #List our current volumes 147 | docker volume ls 148 | 149 | 150 | #Dig into the details about our volume 151 | docker volume inspect sqldata1 152 | 153 | 154 | #Start the container back up, using the same data volume. We need docker run since we deleted the container. 155 | docker run ` 156 | --env 'ACCEPT_EULA=Y' ` 157 | --env ('MSSQL_SA_PASSWORD=' + $Password) ` 158 | --name 'sql1' ` 159 | --publish 1433:1433 ` 160 | -v sqldata1:/var/opt/mssql ` 161 | --detach mcr.microsoft.com/mssql/server:2019-CU16-ubuntu-20.04 162 | 163 | #Check out our list of databases...wut? 164 | sqlcmd -S localhost -Q 'SELECT name from sys.databases' 165 | 166 | #stop our container 167 | docker stop sql1 168 | 169 | #delete our container 170 | docker rm sql1 171 | 172 | 173 | #Let's upgrade the container... 174 | docker run ` 175 | --env 'ACCEPT_EULA=Y' ` 176 | --env ('MSSQL_SA_PASSWORD=' + $Password) ` 177 | --name 'sql1' ` 178 | --publish 1433:1433 ` 179 | -v sqldata1:/var/opt/mssql ` 180 | --detach mcr.microsoft.com/mssql/server:2019-CU17-ubuntu-20.04 181 | 182 | 183 | #Let's what the upgrade process 184 | docker logs sql1 --follow 185 | 186 | #Check the version 187 | sqlcmd -S localhost -Q 'SELECT @@VERSION' 188 | 189 | #stop our container 190 | docker stop sql1 191 | 192 | #delete our container 193 | docker rm sql1 194 | 195 | #remove the created volume 196 | #THIS WILL DELETE YOUR DATA! 197 | docker volume rm sqldata1 198 | 199 | #remove an image 200 | #docker rmi mcr.microsoft.com/mssql/server:2019-latest 201 | 202 | #if there's a new image available if you pull again only new containers will be sourced from that image. 203 | #You'll need to create a new container and migrate your data to it. -------------------------------------------------------------------------------- /Containers - You better get on Board/02-Kubernetes.ps1: -------------------------------------------------------------------------------- 1 | # Check out my k8s cluster 2 | kubectl get nodes 3 | 4 | # We'll create a Namespace 5 | kubectl create namespace mssql 6 | kubectl get namespace 7 | kubectl config set-context --current --namespace=mssql 8 | 9 | # It's empty 10 | kubectl get secret,pods,pvc,svc 11 | 12 | # Let's deploy SQL Server 13 | kubectl apply -f SQL.yaml 14 | 15 | # It's all here 16 | kubectl get secret,pods,pvc,svc 17 | 18 | # Can we access it? 
19 | $SQL_IP=(kubectl get svc mssql -o jsonpath='{.status.loadBalancer.ingress[0].ip}') 20 | sqlcmd -S $SQL_IP -Q "SELECT @@Version" 21 | 22 | # Let's check out that password first: 23 | kubectl get secret mssql -o yaml 24 | kubectl get secret mssql -o jsonpath='{.data}' 25 | $EncodedPassword=(kubectl get secret mssql -o jsonpath='{.data}' | convertfrom-json)[0].MSSQL_SA_PASSWORD 26 | $Env:SQLCMDPassword = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($EncodedPassword)) 27 | 28 | # Let's try again: 29 | sqlcmd -S $SQL_IP -Q "SELECT @@Version" 30 | 31 | # This only works if the password hasn't been changed after deployment! 32 | 33 | # What else have we got besides a secret (all from the single Yaml)? 34 | code .\SQL.yaml 35 | 36 | # We got: 37 | kubectl get statefulset 38 | kubectl get pod 39 | kubectl get pvc 40 | kubectl get configmap 41 | kubectl get svc 42 | 43 | # The order doesn't matter! 44 | 45 | # Restore data 46 | kubectl cp .\TestDB1.bak mssql-0:/var/opt/mssql/data/TestDB1.bak 47 | sqlcmd -S $SQL_IP -i restore_testdb1.sql 48 | 49 | sqlcmd -S $SQL_IP -Q "SELECT name from sys.databases" 50 | 51 | # Delete everything 52 | kubectl delete namespace mssql 53 | 54 | # So why k8s? 55 | # Persistent endpoints, external storage, scalability, security... -------------------------------------------------------------------------------- /Containers - You better get on Board/Containers - You better get on board.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bweissman/code/290f84e1f75bf92831cd845ce5bcb750fd3857eb/Containers - You better get on Board/Containers - You better get on board.pdf -------------------------------------------------------------------------------- /Containers - You better get on Board/SQL.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: mssql 5 | spec: 6 | replicas: 1 7 | serviceName: "mssql" 8 | selector: 9 | matchLabels: 10 | app: mssql 11 | template: 12 | metadata: 13 | labels: 14 | app: mssql 15 | spec: 16 | securityContext: 17 | fsGroup: 10001 18 | containers: 19 | - name: mssql 20 | image: 'mcr.microsoft.com/mssql/server:2019-latest' 21 | ports: 22 | - containerPort: 1433 23 | env: 24 | - name: ACCEPT_EULA 25 | value: "Y" 26 | - name: MSSQL_SA_PASSWORD 27 | valueFrom: 28 | secretKeyRef: 29 | name: mssql 30 | key: MSSQL_SA_PASSWORD 31 | - name: MSSQL_PID 32 | value: Evaluation 33 | - name: MSSQL_AGENT_ENABLED 34 | value: "true" 35 | volumeMounts: 36 | - name: mssqldb 37 | mountPath: /var/opt/mssql 38 | - name: data 39 | mountPath: /var/opt/mssql/userdata 40 | - name: logs 41 | mountPath: /var/opt/mssql/userlogs 42 | - name: mssqlconf 43 | mountPath: /var/opt/mssql/mssql.conf 44 | subPath: mssql.conf 45 | volumes: 46 | - name: mssqldb 47 | persistentVolumeClaim: 48 | claimName: sql-ad-main 49 | - name: data 50 | persistentVolumeClaim: 51 | claimName: sql-ad-data 52 | - name: logs 53 | persistentVolumeClaim: 54 | claimName: sql-ad-logs 55 | - name: mssqlconf 56 | configMap: 57 | name: mssqlconf 58 | --- 59 | apiVersion: v1 60 | kind: Service 61 | metadata: 62 | name: mssql 63 | spec: 64 | type: LoadBalancer 65 | ports: 66 | - name: sql 67 | protocol: TCP 68 | port: 1433 69 | targetPort: 1433 70 | selector: 71 | statefulset.kubernetes.io/pod-name: mssql-0 72 | --- 73 | apiVersion: v1 74 | kind: PersistentVolumeClaim 75 | metadata: 76 | name: sql-ad-main 77 | spec: 
78 | storageClassName: nfs-storage 79 | accessModes: 80 | - ReadWriteOnce 81 | resources: 82 | requests: 83 | storage: 35Gi 84 | --- 85 | apiVersion: v1 86 | kind: PersistentVolumeClaim 87 | metadata: 88 | name: sql-ad-data 89 | spec: 90 | storageClassName: nfs-storage 91 | accessModes: 92 | - ReadWriteOnce 93 | resources: 94 | requests: 95 | storage: 35Gi 96 | --- 97 | apiVersion: v1 98 | kind: PersistentVolumeClaim 99 | metadata: 100 | name: sql-ad-logs 101 | spec: 102 | storageClassName: nfs-storage 103 | accessModes: 104 | - ReadWriteOnce 105 | resources: 106 | requests: 107 | storage: 35Gi 108 | --- 109 | apiVersion: v1 110 | kind: ConfigMap 111 | metadata: 112 | name: mssqlconf 113 | data: 114 | mssql.conf: | 115 | [filelocation] 116 | defaultdatadir = /var/opt/mssql/userdata 117 | defaultlogdir = /var/opt/mssql/userlog 118 | --- 119 | apiVersion: v1 120 | data: 121 | MSSQL_SA_PASSWORD: UGFzc3cwcmQ= 122 | kind: Secret 123 | metadata: 124 | name: mssql 125 | type: Opaque -------------------------------------------------------------------------------- /MVP/MVPCheck.ps1: -------------------------------------------------------------------------------- 1 | # Install-module -Name MVP -Scope CurrentUser -Repository PSGallery 2 | # More Details on the module and how to get a subscription key: https://github.com/lazywinadmin/MVP 3 | Import-Module MVP 4 | $SubscriptionKey = '' 5 | $StartDate = "2020-04-01" 6 | $MVPName="Weissman" 7 | Set-MVPConfiguration -SubscriptionKey $SubscriptionKey 8 | $Contributions=Get-MVPContribution -Limit 1000 | Select StartDate,Title,ReferenceUrl,Description | Where {$_.ReferenceUrl -like "http*"} | where {$_.StartDate -gt $StartDate } 9 | foreach ($cont in $Contributions) { 10 | $ContainsName = 0 11 | try { 12 | $res=curl($cont.ReferenceUrl) 13 | $cont.Description = "Name missing" 14 | if ($cont.ReferenceUrl -like "*.sessionize.com*") { $cont.Description = "Name missing (Sessionize)" } 15 | if ($cont.ReferenceUrl -like "*.pdf") { $cont.Description = "Name missing (PDF)" } 16 | if ($res -like "*Looking for PASS or SQLSaturday?*") { $cont.Description = "URL Invalid" } 17 | if ($res -like "*$MVPName*") { $cont.Description = "OK" }} 18 | catch { 19 | $res="" 20 | $cont.Description = "URL Invalid" 21 | } 22 | } 23 | $Contributions | Where {$_.Description -ne "OK"} | Sort-Object Description, StartDate | Format-Table StartDate,Title,ReferenceUrl,Description -------------------------------------------------------------------------------- /MVP/README.md: -------------------------------------------------------------------------------- 1 | # MVP Check 2 | Get a subscription ID (see https://github.com/lazywinadmin/MVP) and set it using the SubscriptionKey variable 3 | 4 | Also adjust the name to look for and the StartDate 5 | 6 | The script will check all your contributions that have a URL, if: 7 | 8 | - The URL is valid 9 | - If your name is on that page (doesn't work for sessionize, PDFs etc.) 10 | 11 | Hope, this is helpful! 
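
To run it, adjust the variables at the top of `MVPCheck.ps1` (the values below are placeholders) and execute the script:

    $SubscriptionKey = '<your MVP API subscription key>'
    $MVPName = 'Weissman'
    $StartDate = "2023-04-01"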
-------------------------------------------------------------------------------- /PASS Essential 07-2019/Code/Essential_072019.bimlproj: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | 7 | 12 | -------------------------------------------------------------------------------- /PASS Essential 07-2019/Code/Essential_072019.mst: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | False 5 | True 6 | True 7 | True 8 | False 9 | 10 | addedBiml 11 | output 12 | documentation 13 | False 14 | 4 15 | True 16 | False 17 | SsisPackages 18 | SqlServer2014 19 | Ssas2014 20 | SsasTabular2016 21 | Ssis2017 22 | True 23 | 24 | True 25 | True 26 | False 27 | False 28 | True 29 | True 30 | 31 | 32 | 33 | 34 | 35 | 36 | 5.0.63009.0 37 | 38 | 39 | 40 | 0 41 | 42 | 43 | 44 | 45 | 0 46 | 47 | 48 | 0 49 | 50 | 51 | 0 52 | 53 | 54 | 0 55 | 56 | 57 | 0 58 | 59 | 60 | 0 61 | 62 | 63 | 0 64 | 65 | 66 | 0 67 | 68 | 69 | -------------------------------------------------------------------------------- /PASS Essential 07-2019/Code/Essential_072019.mst.pkglyt: -------------------------------------------------------------------------------- 1 | 02_Load_Tables 02_Load_Tables.LOAD TABLE AW_Person_Person 5 5 473 710 2 | 02_Load_Tables 02_Load_Tables.LOAD TABLE AW_Person_Person.TRUNCATE TABLE AW_Person_Person 59 5 300 100 3 | 02_Load_Tables 02_Load_Tables.LOAD TABLE AW_Person_Person.LOAD TABLE 5 155 408 405 4 | 02_Load_Tables 02_Load_Tables.LOAD TABLE AW_Person_Person.LOAD TABLE.SRC 48 5 300 100 5 | 02_Load_Tables 02_Load_Tables.LOAD TABLE AW_Person_Person.LOAD TABLE.Dest 48 155 300 100 6 | **********Compact Node Bounds 7 | **********Source Anchors 8 | 02_Load_Tables 02_Load_Tables.LOAD TABLE AW_Person_Person 02_Load_Tables.LOAD TABLE AW_Person_Person.Output 9 | 02_Load_Tables 02_Load_Tables.LOAD TABLE AW_Person_Person.TRUNCATE TABLE AW_Person_Person 02_Load_Tables.LOAD TABLE AW_Person_Person.TRUNCATE TABLE AW_Person_Person.Output 10 | 02_Load_Tables 02_Load_Tables.LOAD TABLE AW_Person_Person.LOAD TABLE.SRC 02_Load_Tables.LOAD TABLE AW_Person_Person.LOAD TABLE.SRC.Output 11 | 02_Load_Tables 02_Load_Tables.LOAD TABLE AW_Person_Person.LOAD TABLE.SRC 02_Load_Tables.LOAD TABLE AW_Person_Person.LOAD TABLE.SRC.Error 12 | 02_Load_Tables 02_Load_Tables.LOAD TABLE AW_Person_Person.LOAD TABLE.Dest 02_Load_Tables.LOAD TABLE AW_Person_Person.LOAD TABLE.Dest.Error 13 | **********Sink Anchors 14 | 02_Load_Tables 02_Load_Tables.LOAD TABLE AW_Person_Person.LOAD TABLE 02_Load_Tables.LOAD TABLE AW_Person_Person.TRUNCATE TABLE AW_Person_Person.Output 15 | 02_Load_Tables 02_Load_Tables.LOAD TABLE AW_Person_Person.LOAD TABLE.Dest 02_Load_Tables.LOAD TABLE AW_Person_Person.LOAD TABLE.SRC.Output 16 | -------------------------------------------------------------------------------- /PASS Essential 07-2019/Code/Essential_072019.v50.muo: -------------------------------------------------------------------------------- 1 | 2 | 3 | FP:addedBiml\Globals\Global1.biml|TV:addedBiml\BimlScripts\010_Connections.biml|FP:Essential_072019.bimlproj|PV:|FP:addedBiml\BimlScripts\090_FullLoad.biml|FP:addedBiml\BimlScripts\040_LoadTables.biml|TV:addedBiml\BimlScripts\040_LoadTables.biml| 4 | [] 5 | 
{"Name":"Essential_072019.mst","IsExpanded":true,"Children":[{"Name":"Relational","IsExpanded":true,"Children":[{"Name":"Connections","IsExpanded":true,"Children":[{"Name":"Source","IsExpanded":false,"Children":null},{"Name":"Target","IsExpanded":false,"Children":null}]},{"Name":"Databases","IsExpanded":true,"Children":[{"Name":"PE_Destination","IsExpanded":false,"Children":null}]},{"Name":"Schemas","IsExpanded":true,"Children":[{"Name":"dbo","IsExpanded":false,"Children":null}]},{"Name":"Tables","IsExpanded":true,"Children":[{"Name":"AW_Person_Address","IsExpanded":true,"Children":null},{"Name":"AW_Person_Person","IsExpanded":true,"Children":null},{"Name":"AW_Person_PersonPhone","IsExpanded":true,"Children":null}]}]},{"Name":"Data Integration","IsExpanded":true,"Children":[{"Name":"Integration Services","IsExpanded":true,"Children":[{"Name":"Projects","IsExpanded":false,"Children":null},{"Name":"Packages","IsExpanded":true,"Children":[{"Name":"01_Create_Tables","IsExpanded":false,"Children":null},{"Name":"02_Load_Tables_AW_Person_Address","IsExpanded":true,"Children":null},{"Name":"02_Load_Tables_AW_Person_Person","IsExpanded":true,"Children":null},{"Name":"02_Load_Tables_AW_Person_PersonPhone","IsExpanded":true,"Children":null},{"Name":"03_Master","IsExpanded":false,"Children":null}]},{"Name":"File Formats","IsExpanded":false,"Children":null},{"Name":"Script Projects","IsExpanded":false,"Children":null}]},{"Name":"Azure Data Factory","IsExpanded":true,"Children":[{"Name":"Data Factories","IsExpanded":false,"Children":null}]}]},{"Name":"Analysis Services","IsExpanded":true,"Children":[{"Name":"Multidimensional","IsExpanded":true,"Children":[{"Name":"Projects","IsExpanded":false,"Children":null},{"Name":"Cubes","IsExpanded":false,"Children":null},{"Name":"Dimensions (0)","IsExpanded":false,"Children":null},{"Name":"Measure Groups (0)","IsExpanded":false,"Children":null}]},{"Name":"Tabular","IsExpanded":true,"Children":[{"Name":"Projects","IsExpanded":false,"Children":null},{"Name":"Tabular Models","IsExpanded":false,"Children":null},{"Name":"Tabular Tables (0)","IsExpanded":false,"Children":null}]}]},{"Name":"Metadata","IsExpanded":true,"Children":[{"Name":"Principals","IsExpanded":false,"Children":null},{"Name":"Metadata","IsExpanded":false,"Children":null}]},{"Name":"Library","IsExpanded":true,"Children":[{"Name":"Global Includes (1)","IsExpanded":true,"Children":[{"Name":"Global1.biml","IsExpanded":false,"Children":null}]},{"Name":"Utilities (1)","IsExpanded":true,"Children":[{"Name":"090_FullLoad.biml","IsExpanded":false,"Children":null}]},{"Name":"Transformers (0)","IsExpanded":true,"Children":null},{"Name":"Frameworks (0)","IsExpanded":false,"Children":null},{"Name":"Build Configurations (1)","IsExpanded":true,"Children":[{"Name":"Essential_072019.bimlproj","IsExpanded":true,"Children":null}]},{"Name":".NET Code (0)","IsExpanded":false,"Children":null},{"Name":"Miscellaneous (0)","IsExpanded":false,"Children":null}]},{"Name":"Documentation","IsExpanded":true,"Children":[{"Name":"Settings (0)","IsExpanded":false,"Children":null},{"Name":"HTML Templates (0)","IsExpanded":false,"Children":null},{"Name":"Schema Graph Profiles (0)","IsExpanded":true,"Children":null}]},{"Name":"Bundles (0)","IsExpanded":false,"Children":null},{"Name":"Broken Live BimlScripts (0)","IsExpanded":true,"Children":null}]} 6 | false 7 | true 8 | false 9 | false 10 | false 11 | 01/01/0001 00:00:00 12 | C:\Program Files (x86)\Microsoft Visual Studio\2017\Enterprise\Common7\IDE\devenv.exe 13 | 14 | 
-------------------------------------------------------------------------------- /PASS Essential 07-2019/Code/addedBiml/BimlScripts/010_Connections.biml: -------------------------------------------------------------------------------- 1 | <#@ template language="VB" optionexplicit="False" tier="10" #> 2 | 3 | 4 | 5 | 7 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | -------------------------------------------------------------------------------- /PASS Essential 07-2019/Code/addedBiml/BimlScripts/020_Tables.biml: -------------------------------------------------------------------------------- 1 | <#@ template language="VB" optionexplicit="False" tier="20" #> 2 | 3 | 4 | <# Dim AW = RootNode.Connections("Source") 5 | Dim Tbls as new List(of String) 6 | MyMeta = ExternalDataAccess.GetDataTable( 7 | RootNode.Connections("Target").RenderedConnectionString, 8 | "Select * from AAA_MetaTables") 9 | 10 | for each dr in MyMeta.rows 11 | Tbls.add(dr("TableName")) 12 | next 13 | Dim IR as ImportResults = AW.GetDatabaseSchema(nothing,tbls,ImportOptions.ExcludeIdentity Or ImportOptions.ExcludePrimaryKey Or ImportOptions.ExcludeUniqueKey Or ImportOptions.ExcludeColumnDefault Or ImportOptions.ExcludeIndex Or ImportOptions.ExcludeCheckConstraint Or ImportOptions.ExcludeForeignKey ) 14 | for each tbl in IR.TableNodes #> 15 | 16 | 17 | <# for each col in tbl.columns #> 18 | <#= col.GetBiml() #> 19 | <# next #> 20 | 21 | 22 | 23 | 24 | 25 | 26 | <#= tbl.SchemaQualifiedName #> 27 | AX 28 | 29 |
30 | <# next #> 31 | 32 |
33 |
34 | -------------------------------------------------------------------------------- /PASS Essential 07-2019/Code/addedBiml/BimlScripts/030_CreateTables.biml: -------------------------------------------------------------------------------- 1 | <#@ template language="VB" optionexplicit="False" tier="30" #> 2 | 3 | 4 | 5 | 6 | <# for each tbl in RootNode.Tables #> 7 | 8 | <#= tbl.GetDropAndCreateDDL() #> 9 | 10 | <# next #> 11 | 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /PASS Essential 07-2019/Code/addedBiml/BimlScripts/040_LoadTables.biml: -------------------------------------------------------------------------------- 1 | <#@ template language="VB" optionexplicit="False" tier="40" #> 2 | 3 | 4 | <# for each tbl in RootNode.Tables #> 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | <#= CallBimlScript("090_FullLoad.biml",tbl) #> 15 | 16 | 17 | true 18 | 19 | 20 | <# next #> 21 | 22 | 23 | -------------------------------------------------------------------------------- /PASS Essential 07-2019/Code/addedBiml/BimlScripts/050_MasterPackage.biml: -------------------------------------------------------------------------------- 1 | <#@ template language="VB" optionexplicit="False" tier="50" #> 2 | 3 | 4 | 5 | 6 | <# for each pack in RootNode.Packages.Where(function(c) c.getTag("IsLoad") = "true") #> 7 | 8 | 9 | 10 | <# next #> 11 | 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /PASS Essential 07-2019/Code/addedBiml/BimlScripts/090_FullLoad.biml: -------------------------------------------------------------------------------- 1 | <#@ template language="VB" optionexplicit="False" designerbimlpath="Biml/Packages/Package/Tasks" #> 2 | <#@ property name="tbl" type="AstTableNode" #> 3 | 4 | 5 | 6 | 7 | TRUNCATE TABLE <#= tbl.SchemaQualifiedName #> 8 | 9 | 10 | 11 | 12 | SELECT * FROM <#= tbl.GetTag("SchemaQualifiedName") #> 13 | 14 | 15 | 16 | @[System::StartTime] 17 | "AX" 18 | @[System::ExecutionInstanceGUID] 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | -------------------------------------------------------------------------------- /PASS Essential 07-2019/Code/addedBiml/Globals/Global1.biml: -------------------------------------------------------------------------------- 1 | <#@ global active="True" #> 2 | 3 | -------------------------------------------------------------------------------- /PASS Essential 07-2019/Pass_Essential.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bweissman/code/290f84e1f75bf92831cd845ce5bcb750fd3857eb/PASS Essential 07-2019/Pass_Essential.pdf -------------------------------------------------------------------------------- /PASS Summit 2021/1-Arc.ps1: -------------------------------------------------------------------------------- 1 | Clear-Host 2 | cd 'C:\ArcDataServices' 3 | $RG="ArcDataRG" 4 | $Subscription=(az account show --query id -o tsv) 5 | $ENV:ACCEPT_EULA='yes' 6 | $ENV:AZDATA_USERNAME='admin' 7 | $ENV:AZDATA_PASSWORD='P@ssw0rdP@ssw0rd' 8 | 9 | az group create -l eastus -n $RG 10 | 11 | kubectl get nodes -o wide 12 | 13 | kubectl get nodes worker-3 -o jsonpath="{range .status.images[*]}{.names[1]}{'\n'}{end}" | grep arcdata 14 | 15 | $TotalSize = 0 16 | ((kubectl get nodes worker-3 -o jsonpath="{range .status.images[*]}{.sizeBytes}{'\t'}{.names[1]}{'\n'}{end}" | grep arcdata).Split("`t") | grep -v mcr).Split("`n") | Foreach { $TotalSize += $_} 17 | 
[Math]::Round(($TotalSize/1024/1024),2) 18 | 19 | 20 | # We could deploy direct from Portal (requires arc connected k8s!) 21 | Start-Process https://portal.azure.com/#create/Microsoft.DataController 22 | 23 | # Deploy DC from Command Line 24 | az arcdata dc create --connectivity-mode Indirect --name arc-dc-kubeadm --k8s-namespace arc ` 25 | --subscription $Subscription ` 26 | -g $RG -l eastus --storage-class local-storage ` 27 | --profile-name azure-arc-kubeadm --infrastructure onpremises --use-k8s 28 | 29 | # Check ADS while running 30 | 31 | # Check the pods that got created 32 | kubectl get pods -n arc 33 | 34 | # Check Status 35 | az arcdata dc status show --k8s-namespace arc --use-k8s 36 | 37 | # Add Controller in ADS 38 | 39 | # Create MI 40 | az sql mi-arc create -n mi-1 --k8s-namespace arc --use-k8s ` 41 | --storage-class-backups local-storage ` 42 | --storage-class-data local-storage ` 43 | --storage-class-datalogs local-storage ` 44 | --storage-class-logs local-storage ` 45 | --cores-limit 1 --cores-request 1 ` 46 | --memory-limit 2Gi --memory-request 2Gi --dev 47 | 48 | az sql mi-arc list --k8s-namespace arc --use-k8s -o table 49 | 50 | kubectl get sqlmi -n arc 51 | 52 | kubectl get pods -n arc -o wide 53 | 54 | kubectl describe pod mi-1-0 -n arc 55 | 56 | # Could deploy as AG using --replicas 2/3 57 | # az sql mi-arc create -n mi-2 --k8s-namespace arc --use-k8s --replicas 3 58 | az sql mi-arc create -n mi-2 --k8s-namespace arc --use-k8s --replicas 2 --dev 59 | 60 | # Could also deploy / resize from ADS and access Grafana/Kibana 61 | $ENV:AZDATA_PASSWORD | Set-Clipboard 62 | 63 | # Backups 64 | kubectl edit sqlmi mi-1 -n arc 65 | 66 | # Updates: 67 | az arcdata dc list-upgrades -k arc 68 | 69 | # az arcdata dc update 70 | # az arcdata sql mi-arc update 71 | 72 | # Connect to Azure Monitor: 73 | # Create Service Principal 74 | $SP=(az ad sp create-for-rbac --name http://ArcDemoSP | ConvertFrom-Json) 75 | $SP | Out-String | grep -v password 76 | 77 | # Add Role 78 | az role assignment create --assignee $SP.appId --role "Monitoring Metrics Publisher" --scope subscriptions/$Subscription 79 | 80 | # Create Log Analytics Workspace and retrieve it's credentials 81 | $LAWS=(az monitor log-analytics workspace create -g $RG -n ArcLAWS| ConvertFrom-Json) 82 | $LAWSKEYS=(az monitor log-analytics workspace get-shared-keys -g $RG -n ArcLAWS | ConvertFrom-Json) 83 | 84 | # For Direct connected mode: 85 | # Connect the Kubernetes Cluster to Azure (Arc-enabled Kubernetes) 86 | # Enable the Cluster for Custom Locations 87 | # Deploy Custom Location and DC from Portal 88 | 89 | # In indirect connected mode: 90 | 91 | # Store keys 92 | $Env:SPN_AUTHORITY='https://login.microsoftonline.com' 93 | $Env:WORKSPACE_ID=$LAWS.customerId 94 | $Env:WORKSPACE_SHARED_KEY=$LAWSKEYS.primarySharedKey 95 | $Env:SPN_CLIENT_ID=$SP.appId 96 | $Env:SPN_CLIENT_SECRET=$SP.password 97 | $Env:SPN_TENANT_ID=$SP.tenant 98 | $Env:AZDATA_VERIFY_SSL='no' 99 | 100 | # Export our logs and metrics (and usage) 101 | # az arcdata dc export -t usage --path usage.json -k arc --force --use-k8s 102 | az arcdata dc export -t metrics --path metrics.json -k arc --force --use-k8s 103 | az arcdata dc export -t logs --path logs.json -k arc --force --use-k8s 104 | 105 | # Upload the data to Azure - this should be a scheduled job. 
106 | az arcdata dc upload --path metrics.json 107 | az arcdata dc upload --path logs.json 108 | 109 | remove-item *.json 110 | 111 | # Check in portal 112 | Start-Process ("https://portal.azure.com/#@"+ (az account show --query tenantId -o tsv) + "/resource" + (az group show -n $RG --query id -o tsv)) 113 | 114 | # Cleanup when done 115 | kubectl delete namespace arc 116 | az group delete -g $RG --yes 117 | az ad sp delete --id $SP.appId 118 | 119 | # az group delete -g ArcDemoRG --yes 120 | # az vm deallocate -g PS --name PSDemoV3 -------------------------------------------------------------------------------- /Pass Summit 2020/BDC Deployment/01_Windows_Tools.ps1: -------------------------------------------------------------------------------- 1 | [Net.ServicePointManager]::SecurityProtocol = [Net.ServicePointManager]::SecurityProtocol -bor [Net.SecurityProtocolType]::Tls12 2 | Set-ExecutionPolicy Bypass -Scope Process -Force; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1')) 3 | choco install azure-data-studio -y 4 | choco install azure-cli -y 5 | choco install kubernetes-cli -y 6 | choco install curl -y 7 | choco install putty -y 8 | choco install notepadplusplus -y 9 | choco install 7zip -y 10 | choco install sqlserver-cmdlineutils -y 11 | $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User") 12 | curl -o azdata.msi https://aka.ms/azdata-msi 13 | msiexec /i azdata.msi /passive -------------------------------------------------------------------------------- /Pass Summit 2020/BDC Deployment/02_single-node-kubeadm.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -Eeuo pipefail 3 | export LOG_FILE="kubeadm.log" 4 | export DEBIAN_FRONTEND=noninteractive 5 | export OSCODENAME=$(lsb_release -cs) 6 | KUBE_DPKG_VERSION=1.18.3-00 7 | KUBE_VERSION=1.18.3 8 | TIMEOUT=600 9 | RETRY_INTERVAL=5 10 | export PV_COUNT="80" 11 | { 12 | sudo apt-get update -q 13 | sudo apt --yes install \ 14 | software-properties-common \ 15 | apt-transport-https \ 16 | ca-certificates \ 17 | curl 18 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - 19 | sudo add-apt-repository \ 20 | "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" 21 | sudo apt update -q 22 | sudo apt-get install -q --yes docker-ce=18.06.2~ce~3-0~ubuntu --allow-downgrades 23 | sudo apt-mark hold docker-ce 24 | sudo usermod --append --groups docker $USER 25 | rm -f -r setupscript 26 | mkdir -p setupscript 27 | cd setupscript/ 28 | sudo swapoff -a 29 | sudo sed -i '/swap/s/^\(.*\)$/#\1/g' /etc/fstab 30 | curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - 31 | cat < /dev/null 106 | sudo add-apt-repository "$(wget -qO- https://packages.microsoft.com/config/ubuntu/18.04/prod.list)" 107 | sudo apt-get update 108 | sudo apt-get install -y azdata-cli 109 | echo "Done." 
110 | }| tee $LOG_FILE -------------------------------------------------------------------------------- /Pass Summit 2020/BDC Deployment/03_PrePull.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -Eeuo pipefail 3 | export DOCKER_TAG="2019-CU8-ubuntu-16.04" 4 | IMAGES=( 5 | mssql-app-service-proxy 6 | mssql-control-watchdog 7 | mssql-controller 8 | mssql-dns 9 | mssql-hadoop 10 | mssql-mleap-serving-runtime 11 | mssql-mlserver-py-runtime 12 | mssql-mlserver-r-runtime 13 | mssql-monitor-collectd 14 | mssql-monitor-elasticsearch 15 | mssql-monitor-fluentbit 16 | mssql-monitor-grafana 17 | mssql-monitor-influxdb 18 | mssql-monitor-kibana 19 | mssql-monitor-telegraf 20 | mssql-security-knox 21 | mssql-security-support 22 | mssql-server-controller 23 | mssql-server-data 24 | mssql-ha-operator 25 | mssql-ha-supervisor 26 | mssql-service-proxy 27 | mssql-ssis-app-runtime 28 | ) 29 | 30 | for image in "${IMAGES[@]}"; 31 | do 32 | docker pull mcr.microsoft.com/mssql/bdc/$image:$DOCKER_TAG 33 | echo "Docker image" $image " pulled." 34 | done 35 | echo "Docker images pulled." 36 | -------------------------------------------------------------------------------- /Pass Summit 2020/BDC Deployment/04_AKS Deployment.ps1: -------------------------------------------------------------------------------- 1 | az login 2 | az account set -s 3 | az aks create --name bdcaks --resource-group --generate-ssh-keys --node-vm-size Standard_D8s_v3 --node-count 2 4 | az aks get-credentials --overwrite-existing --name bdcaks --resource-group --admin -------------------------------------------------------------------------------- /Pass Summit 2020/BDC Deployment/Demos.ps1: -------------------------------------------------------------------------------- 1 | Set-Location C:\users\bdc\Desktop 2 | 3 | # Let's start by taking a look at the preconfigured AKS Cluster 4 | kubectl config use-context bdcaks-admin 5 | kubectl cluster-info 6 | kubectl get nodes -o wide 7 | 8 | # What about the kubeadm? 9 | kubectl config use-context kubernetes-admin@kubernetes 10 | kubectl cluster-info 11 | kubectl get nodes -o wide 12 | 13 | # kubectl (just like azdata and ADS!) is cross platform! 14 | ssh bdc@bdclinux 15 | # kubectl get nodes -o wide 16 | 17 | # Usually, we would now join additional nodes to the cluster 18 | # but today we stick to a single node and local storage 19 | # kubeadm token create --print-join-command 20 | # Still: Storage and compute capacity of your k8s cluster are the first thing to consider! 21 | 22 | # exit 23 | 24 | # Let's take a quick look at our deployment scripts 25 | Start-Process https://bookmark.ws/BDCDeploy 26 | 27 | # While we're at it... Install the Data Virt Extension! 28 | 29 | # Just like k8s, BDC can be deployed through code! 30 | azdata bdc config init --source kubeadm-dev-test --path kubeadm-custom -f 31 | 32 | # Let's take a look at control.json and bdc.json 33 | # this is where we modify EVERYTHING (some settings like root containers or AD auth would need to be added) 34 | # Choose wisely - there is no resize! 35 | notepad++.exe .\kubeadm-custom\bdc.json .\kubeadm-custom\control.json 36 | 37 | # Want a wizard? 38 | # One that even creates the k8s for you? 39 | azuredatastudio.cmd 40 | 41 | # How's that progressing? 42 | kubectl config use-context bdcaks-admin 43 | kubectl get namespace 44 | kubectl get pods -n mssql-cluster # --watch 45 | # This will take quite a bit longer without pre-pulled images! 46 | 47 | # While this is running... 
48 | # Didn't we say this is cross platform? 49 | # Let's kick of another one on our Linux machine! 50 | # Open a putty session as well so we can watch the progress... 51 | putty 52 | # then ssh into the machine 53 | ssh bdc@bdclinux 54 | 55 | # azdata bdc config init --source kubeadm-dev-test --path kubeadm-custom -f 56 | # vi kubeadm-custom/control.json 57 | # set CU6 58 | # azdata bdc config replace -p kubeadm-custom/control.json -j spec.storage.data.className=local-storage 59 | # azdata bdc config replace -p kubeadm-custom/control.json -j spec.storage.logs.className=local-storage 60 | # azdata bdc create -c kubeadm-custom 61 | # exit 62 | 63 | # Let's connect to our controller (could also be done in ADS...) 64 | kubectl config use-context kubernetes-admin@kubernetes 65 | azdata login --namespace mssql-cluster -u admin 66 | 67 | # What are our endpoints? 68 | azdata bdc endpoint list 69 | azdata bdc endpoint list -o table 70 | 71 | # Which Version are we running? 72 | sqlcmd -S 192.168.1.4,31433 -U admin -Q "SELECT @@VERSION" -P 73 | 74 | # How about an upgrade to CU8... 75 | # We can even do that from here! 76 | azdata bdc upgrade -n mssql-cluster -t 2019-CU8-ubuntu-16.04 -r mcr.microsoft.com/mssql/bdc 77 | # Spoiler: This will take about 30 minutes so we may not get to see the end of it :) 78 | 79 | # Which Version are we running now? 80 | sqlcmd -S 192.168.1.4,31433 -U admin -Q "SELECT @@VERSION" -P -------------------------------------------------------------------------------- /PassCamp 2022/PVC.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: sql-storage 5 | spec: 6 | storageClassName: local-storage 7 | accessModes: 8 | - ReadWriteOnce 9 | resources: 10 | requests: 11 | storage: 72Gi -------------------------------------------------------------------------------- /PassCamp 2022/SQL.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: mssql-deployment 5 | spec: 6 | replicas: 1 7 | strategy: 8 | type: Recreate 9 | selector: 10 | matchLabels: 11 | app: mssql 12 | template: 13 | metadata: 14 | labels: 15 | app: mssql 16 | spec: 17 | securityContext: 18 | fsGroup: 10001 19 | containers: 20 | - name: mssql 21 | image: 'mcr.microsoft.com/mssql/server:2019-latest' 22 | ports: 23 | - containerPort: 1433 24 | env: 25 | - name: ACCEPT_EULA 26 | value: "Y" 27 | - name: SA_PASSWORD 28 | valueFrom: 29 | secretKeyRef: 30 | name: mssql 31 | key: SA_PASSWORD 32 | volumeMounts: 33 | - name: mssqldb 34 | mountPath: /var/opt/mssql 35 | volumes: 36 | - name: mssqldb 37 | persistentVolumeClaim: 38 | claimName: sql-storage -------------------------------------------------------------------------------- /PassCamp 2022/preparek8s: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -Eeuo pipefail 3 | export DEBIAN_FRONTEND=noninteractive 4 | export OSCODENAME=$(lsb_release -cs) 5 | KUBE_DPKG_VERSION=1.24.1-00 6 | KUBE_VERSION=1.24.1 7 | TIMEOUT=600 8 | RETRY_INTERVAL=5 9 | export PV_COUNT="80" 10 | sudo apt-get update -q 11 | sudo apt --yes install \ 12 | software-properties-common \ 13 | apt-transport-https \ 14 | ca-certificates \ 15 | curl 16 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - 17 | sudo add-apt-repository \ 18 | "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) 
stable" 19 | sudo apt update -q 20 | rm -f -r setupscript 21 | mkdir -p setupscript 22 | cd setupscript/ 23 | sudo swapoff -a 24 | sudo sed -i '/swap/s/^\(.*\)$/#\1/g' /etc/fstab 25 | curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - 26 | cat <> ~/.bash_profile 32 | source ~/.bash_profile 33 | 34 | # Try connection 35 | sqlcmd -S 127.0.0.1 -U SA -P P@ssw0rd -Q "SELECT @@Version" 36 | 37 | # ERRORLOG 38 | journalctl | grep sqlserver 39 | 40 | exit 41 | 42 | # We can also connect from our other machine 43 | sqlcmd -S (GetIP($VMName)) -U SA -P P@ssw0rd -Q "SELECT @@Version" 44 | 45 | # Or use another tool... like SSMS 46 | GetIP($VMName) -------------------------------------------------------------------------------- /SQL Server on Linux, Containers and Kubernetes/02 SQL on Containers & Kubernetes.ps1: -------------------------------------------------------------------------------- 1 | # SQL on Containers (Here: Docker on Linux) 2 | ssh $SSHTarget 3 | 4 | sudo apt install docker docker.io -y 5 | sudo docker pull mcr.microsoft.com/mssql/server:2019-latest 6 | 7 | sudo docker run -e "ACCEPT_EULA=Y" -e "SA_PASSWORD=P@ssw0rd" \ 8 | -p 31433:1433 --name sql1 -h sql1 \ 9 | -d mcr.microsoft.com/mssql/server:2019-latest 10 | 11 | sudo docker ps -a 12 | 13 | sqlcmd -S 127.0.0.1,31433 -U SA -P P@ssw0rd -Q "SELECT @@servername" 14 | 15 | # Let's also add a volume to persist our data in another container! 16 | sudo docker run -e "ACCEPT_EULA=Y" -e "SA_PASSWORD=P@ssw0rd" \ 17 | -p 31434:1433 --name sql2 -h sql2 -v sqldata2:/var/opt/mssql \ 18 | -d mcr.microsoft.com/mssql/server:2019-latest 19 | 20 | 21 | sudo docker ps -a 22 | 23 | sqlcmd -S 127.0.0.1,31434 -U SA -P P@ssw0rd -Q "SELECT @@servername" 24 | 25 | sqlcmd -S 127.0.0.1,31434 -U SA -P P@ssw0rd -Q "CREATE DATABASE VB6isTheBest" 26 | 27 | sudo ls -al /var/lib/docker/volumes/sqldata2/_data/data 28 | 29 | exit 30 | 31 | # We can again access this from external 32 | sqlcmd -S (GetIP($VMName)),31433 -U SA -P P@ssw0rd -Q "SELECT @@servername" 33 | sqlcmd -S (GetIP($VMName)),31434 -U SA -P P@ssw0rd -Q "SELECT @@servername" 34 | 35 | cd "C:\Users\bits\Desktop\Code\SQL on Linux, Containers and Kubernetes" 36 | 37 | 38 | 39 | # SQL on Kubernetes 40 | # I have two clusters 41 | kubectl config view -o jsonpath='{range .contexts[*]}{.name}{''\n''}{end}' 42 | 43 | # Let's use the small one... 44 | kubectl config use-context kubeadm-small 45 | kubectl get nodes 46 | 47 | # We'll create a Namespace 48 | kubectl create namespace mssql 49 | kubectl get namespace 50 | kubectl config set-context --current --namespace=mssql 51 | 52 | # It's empty 53 | kubectl get pods 54 | 55 | # Let's define storage first 56 | code PVC.yaml 57 | kubectl apply -f .\PVC.yaml 58 | kubectl get pvc 59 | 60 | # We'll also need an SA Password 61 | $PASSWORD='P@ssw0rd' 62 | kubectl create secret generic mssql --from-literal=SA_PASSWORD=$PASSWORD 63 | 64 | # We can then define our SQL Server 65 | code SQL.yaml 66 | kubectl apply -f SQL.yaml 67 | 68 | # Our storage is now bound 69 | kubectl get pvc 70 | 71 | # And SQL is coming up 72 | kubectl get deployment 73 | kubectl get pod 74 | 75 | # We can also check out the logs of the Pod which is the SQL Log 76 | kubectl logs (kubectl get pods -o jsonpath="{.items[0].metadata.name}" ) 77 | 78 | # But we can't access it yet... 
79 | # Instead of YAML we can also use imperative commands 80 | kubectl expose deployment mssql-deployment --target-port=1433 --type=NodePort 81 | 82 | # We now have a service 83 | kubectl get service 84 | 85 | $Endpoint= ("$(GetIP("k8s-small-worker-1")),$(kubectl get service mssql-deployment -o jsonpath='{ .spec.ports[*].nodePort }')") 86 | $Endpoint 87 | sqlcmd -S $Endpoint -U SA -P ($PASSWORD) -Q "SELECT @@VERSION" 88 | 89 | # Let's restore some data 90 | if ([System.IO.File]::Exists("C:\Users\bits\Desktop\Code\SQL on Linux, Containers and Kubernetes\AdventureWorks2019.bak") -eq $false) { 91 | curl.exe -L -o AdventureWorks2019.bak https://github.com/Microsoft/sql-server-samples/releases/download/adventureworks/AdventureWorks2019.bak 92 | } 93 | 94 | dir *.bak 95 | kubectl get pods 96 | $Pod=(kubectl get pods -o jsonpath="{.items[0].metadata.name}" ) 97 | $Pod 98 | kubectl cp AdventureWorks2019.bak "$($Pod):/var/opt/mssql/data/AdventureWorks2019.bak" 99 | 100 | # Our bak is now on the server 101 | kubectl exec -i -t "$($Pod)" -- ls -al /var/opt/mssql/data/AdventureWorks2019.bak 102 | 103 | # And we can restore 104 | sqlcmd -S $Endpoint -U SA -P ($PASSWORD) -Q "RESTORE DATABASE AdventureWorks2019 FROM DISK = N'/var/opt/mssql/data/AdventureWorks2019.bak' WITH MOVE 'AdventureWorks2017' TO '/var/opt/mssql/data/AdventureWorks2019.mdf', MOVE 'AdventureWorks2017_Log' TO '/var/opt/mssql/data/AdventureWorks2019_Log.ldf'" 105 | 106 | # and woohoooo: 107 | sqlcmd -S $Endpoint -U SA -P ($PASSWORD) -Q "SELECT Name FROM sys.databases" 108 | 109 | # Of course, this is also accessible from any client 110 | $Endpoint | Set-Clipboard 111 | azuredatastudio.cmd 112 | 113 | # And we could also just upgrade... 114 | kubectl set image deployment mssql-deployment mssql=mcr.microsoft.com/mssql/server:2019-CU15-ubuntu-20.04 115 | kubectl get pods -w 116 | sqlcmd -S $Endpoint -U SA -P ($PASSWORD) -Q "SELECT @@VERSION" 117 | kubectl logs (kubectl get pods -o jsonpath="{.items[0].metadata.name}" ) 118 | sqlcmd -S $Endpoint -U SA -P ($PASSWORD) -Q "SELECT @@VERSION" 119 | 120 | # And when we're done... 
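# (A hedged aside, not part of the original demo: before tearing the namespace
#  down you may want to confirm what it still contains.)
# kubectl get all,pvc,secret -n mssql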
121 | kubectl delete namespace mssql 122 | kubectl config set-context --current --namespace=default -------------------------------------------------------------------------------- /SQL Server on Linux, Containers and Kubernetes/PVC.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: sql-storage 5 | spec: 6 | storageClassName: local-storage 7 | accessModes: 8 | - ReadWriteOnce 9 | resources: 10 | requests: 11 | storage: 72Gi -------------------------------------------------------------------------------- /SQL Server on Linux, Containers and Kubernetes/SQL.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: mssql-deployment 5 | spec: 6 | replicas: 1 7 | strategy: 8 | type: Recreate 9 | selector: 10 | matchLabels: 11 | app: mssql 12 | template: 13 | metadata: 14 | labels: 15 | app: mssql 16 | spec: 17 | securityContext: 18 | fsGroup: 10001 19 | containers: 20 | - name: mssql 21 | image: 'mcr.microsoft.com/mssql/server:2019-CU14-ubuntu-20.04' 22 | ports: 23 | - containerPort: 1433 24 | env: 25 | - name: ACCEPT_EULA 26 | value: "Y" 27 | - name: SA_PASSWORD 28 | valueFrom: 29 | secretKeyRef: 30 | name: mssql 31 | key: SA_PASSWORD 32 | volumeMounts: 33 | - name: mssqldb 34 | mountPath: /var/opt/mssql 35 | volumes: 36 | - name: mssqldb 37 | persistentVolumeClaim: 38 | claimName: sql-storage -------------------------------------------------------------------------------- /SQLBits_2019/SSIS Performance.sql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bweissman/code/290f84e1f75bf92831cd845ce5bcb750fd3857eb/SQLBits_2019/SSIS Performance.sql -------------------------------------------------------------------------------- /SQLBits_2019/SSIS_Performance.bak: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bweissman/code/290f84e1f75bf92831cd845ce5bcb750fd3857eb/SQLBits_2019/SSIS_Performance.bak -------------------------------------------------------------------------------- /SQLBits_2019/SSIS_Performance/.vs/SSIS_Performance/v14/.suo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bweissman/code/290f84e1f75bf92831cd845ce5bcb750fd3857eb/SQLBits_2019/SSIS_Performance/.vs/SSIS_Performance/v14/.suo -------------------------------------------------------------------------------- /SQLBits_2019/SSIS_Performance/.vs/SSIS_Performance/v15/.suo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bweissman/code/290f84e1f75bf92831cd845ce5bcb750fd3857eb/SQLBits_2019/SSIS_Performance/.vs/SSIS_Performance/v15/.suo -------------------------------------------------------------------------------- /SQLBits_2019/SSIS_Performance/SSIS_Performance.sln: -------------------------------------------------------------------------------- 1 |  2 | Microsoft Visual Studio Solution File, Format Version 12.00 3 | # Visual Studio 14 4 | VisualStudioVersion = 14.0.25420.1 5 | MinimumVisualStudioVersion = 10.0.40219.1 6 | Project("{159641D6-6404-4A2A-AE62-294DE0FE8301}") = "SSIS_Performance", "SSIS_Performance\SSIS_Performance.dtproj", "{F43C6818-FBF3-4ABC-A932-63CE533E5AEE}" 7 | EndProject 8 | Global 9 | GlobalSection(SolutionConfigurationPlatforms) = preSolution 
10 | Development|Default = Development|Default 11 | EndGlobalSection 12 | GlobalSection(ProjectConfigurationPlatforms) = postSolution 13 | {F43C6818-FBF3-4ABC-A932-63CE533E5AEE}.Development|Default.ActiveCfg = Development 14 | {F43C6818-FBF3-4ABC-A932-63CE533E5AEE}.Development|Default.Build.0 = Development 15 | EndGlobalSection 16 | GlobalSection(SolutionProperties) = preSolution 17 | HideSolutionNode = FALSE 18 | EndGlobalSection 19 | EndGlobal 20 | -------------------------------------------------------------------------------- /SQLBits_2019/SSIS_Performance/SSIS_Performance/00-00_WarmUp.biml: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 |
23 |
24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | TRUNCATE TABLE ProductPhoto 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | Select top 25000 a.* from Production.ProductPhoto a 40 | CROSS JOIN [Person].[Address] 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | TRUNCATE TABLE ProductPhoto 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | Select top 25000 a.* from Production.ProductPhoto a 62 | CROSS JOIN [Person].[Address] 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 |
73 | -------------------------------------------------------------------------------- /SQLBits_2019/SSIS_Performance/SSIS_Performance/01-01_Environment.biml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /SQLBits_2019/SSIS_Performance/SSIS_Performance/01-02_BuildMeta.biml: -------------------------------------------------------------------------------- 1 | <#@ template language="VB" tier="2"#> 2 | 3 | 4 | <# dim tablelist as new list (of string) 5 | dim tablelist_checked as new list (of string) 6 | ' tablelist.add ("FactInternetSales") 7 | dim importResult as Importresults = rootnode.connections("AW_DW").GetDatabaseSchema() 8 | do while importresult.tablenodes.where(function(e) tablelist.Contains(e.name) and not tablelist_checked.Contains(e.name)).where(function(r) r.columns.OfType(Of AstTableColumnTableReferenceNode)().Any).Count > 0 9 | for each t as asttablenode in importresult.tablenodes.where(function(e) tablelist.Contains(e.name) and not tablelist_checked.Contains(e.name)).where(function(r) r.columns.OfType(Of AstTableColumnTableReferenceNode)().Any) 10 | for each r as asttablecolumntablereferencenode in t.columns.ofType(of AstTableColumnTableReferenceNode)() 11 | tablelist.add(r.foreigntable.name) 12 | next 13 | tablelist_checked.add(t.name) 14 | next 15 | loop 16 | importresult = rootnode.connections("AW_DW").GetDatabaseSchema(nothing,tablelist,ImportOptions.ExcludeViews or ImportOptions.ExcludeIndex) 17 | for each t as asttablenode in importresult.tablenodes 18 | t.schema = rootnode.schemas(0)#> 19 | <#= t.getbiml #> 20 | <# next #> 21 | 22 | 23 | -------------------------------------------------------------------------------- /SQLBits_2019/SSIS_Performance/SSIS_Performance/01-03_CreateTables.biml: -------------------------------------------------------------------------------- 1 | <#@ template tier="3" language="VB"#> 2 | <#@ code file="01-TopologySort.vb" #> 3 | <#@ import namespace="TopologySort" #> 4 | 5 | 6 | 7 | 8 | 9 | 10 | <# for each table as asttablenode in RootNode.Tables.TopoSort.reverse #> 11 | IF EXISTS (SELECT * from sys.objects WHERE object_id = OBJECT_ID(N'[dbo].[<#= table.name #>]') AND type IN (N'U')) 12 | DROP TABLE [dbo].[<#= table.name #>] 13 | GO 14 | <# next #> 15 | 16 | 17 | 18 | 19 | <# for each table as asttablenode in RootNode.Tables.TopoSort #> 20 | <#= table.GetDropAndCreateDDL() #> 21 | <# next #> 22 | 23 | 24 | 25 | 26 | 27 | 28 | -------------------------------------------------------------------------------- /SQLBits_2019/SSIS_Performance/SSIS_Performance/01-04a_Load_Tables_Linear.biml: -------------------------------------------------------------------------------- 1 | <#@ template tier="4" language="VB"#> 2 | <#@ code file="01-TopologySort.vb" #> 3 | <#@ import namespace="TopologySort" #> 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | <# for each t as asttablenode in rootnode.tables.TopoSort #> 13 | <#= CallBimlScript("01-06_Dataflow.biml",t,nothing) #> 14 | <# next #> 15 | 16 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /SQLBits_2019/SSIS_Performance/SSIS_Performance/01-04b_Load_Tables_Parallel.biml: -------------------------------------------------------------------------------- 1 | <#@ template tier="4" language="VB"#> 2 | <#@ code file="01-TopologySort.vb" #> 3 | <#@ import namespace="TopologySort" #> 4 | 5 | 6 | 7 | 8 | 9 
| 10 | 11 | 12 | 13 | 14 | <# for each t as asttablenode in rootnode.tables.where(function(r) r.columns.OfType(Of AstTableColumnTableReferenceNode)().Count+r.columns.OfType(Of AstMultipleColumnTableReferenceNode)().Count = 0) #> 15 | 16 | 17 | <#= CallBimlScript("01-05_Recursion.biml",t,LoadedTables) #> 18 | 19 | 20 | <# next #> 21 | 22 | 23 | <# do while rootnode.tables.where(function(e) not loadedtables.contains(e.name) and not e.columns.OfType(Of AstMultipleColumnTableReferenceNode).Where(function(c) c.foreigntable.name <> e.name and not loadedtables.contains(c.foreigntable.name) ).count > 0 and not e.columns.OfType(Of AstTableColumnTableReferenceNode).Where(function(m) m.foreigntable.name <> e.name and not loadedtables.contains(m.foreigntable.name) ).count > 0).Any and level < 10 24 | Level += 1 25 | loadabletables = rootnode.tables.where(function(e) not loadedtables.contains(e.name) and not e.columns.OfType(Of AstMultipleColumnTableReferenceNode).Where(function(c) c.foreigntable.name <> e.name and not loadedtables.contains(c.foreigntable.name) ).count > 0 and not e.columns.OfType(Of AstTableColumnTableReferenceNode).Where(function(m) m.foreigntable.name <> e.name and not loadedtables.contains(m.foreigntable.name) ).count > 0).ToList #> 26 | 27 | 28 | <# for each tbl as asttablenode in loadabletables #> 29 | <#= CallBimlScript("01-06_Dataflow.biml",tbl,loadedtables) #> 30 | <# next #> 31 | 32 | 33 | <# loop 34 | if loadedtables.count < rootnode.tables.count then #> 35 | 36 | 37 | <# for each t as asttablenode in rootnode.tables.TopoSort.where(function(r) not loadedtables.contains(r.name)) #> 38 | <#= CallBimlScript("01-06_Dataflow.biml",t,LoadedTables) #> 39 | <# next #> 40 | 41 | 42 | <# end if #> 43 | 44 | 45 | 46 | 47 | -------------------------------------------------------------------------------- /SQLBits_2019/SSIS_Performance/SSIS_Performance/01-05_Recursion.biml: -------------------------------------------------------------------------------- 1 | <#@ template language="VB" designerbimlpath="Biml/Packages/Package/Tasks" #> 2 | <#@ property name="tbl" type="AstTableNode" #> 3 | <#@ property name="LoadedTables" type="List (of String)" #> 4 | <#= CallBimlScript("01-06_Dataflow.biml",tbl,LoadedTables) #> 5 | <# for each t as asttablenode in rootnode.tables.where(function(r) r.columns.OfType(Of AstTableColumnTableReferenceNode)().Count+r.columns.OfType(Of AstMultipleColumnTableReferenceNode)().Count =1).where(function(e) e.columns.OfType(Of AstTableColumnTableReferenceNode)().first.foreigntable.name = tbl.name ) #> 6 | <#= CallBimlScript("01-05_Recursion.biml",t,LoadedTables) #> 7 | <#next #> -------------------------------------------------------------------------------- /SQLBits_2019/SSIS_Performance/SSIS_Performance/01-06_Dataflow.biml: -------------------------------------------------------------------------------- 1 | <#@ template language="VB" designerbimlpath="Biml/Packages/Package/Tasks" #> 2 | <#@ property name="tbl" type="AstTableNode" #> 3 | <#@ property name="LoadedTables" type="List (of String)" #> 4 | <# if not loadedtables is nothing then LoadedTables.add (tbl.name) #> 5 | 6 | 7 | 8 | Select <#= tbl.GetColumnList() #> from <#=tbl.SchemaQualifiedName#> 9 | 10 | 11 | 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /SQLBits_2019/SSIS_Performance/SSIS_Performance/01-TopologySort.vb: -------------------------------------------------------------------------------- 1 | Imports System.Collections.Generic 2 | Imports 
System.Linq 3 | Imports System.Linq.Expressions 4 | Imports Varigence.Languages.Biml.Table 5 | Imports System.Runtime.CompilerServices 6 | 7 | Module TopologySort 8 | Public LoadedTables As New List(Of String) 9 | Public LoadableTables As New List(Of AstTableNode) 10 | Public Level As Integer = 0 11 | 12 | Public Function TopoSort(tables As ICollection(Of AstTableNode)) As ICollection(Of AstTableNode) 13 | Dim visitedList As New List(Of AstTableNode) 14 | Dim outputList As New List(Of AstTableNode) 15 | For Each tbl As asttablenode In tables 16 | TopoVisit(tbl, outputList, visitedList) 17 | Next 18 | Return outputList 19 | End Function 20 | 21 | Private Function TopoVisit(node As AstTableNode, outputList As List(Of AstTableNode), visitedList As List(Of AstTableNode)) 22 | If Not visitedList.Contains(node) Then 23 | visitedList.Add(node) 24 | For Each dependentTable As AstTableNode In node.Columns.OfType(Of AstTableColumnTableReferenceNode).Select(Function(c) c.ForeignTable) 25 | TopoVisit(dependentTable, outputList, visitedList) 26 | Next 27 | For Each dependentTable As AstTableNode In node.Columns.OfType(Of AstMultipleColumnTableReferenceNode).Select(Function(c) c.ForeignTable) 28 | TopoVisit(dependentTable, outputList, visitedList) 29 | Next 30 | outputList.Add(node) 31 | End If 32 | End Function 33 | End Module 34 | -------------------------------------------------------------------------------- /SQLBits_2019/SSIS_Performance/SSIS_Performance/02-01_Environment.biml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /SQLBits_2019/SSIS_Performance/SSIS_Performance/02-02_BuildTSTMeta.biml: -------------------------------------------------------------------------------- 1 | <#@ template language="VB" tier="2" optionexplicit="False" #> 2 | <#@ code file="02-BimlFunctions.vb" #> 3 | 4 | 5 | <# dim SRC = RootNode.Connections("Source") 6 | dim importResult = SRC.GetDatabaseSchema(nothing,BF.GetNonEmptyList(RootNode.Connections("Target"), _ 7 | "select distinct tablename from meta.tables"), BF.DefaultImportOptions) 8 | for each table as asttablenode in importResult.TableNodes #> 9 | 10 | <#= table.columns.getbiml #> 11 | 12 | 13 |
14 | <# next #> 15 |
16 |
-------------------------------------------------------------------------------- /SQLBits_2019/SSIS_Performance/SSIS_Performance/02-03_BuildProdMeta.biml: -------------------------------------------------------------------------------- 1 | <#@ template language="VB" tier="3" optionexplicit="False" #> 2 | 3 | 4 | <# for each tbl in Rootnode.Tables #> 5 | 6 | <#= tbl.columns.getbiml #> 7 | 8 |
9 | <# 10 | next #> 11 |
12 |
-------------------------------------------------------------------------------- /SQLBits_2019/SSIS_Performance/SSIS_Performance/02-04_CreateTables.biml: -------------------------------------------------------------------------------- 1 | <#@ template tier="4" language="VB" #> 2 | 3 | 4 | 5 | 6 | <# for each table as asttablenode in RootNode.Tables #> 7 | 8 | 9 | <#= table.GetDropAndCreateDDL#> 10 | 11 | 12 | <# next #> 13 | 14 | 15 | 16 | -------------------------------------------------------------------------------- /SQLBits_2019/SSIS_Performance/SSIS_Performance/02-05_CreateEvaluation.biml: -------------------------------------------------------------------------------- 1 | <#@ template tier="4" language="VB" optionexplicit="False" #> 2 | <#@ code file="02-BimlFunctions.vb" #> 3 | <# EvaluateTries = 2 #> 4 | 5 | 6 | 7 | 8 | 9 | 10 | <# for each row in BF.GetDT(RootNode.Connections("Target"),"select * from meta.tables where active = 1").Rows #> 11 | - <#= Row("TableName")#> - <#= Row("Pattern") #>" ConstraintMode="Linear"> 12 | 13 | <#= CallBimlScript("02-07_Dataflow.biml","PROD",row("Tablename"),row("Pattern"),row("Parameters")) #> 14 | 15 | 16 | <# next #> 17 | 18 | 19 | <# for each row in BF.GetDT(RootNode.Connections("Target"),"meta.tables").Rows #> 20 | - <#= Row("TableName")#> - <#= Row("Pattern") #>" ConstraintMode="Linear"> 21 | 22 | <# for Tries = 1 to EvaluateTries #> 23 | 24 | 25 | 26 | 27 | DELETE FROM Meta.Performancelog where patternid = <#= row("RECID") #> and EndTime is null 28 | GO 29 | insert into Meta.Performancelog (Patternid,StartTime) 30 | select <#= row("RECID") #>,GetDate() 31 | 32 | 33 | <#= CallBimlScript("02-07_Dataflow.biml","TST",row("Tablename"),row("Pattern"),row("Parameters")) #> 34 | 35 | 36 | 37 | update Meta.Performancelog set EndTime = Getdate() where patternid = <#= row("RECID") #> and EndTime is null 38 | 39 | 40 | 41 | 42 | 43 | <# next #> 44 | 45 | 46 | <# next #> 47 | 48 | 49 | EXEC SP_Evaluate <#= EvaluateTries #> 50 | 51 | 52 | 53 | 54 | 55 | -------------------------------------------------------------------------------- /SQLBits_2019/SSIS_Performance/SSIS_Performance/02-06_CreateLoad.biml: -------------------------------------------------------------------------------- 1 | <#@ template tier="4" language="VB" optionexplicit="False" #> 2 | <#@ code file="02-BimlFunctions.vb" #> 3 | 4 | 5 | 6 | 7 | 8 | <# for each row in BF.GetDT(RootNode.Connections("Target"),"select * from meta.tables where active = 1").Rows #> 9 | - <#= Row("TableName")#> - <#= Row("Pattern") #>" ConstraintMode="Linear"> 10 | 11 | <#= CallBimlScript("02-07_Dataflow.biml","PROD",row("Tablename"),row("Pattern"),row("Parameters")) #> 12 | 13 | 14 | <# next #> 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /SQLBits_2019/SSIS_Performance/SSIS_Performance/02-07_Dataflow.biml: -------------------------------------------------------------------------------- 1 | <#@ template language="VB" designerbimlpath="Biml/Packages/Package/Tasks" #> 2 | <#@ property name="SCHEMA" type="String" #> 3 | <#@ property name="TableName" type="String" #> 4 | <#@ property name="Pattern" type="String" #> 5 | <#@ property name="Parameters" type="String" #> 6 | 7 | 8 | <# if Pattern = "DELETE" then#> 9 | DELETE FROM <#= schema #>.<#= tablename#> 10 | <# if parameters <> "" then #> 11 | WHERE <#= parameters #> 12 | <# end if#> 13 | <# else #> 14 | TRUNCATE TABLE <#= schema #>.<#= tablename#> 15 | <# end if #> 16 | 17 | 18 | 19 | 20 | 21 | Select * from <#= 
tablename#> <# if parameters <> "" then #> 22 | WHERE <#= parameters #> 23 | <# end if#> 24 | 25 | 26 | 27 | 28 | 29 | 30 | -------------------------------------------------------------------------------- /SQLBits_2019/SSIS_Performance/SSIS_Performance/02-BimlFunctions.vb: -------------------------------------------------------------------------------- 1 | Imports Varigence.Biml.CoreLowerer.SchemaManagement 2 | Imports Varigence.Biml.Extensions 3 | Imports Varigence.Languages.Biml 4 | Imports Varigence.Languages.Biml.Connection 5 | Imports System.Data 6 | Imports System.Collections.Generic 7 | Public Class BF 8 | Public Shared Function GetNonEmptyList(Conn As AstDbConnectionNode, SQL As String) As List(Of String) 9 | Dim tmplist As New List(Of String) 10 | If SQL.Contains(" ") = 0 Then SQL = "select * from " + SQL 11 | Dim DT As DataTable = ExternalDataAccess.GetDataTable(Conn.ConnectionString, SQL) 12 | For Each dr As datarow In DT.rows 13 | tmplist.Add(dr.item(0).ToString()) 14 | Next 15 | If tmplist.Count = 0 Then tmplist.Add("NONEMPTYFILLER") 16 | Return tmplist 17 | End Function 18 | Public Shared Function GetDT(Conn As AstDbConnectionNode, SQL As String) As DataTable 19 | If SQL.Contains(" ") = 0 Then SQL = "select * from " + SQL 20 | Return ExternalDataAccess.GetDataTable(Conn.ConnectionString, SQL) 21 | End Function 22 | Public Shared Function DefaultImportOptions() 23 | Return ImportOptions.ExcludeIdentity Or ImportOptions.ExcludePrimaryKey Or ImportOptions.ExcludeUniqueKey Or ImportOptions.ExcludeColumnDefault _ 24 | Or ImportOptions.ExcludeIndex Or ImportOptions.ExcludeCheckConstraint Or ImportOptions.ExcludeForeignKey 25 | End Function 26 | End Class 27 | -------------------------------------------------------------------------------- /SQLBits_2019/SSIS_Performance/SSIS_Performance/03-01_CreateLoad.biml: -------------------------------------------------------------------------------- 1 | <#@ template tier="4" language="VB" optionexplicit="False" #> 2 | <#@ code file="02-BimlFunctions.vb" #> 3 | <#@ import namespace="System.Data.SqlClient" #> 4 | <# MaxContainers = 8 5 | Dim conn as new SqlConnection("Data source=localhost; Database=BimlDemo_SSIS_Performance; Integrated Security=SSPI") 6 | Dim cmd = new SqlCommand("exec [dbo].[SP_SetContainers] " & maxcontainers, conn) 7 | conn.Open() 8 | cmd.ExecuteNonQuery() 9 | conn.Close() #> 10 | 11 | 12 | 13 | 14 | <# for Container = 1 to MaxContainers 15 | if BF.GetDT(RootNode.Connections("Target"),"select * from meta.tables where active = 1 and container = " & container).Rows.Count > 0 then #> 16 | 17 | <# for each row in BF.GetDT(RootNode.Connections("Target"),"select * from meta.tables where active = 1 and container = " & container).Rows #> 18 | - <#= Row("TableName")#> - <#= Row("Pattern") #>" ConstraintMode="Linear"> 19 | 20 | <#= CallBimlScript("02-07_Dataflow.biml","PROD",row("Tablename"),row("Pattern"),row("Parameters")) #> 21 | 22 | 23 | <# next #> 24 | 25 | <# end if 26 | next #> 27 | 28 | 29 | 30 | -------------------------------------------------------------------------------- /SQLBits_2019/SSIS_Performance/SSIS_Performance/Project.params: -------------------------------------------------------------------------------- 1 |  2 | -------------------------------------------------------------------------------- /SQLBits_2019/SSIS_Performance/SSIS_Performance/SSIS_Performance.database: -------------------------------------------------------------------------------- 1 |  2 | SSIS_Performance 3 | SSIS_Performance 4 | 
0001-01-01T00:00:00Z 5 | 0001-01-01T00:00:00Z 6 | 0001-01-01T00:00:00Z 7 | Unprocessed 8 | 0001-01-01T00:00:00Z 9 | 10 | Default 11 | Unchanged 12 | 13 | -------------------------------------------------------------------------------- /SQLBits_2019/SSIS_Performance/SSIS_Performance/SSIS_Performance.dtproj.user: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Development 6 | 7 | 8 | 9 | false 10 | true 11 | 12 | 13 | LastModifiedTime 14 | LastModifiedTime 15 | 2017-08-25T10:35:44.141895Z 16 | 17 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /SQLBits_2019/SSIS_Performance/SSIS_Performance/bin/Development/SSIS_Performance.ispac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bweissman/code/290f84e1f75bf92831cd845ce5bcb750fd3857eb/SQLBits_2019/SSIS_Performance/SSIS_Performance/bin/Development/SSIS_Performance.ispac -------------------------------------------------------------------------------- /SQLBits_2019/SSIS_Performance/SSIS_Performance/obj/Development/BuildLog.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | SSIS_Performance 5 | 2019-03-01T12:25:23.7187378Z 6 | EncryptSensitiveWithUserKey 7 | 8 | 9 | 10 | 00_Warmup.dtsx 11 | 2019-03-01T10:33:37.1268414Z 12 | EncryptSensitiveWithUserKey 13 | 14 | 15 | 01-01_Create.dtsx 16 | 2019-03-01T12:05:36.1272663Z 17 | EncryptSensitiveWithUserKey 18 | 19 | 20 | 01-02_Load_Linear.dtsx 21 | 2019-03-01T12:05:36.1502661Z 22 | EncryptSensitiveWithUserKey 23 | 24 | 25 | 01-03_Load_Parallel.dtsx 26 | 2019-03-01T12:05:36.1702319Z 27 | EncryptSensitiveWithUserKey 28 | 29 | 30 | 02-01_CreateStaging.dtsx 31 | 2019-03-01T12:25:18.9841385Z 32 | EncryptSensitiveWithUserKey 33 | 34 | 35 | 02-02_Evaluate.dtsx 36 | 2019-03-01T12:25:23.675723Z 37 | EncryptSensitiveWithUserKey 38 | 39 | 40 | 02-03_Load.dtsx 41 | 2019-03-01T12:18:15.2734917Z 42 | EncryptSensitiveWithUserKey 43 | 44 | 45 | 03-01_Load_Containers.dtsx 46 | 2019-03-01T12:24:21.6322666Z 47 | EncryptSensitiveWithUserKey 48 | 49 | 50 | -------------------------------------------------------------------------------- /SQLBits_2019/SSIS_Performance/SSIS_Performance/obj/Development/Project.params: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /SQLSatRheinlandPrecon/Slides.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bweissman/code/290f84e1f75bf92831cd845ce5bcb750fd3857eb/SQLSatRheinlandPrecon/Slides.pdf -------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/.vs/SSIS_Live_Master/v15/.suo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bweissman/code/290f84e1f75bf92831cd845ce5bcb750fd3857eb/SSIS and ADF with Biml/SSIS_Live_Master/.vs/SSIS_Live_Master/v15/.suo -------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master.sln: -------------------------------------------------------------------------------- 1 |  2 | Microsoft Visual Studio Solution File, Format Version 12.00 3 | # Visual Studio 15 4 | VisualStudioVersion = 15.0.27130.2036 5 | MinimumVisualStudioVersion = 10.0.40219.1 6 | 
Project("{159641D6-6404-4A2A-AE62-294DE0FE8301}") = "SSIS_Live_Master", "SSIS_Live_Master\SSIS_Live_Master.dtproj", "{217DEFEE-F1BE-4C2E-9368-8D91BFF990FE}" 7 | EndProject 8 | Global 9 | GlobalSection(SolutionConfigurationPlatforms) = preSolution 10 | Development|Default = Development|Default 11 | EndGlobalSection 12 | GlobalSection(ProjectConfigurationPlatforms) = postSolution 13 | {217DEFEE-F1BE-4C2E-9368-8D91BFF990FE}.Development|Default.ActiveCfg = Development 14 | {217DEFEE-F1BE-4C2E-9368-8D91BFF990FE}.Development|Default.Build.0 = Development 15 | EndGlobalSection 16 | GlobalSection(SolutionProperties) = preSolution 17 | HideSolutionNode = FALSE 18 | EndGlobalSection 19 | GlobalSection(ExtensibilityGlobals) = postSolution 20 | SolutionGuid = {1CF29AD9-3D79-488B-B8A9-EBEE3B4B69AA} 21 | EndGlobalSection 22 | EndGlobal 23 | -------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master/A-01-Environment.biml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | -------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master/A-02-TableMeta.biml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
9 |
10 |
-------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master/A-03-Create_Staging.biml: -------------------------------------------------------------------------------- 1 | <#@ template tier="20" language="VB" optionexplicit="False" #> 2 | 3 | 4 | 5 | 6 | <# for each tbl in RootNode.Tables #> 7 | 8 | 9 | <#=tbl.GetDropAndCreateDdl()#> 10 | 11 | 12 | <# next #> 13 | 14 | 15 | 16 | -------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master/A-03-Create_Staging_CS.biml: -------------------------------------------------------------------------------- 1 | <#@ template tier="20" #> 2 | 3 | 4 | 5 | 6 | <# foreach (var tbl in RootNode.Tables) { #> 7 | 8 | 9 | <#=tbl.GetDropAndCreateDdl()#> 10 | 11 | <# } #> 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master/B-01-Environment.biml: -------------------------------------------------------------------------------- 1 | <#@ include file="A-01-Environment.biml" #> -------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master/B-02-TableMeta.biml: -------------------------------------------------------------------------------- 1 | <#@ template tier="10" language="VB" optionexplicit="False" #> 2 | 3 | 4 | <# dim AW = RootNode.Connections("Source") 5 | dim importResult as Importresults = AW.GetDatabaseSchema(nothing,nothing,ImportOptions.ExcludeForeignKey or ImportOptions.ExcludeIdentity) 6 | for each tbl in importResult.TableNodes #> 7 | 8 | 9 | <#= tbl.columns.getbiml #> 10 | 11 |
12 | <# next #> 13 |
14 |
-------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master/B-03-Create_Staging.biml: -------------------------------------------------------------------------------- 1 | <#@ include file="A-03-Create_Staging.biml" #> -------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master/C-01-Environment.biml: -------------------------------------------------------------------------------- 1 | <#@ include file="A-01-Environment.biml" #> -------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master/C-02-TableMeta.biml: -------------------------------------------------------------------------------- 1 | <#@ template tier="10" language="VB" optionexplicit="False" #> 2 | 3 | 4 | <# dim AW = RootNode.Connections("Source") 5 | Dim Tables as new List(of String) 6 | Tables.add ("PersonPhone") 7 | dim importResult as Importresults = AW.GetDatabaseSchema(nothing,Tables,ImportOptions.ExcludeForeignKey or ImportOptions.ExcludeIdentity) 8 | for each tbl in importResult.TableNodes #> 9 | 10 | 11 | <#= tbl.columns.getbiml #> 12 | 13 | 14 | 15 | <#=tbl.SchemaQualifiedName #> 16 | 17 | 18 |
19 | <# next #> 20 |
21 |
-------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master/C-03-Create_Staging.biml: -------------------------------------------------------------------------------- 1 | <#@ include file="A-03-Create_Staging.biml" #> -------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master/C-04-Populate_Staging.biml: -------------------------------------------------------------------------------- 1 | <#@ template tier="30" language="VB" optionexplicit="False" #> 2 | 3 | 4 | 5 | 6 | <# for each tbl in RootNode.Tables #> 7 | 8 | 9 | 10 | truncate table <#=tbl.ScopedName#> 11 | 12 | 13 | 14 | 15 | SELECT * FROM <#=tbl.GetTag("SchemaQualifiedName")#> 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | <# next #> 25 | 26 | 27 | 28 | -------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master/D-01-Environment.biml: -------------------------------------------------------------------------------- 1 | <#@ include file="A-01-Environment.biml" #> -------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master/D-02-TableMeta.biml: -------------------------------------------------------------------------------- 1 | <#@ template tier="10" language="VB" optionexplicit="False" #> 2 | 3 | 4 | <# Dim Tables as new List(of String) 5 | MyMetadata = ExternalDataAccess.GetDataTable(RootNode.Connections("Target").RenderedConnectionString, "Select * from [MyBimlMeta_Tables]") 6 | For Each dr In MyMetadata.rows 7 | Tables.add (dr("TableName")) 8 | Next 9 | for each tbl in RootNode.Connections("Source").GetDatabaseSchema(nothing,Tables,ImportOptions.ExcludeForeignKey or ImportOptions.ExcludeIdentity).TableNodes #> 10 | 11 | 12 | <#= tbl.columns.getbiml #> 13 | 14 | 15 | 16 | <#=tbl.SchemaQualifiedName #> 17 | 18 | 19 |
20 | <# next #> 21 |
22 |
-------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master/D-03-Create_Staging.biml: -------------------------------------------------------------------------------- 1 | <#@ include file="A-03-Create_Staging.biml" #> -------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master/D-04-Populate_Staging.biml: -------------------------------------------------------------------------------- 1 | <#@ include file="C-04-Populate_Staging.biml" #> -------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master/E-01-Environment.biml: -------------------------------------------------------------------------------- 1 | <#@ include file="A-01-Environment.biml" #> -------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master/E-02-TableMeta.biml: -------------------------------------------------------------------------------- 1 | <#@ include file="D-02-TableMeta.biml" #> -------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master/E-03a-Adf-Preview.biml: -------------------------------------------------------------------------------- 1 | <#@ template tier="20" #> 2 | <# foreach (var table in RootNode.Tables) { #> 3 | 4 | <#=CallBimlScript("E-04a-OnPrem_DataSets.biml", table) #> 5 | 6 | <#=CallBimlScript("E-04b-Blob_DataSets.biml", table) #> 7 | 8 | <#=CallBimlScript("E-04c-SqlAzure_DataSets.biml", table) #> 9 | 10 | 11 | <# } #> 12 | 13 | <#= CallBimlScript("E-05-Pipeline.biml", RootNode.Tables) #> 14 | 15 | 16 | -------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master/E-03b-Adf-Writer.biml: -------------------------------------------------------------------------------- 1 | <#@ template tier="20" #> 2 | <# if (!IsBackgroundCompilation) { 3 | // Not included: Gateways, Linked Services etc. 
4 | #> 5 | 6 | <# foreach (var table in RootNode.Tables) { #> 7 | 8 | <# System.IO.File.WriteAllText(@"C:\Temp\BimlInTheCloud\ADF\OnPrem_" + table.Schema.Name + "_" + table.Name + ".json", CallBimlScript("E-04a-OnPrem_DataSets.biml", table)); #> 9 | <# System.IO.File.WriteAllText(@"C:\Temp\BimlInTheCloud\ADF\Blob_" + table.Schema.Name + "_" + table.Name + ".json", CallBimlScript("E-04b-Blob_DataSets.biml", table)); #> 10 | <# System.IO.File.WriteAllText(@"C:\Temp\BimlInTheCloud\ADF\SqlAzure_" + table.Schema.Name + "_" + table.Name + ".json", CallBimlScript("E-04c-SqlAzure_DataSets.biml", table)); #> 11 | 12 | <# } #> 13 | 14 | <# System.IO.File.WriteAllText(@"C:\Temp\BimlInTheCloud\ADF\_Pipeline.json", CallBimlScript("E-05-Pipeline.biml", RootNode.Tables)); #> 15 | <# } #> 16 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master/E-04a-OnPrem_DataSets.biml: -------------------------------------------------------------------------------- 1 | <#@ property name="table" type="AstTableNode" #> 2 | { 3 | "name": "OnPremisesSqlServer_<#=table.Schema.Name#>_<#=table.Name#>_Dataset", 4 | "properties": { 5 | "type": "SqlServerTable", 6 | "linkedServiceName": "OnPremisesSqlServer_LinkedService", 7 | "structure": [ 8 | <# var isFirst = true; foreach (var column in table.Columns) { #> 9 | <#=isFirst ? "" : ","#>{ "<#=column.Name#>", "<#=column.DataType#>" } 10 | <# isFirst = false; } #> 11 | ], 12 | "typeProperties": { 13 | "tableName": "<#=table.SchemaQualifiedName#>" 14 | }, 15 | "availability": { 16 | "frequency": "Day", 17 | "interval": "1" 18 | } 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master/E-04b-Blob_DataSets.biml: -------------------------------------------------------------------------------- 1 | <#@ property name="table" type="AstTableNode" #> 2 | { 3 | "name": "Blob_<#=table.Schema.Name#>_<#=table.Name#>_Dataset", 4 | "properties": 5 | { 6 | "location": 7 | { 8 | "type": "AzureBlobLocation", 9 | "folderPath": "containername", 10 | "format": 11 | { 12 | "type": "TextFormat", 13 | "columnDelimiter": "\t" 14 | }, 15 | "linkedServiceName": "Blob_LinkedService" 16 | }, 17 | "availability": 18 | { 19 | "frequency": "Day", 20 | "interval": 1 21 | } 22 | } 23 | } -------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master/E-04c-SqlAzure_DataSets.biml: -------------------------------------------------------------------------------- 1 | <#@ property name="table" type="AstTableNode" #> 2 | { 3 | "name": "SqlAzure_<#=table.Schema.Name#>_<#=table.Name#>_Dataset", 4 | "properties": { 5 | "structure": [ 6 | <# var isFirst = true; foreach (var column in table.Columns) { #> 7 | <#=isFirst ? 
"" : ","#>{ "<#=column.Name#>", "<#=column.DataType#>" } 8 | <# isFirst = false; } #> 9 | ], 10 | "location": 11 | { 12 | "type": "AzureSqlTableLocation", 13 | "tableName": "<#=table.SchemaQualifiedName#>", 14 | "linkedServiceName": "SqlAzure_LinkedService" 15 | }, 16 | "availability": { 17 | "frequency": "Day", 18 | "interval": "1" 19 | } 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master/E-05-Pipeline.biml: -------------------------------------------------------------------------------- 1 | <#@ property name="tables" type="IEnumerable" #> 2 | { 3 | "name": "CopyDataToTheCloud", 4 | "properties": { 5 | "description": "CopyDataToTheCloud", 6 | "activities": [ 7 | <# var isFirst = true; foreach (var table in tables) { #> 8 | <#=isFirst ? "" : ","#> 9 | { 10 | "name": "CopyFromSQLtoBlob_<#=table.Schema.Name#>_<#=table.Name#>", 11 | "description": "Copy data from on-premise SQL server to blob", 12 | "type": "CopyActivity", 13 | "inputs": [ {"name": "OnPremisesSqlServer_<#=table.Schema.Name#>_<#=table.Name#>_Dataset"} ], 14 | "outputs": [ {"name": "Blob_<#=table.Schema.Name#>_<#=table.Name#>_Dataset"} ], 15 | "transformation": 16 | { 17 | "source": 18 | { 19 | "type": "SqlSource", 20 | "sqlReaderQuery": "select * from <#=table.SchemaQualifiedName#>" 21 | }, 22 | "sink": 23 | { 24 | "type": "BlobSink" 25 | } 26 | }, 27 | "Policy": 28 | { 29 | "concurrency": 3, 30 | "executionPriorityOrder": "NewestFirst", 31 | "style": "StartOfInterval", 32 | "retry": 0, 33 | "timeout": "01:00:00" 34 | } 35 | 36 | }, 37 | 38 | { 39 | "name": "CopyFromBlobtoSQLAzure", 40 | "description": "Push data to Sql Azure", 41 | "type": "CopyActivity", 42 | "inputs": [ {"name": "Blob_<#=table.Schema.Name#>_<#=table.Name#>_Dataset"} ], 43 | "outputs": [ {"name": "SqlAzure_<#=table.Schema.Name#>_<#=table.Name#>_Dataset"} ], 44 | "transformation": 45 | { 46 | "source": 47 | { 48 | "type": "BlobSource" 49 | }, 50 | "sink": 51 | { 52 | "type": "SqlSink", 53 | "WriteBatchTimeout": "00:5:00", 54 | } 55 | }, 56 | "Policy": 57 | { 58 | "concurrency": 3, 59 | "executionPriorityOrder": "NewestFirst", 60 | "style": "StartOfInterval", 61 | "retry": 2, 62 | "timeout": "02:00:00" 63 | } 64 | } 65 | <# isFirst = false; } #> 66 | 67 | 68 | ], 69 | "start": "2014-05-01T00:00:00Z", 70 | "end": "2014-05-05T00:00:00Z" 71 | } 72 | } -------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master/Project.params: -------------------------------------------------------------------------------- 1 |  2 | -------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master/SSIS_Live_Master.database: -------------------------------------------------------------------------------- 1 |  2 | SSIS_Live_Master 3 | SSIS_Live_Master 4 | 0001-01-01T00:00:00Z 5 | 0001-01-01T00:00:00Z 6 | 0001-01-01T00:00:00Z 7 | Unprocessed 8 | 0001-01-01T00:00:00Z 9 | 10 | Default 11 | Unchanged 12 | 13 | -------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master/SSIS_Live_Master.dtproj: -------------------------------------------------------------------------------- 1 |  2 | 3 | Project 4 | 14.0.3002.107 5 | 9.0.1.0 6 | 
$base64$PFNvdXJjZUNvbnRyb2xJbmZvIHhtbG5zOnhzZD0iaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWEiIHhtbG5zOnhzaT0iaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWEtaW5zdGFuY2UiIHhtbG5zOmRkbDI9Imh0dHA6Ly9zY2hlbWFzLm1pY3Jvc29mdC5jb20vYW5hbHlzaXNzZXJ2aWNlcy8yMDAzL2VuZ2luZS8yIiB4bWxuczpkZGwyXzI9Imh0dHA6Ly9zY2hlbWFzLm1pY3Jvc29mdC5jb20vYW5hbHlzaXNzZXJ2aWNlcy8yMDAzL2VuZ2luZS8yLzIiIHhtbG5zOmRkbDEwMF8xMDA9Imh0dHA6Ly9zY2hlbWFzLm1pY3Jvc29mdC5jb20vYW5hbHlzaXNzZXJ2aWNlcy8yMDA4L2VuZ2luZS8xMDAvMTAwIiB4bWxuczpkZGwyMDA9Imh0dHA6Ly9zY2hlbWFzLm1pY3Jvc29mdC5jb20vYW5hbHlzaXNzZXJ2aWNlcy8yMDEwL2VuZ2luZS8yMDAiIHhtbG5zOmRkbDIwMF8yMDA9Imh0dHA6Ly9zY2hlbWFzLm1pY3Jvc29mdC5jb20vYW5hbHlzaXNzZXJ2aWNlcy8yMDEwL2VuZ2luZS8yMDAvMjAwIiB4bWxuczpkZGwzMDA9Imh0dHA6Ly9zY2hlbWFzLm1pY3Jvc29mdC5jb20vYW5hbHlzaXNzZXJ2aWNlcy8yMDExL2VuZ2luZS8zMDAiIHhtbG5zOmRkbDMwMF8zMDA9Imh0dHA6Ly9zY2hlbWFzLm1pY3Jvc29mdC5jb20vYW5hbHlzaXNzZXJ2aWNlcy8yMDExL2VuZ2luZS8zMDAvMzAwIiB4bWxuczpkZGw0MDA9Imh0dHA6Ly9zY2hlbWFzLm1pY3Jvc29mdC5jb20vYW5hbHlzaXNzZXJ2aWNlcy8yMDEyL2VuZ2luZS80MDAiIHhtbG5zOmRkbDQwMF80MDA9Imh0dHA6Ly9zY2hlbWFzLm1pY3Jvc29mdC5jb20vYW5hbHlzaXNzZXJ2aWNlcy8yMDEyL2VuZ2luZS80MDAvNDAwIiB4bWxuczpkZGw1MDA9Imh0dHA6Ly9zY2hlbWFzLm1pY3Jvc29mdC5jb20vYW5hbHlzaXNzZXJ2aWNlcy8yMDEzL2VuZ2luZS81MDAiIHhtbG5zOmRkbDUwMF81MDA9Imh0dHA6Ly9zY2hlbWFzLm1pY3Jvc29mdC5jb20vYW5hbHlzaXNzZXJ2aWNlcy8yMDEzL2VuZ2luZS81MDAvNTAwIiB4bWxuczpkd2Q9Imh0dHA6Ly9zY2hlbWFzLm1pY3Jvc29mdC5jb20vRGF0YVdhcmVob3VzZS9EZXNpZ25lci8xLjAiPg0KICA8RW5hYmxlZD5mYWxzZTwvRW5hYmxlZD4NCiAgPFByb2plY3ROYW1lPjwvUHJvamVjdE5hbWU+DQogIDxBdXhQYXRoPjwvQXV4UGF0aD4NCiAgPExvY2FsUGF0aD48L0xvY2FsUGF0aD4NCiAgPFByb3ZpZGVyPjwvUHJvdmlkZXI+DQo8L1NvdXJjZUNvbnRyb2xJbmZvPg== 7 | 8 | SSIS_Live_Master.database 9 | SSIS_Live_Master.database 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | {4e296f92-773e-4e1f-9df8-9f3c5c469ca4} 18 | SSIS_Live_Master 19 | 0 20 | 0 21 | 0 22 | 23 | 24 | 2018-06-05T15:11:23.7593458+02:00 25 | SOLISYON\b.weissman 26 | NB-BWDEMO 27 | 28 | 29 | AQAAANCMnd8BFdERjHoAwE/Cl+sBAAAAS8+BSQans0aYTUY++AF3QgAAAAACAAAAAAAQZgAAAAEAACAAAADh1j6Hda2x7YTPnig3329wL//jLdpgoxRFqUOsa3brdQAAAAAOgAAAAAIAACAAAAC4aMQ2rMc8R32TiSHR+UPEac2/xJNyo72hJ2O6cc9kvpAAAAA2EeQOJJMTNG+FnZEoTdvHiHD4uPzkTHUKCPx3dU4w4U44rv/fA6jvMvO1nnoIlxqB4PeX5uE2PpvyzHu3YDVsTz9ZMnwtBjI1V6P6dgJCOQb8yeu0fsLZ5ZL4XSR0Z++OfEHQbg64cfY4kIZPRboYSpUXwpqfPAdD7/JlOlRy7Ou5Pl0b0XOdaD99TEPhRPxAAAAAryriMNnpmGM3bPoUOjRpHSEzGs/ldlfCuPo8ThSf4V3K8hI8UoY9/1cyfPOIGwJ/es2Edf5WsQRGph1VdfyjQw== 30 | 1 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | A-01-Environment.biml 45 | A-01-Environment.biml 46 | 47 | 48 | A-02-TableMeta.biml 49 | A-02-TableMeta.biml 50 | 51 | 52 | A-03-Create_Staging.biml 53 | A-03-Create_Staging.biml 54 | 55 | 56 | A-03-Create_Staging_CS.biml 57 | A-03-Create_Staging_CS.biml 58 | 59 | 60 | B-01-Environment.biml 61 | B-01-Environment.biml 62 | 63 | 64 | B-02-TableMeta.biml 65 | B-02-TableMeta.biml 66 | 67 | 68 | B-03-Create_Staging.biml 69 | B-03-Create_Staging.biml 70 | 71 | 72 | C-01-Environment.biml 73 | C-01-Environment.biml 74 | 75 | 76 | C-02-TableMeta.biml 77 | C-02-TableMeta.biml 78 | 79 | 80 | C-03-Create_Staging.biml 81 | C-03-Create_Staging.biml 82 | 83 | 84 | C-04-Populate_Staging.biml 85 | C-04-Populate_Staging.biml 86 | 87 | 88 | D-01-Environment.biml 89 | D-01-Environment.biml 90 | 91 | 92 | D-02-TableMeta.biml 93 | D-02-TableMeta.biml 94 | 95 | 96 | D-03-Create_Staging.biml 97 | D-03-Create_Staging.biml 98 | 99 | 100 | D-04-Populate_Staging.biml 101 | D-04-Populate_Staging.biml 102 | 103 | 104 | E-01-Environment.biml 105 | E-01-Environment.biml 106 | 
107 | 108 | E-02-TableMeta.biml 109 | E-02-TableMeta.biml 110 | 111 | 112 | E-03a-Adf-Preview.biml 113 | E-03a-Adf-Preview.biml 114 | 115 | 116 | E-03b-Adf-Writer.biml 117 | E-03b-Adf-Writer.biml 118 | 119 | 120 | E-04a-OnPrem_DataSets.biml 121 | E-04a-OnPrem_DataSets.biml 122 | 123 | 124 | E-04b-Blob_DataSets.biml 125 | E-04b-Blob_DataSets.biml 126 | 127 | 128 | E-04c-SqlAzure_DataSets.biml 129 | E-04c-SqlAzure_DataSets.biml 130 | 131 | 132 | E-05-Pipeline.biml 133 | E-05-Pipeline.biml 134 | 135 | 136 | 137 | 138 | Development 139 | 140 | bin 141 | 142 | 143 | 144 | 145 | SQLServer2017 146 | 147 | 148 | LastModifiedTime 149 | LastModifiedTime 150 | 2018-06-05T13:20:38.4304344Z 151 | 152 | 153 | 154 | 155 | 156 | -------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master/SSIS_Live_Master.dtproj.user: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Development 6 | 7 | 8 | 9 | false 10 | true 11 | 12 | 13 | LastModifiedTime 14 | LastModifiedTime 15 | 2018-06-05T13:20:38.4304344Z 16 | 17 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master/bin/Development/SSIS_Live_Master.ispac: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bweissman/code/290f84e1f75bf92831cd845ce5bcb750fd3857eb/SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master/bin/Development/SSIS_Live_Master.ispac -------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master/obj/Development/BuildLog.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | SSIS_Live_Master 5 | 2018-08-09T11:53:14.6042154Z 6 | EncryptSensitiveWithUserKey 7 | 8 | 9 | 10 | 01_CreateStaging.dtsx 11 | 2018-08-09T11:53:00.8646346Z 12 | EncryptSensitiveWithUserKey 13 | 14 | 15 | -------------------------------------------------------------------------------- /SSIS and ADF with Biml/SSIS_Live_Master/SSIS_Live_Master/obj/Development/Project.params: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /Synapse/SQLBits 2023 - Azure Synapse Link.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bweissman/code/290f84e1f75bf92831cd845ce5bcb750fd3857eb/Synapse/SQLBits 2023 - Azure Synapse Link.pdf -------------------------------------------------------------------------------- /Synapse/Synapse - SQL Bits 2023.ps1: -------------------------------------------------------------------------------- 1 | # Set all the variables :) 2 | $Subscription="" 3 | $Region = "eastus" 4 | $RG = "" 5 | $SynapseName = "" 6 | $StorageAccountName = "" 7 | $ContainerName = "" 8 | $username = "sqladmin" 9 | $password = "SuperSecretP@ssw0rd!" 
10 | $poolname = "linkpool" 11 | $poolsize = "DW100c" 12 | $WinUser="Administrator" 13 | cmdkey /generic:Winserver.bwdemo.io /user:$WinUser /pass:$password 14 | 15 | # Set correct Subscription 16 | az account set -s $Subscription 17 | 18 | # Deploy all the things (takes ~ 10 minutes) 19 | # create an RG 20 | az group create -n $RG -l $Region 21 | 22 | # create a storage account 23 | az storage account create --name $StorageAccountName ` 24 | --resource-group $RG --location $Region ` 25 | --sku Standard_LRS --kind StorageV2 26 | 27 | # Add a container to our storage account 28 | az storage container create -n $ContainerName --account-name $StorageAccountName 29 | 30 | # Create a Synapse Workspace 31 | az synapse workspace create --file-system $ContainerName ` 32 | --name $SynapseName ` 33 | --resource-group $RG ` 34 | --sql-admin-login-password $password ` 35 | --sql-admin-login-user $username ` 36 | --storage-account $SynapseName 37 | 38 | # Add the dedicated SQL Pool 39 | az synapse sql pool create --name $poolname ` 40 | --performance-level $poolsize ` 41 | --resource-group $RG ` 42 | --workspace-name $SynapseName 43 | 44 | # Create a firewall rule (maybe make this a bit more restrictive :)) 45 | az synapse workspace firewall-rule create --name allowAll --workspace-name $SynapseName ` 46 | --resource-group $RG --start-ip-address 0.0.0.0 --end-ip-address 255.255.255.255 47 | 48 | # Pause the pool (if you don't want to use it immediately) 49 | az synapse sql pool pause --name $poolname --workspace-name $SynapseName --resource-group $RG 50 | 51 | # Resume the pool 52 | az synapse sql pool resume --name $poolname --workspace-name $SynapseName --resource-group $RG 53 | 54 | # Let us also grab the new sqlcmd 55 | # http://aka.ms/sqlcmd 56 | $URL=(((Invoke-WebRequest https://api.github.com/repos/microsoft/go-sqlcmd/releases/latest).Content | ConvertFrom-Json).assets ` 57 | | Where-Object {$_.content_type -eq 'application/zip'} |Where-Object { $_.name -like '*windows-x64*'}).browser_download_url 58 | $URL 59 | curl.exe -o sqlcmd.zip $URL -L 60 | Expand-Archive .\sqlcmd.zip -Force 61 | 62 | # Which has configs (and a modern interface) 63 | .\sqlcmd\sqlcmd config view 64 | 65 | # Make sure your source DB has a valid owner and Master key! 66 | .\sqlcmd\sqlcmd query "ALTER AUTHORIZATION ON DATABASE:: AdventureWorks2019 TO [demo]" --database master 67 | .\sqlcmd\sqlcmd query "CREATE MASTER KEY ENCRYPTION BY PASSWORD ='R@M@l@m@D!nD0ng'" --database AdventureWorks2019 68 | 69 | # we can also create a new config for synapse 70 | $env:sqlcmdpassword = $password 71 | .\sqlcmd\sqlcmd config add-user --username $username --name synapse --password-encryption none 72 | .\sqlcmd\sqlcmd config add-endpoint --address (($SynapseName) + ".sql.azuresynapse.net") --name synapse 73 | .\sqlcmd\sqlcmd config add-context --endpoint synapse --user synapse --name synapse 74 | 75 | # We have a second context 76 | .\sqlcmd\sqlcmd config get-contexts 77 | 78 | # The new one is our current context 79 | .\sqlcmd\sqlcmd query "SELECT @@ServerName" 80 | 81 | # While we're at it... Create a Master Key on our synapse pool 82 | .\sqlcmd\sqlcmd query "CREATE MASTER KEY" --database $poolname 83 | 84 | # And we can even simply add it to ADS 85 | .\sqlcmd\sqlcmd open ads 86 | 87 | # Jump between contexts 88 | .\sqlcmd\sqlcmd config use-context Winserver 89 | .\sqlcmd\sqlcmd query "SELECT @@ServerName" 90 | 91 | # If lost, get help... 92 | .\sqlcmd\sqlcmd --help 93 | .\sqlcmd\sqlcmd config --help 94 | 95 | # We still need an... 
96 | # - Linked Service to Storage 97 | # - Integration Runtime 98 | # - Linked Service to SQL 99 | # - Synapse Link 100 | 101 | # And prepare the rest in the Portal... 102 | Start-Process ("https://portal.azure.com/#@" + (az account show --query tenantId -o tsv) + "/resource" + (az group show -n $RG --query id -o tsv)) 103 | 104 | # We need to configure the runtime in our Windows Server 105 | # https://www.microsoft.com/en-us/download/details.aspx?id=39717 106 | mstsc /v:Winserver.bwdemo.io /w:1280 /h:720 107 | 108 | .\sqlcmd\sqlcmd config use-context synapse 109 | .\sqlcmd\sqlcmd query "SELECT NAME FROM sys.tables" --database $poolname 110 | 111 | # When done, tidy up... 112 | az group delete -n $RG --yes 113 | .\sqlcmd\sqlcmd config delete-context synapse -------------------------------------------------------------------------------- /VSCode/keybindings.json: -------------------------------------------------------------------------------- 1 | // Place your key bindings in this file to override the defaultsauto[] 2 | [ 3 | { 4 | "key": "shift+f8", 5 | "command": "-editor.action.marker.prevInFiles", 6 | "when": "editorFocus" 7 | }, 8 | { 9 | "key": "shift+f8", 10 | "command": "workbench.action.terminal.runSelectedText" 11 | }, 12 | { 13 | "key": "ctrl+oem_102", 14 | "command": "workbench.action.terminal.focus" 15 | }, 16 | { 17 | "key": "ctrl+oem_102", 18 | "command": "workbench.action.focusActiveEditorGroup", 19 | "when": "terminalFocus" 20 | } 21 | ] -------------------------------------------------------------------------------- /VSCode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "workbench.colorTheme": "Default Light+", 3 | "security.workspace.trust.untrustedFiles": "open", 4 | "editor.minimap.enabled": false, 5 | "security.workspace.trust.enabled": false 6 | } -------------------------------------------------------------------------------- /misc/solisyon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bweissman/code/290f84e1f75bf92831cd845ce5bcb750fd3857eb/misc/solisyon.png --------------------------------------------------------------------------------