├── ORACLE ├── collectionInput.lst ├── collectionInput.lst-EXAMPLE ├── rbkDiscovery.csv ├── README.txt ├── dataCollector.sh ├── rbkDataCollection_11g.sql ├── rbkDataCollection_121.sql ├── rbkDataCollection_12c.sql └── rbkDataCollection.sql ├── SAAS ├── README.md └── README ├── CLOUD ├── .gitignore ├── EXAMPLES │ ├── gce_vmdisk_info-example.csv │ ├── azure_file_share_info-example.csv │ ├── azure_backup_vault_info-example.csv │ ├── aws_eks_nodegroups_info-example.csv │ ├── aws_DynamoDB_info-example.csv │ ├── azure_sql_info-example.csv │ ├── aws_secrets_numbers-example.csv │ ├── aws_sqs_numbers-example.csv │ ├── aws_kms_numbers-example.csv │ ├── aws_rds_info-example.csv │ ├── azure_vmdisk_info-example.csv │ ├── aws_eks_clusters_info-example.csv │ ├── aws_efs_info-example.csv │ ├── aws_fsx_filesystem_info-example.csv │ ├── aws_fsx_volume_info-example.csv │ ├── aws_ec2_unattached_volume_info-example.csv │ ├── azure_backup_vault_VM_policies-example.json │ ├── azure_backup_vault_VM_SQL_items-example.csv │ ├── azure_backup_vault_VM_SQL_policies-example.json │ ├── azure_backup_vault_VM_items-example.csv │ ├── aws_backup_costs-example.csv │ └── aws-backup-plans-info-example.json ├── Get-AWSSizingInfo-Permissions.cft ├── consolidate.ps1 └── README.md ├── MSSQL ├── SQLInstances.txt ├── Parse-RubrikSizing.ps1 ├── CollectSQLProfile.ps1 └── RubrikSQLProfile-DBInfo.sql ├── .gitignore ├── README.md ├── M365 └── README.md ├── IDENTITY ├── README.md └── Get-AdHumanIdentity.ps1 └── VMWARE └── Get-VMwareDiskStats.ps1 /ORACLE/collectionInput.lst: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /SAAS/README.md: -------------------------------------------------------------------------------- 1 | # README 2 | -------------------------------------------------------------------------------- /SAAS/README: 
-------------------------------------------------------------------------------- 1 | README IS BLANK 2 | -------------------------------------------------------------------------------- /CLOUD/.gitignore: -------------------------------------------------------------------------------- 1 | *.log 2 | *.zip 3 | *.txt -------------------------------------------------------------------------------- /MSSQL/SQLInstances.txt: -------------------------------------------------------------------------------- 1 | rp-sql19ags-1a.perf.rubrik.com 2 | rp-sql12s-001.perf.rubrik.com -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | settings.json 2 | **/.DS_Store 3 | *.csv 4 | !/CLOUD/EXAMPLES/* 5 | CLOUD/*.json -------------------------------------------------------------------------------- /ORACLE/collectionInput.lst-EXAMPLE: -------------------------------------------------------------------------------- 1 | am1-shawmcel-l3 1521 CDBSRC1 121 2 | am1-shawmcel-l3 1521 CDBSRC2 112 3 | am1-shawmcel-l3 1521 CDBSRC3 193 4 | am1-shawmcel-l3 1521 CDBSRC4 210 5 | -------------------------------------------------------------------------------- /CLOUD/EXAMPLES/gce_vmdisk_info-example.csv: -------------------------------------------------------------------------------- 1 | VM,NumDisks,SizeGiB,SizeTiB,SizeGB,SizeTB,Project,Status 2 | gcp-native-instance-1,1,10,0.0097656,10.737,0.0107370,se-lab-185822,RUNNING 3 | gcp-native-instance-1-fqts6,1,10,0.0097656,10.737,0.0107370,se-lab-185822,TERMINATED -------------------------------------------------------------------------------- /ORACLE/rbkDiscovery.csv: -------------------------------------------------------------------------------- 1 | 
CON_ID,CONNAME,DBSIZEMB,ALLOCATED_DBSIZEMB,DAILYCHANGERATE,DAILYREDOSIZEMB,DATAFILECOUNT,TABLESPACECOUNT,ENCRYPTEDTABLESPACECOUNT,ENCRYPTEDDATASIZEMB,BIGGESTBIGFILEMB,BIGFILETABLESPACECOUNT,BIGFILEDATASIZEMB,BLOCKSIZE,HOSTNAME,INSTNAME,DBVERSION,DBEDITION,PLATFORMNAME,DBNAME,DBUNIQUENAME,DBID,FLASHBACKENABLED,ARCHIVELOGENABLED,SPFILE,PATCHLEVEL,CPUCOUNT,RACENABLED,SGAMAXSIZE,SGATARGET,PGAAGGREGATETARGET,PHYSMEMORY,DNFSENABLED,GOLDENGATE,EXADATAENABLED,BCTENABLED,LOGARCHIVECONFIG,ARCHIVELAGTARGET,LOGFILECOUNT,TEMPFILECOUNT -------------------------------------------------------------------------------- /CLOUD/EXAMPLES/azure_file_share_info-example.csv: -------------------------------------------------------------------------------- 1 | "Name","StorageAccount","Tenant","Subscription","Region","ResourceGroup","QuotaGiB","UsedCapacityBytes","UsedCapacityGiB","UsedCapacityGB" 2 | "azure-file-share-1","storage-account-1","tenant-1","subscription-1","eastus2","resoruce-group-1","5120","0","0","0" 3 | "azure-file-share-2","storage-account-2","tenant-1","subscription-1","westus","cloud-shell-storage-westus","6","5368709120","5","5.369" 4 | "azure-file-share-3","storage-account-3","tenant-1","subscription-2","westus","cloud-shell-storage-westus","6","5368709120","5","5.369" -------------------------------------------------------------------------------- /CLOUD/EXAMPLES/azure_backup_vault_info-example.csv: -------------------------------------------------------------------------------- 1 | "Name","Type","Tenant","Subscription","Region","ResourceGroup","ProvisioningState","CrossSubscriptionRestoreState","ImmutabilitySettings" 2 | "Vault-1","Microsoft.RecoveryServices/vaults","Tenant-1","subscription-1","eastus","Site-recovery-vault-RG","Succeeded","Enabled", 3 | "Vault-2","Microsoft.RecoveryServices/vaults","Tenant-1","subscription-1","centralus","Site-recovery-vault-RG","Succeeded","Enabled", 4 | 
"Vault-3","Microsoft.RecoveryServices/vaults","Tenant-2","subscription-2","westus","Site-recovery-vault-RG-2","Succeeded","Enabled", -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Rubrik Sizing Scripts 2 | This repository contains sizing scripts for use by Rubrik Sales Engineers and Rubrik customers to gather data for Rubrik sizing. 3 | 4 | CODE HERE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 5 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 6 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 7 | IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR 8 | OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 9 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 10 | OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- /CLOUD/EXAMPLES/aws_eks_nodegroups_info-example.csv: -------------------------------------------------------------------------------- 1 | "AwsAccountId","AwsAccountAlias","NodegroupName","ClusterName","DiskSize","CapacityType","AmiType","NodegroupArn","NodeRole","Status","ReleaseVersion","Version","Region","Tag:TagName-00004","Tag:TagName-00031","Tag:TagName-00008","Tag:TagName-00032","Tag:TagName-00002","Tag:TagName-00005","Tag:TagName-00034" 2 | "AwsAccountId-00000","AwsAccountAlias-00000","TagValue-00029","TagValue-00027","0","ON_DEMAND","AL2_x86_64","NodegroupArn-00000","NodeRole-00000","ACTIVE","1.30.2-20240817","1.30","us-west-2","TagValue-00087","TagValue-00027","TagValue-00029","TagValue-00027","TagValue-00089","TagValue-00026","TagValue-00076" 3 | -------------------------------------------------------------------------------- /CLOUD/EXAMPLES/aws_DynamoDB_info-example.csv: -------------------------------------------------------------------------------- 1 | 
"AwsAccountId","AwsAccountAlias","Region","TableName","TableId","TableArn","TableSizeBytes","TableStatus","TableSizeGiB","TableSizeTiB","TableSizeGB","TableSizeTB","ItemCount","DeletionProtectionEnabled","GlobalTableVersion","ProvisionedThroughputLastDecreaseDateTime","ProvisionedThroughputLastIncreaseDateTime","ProvisionedThroughput.NumberOfDecreasesToday","ProvisionedThroughputReadCapacityUnits","ProvisionedThroughputWriteCapacityUnits","BackupPlans","InBackupPlan" 2 | "AwsAccountId-00000","AwsAccountAlias-00000","us-west-2","TableName-00000","TableId-00000","TableArn-00000","105","ACTIVE","0","0","0","0","1","False",,"1/1/0001 12:00:00 AM","1/1/0001 12:00:00 AM","0","20","20","","False" 3 | -------------------------------------------------------------------------------- /CLOUD/EXAMPLES/azure_sql_info-example.csv: -------------------------------------------------------------------------------- 1 | "Database","Server","ElasticPool","ManagedInstance","MaxSizeGiB","MaxSizeGB","Subscription","Region","ResourceGroup","DatabaseID","InstanceType","Status" 2 | "","","az-tong-pool","","32","34.36","se_roadrunner_perf_lab","eastus2","rg-lumnah-test","","GP_Gen5", 3 | "az-tong-sql1","az-lumnah-test","","","48","51.54","se_roadrunner_perf_lab","eastus2","rg-lumnah-test","8c49b71e-bef1-4d0d-8862-ab7734bac126","GP_Gen5","Online" 4 | "lumnah-test-db","az-lumnah-test","","","32","34.36","se_roadrunner_perf_lab","eastus2","rg-lumnah-test","c6147218-cd06-4d8b-b9b3-b4df4e312878","GP_S_Gen5","Paused" 5 | "","","","az-tong-managedinstance","64","68.719","se_roadrunner_perf_lab","westus3","rp-stong-cc-westus","","GP_Gen5", 6 | -------------------------------------------------------------------------------- /CLOUD/EXAMPLES/aws_secrets_numbers-example.csv: -------------------------------------------------------------------------------- 1 | "AwsAccountId","AwsAccountAlias","Region","Secrets" 2 | "AwsAccountId-00000","AwsAccountAlias-00000","us-west-2","2" 3 | 
"AwsAccountId-00000","AwsAccountAlias-00000","us-west-1","0" 4 | "AwsAccountId-00000","AwsAccountAlias-00000","us-east-1","0" 5 | "AwsAccountId-00000","AwsAccountAlias-00000","us-east-2","1" 6 | "AwsAccountId-00001","AwsAccountAlias-00001","us-west-2","2" 7 | "AwsAccountId-00001","AwsAccountAlias-00001","us-west-1","0" 8 | "AwsAccountId-00001","AwsAccountAlias-00001","us-east-1","0" 9 | "AwsAccountId-00001","AwsAccountAlias-00001","us-east-2","1" 10 | "AwsAccountId-00002","AwsAccountAlias-00002","us-west-2","2" 11 | "AwsAccountId-00002","AwsAccountAlias-00002","us-west-1","0" 12 | "AwsAccountId-00002","AwsAccountAlias-00002","us-east-1","0" 13 | "AwsAccountId-00002","AwsAccountAlias-00002","us-east-2","1" 14 | -------------------------------------------------------------------------------- /CLOUD/EXAMPLES/aws_sqs_numbers-example.csv: -------------------------------------------------------------------------------- 1 | "AwsAccountId","AwsAccountAlias","Region","Queues" 2 | "AwsAccountId-00000","AwsAccountAlias-00000","us-west-2","2" 3 | "AwsAccountId-00000","AwsAccountAlias-00000","us-west-1","0" 4 | "AwsAccountId-00000","AwsAccountAlias-00000","us-east-1","15" 5 | "AwsAccountId-00000","AwsAccountAlias-00000","us-east-2","0" 6 | "AwsAccountId-00001","AwsAccountAlias-00001","us-west-2","2" 7 | "AwsAccountId-00001","AwsAccountAlias-00001","us-west-1","0" 8 | "AwsAccountId-00001","AwsAccountAlias-00001","us-east-1","15" 9 | "AwsAccountId-00001","AwsAccountAlias-00001","us-east-2","0" 10 | "AwsAccountId-00002","AwsAccountAlias-00002","us-west-2","2" 11 | "AwsAccountId-00002","AwsAccountAlias-00002","us-west-1","0" 12 | "AwsAccountId-00002","AwsAccountAlias-00002","us-east-1","15" 13 | "AwsAccountId-00002","AwsAccountAlias-00002","us-east-2","0" 14 | -------------------------------------------------------------------------------- /CLOUD/EXAMPLES/aws_kms_numbers-example.csv: -------------------------------------------------------------------------------- 1 | 
"AwsAccountId","AwsAccountAlias","Region","Keys" 2 | "AwsAccountId-00000","AwsAccountAlias-00000","us-west-2","24" 3 | "AwsAccountId-00000","AwsAccountAlias-00000","us-west-1","10" 4 | "AwsAccountId-00000","AwsAccountAlias-00000","us-east-1","17" 5 | "AwsAccountId-00000","AwsAccountAlias-00000","us-east-2","13" 6 | "AwsAccountId-00001","AwsAccountAlias-00001","us-west-2","24" 7 | "AwsAccountId-00001","AwsAccountAlias-00001","us-west-1","10" 8 | "AwsAccountId-00001","AwsAccountAlias-00001","us-east-1","17" 9 | "AwsAccountId-00001","AwsAccountAlias-00001","us-east-2","13" 10 | "AwsAccountId-00002","AwsAccountAlias-00002","us-west-2","24" 11 | "AwsAccountId-00002","AwsAccountAlias-00002","us-west-1","10" 12 | "AwsAccountId-00002","AwsAccountAlias-00002","us-east-1","17" 13 | "AwsAccountId-00002","AwsAccountAlias-00002","us-east-2","13" 14 | -------------------------------------------------------------------------------- /CLOUD/EXAMPLES/aws_rds_info-example.csv: -------------------------------------------------------------------------------- 1 | "AwsAccountId","AwsAccountAlias","DBName","DBInstanceIdentifier","SizeGiB","SizeTiB","SizeGB","SizeTB","Region","InstanceType","Engine","EngineVersion","DBInstanceStatus","BackupPlans","InBackupPlan","BackupRetentionPeriod","PreferredBackupWindow","StorageType" 2 | "AwsAccountId-00000","AwsAccountAlias-00000",,"DBInstanceIdentifier-00000","20","0.0195","21.475","0.0215","us-west-2","db.m6i.xlarge","sqlserver-web","15.00.4236.7.v1","available","","False","7","07:08-07:38","gp2" 3 | "AwsAccountId-00000","AwsAccountAlias-00000",,"DBInstanceIdentifier-00001","20","0.0195","21.475","0.0215","us-east-2","db.t3.micro","sqlserver-ex","15.00.4385.2.v1","available","","False","7","08:12-08:42","gp3" 4 | "AwsAccountId-00001","AwsAccountAlias-00001",,"DBInstanceIdentifier-00002","1","0.001","1.074","0.0011","us-east-1","db.serverless","aurora-postgresql","15.4","available","","False","7","03:13-03:43","aurora" 5 | 
-------------------------------------------------------------------------------- /CLOUD/EXAMPLES/azure_vmdisk_info-example.csv: -------------------------------------------------------------------------------- 1 | "Name","Disks","SizeGiB","SizeGB","Subscription","Tenant","Region","ResourceGroup","vmID","InstanceType","Status","HasMSSQL" 2 | "azure-vm-1","1","30","32.212","subscription-1","tenant-1","centralus","resource-group-1","fc0c344d-88c9-4cdc-9a80-e00c4c4d5933","Standard_DS1_v2","OK","No" 3 | "azure-vm-2","2","286","307.09","subscription-1","tenant-1","centralus","resource-group-1","1764c58f-3b92-4407-9f4e-249fa80e29ff","Standard_D16lds_v5","OK","No" 4 | "azure-vm-3","2","912","979.253","subscription-1","tenant-1","centralus","resource-group-1","3453ff2c-7d13-4fc3-bbe2-883aa3f3d700","Standard_D16s_v5","OK","No" 5 | "azure-sql-vm-1","3","2175","2335.388","subscription-1","tenant-1","centralus","resource-group-1","4cf4a4e4-d86d-48c5-911e-56f5b0c4ff33","Standard_E8bds_v5","OK","Yes" 6 | "azure-sql-vm-2","2","383","411.243","subscription-1","tenant-1","centralus","resource-group-1","d4f93fd6-c4e4-4205-9df1-7c65581aece1","Standard_D4s_v3","OK","No" 7 | -------------------------------------------------------------------------------- /CLOUD/EXAMPLES/aws_eks_clusters_info-example.csv: -------------------------------------------------------------------------------- 1 | "AwsAccountId","AwsAccountAlias","Name","Version","PlatformVersion","Status","Arn","RoleArn","Region","Tag:TagName-00004","Tag:TagName-00031","Tag:TagName-00005","Tag:TagName-00032","Tag:TagName-00033","Tag:TagName-00002","Tag:TagName-00034","Tag:TagName-00035" 2 | "AwsAccountId-00000","AwsAccountAlias-00000","TagValue-00027","1.30","eks.7","ACTIVE","Arn-00000","RoleArn-00000","us-west-2","TagValue-00072","TagValue-00027","TagValue-00073","TagValue-00027","TagValue-00074","TagValue-00075","TagValue-00076","TagValue-00077" 3 | 
"AwsAccountId-00000","AwsAccountAlias-00000","Name-00020","1.27","eks.22","ACTIVE","Arn-00001","RoleArn-00001","us-east-2",,,,,,,, 4 | "AwsAccountId-00001","AwsAccountAlias-00001","Name-00021","1.29","eks.12","ACTIVE","Arn-00002","RoleArn-00002","us-east-2",,,,,,,, 5 | "AwsAccountId-00001","AwsAccountAlias-00001","Name-00022","1.27","eks.22","ACTIVE","Arn-00003","RoleArn-00003","us-east-2",,,,,,,, 6 | "AwsAccountId-00002","AwsAccountAlias-00002","TagValue-00044","1.29","eks.12","ACTIVE","Arn-00004","RoleArn-00004","us-east-2",,,,,,,, 7 | -------------------------------------------------------------------------------- /CLOUD/EXAMPLES/aws_efs_info-example.csv: -------------------------------------------------------------------------------- 1 | "AwsAccountId","AwsAccountAlias","FileSystemId","FileSystemProtection","Name","SizeInBytes","SizeGiB","SizeTiB","SizeGB","SizeTB","NumberOfMountTargets","OwnerId","PerformanceMode","ProvisionedThroughputInMibps","DBInstanceIdentifier","Region","ThroughputMode","BackupPlans","InBackupPlan","Tag:TagName-00030" 2 | "AwsAccountId-00000","AwsAccountAlias-00000","FileSystemId-00000","ENABLED","Name-00016","3945266585600","3674.3158","3.5882","3945.2666","3.9453","3","AwsAccountId-00000","generalPurpose","0",,"us-west-2","elastic","","False", 3 | "AwsAccountId-00000","AwsAccountAlias-00000","FileSystemId-00001","DISABLED","Name-00017","4310760134656","4014.7082","3.9206","4310.7601","4.3108","3","AwsAccountId-00000","generalPurpose","0",,"us-west-2","elastic","","False", 4 | "AwsAccountId-00000","AwsAccountAlias-00000","FileSystemId-00002","ENABLED","Name-00018","112618481664","104.8841","0.1024","112.6185","0.1126","1","AwsAccountId-00000","generalPurpose","0",,"us-west-2","elastic","","False", 5 | "AwsAccountId-00001","AwsAccountAlias-00001","FileSystemId-00003","ENABLED","Name-00019","6144","0","0","0","0","3","AwsAccountId-00001","generalPurpose","0",,"us-west-2","bursting","","False","TagValue-00071" 6 | 
-------------------------------------------------------------------------------- /CLOUD/EXAMPLES/aws_fsx_filesystem_info-example.csv: -------------------------------------------------------------------------------- 1 | "AwsAccountId","AwsAccountAlias","Region","FileSystemId","FileSystemDNSName","FileSystemType","FileSystemTypeVersion","FileSystemOwnerId","FileSystemStorageType","Name","OnTapType","WindowsType","LustreType","OpenZFSType","StorageCapacityBytes","StorageCapacityGiB","StorageCapacityTiB","StorageCapacityGB","StorageCapacityTB","StorageUsedBytes","StorageUsedGiB","StorageUsedTiB","StorageUsedGB","StorageUsedTB","StorageCapacityUtilizationPercentage" 2 | "AwsAccountId-00000","AwsAccountAlias-00000","us-west-2","FileSystemId-00004",,"ONTAP",,"AwsAccountId-00000","SSD","Name-00023","True","False","False","False","23622320128000","22000","21.4844","23622.3201","23.6223","10731064958976","9994.083","9.7598","10731.065","10.7311", 3 | "AwsAccountId-00000","AwsAccountAlias-00000","us-west-2","FileSystemId-00005","FileSystemDNSName-00000","WINDOWS",,"AwsAccountId-00000","SSD","Name-00024","False","True","False","False","107374182400","100","0.0977","107.3742","0.1074",,,,,,"0.1064453125" 4 | "AwsAccountId-00000","AwsAccountAlias-00000","us-west-1","FileSystemId-00006",,"ONTAP",,"AwsAccountId-00000","SSD","Name-00025","True","False","False","False","1099511627776","1024","1","1099.5116","1.0995","114094080","0.1063","0.0001","0.1141","0.0001", 5 | -------------------------------------------------------------------------------- /CLOUD/EXAMPLES/aws_fsx_volume_info-example.csv: -------------------------------------------------------------------------------- 1 | 
"AwsAccountId","AwsAccountAlias","Region","FileSystemId","FileSystemDNSName","FileSystemType","FileSystemTypeVersion","FileSystemOwnerId","FileSystemStorageType","Name","VolumeId","VolumeType","LifeCycle","StorageUsedBytes","StorageUsedGiB","StorageUsedTiB","StorageUsedGB","StorageUsedTB","StorageCapacityBytes","StorageCapacityGiB","StorageCapacityTiB","StorageCapacityGB","StorageCapacityTB","BackupPlans","InBackupPlan" 2 | "AwsAccountId-00000","AwsAccountAlias-00000","us-west-2","FileSystemId-00004",,"ONTAP",,"AwsAccountId-00000","SSD","Name-00026","VolumeId-00005","ONTAP","CREATED","12020182388736","11194.6672","10.9323","12020.1824","12.0202","21990232555520","20480","20","21990.2326","21.9902","","False" 3 | "AwsAccountId-00000","AwsAccountAlias-00000","us-west-2","FileSystemId-00004",,"ONTAP",,"AwsAccountId-00000","SSD","Name-00027","VolumeId-00006","ONTAP","CREATED","67203072","0.0626","0.0001","0.0672","0.0001","1073741824","1","0.001","1.0737","0.0011","","False" 4 | "AwsAccountId-00000","AwsAccountAlias-00000","us-west-2","FileSystemId-00004",,"ONTAP",,"AwsAccountId-00000","SSD","Name-00028","VolumeId-00007","ONTAP","CREATED","67178496","0.0626","0.0001","0.0672","0.0001","1073741824","1","0.001","1.0737","0.0011","","False" 5 | "AwsAccountId-00000","AwsAccountAlias-00000","us-west-2","FileSystemId-00004",,"ONTAP",,"AwsAccountId-00000","SSD","Name-00029","VolumeId-00008","ONTAP","CREATED","557760512","0.5195","0.0005","0.5578","0.0006","10737418240","10","0.0098","10.7374","0.0107","","False" 6 | "AwsAccountId-00000","AwsAccountAlias-00000","us-west-1","FileSystemId-00006",,"ONTAP",,"AwsAccountId-00000","SSD","Name-00026","VolumeId-00009","ONTAP","CREATED","54980018176","51.2041","0.05","54.98","0.055","1099511627776","1024","1","1099.5116","1.0995","","False" 7 | 
"AwsAccountId-00000","AwsAccountAlias-00000","us-west-1","FileSystemId-00006",,"ONTAP",,"AwsAccountId-00000","SSD","Name-00030","VolumeId-00010","ONTAP","CREATED","5373775872","5.0047","0.0049","5.3738","0.0054","107374182400","100","0.0977","107.3742","0.1074","","False" 8 | "AwsAccountId-00000","AwsAccountAlias-00000","us-west-1","FileSystemId-00006",,"ONTAP",,"AwsAccountId-00000","SSD","Name-00027","VolumeId-00011","ONTAP","CREATED","56098816","0.0522","0.0001","0.0561","0.0001","1073741824","1","0.001","1.0737","0.0011","","False" 9 | -------------------------------------------------------------------------------- /ORACLE/README.txt: -------------------------------------------------------------------------------- 1 | ########## 2 | # 3 | # Rubrik Data Collection for Oracle Tooling 4 | # 5 | # Version: 1.2 6 | # 7 | # Developer: Shawn McElhinney 8 | # 9 | # Purpose: 10 | # This utility is designed to collect data from Oracle databases to assist with Rubrik solution sizing. 11 | # The utility will connect to the databases you define in collectionInput.lst and execute the appropriate rbkDataCollection.sql (based on your db version) to gather the data required 12 | # to begin properly sizing your Rubrik solution for Oracle. 13 | # 14 | # Component files: 15 | # collectionInput.lst -- a space-separated list of the databases in your landscape. Simply provide single line entries for each database in the following format: 16 | # hostname databasePort OracleSID databaseRelease 17 | # where databaseRelease is in a 3 digit format representing MAJOR and DOT release. For example: 10gR2 -> 102; 11gR2 -> 112; 12cR1 -> 121 etc. 18 | # save this file after update as it will be an input for the shell script. 19 | # 20 | # rbkDataCollection.sql -- sql script that collects pertinent database information to assist with sizing Rubrik for Oracle and dump output to a comma-separated file named rbkDiscovery.csv. 
21 | # 22 | # dataCollector.sh -- This script will verify the existence of SQL*Plus, read through collectionInput.lst, execute rbkDataCollection.sql. At runtime, user will be prompted to enter the SYSTEM password for the associated databases. Output from the SQL queries is dumped to rbkDiscovery.csv, which is what your Rubrik Sales Engineer will require to help with sizing. 23 | # 24 | # Execution Steps: 25 | # 1 - Update collectionInput.lst file with all Oracle databases that will be integrated with Rubrik. Enter the hostname, databasePort, ORACLE_SID and version for each instance or CDB (please check the expected format in collectionInput.lst-EXAMPLE file). If you use PDBs in 12c+, the script will gather information for all PDBs that are installed in the CDB. If you use a RAC cluster, please add the instances of one node only. 26 | # 27 | # 2 - Ensure you have the SYSTEM password for all databases referenced in collectionInput.lst file. 28 | # 29 | # 3 - Execute dataCollector.sh and provide the appropriate SYSTEM password when prompted 30 | # 31 | # 4 - Compress the resulting rbkDiscovery.csv & work with your Rubrik Sales Engineer to transfer the file to them via the most secure mechanism available. 
32 | # 33 | ########## 34 | -------------------------------------------------------------------------------- /CLOUD/EXAMPLES/aws_ec2_unattached_volume_info-example.csv: -------------------------------------------------------------------------------- 1 | "AwsAccountId","AwsAccountAlias","VolumeId","Name","SizeGiB","SizeTiB","SizeGB","SizeTB","Region","VolumeType","BackupPlans","InBackupPlan","Tag:TagName-00024","Tag:TagName-00028","Tag:TagName-00025","Tag:TagName-00001","Tag:TagName-00021","Tag:TagName-00026","Tag:TagName-00029","Tag:TagName-00027" 2 | "AwsAccountId-00000","AwsAccountAlias-00000","VolumeId-00000","","200","0.1953","214.748","0.2147","us-west-2","gp3","","False",,,,,,,, 3 | "AwsAccountId-00000","AwsAccountAlias-00000","VolumeId-00001","","8","0.0078","8.59","0.0086","us-west-2","gp2","","False","TagValue-00051","TagValue-00055","TagValue-00052","TagValue-00043","TagValue-00050","TagValue-00053",,"TagValue-00054" 4 | "AwsAccountId-00000","AwsAccountAlias-00000","VolumeId-00002","","8","0.0078","8.59","0.0086","us-west-2","gp2","","False","TagValue-00056","TagValue-00058","TagValue-00057","TagValue-00043","TagValue-00050","TagValue-00053",,"TagValue-00054" 5 | "AwsAccountId-00000","AwsAccountAlias-00000","VolumeId-00003","","8","0.0078","8.59","0.0086","us-west-2","gp2","","False","TagValue-00060","TagValue-00061","TagValue-00059","TagValue-00043","TagValue-00050","TagValue-00053",,"TagValue-00054" 6 | "AwsAccountId-00000","AwsAccountAlias-00000","VolumeId-00004","","8","0.0078","8.59","0.0086","us-west-2","gp2","","False","TagValue-00064","TagValue-00062","TagValue-00063","TagValue-00043","TagValue-00050","TagValue-00053",,"TagValue-00054" 7 | "AwsAccountId-00000","AwsAccountAlias-00000","VolumeId-00005","","8","0.0078","8.59","0.0086","us-west-2","gp2","","False","TagValue-00066","TagValue-00065","TagValue-00067","TagValue-00043","TagValue-00050","TagValue-00053",,"TagValue-00054" 8 | 
"AwsAccountId-00000","AwsAccountAlias-00000","VolumeId-00006","","100","0.0977","107.374","0.1074","us-west-2","gp3","","False",,,,,,,"TagValue-00068", 9 | "AwsAccountId-00000","AwsAccountAlias-00000","VolumeId-00007","","100","0.0977","107.374","0.1074","us-west-2","gp3","","False",,,,,,,"TagValue-00069", 10 | "AwsAccountId-00000","AwsAccountAlias-00000","VolumeId-00008","","500","0.4883","536.871","0.5369","us-west-2","gp3","","False",,,,,,,"TagValue-00070", 11 | "AwsAccountId-00000","AwsAccountAlias-00000","VolumeId-00009","Name-00012","125","0.1221","134.218","0.1342","us-west-2","gp2","","False",,,,,,,, 12 | "AwsAccountId-00000","AwsAccountAlias-00000","VolumeId-00000","Name-00013","1024","1","1099.512","1.0995","us-west-2","gp3","","False",,,,,,,, 13 | "AwsAccountId-00000","AwsAccountAlias-00000","VolumeId-00001","","1024","1","1099.512","1.0995","us-west-2","gp3","","False",,,,,,,, 14 | "AwsAccountId-00000","AwsAccountAlias-00000","VolumeId-00002","Name-00014","4096","4","4398.047","4.398","us-west-2","gp3","","False",,,,,,,, 15 | "AwsAccountId-00000","AwsAccountAlias-00000","VolumeId-00003","Name-00015","5500","5.3711","5905.58","5.9056","us-west-2","gp3","","False",,,,,,,, 16 | "AwsAccountId-00000","AwsAccountAlias-00000","VolumeId-00004","","512","0.5","549.756","0.5498","us-west-2","gp3","","False",,,,,,,, 17 | -------------------------------------------------------------------------------- /CLOUD/Get-AWSSizingInfo-Permissions.cft: -------------------------------------------------------------------------------- 1 | AWSTemplateFormatVersion: '2010-09-09' 2 | Description: Create a cross-account role to run Rubrik's Sizing Script using StackSets 3 | 4 | Parameters: 5 | ScriptAWSAccountId: 6 | Type: String 7 | Description: The AWS account ID where the Rubrik Sizing Script is run from. 8 | AllowedPattern: '^\d{12}$' 9 | ConstraintDescription: 'The Script AWS Account ID must be a 12-digit number.' 
10 | 11 | RoleName: 12 | Type: String 13 | Default: 'RubrikSizingScripts' 14 | Description: Name of the AWS IAM role to create 15 | AllowedPattern: '^[a-zA-Z0-9+=,.@_-]+$' 16 | ConstraintDescription: 'The role name can only contain alphanumeric characters and the following: +=,.@_-' 17 | 18 | Resources: 19 | CrossAccountRole: 20 | Type: 'AWS::IAM::Role' 21 | Properties: 22 | RoleName: !Ref RoleName 23 | AssumeRolePolicyDocument: 24 | Version: '2012-10-17' 25 | Statement: 26 | - Effect: 'Allow' 27 | Principal: 28 | AWS: 29 | - !Sub 'arn:aws:iam::${ScriptAWSAccountId}:root' 30 | Action: 'sts:AssumeRole' 31 | Policies: 32 | - PolicyName: 'RubrikSizingScriptPolicy' 33 | PolicyDocument: 34 | Version: '2012-10-17' 35 | Statement: 36 | - Sid: 'VisualEditor0' 37 | Effect: 'Allow' 38 | Action: 39 | - 'backup:ListBackupPlans' 40 | - 'backup:ListBackupSelections' 41 | - 'backup:GetBackupPlan' 42 | - 'backup:GetBackupSelection' 43 | - 'ce:GetCostAndUsage' 44 | - 'cloudwatch:GetMetricStatistics' 45 | - 'cloudwatch:ListMetrics' 46 | - 'dynamodb:ListTables' 47 | - 'dynamodb:DescribeTable' 48 | - 'ec2:DescribeInstances' 49 | - 'ec2:DescribeRegions' 50 | - 'ec2:DescribeVolumes' 51 | - 'eks:DescribeCluster' 52 | - 'eks:ListClusters' 53 | - 'eks:ListNodegroups' 54 | - 'elasticfilesystem:DescribeFileSystems' 55 | - 'fsx:DescribeFileSystems' 56 | - 'fsx:DescribeVolumes' 57 | - 'iam:ListAccountAliases' 58 | - 'kms:ListKeys' 59 | - 'organizations:ListAccounts' 60 | - 'rds:DescribeDBInstances' 61 | - 'rds:DescribeDBClusters' 62 | - 's3:GetBucketLocation' 63 | - 's3:ListAllMyBuckets' 64 | - 's3:GetBucketTagging' 65 | - 's3:ListStorageLensConfigurations' 66 | - 's3:GetStorageLensConfiguration' 67 | - 'secretsmanager:ListSecrets' 68 | - 'sts:AssumeRole' 69 | - 'sqs:ListQueues' 70 | Resource: '*' 71 | 72 | Outputs: 73 | RoleARN: 74 | Description: The ARN of the created cross-account role 75 | Value: !GetAtt CrossAccountRole.Arn 76 | 
-------------------------------------------------------------------------------- /CLOUD/EXAMPLES/azure_backup_vault_VM_policies-example.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "Tenant": "Tenant-1", 4 | "Subscription": "subscription-1", 5 | "Region": "eastus", 6 | "ResourceGroup": "ResourceGroup-1", 7 | "SnapshotRetentionInDays": 2, 8 | "ProtectedItemsCount": 0, 9 | "AzureBackupRGName": null, 10 | "AzureBackupRGNameSuffix": null, 11 | "PolicySubType": 1, 12 | "TieringPolicy": null, 13 | "SchedulePolicy": { 14 | "ScheduleRunFrequency": 1, 15 | "ScheduleRunDays": null, 16 | "ScheduleRunTimes": [ 17 | "2023-06-30T07:00:00Z" 18 | ], 19 | "ScheduleInterval": null, 20 | "ScheduleWindowStartTime": null, 21 | "ScheduleWindowDuration": null, 22 | "ScheduleRunTimeZone": "UTC" 23 | }, 24 | "RetentionPolicy": { 25 | "IsDailyScheduleEnabled": true, 26 | "IsWeeklyScheduleEnabled": false, 27 | "IsMonthlyScheduleEnabled": false, 28 | "IsYearlyScheduleEnabled": false, 29 | "DailySchedule": { 30 | "DurationCountInDays": 30, 31 | "RetentionTimes": [ 32 | "2023-06-30T07:00:00Z" 33 | ], 34 | "BackupManagementType": "AzureIaasVM" 35 | }, 36 | "WeeklySchedule": null, 37 | "MonthlySchedule": null, 38 | "YearlySchedule": null, 39 | "BackupManagementType": "AzureIaasVM" 40 | }, 41 | "Name": "DefaultPolicy", 42 | "WorkloadType": 1, 43 | "Id": "/subscriptions/12345678-1234-1234-1234-123456789012/resourceGroups/ResourceGroup-1/providers/Microsoft.RecoveryServices/vaults/Site-recovery-vault-eastus/backupPolicies/DefaultPolicy", 44 | "BackupManagementType": 1 45 | }, 46 | { 47 | "Tenant": "Tenant-1", 48 | "Subscription": "subscription-1", 49 | "Region": "eastus", 50 | "ResourceGroup": "ResourceGroup-1", 51 | "SnapshotRetentionInDays": 2, 52 | "ProtectedItemsCount": 0, 53 | "AzureBackupRGName": null, 54 | "AzureBackupRGNameSuffix": null, 55 | "PolicySubType": 2, 56 | "TieringPolicy": null, 57 | "SchedulePolicy": { 58 | 
"ScheduleRunFrequency": 3, 59 | "HourlySchedule": { 60 | "Interval": 4, 61 | "WindowStartTime": "2023-06-30T08:00:00Z", 62 | "WindowDuration": 12 63 | }, 64 | "DailySchedule": null, 65 | "WeeklySchedule": null, 66 | "ScheduleRunTimeZone": "UTC" 67 | }, 68 | "RetentionPolicy": { 69 | "IsDailyScheduleEnabled": true, 70 | "IsWeeklyScheduleEnabled": false, 71 | "IsMonthlyScheduleEnabled": false, 72 | "IsYearlyScheduleEnabled": false, 73 | "DailySchedule": { 74 | "DurationCountInDays": 30, 75 | "RetentionTimes": [ 76 | "2023-06-30T08:00:00Z" 77 | ], 78 | "BackupManagementType": "AzureIaasVM" 79 | }, 80 | "WeeklySchedule": null, 81 | "MonthlySchedule": null, 82 | "YearlySchedule": null, 83 | "BackupManagementType": "AzureIaasVM" 84 | }, 85 | "Name": "EnhancedPolicy", 86 | "WorkloadType": 1, 87 | "Id": "/subscriptions/12345678-1234-1234-1234-123456789012/resourceGroups/ResourceGroup-1/providers/Microsoft.RecoveryServices/vaults/Site-recovery-vault-eastus/backupPolicies/EnhancedPolicy", 88 | "BackupManagementType": 1 89 | } 90 | ] 91 | -------------------------------------------------------------------------------- /CLOUD/EXAMPLES/azure_backup_vault_VM_SQL_items-example.csv: -------------------------------------------------------------------------------- 1 | "Tenant","Subscription","Region","ResourceGroup","FriendlyName","ServerName","ParentName","ParentType","LastBackupErrorDetail","ProtectedItemDataSourceId","ProtectedItemHealthStatus","ProtectionStatus","PolicyId","ProtectionState","LastBackupStatus","LastBackupTime","ProtectionPolicyName","ExtendedInfo","DateOfPurge","DeleteState","Name","Id","LatestRecoveryPoint","SourceResourceId","WorkloadType","ContainerName","ContainerType","BackupManagementType" 2 | 
"Tenant-1","subscription-1","westus2","ResoruceGroup-1","azbp0_db7","windows-azbp0","MSSQLSERVER","AzureVmWorkloadSQLInstance",,"7661449799016875264","Healthy","Healthy","/subscriptions/12345678-1234-1234-1234-123456789012/resourceGroups/ResoruceGroup-1/providers/Microsoft.RecoveryServices/vaults/vault-1/backupPolicies/SQLPolicy","Protected","Healthy","12/20/2023 7:15:59AM","SQLPolicy",,,"NotDeleted","SQLDataBase;mssqlserver;azbp0_db7","/subscriptions/12345678-1234-1234-1234-123456789012/resourceGroups/ResoruceGroup-1/providers/Microsoft.RecoveryServices/vaults/vault-1/backupFabrics/Azure/protectionContainers/VMAppContainer;compute;RG-azbp-sqlvm;windows-azbp0/protectedItems/SQLDataBase;mssqlserver;azbp0_db7",,"/subscriptions/12345678-1234-1234-1234-123456789012/resourceGroups/RG-azbp-sqlvm/providers/Microsoft.Compute/virtualMachines/windows-azbp0","MSSQL","VMAppContainer;compute;RG-azbp-sqlvm;windows-azbp0","AzureVMAppContainer","AzureWorkload" 3 | "Tenant-1","subscription-1","westus2","ResoruceGroup-1","model","windows-azbp0","MSSQLSERVER","AzureVmWorkloadSQLInstance",,"7661449799211396532","Healthy","Healthy","/subscriptions/12345678-1234-1234-1234-123456789012/resourceGroups/ResoruceGroup-1/providers/Microsoft.RecoveryServices/vaults/vault-1/backupPolicies/SQLPolicy","Protected","Healthy","12/20/2023 7:13:59AM","SQLPolicy",,,"NotDeleted","SQLDataBase;mssqlserver;model","/subscriptions/12345678-1234-1234-1234-123456789012/resourceGroups/ResoruceGroup-1/providers/Microsoft.RecoveryServices/vaults/vault-1/backupFabrics/Azure/protectionContainers/VMAppContainer;compute;RG-azbp-sqlvm;windows-azbp0/protectedItems/SQLDataBase;mssqlserver;model",,"/subscriptions/12345678-1234-1234-1234-123456789012/resourceGroups/RG-azbp-sqlvm/providers/Microsoft.Compute/virtualMachines/windows-azbp0","MSSQL","VMAppContainer;compute;RG-azbp-sqlvm;windows-azbp0","AzureVMAppContainer","AzureWorkload" 4 | 
"Tenant-1","subscription-2","westus2","ResoruceGroup-1","azbp0_db6","windows-azbp0","MSSQLSERVER","AzureVmWorkloadSQLInstance",,"7661449799483830392","Healthy","Healthy","/subscriptions/12345678-1234-1234-1234-123456789012/resourceGroups/ResoruceGroup-1/providers/Microsoft.RecoveryServices/vaults/vault-1/backupPolicies/SQLPolicy","Protected","Healthy","12/20/2023 7:15:59AM","SQLPolicy",,,"NotDeleted","SQLDataBase;mssqlserver;azbp0_db6","/subscriptions/12345678-1234-1234-1234-123456789012/resourceGroups/ResoruceGroup-1/providers/Microsoft.RecoveryServices/vaults/vault-1/backupFabrics/Azure/protectionContainers/VMAppContainer;compute;RG-azbp-sqlvm;windows-azbp0/protectedItems/SQLDataBase;mssqlserver;azbp0_db6",,"/subscriptions/12345678-1234-1234-1234-123456789012/resourceGroups/RG-azbp-sqlvm/providers/Microsoft.Compute/virtualMachines/windows-azbp0","MSSQL","VMAppContainer;compute;RG-azbp-sqlvm;windows-azbp0","AzureVMAppContainer","AzureWorkload" -------------------------------------------------------------------------------- /ORACLE/dataCollector.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ##### 4 | # 5 | # name: dataCollector.sh 6 | # 7 | # version: 1.0 8 | # 9 | # assumptions: This utility assumes that the dba has access to the following information: 10 | # - SYSTEM account is used to access required dba* and v$* views 11 | # - Hostname of all database hosts 12 | # - Database Listener port for all databases 13 | # - Database Service Name for all databases 14 | # - Database version is provided for databases as a 3 digit number representing the MAJOR (ie: 12c) and DOT (ie: 12.1) release (for example:10gR2 -> 102; 11gR2 -> 112; 12cR1 -> 121 etc. 
) 15 | # - access to sqlplus on the host executing the utility 16 | # 17 | # description: shell script to collect data on all Oracle databases defined as input 18 | # 19 | # input: collectionInput.lst -- a space separated input file containing the following input: 20 | # databaseHostname dbPort dbServiceName dbRelease 21 | # a unique entry should exist for EACH database requiring data collection 22 | # 23 | # output: rbkDiscovery.csv -- a comma separated list of critical database information required 24 | # for properly sizing Oracle databases on Rubrik. 25 | # 26 | # additional files: rbkDataCollection.sql -- the sql file executed to collect database information 27 | # 28 | ##### 29 | 30 | ##### 31 | # 32 | # Validate existence of sqlplus 33 | # 34 | ##### 35 | FILE=`which sqlplus` 36 | while ! [ -f "$FILE" ] 37 | do 38 | echo "sqlplus not found. Please provide an ORACLE_HOME location containing sqlplus." 39 | #accept user input 40 | unset FILE 41 | read OH 42 | #update $FILE to new value & recheck 43 | echo $OH 44 | export ORACLE_HOME=$OH 45 | export PATH=$PATH:$ORACLE_HOME/bin 46 | FILE=`which sqlplus` 47 | echo $FILE 48 | done 49 | 50 | # sleep 2 51 | 52 | ##### 53 | # 54 | # Verify collectionInput.lst exists & has content 55 | # loop through collectionInput.lst to collect required variables 56 | # 57 | ##### 58 | 59 | # confirm collectionInput.lst exists 60 | 61 | echo "Checking existence of collectionInput.lst" 62 | INPUT=collectionInput.lst 63 | 64 | [ !
-f "$INPUT" ] && { echo "Error: $INPUT file not found."; exit 2; } 65 | 66 | if [ -s "$INPUT" ] 67 | then 68 | 69 | IFS=' ' 70 | 71 | # collect values to create connect string to databases listed 72 | cat $INPUT | while read host port sid dbversion junk 73 | do 74 | echo "Connecting to database: "$sid 75 | echo $dbversion 76 | unset passwd 77 | unset login 78 | unset sql 79 | unset runSql 80 | #echo "-n Enter SYSTEM password for database "$sid" and press [ENTER]:" 81 | #printf '%s' "Enter the SYSTEM Password for database "$sid" and press [ENTER]: " 82 | echo "Enter SYSTEM password" 83 | read -s passwd < /dev/tty 84 | login=system/$passwd 85 | 86 | # determine database version from collectionInput.lst & set appropriate sql script for execution 87 | if test "$dbversion" -lt 120 88 | then 89 | sql="rbkDataCollection_11g.sql" 90 | elif test "$dbversion" -lt 122 91 | then 92 | sql="rbkDataCollection_121.sql" 93 | elif test "$dbversion" -lt 180 94 | then 95 | sql="rbkDataCollection_12c.sql" 96 | else 97 | sql="rbkDataCollection.sql" 98 | fi 99 | echo $sql 100 | # build sqlplus connection command based on collectionInput 101 | runSql="sqlplus "$login"@\"(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST="$host")(PORT="$port"))(CONNECT_DATA=(SID="$sid")))\" @"$sql"" 102 | # execute the resulting sqlplus command 103 | eval "$runSql" 104 | done 105 | else 106 | echo "$INPUT is empty." 107 | exit 3; 108 | fi 109 | 110 | exit; 111 | -------------------------------------------------------------------------------- /CLOUD/consolidate.ps1: -------------------------------------------------------------------------------- 1 | <# 2 | .SYNOPSIS 3 | A script to combine CSV files with the same prefix into one file, keeping only one set of headers. 4 | .DESCRIPTION 5 | This PowerShell script takes in a directory path as a parameter, finds all CSV files within, and groups them by their prefix (the text before the first dash in the filename).
6 | Each group of files with the same prefix is combined into one CSV file. 7 | If a CSV file is empty, it will be ignored during the aggregation. 8 | The resulting combined files are saved in the directory from which the script is run. 9 | Headers from the original CSV files are included in the combined files intelligently: 10 | each combined file will only include one set of headers taken from the CSV files with the same prefix. 11 | The script also filters out any blank lines in the original CSV files. 12 | 13 | This script requires Powershell 7 or later. To install Powershell 7 use one of these links: 14 | 15 | MacOs: https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-macos?view=powershell-7.4 16 | Windows: https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.4 17 | 18 | .PARAMETER directoryPath 19 | The path to the directory that contains the CSV files to be combined. Defaults to the current directory if not provided. 
20 | .EXAMPLE 21 | .\consolidate.ps1 -directoryPath "C:\path\to\your\csv\files" 22 | #> 23 | 24 | param ( 25 | # Path to the directory containing the CSV files 26 | [string]$directoryPath = "./" 27 | ) 28 | 29 | function addNullValuesForFields($list) { 30 | # Determine all unique fields 31 | $allFields = @{} 32 | foreach ($obj in $list) { 33 | $properties = $obj.PSObject.Properties 34 | foreach ($property in $properties) { 35 | if (-not $allFields.ContainsKey($property.Name)) { 36 | $allFields[$property.Name] = $true 37 | } 38 | } 39 | } 40 | 41 | $allFields = $allFields.Keys 42 | 43 | # Ensure each object has all possible fields 44 | foreach ($obj in $list) { 45 | foreach ($field in $allFields) { 46 | if (-not $obj.PSObject.Properties.Name.Contains($field)) { 47 | $obj | Add-Member -MemberType NoteProperty -Name $field -Value $null -Force 48 | } 49 | } 50 | } 51 | } 52 | 53 | 54 | # Get all the CSV files in the directory 55 | $csvFiles = Get-ChildItem -Path $directoryPath -Filter "*.csv" -File -Recurse 56 | 57 | # Extract unique prefixes from filenames 58 | $prefixes = $csvFiles | ForEach-Object { $_.BaseName -replace "-.*"} | Sort-Object -Unique 59 | 60 | foreach ($prefix in $prefixes) { 61 | $combinedData = @() 62 | 63 | # Get the CSV files with the current prefix 64 | $prefixCsvFiles = $csvFiles | Where-Object { $_.BaseName -like "$prefix-*" } 65 | 66 | foreach ($csvFile in $prefixCsvFiles) { 67 | # Skip the current file if it's empty 68 | if ((Get-Content $csvFile.FullName) -eq $null) { continue } 69 | 70 | # Import the data 71 | $importedData = Import-Csv -Path $csvFile.FullName 72 | 73 | $combinedData += $importedData 74 | } 75 | 76 | # If no data to write, skip to the next prefix 77 | if ($combinedData.Count -eq 0) { continue } 78 | 79 | # Make sure every field is reported 80 | addNullValuesForFields($combinedData) 81 | 82 | # Write the combined data to a new CSV file in the directory the script is run from 83 | $combinedData | Export-Csv -Path
"${prefix}_combined.csv" -NoTypeInformation 84 | } 85 | -------------------------------------------------------------------------------- /CLOUD/EXAMPLES/azure_backup_vault_VM_SQL_policies-example.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "Tenant": "Tenant-1", 4 | "Subscription": "subscription-1", 5 | "Region": "eastus", 6 | "ResourceGroup": "ResourceGroup-1", 7 | "ProtectedItemsCount": 0, 8 | "IsCompression": false, 9 | "IsDifferentialBackupEnabled": false, 10 | "IsLogBackupEnabled": true, 11 | "FullBackupSchedulePolicy": { 12 | "ScheduleRunFrequency": 1, 13 | "ScheduleRunDays": null, 14 | "ScheduleRunTimes": [ 15 | "2023-06-30T07:00:00Z" 16 | ], 17 | "ScheduleInterval": null, 18 | "ScheduleWindowStartTime": null, 19 | "ScheduleWindowDuration": null, 20 | "ScheduleRunTimeZone": "UTC" 21 | }, 22 | "DifferentialBackupSchedulePolicy": null, 23 | "LogBackupSchedulePolicy": { 24 | "ScheduleFrequencyInMins": 60 25 | }, 26 | "FullBackupRetentionPolicy": { 27 | "IsDailyScheduleEnabled": true, 28 | "IsWeeklyScheduleEnabled": false, 29 | "IsMonthlyScheduleEnabled": false, 30 | "IsYearlyScheduleEnabled": false, 31 | "DailySchedule": { 32 | "DurationCountInDays": 30, 33 | "RetentionTimes": [ 34 | "2023-06-30T07:00:00Z" 35 | ], 36 | "BackupManagementType": "" 37 | }, 38 | "WeeklySchedule": null, 39 | "MonthlySchedule": null, 40 | "YearlySchedule": null, 41 | "BackupManagementType": "" 42 | }, 43 | "DifferentialBackupRetentionPolicy": null, 44 | "LogBackupRetentionPolicy": { 45 | "RetentionDurationType": 1, 46 | "RetentionCount": 30 47 | }, 48 | "FullBackupTieringPolicy": null, 49 | "Name": "HourlyLogBackup", 50 | "WorkloadType": 4, 51 | "Id": "/subscriptions/12345678-1234-1234-1234-123456789012/resourceGroups/ResourceGroup-1/providers/Microsoft.RecoveryServices/vaults/vault-1/backupPolicies/HourlyLogBackup", 52 | "BackupManagementType": 6 53 | }, 54 | { 55 | "Tenant": "Tenant-1", 56 | "Subscription": "subscription-1", 
57 | "Region": "centralus", 58 | "ResourceGroup": "EngResourceGroupWestUS2", 59 | "ProtectedItemsCount": 0, 60 | "IsCompression": false, 61 | "IsDifferentialBackupEnabled": false, 62 | "IsLogBackupEnabled": true, 63 | "FullBackupSchedulePolicy": { 64 | "ScheduleRunFrequency": 1, 65 | "ScheduleRunDays": null, 66 | "ScheduleRunTimes": [ 67 | "2021-03-10T06:00:00Z" 68 | ], 69 | "ScheduleInterval": null, 70 | "ScheduleWindowStartTime": null, 71 | "ScheduleWindowDuration": null, 72 | "ScheduleRunTimeZone": "UTC" 73 | }, 74 | "DifferentialBackupSchedulePolicy": null, 75 | "LogBackupSchedulePolicy": { 76 | "ScheduleFrequencyInMins": 60 77 | }, 78 | "FullBackupRetentionPolicy": { 79 | "IsDailyScheduleEnabled": true, 80 | "IsWeeklyScheduleEnabled": false, 81 | "IsMonthlyScheduleEnabled": false, 82 | "IsYearlyScheduleEnabled": false, 83 | "DailySchedule": { 84 | "DurationCountInDays": 30, 85 | "RetentionTimes": [ 86 | "2021-03-10T06:00:00Z" 87 | ], 88 | "BackupManagementType": "" 89 | }, 90 | "WeeklySchedule": null, 91 | "MonthlySchedule": null, 92 | "YearlySchedule": null, 93 | "BackupManagementType": "" 94 | }, 95 | "DifferentialBackupRetentionPolicy": null, 96 | "LogBackupRetentionPolicy": { 97 | "RetentionDurationType": 1, 98 | "RetentionCount": 30 99 | }, 100 | "FullBackupTieringPolicy": null, 101 | "Name": "HourlyLogBackup", 102 | "WorkloadType": 4, 103 | "Id": "/subscriptions/12345678-1234-1234-1234-123456789012/resourceGroups/EngResourceGroupWestUS2/providers/Microsoft.RecoveryServices/vaults/vault-2/backupPolicies/HourlyLogBackup", 104 | "BackupManagementType": 6 105 | } 106 | ] -------------------------------------------------------------------------------- /MSSQL/Parse-RubrikSizing.ps1: -------------------------------------------------------------------------------- 1 | param($csvfile 2 | ,$delimiter = '|' 3 | ,[int[]]$HistogramBins=@(1,10,100,500,1000)) 4 | 5 | $data = Get-Content $csvfile 6 | 7 | if ($data[0].Substring(1,10) -ne "ServerName") 8 | { 9 | $Header 
="ServerName","SQLVersion","name","recovery_model_desc","SevenDayLogBackupMB","AverageFullMB","AverageFullTimeSec","AverageLogTimeSec","DBTotalSizeMB","AverageLogBackupInterval","ChangeCapture","ColumnStoreIndex","Compression","FILESTREAM","InMemoryOLTP","Partitioning","TransparentDatabaseEncryption", "NumberOfFiles" 10 | $rawdata = Get-Content $csvfile | ConvertFrom-Csv -Delimiter $delimiter -Header $Header 11 | } 12 | else 13 | { 14 | $rawdata = Get-Content $csvfile | ConvertFrom-Csv -Delimiter $delimiter 15 | } 16 | 17 | $DailyLogChurn = ($rawdata | Measure-Object -Property SevenDayLogBackupMB -Sum).Sum/7 18 | $EstimatedChangePerc = $DailyLogChurn/($rawdata | Where-Object {$_.recovery_model_desc -ne 'SIMPLE'} | Measure-Object -Property DBTotalSizeMB -Sum).Sum 19 | 20 | $return = [ordered]@{ 21 | 'DB Count' = ($rawdata | Measure-Object).Count 22 | 'DBs in Full' = ($rawdata | Where-Object {$_.recovery_model_desc -ne 'SIMPLE'} | Measure-Object).Count 23 | 'Server Count' = ($rawdata | Group-Object -Property ServerName | Measure-Object).Count 24 | 'Total DB Size (GB)' = (($rawdata | Measure-Object -Property DBTotalSizeMB -Sum).Sum/1024).ToString('0.00') 25 | 'Avg Full Backup Time(Sec)' = ($rawdata | Measure-Object -Property 'AverageFullTimeSec' -Average).Average.ToString('0.00') 26 | 'Avg Log Backup Time(Sec)' = ($rawdata | Where-Object {$_.recovery_model_desc -ne 'SIMPLE'} | Measure-Object -Property 'AverageLogTimeSec' -Average).Average.ToString('0.00') 27 | 'Estimated Daily Change Rate (Perc)' = ($EstimatedChangePerc * 100).ToString('0.00') 28 | 'Estimated Daily Change Rate (GB)' = ((($rawdata | Measure-Object -Property DBTotalSizeMB -Sum).Sum)/1024 * $EstimatedChangePerc).ToString('0.00') 29 | 'Avg Log Backup Interval (min)' = ($rawdata | Where-Object {$_.recovery_model_desc -ne "SIMPLE"} | Measure-Object -Property 'AverageLogBackupInterval' -Average).Average.ToString('0.00') 30 | 'DBs with ChangeCapture' = ($rawdata | Measure-Object -Property 'ChangeCapture' 
-Sum).Sum 31 | 'DBs with ColumnStoreIndex' = ($rawdata | Measure-Object -Property 'ColumnStoreIndex' -Sum).Sum 32 | 'DBs with Compression' = ($rawdata | Measure-Object -Property 'Compression' -Sum).Sum 33 | 'DBs with FILESTREAM' = ($rawdata | Measure-Object -Property 'FILESTREAM' -Sum).Sum 34 | 'DBs with InMemoryOLTP' = ($rawdata | Measure-Object -Property 'InMemoryOLTP' -Sum).Sum 35 | 'DBs with Partitioning' = ($rawdata | Measure-Object -Property 'Partitioning' -Sum).Sum 36 | 'DBs with TransparentDatabaseEncryption' = ($rawdata | Measure-Object -Property 'TransparentDatabaseEncryption' -Sum).Sum 37 | 'DBs with Greater than 300 Files' = ($rawdata | Where-Object {[int]$_.NumberOfFiles -ge 300} | Measure-Object).Count 38 | 'DBs in an Availability Group' = ($rawdata | Where-Object {[string]::IsNullOrEmpty($_.AG_Name) -eq $false } | Measure-Object ).Count 39 | } 40 | 41 | $MaxDbCountSingleHost = ($rawdata | Group-Object ServerName | Sort-Object Count -Descending| Select-Object Name, Count -first 1) 42 | if($MaxDbCountSingleHost.Count -gt 500){ 43 | $return.Add("Max DB count for a single host [$($MaxDbCountSingleHost.Name)]",$($MaxDbCountSingleHost.Count)) 44 | } 45 | 46 | $BinStart = 0 47 | foreach($bin in $HistogramBins){ 48 | $BinCount = ($rawdata | Where-Object {[int]$_.DBTotalSizeMB/1024 -gt $BinStart -and [int]$_.DBTotalSizeMB/1024 -le $bin} | Measure-Object).Count 49 | $return.Add("Histogram (GBs) :$bin",$BinCount) 50 | $BinStart = $bin 51 | } 52 | 53 | $BinCount = ($rawdata | Where-Object {[int]$_.DBTotalSizeMB/1024 -gt $BinStart} | Measure-Object).Count 54 | $return.Add("Histogram:More",$BinCount) 55 | 56 | return $return | Format-Table -AutoSize -------------------------------------------------------------------------------- /MSSQL/CollectSQLProfile.ps1: -------------------------------------------------------------------------------- 1 | <# 2 | .SYNOPSIS 3 | MSSQL Database Sizing Scripts for Rubrik 4 | 5 | .EXAMPLE 6 | To run the script use the below command 
with the SQLInstance parameter. Then provide that parameter with a comma separated list of SQL Servers. The script will use Windows Authentication to collect data. 7 | PS C:\> .\CollectSQLProfile.ps1 -SQLInstance SQL1, SQL2, SQL3, SQL4\Instance1 8 | 9 | .EXAMPLE 10 | If you need to use SQL Authentication instead of Windows Authentication, then include the Credential parameter and provide it with a user name. You will be prompted for a password. 11 | PS C:\> .\CollectSQLProfile.ps1 -SQLInstance SQL1, SQL2, SQL3, SQL4\Instance1 -Credential sa 12 | 13 | .EXAMPLE 14 | Instead of giving a comma separated list of sql servers, you can use the InstancesFile parameter. Provide a file that contains a list of sql server instances. Each instance should be on a separate line. 15 | PS C:\> .\CollectSQLProfile.ps1 -InstancesFile SQLInstances.txt 16 | .NOTES 17 | Name: MSSQL Database Sizing Scripts for Rubrik 18 | Author: Mike Fal, Chris Lumnah 19 | #> 20 | #requires -Modules SqlServer 21 | [cmdletbinding()] 22 | param( 23 | [Parameter(ParameterSetName='List Of Instances')] 24 | [string[]] $SQLInstance, 25 | [Parameter(ParameterSetName='File Of Instances')] 26 | [String] $InstancesFile, 27 | [string] $OutPath = '.\', 28 | [string] $QueryPath = '.\', 29 | [Switch] $Anonymize, 30 | [ValidateNotNull()] 31 | [System.Management.Automation.PSCredential] 32 | [System.Management.Automation.Credential()] 33 | $Credential = [System.Management.Automation.PSCredential]::Empty 34 | ) 35 | BEGIN{ 36 | Import-Module SqlServer 37 | if(Get-Module -ListAvailable SqlServer){Import-Module SqlServer} 38 | $queries = Get-ChildItem $QueryPath -Filter "*.sql" 39 | $queries | ForEach-Object {$_ | Add-Member -MemberType NoteProperty -Name FileName -Value "$($_.Name.Replace('.sql',''))-$(Get-Date -Format 'yyyyMMddHHmm').csv"} 40 | $header = $true 41 | if (![string]::IsNullOrEmpty($InstancesFile)){ 42 | if (Test-Path $InstancesFile){ 43 | $SQLInstance = Get-Content -Path $InstancesFile 44 | } 45 | } 46 | } 
47 | PROCESS{ 48 | foreach($i in $SQLInstance){ 49 | # $svr = new-object "Microsoft.SqlServer.Management.Smo.Server" $i; 50 | if (![string]::IsNullOrEmpty($Credential.UserName)){ 51 | $TestSQLConnection = Get-SQLInstance -ServerInstance $i -Credential $Credential -TrustServerCertificate -ErrorAction SilentlyContinue 52 | }else{ 53 | $TestSQLConnection = Get-SQLInstance -ServerInstance $i -TrustServerCertificate -ErrorAction SilentlyContinue 54 | } 55 | 56 | if ([string]::IsNullOrEmpty($TestSQLConnection.DisplayName)){ 57 | Write-Warning "!!!!!!! Can not connect to the SQL Service on: $i !!!!!!!" 58 | $i | Out-File -FilePath (Join-Path -Path $OutPath -ChildPath "SizingQuery-ServerWeCouldNotConnectTo.txt") -Append 59 | continue 60 | } 61 | 62 | if($Anonymize){ 63 | $serverid = [guid]::NewGuid() 64 | } 65 | 66 | foreach($q in $queries){ 67 | $sql = (Get-Content $q) -join "`n" 68 | if($Anonymize){$sql = $sql.Replace("@@SERVERNAME","'$serverid'")} 69 | $OutFile = Join-Path -Path $OutPath -ChildPath $q.filename 70 | 71 | Write-Verbose "Collecting data from $i" 72 | $output = Invoke-SqlCmd -ServerInstance "$i" -Database master -Query "$sql" -Credential $Credential -TrustServerCertificate 73 | 74 | if($header -eq $true){ 75 | $output | ConvertTo-Csv -Delimiter '|' -NoTypeInformation | Out-File $OutFile -Append 76 | } 77 | else{ 78 | $output | ConvertTo-Csv -Delimiter '|' -NoTypeInformation | Select-Object -skip 1 |Out-File $OutFile -Append 79 | } 80 | $output = "" 81 | } 82 | $header = $false 83 | } 84 | } 85 | END{} -------------------------------------------------------------------------------- /CLOUD/EXAMPLES/azure_backup_vault_VM_items-example.csv: -------------------------------------------------------------------------------- 1 | 
"Tenant","Subscription","Region","ResourceGroup","VirtualMachineId","HealthStatus","IsInclusionList","DiskLunList","ProtectionStatus","PolicyId","ProtectionState","LastBackupStatus","LastBackupTime","ProtectionPolicyName","ExtendedInfo","DateOfPurge","DeleteState","Name","Id","LatestRecoveryPoint","SourceResourceId","WorkloadType","ContainerName","ContainerType","BackupManagementType" 2 | "Tenant-1","subscription-1","westus","ResourceGroup-1","/subscriptions/12345678-1234-1234-1234-123456789012/resourceGroups/ResourceGroup-1/providers/Microsoft.Compute/virtualMachines/vm-1","Passed",,,"Healthy","/subscriptions/12345678-1234-1234-1234-123456789012/resourceGroups/ResourceGroup-1/providers/Microsoft.RecoveryServices/vaults/vault-1/backupPolicies/DailyPolicy","Protected","Completed","7/22/2018 6:08:31AM","DailyPolicy",,,"NotDeleted","VM;iaasvmcontainerv2;vault-1;vm-1","/subscriptions/12345678-1234-1234-1234-123456789012/resourceGroups/ResourceGroup-1/providers/Microsoft.RecoveryServices/vaults/vault-1/backupFabrics/Azure/protectionContainers/IaasVMContainer;iaasvmcontainerv2;vault-1;vm-1/protectedItems/VM;iaasvmcontainerv2;vault-1;vm-1","7/22/2018 6:08:36AM","/subscriptions/12345678-1234-1234-1234-123456789012/resourceGroups/ResourceGroup-1/providers/Microsoft.Compute/virtualMachines/vm-1","AzureVM","iaasvmcontainerv2;vault-1;vm-1","AzureVM","AzureVM" 3 | "Tenant-1","subscription-1","westus","ResoruceGroup-1","/subscriptions/12345678-1234-1234-1234-123456789012/resourceGroups/ResourceGroup-1/providers/Microsoft.Compute/virtualMachines/vm-2","Passed",,,"Healthy","/subscriptions/12345678-1234-1234-1234-123456789012/resourceGroups/ResourceGroup-1/providers/Microsoft.RecoveryServices/vaults/vault-1/backupPolicies/DefaultPolicy","Protected","Completed","5/16/2017 
11:10:29AM","DefaultPolicy",,,"NotDeleted","VM;iaasvmcontainerv2;vault-1;vm-2","/subscriptions/12345678-1234-1234-1234-123456789012/resourceGroups/ResourceGroup-1/providers/Microsoft.RecoveryServices/vaults/vault-1/backupFabrics/Azure/protectionContainers/IaasVMContainer;iaasvmcontainerv2;vault-1;vm-2/protectedItems/VM;iaasvmcontainerv2;vault-1;vm-2","5/16/2017 11:10:34AM","/subscriptions/12345678-1234-1234-1234-123456789012/resourceGroups/ResourceGroup-1/providers/Microsoft.Compute/virtualMachines/vm-2","AzureVM","iaasvmcontainerv2;vault-1;vm-2","AzureVM","AzureVM" 4 | "Tenant-2","subscription-2","westus2","ResoruceGroup-2","/subscriptions/12345678-1234-1234-1234-12345678abcd/resourceGroups/ResourceGroup-2/providers/Microsoft.Compute/virtualMachines/vm-3","Passed",,,"Healthy","/subscriptions/12345678-1234-1234-1234-12345678abcd/resourceGroups/ResourceGroup-2/providers/Microsoft.RecoveryServices/vaults/vault-3/backupPolicies/tco-scenario-1","Protected","Completed","12/19/2023 6:41:49AM","tco-scenario-1",,,"NotDeleted","VM;iaasvmcontainerv2;vault-2;vm-3","/subscriptions/12345678-1234-1234-1234-12345678abcd/resourceGroups/ResourceGroup-2/providers/Microsoft.RecoveryServices/vaults/vault-3/backupFabrics/Azure/protectionContainers/IaasVMContainer;iaasvmcontainerv2;vault-2;vm-3/protectedItems/VM;iaasvmcontainerv2;vault-2;vm-3","12/20/2023 6:35:00AM","/subscriptions/12345678-1234-1234-1234-12345678abcd/resourceGroups/ResourceGroup-2/providers/Microsoft.Compute/virtualMachines/vm-3","AzureVM","iaasvmcontainerv2;vault-2;vm-3","AzureVM","AzureVM" 5 | "Tenant-2","subscription-3","westus2","ResourceGroup-3","/subscriptions/12345678-1234-1234-1234-12345678ef12/resourceGroups/ResourceGroup-3/providers/Microsoft.Compute/virtualMachines/vm-4","Passed",,,"Healthy","/subscriptions/12345678-1234-1234-1234-12345678ef12/resourceGroups/ResourceGroup-3/providers/Microsoft.RecoveryServices/vaults/vault-4/backupPolicies/tco-scenario-2","Protected","Completed","12/20/2023 
3:33:32AM","tco-scenario-2",,,"NotDeleted","VM;iaasvmcontainerv2;vault-3;vm-4","/subscriptions/12345678-1234-1234-1234-12345678ef12/resourceGroups/ResourceGroup-3/providers/Microsoft.RecoveryServices/vaults/vault-4/backupFabrics/Azure/protectionContainers/IaasVMContainer;iaasvmcontainerv2;vault-3;vm-4/protectedItems/VM;iaasvmcontainerv2;vault-3;vm-4","12/20/2023 3:33:37AM","/subscriptions/12345678-1234-1234-1234-12345678ef12/resourceGroups/ResourceGroup-3/providers/Microsoft.Compute/virtualMachines/vm-4","AzureVM","iaasvmcontainerv2;vault-3;vm-4","AzureVM","AzureVM" -------------------------------------------------------------------------------- /CLOUD/EXAMPLES/aws_backup_costs-example.csv: -------------------------------------------------------------------------------- 1 | "AwsAccountId","AwsAccountAlias","Time-Period-Start","Time-Period-End","AWSBackupAmortizedCost","AWSBackupBlendedCost","AWSBackupNetAmortizedCost","AWSBackupNetUnblendedCost","AWSBackupNormalizedUsageAmount","AWSBackupUnblendedCost","AWSBackupUsageQuantity" 2 | "AwsAccountId-00000","AwsAccountAlias-00000","2023-10-01","2023-11-01","$1847.87","$1847.87","$1847.87","$1847.87","0","$1847.87","36957.372" 3 | "AwsAccountId-00000","AwsAccountAlias-00000","2023-11-01","2023-12-01","$1848.03","$1848.03","$1848.03","$1848.03","0","$1848.03","36960.552" 4 | "AwsAccountId-00000","AwsAccountAlias-00000","2023-12-01","2024-01-01","$1847.93","$1847.93","$1847.93","$1847.93","0","$1847.93","36958.547" 5 | "AwsAccountId-00000","AwsAccountAlias-00000","2024-01-01","2024-02-01","$1847.29","$1847.29","$1847.29","$1847.29","0","$1847.29","36945.859" 6 | "AwsAccountId-00000","AwsAccountAlias-00000","2024-02-01","2024-03-01","$1847.59","$1847.59","$1847.59","$1847.59","0","$1847.59","36951.705" 7 | "AwsAccountId-00000","AwsAccountAlias-00000","2024-03-01","2024-04-01","$2023.92","$2023.92","$2023.92","$2023.92","0","$2023.92","40910.509" 8 | 
"AwsAccountId-00000","AwsAccountAlias-00000","2024-04-01","2024-05-01","$2982.29","$2982.29","$2982.29","$2982.29","0","$2982.29","59645.827" 9 | "AwsAccountId-00000","AwsAccountAlias-00000","2024-05-01","2024-06-01","$1847.59","$1847.59","$1847.59","$1847.59","0","$1847.59","36951.806" 10 | "AwsAccountId-00000","AwsAccountAlias-00000","2024-06-01","2024-07-01","$1846.76","$1846.76","$1846.76","$1846.76","0","$1846.76","36935.145" 11 | "AwsAccountId-00000","AwsAccountAlias-00000","2024-07-01","2024-08-01","$1847.42","$1847.42","$1847.42","$1847.42","0","$1847.42","36948.338" 12 | "AwsAccountId-00000","AwsAccountAlias-00000","2024-08-01","2024-09-01","$1847.21","$1847.21","$1847.21","$1847.21","0","$1847.21","36944.18" 13 | "AwsAccountId-00000","AwsAccountAlias-00000","2024-09-01","2024-10-01","$1847.95","$1847.95","$1847.95","$1847.95","0","$1847.95","36958.979" 14 | "AwsAccountId-00001","AwsAccountAlias-00001","2023-10-01","2023-11-01","$0","$0","$0","$0","0","$0","0" 15 | "AwsAccountId-00001","AwsAccountAlias-00001","2023-11-01","2023-12-01","$0","$0","$0","$0","0","$0","0" 16 | "AwsAccountId-00001","AwsAccountAlias-00001","2023-12-01","2024-01-01","$0","$0","$0","$0","0","$0","0" 17 | "AwsAccountId-00001","AwsAccountAlias-00001","2024-01-01","2024-02-01","$0","$0","$0","$0","0","$0","0" 18 | "AwsAccountId-00001","AwsAccountAlias-00001","2024-02-01","2024-03-01","$0","$0","$0","$0","0","$0","0" 19 | "AwsAccountId-00001","AwsAccountAlias-00001","2024-03-01","2024-04-01","$0","$0","$0","$0","0","$0","0" 20 | "AwsAccountId-00001","AwsAccountAlias-00001","2024-04-01","2024-05-01","$0","$0","$0","$0","0","$0","0" 21 | "AwsAccountId-00001","AwsAccountAlias-00001","2024-05-01","2024-06-01","$0","$0","$0","$0","0","$0","0" 22 | "AwsAccountId-00001","AwsAccountAlias-00001","2024-06-01","2024-07-01","$0","$0","$0","$0","0","$0","0" 23 | "AwsAccountId-00001","AwsAccountAlias-00001","2024-07-01","2024-08-01","$0","$0","$0","$0","0","$0","0" 24 | 
"AwsAccountId-00001","AwsAccountAlias-00001","2024-08-01","2024-09-01","$0","$0","$0","$0","0","$0","0" 25 | "AwsAccountId-00001","AwsAccountAlias-00001","2024-09-01","2024-10-01","$0","$0","$0","$0","0","$0","0" 26 | "AwsAccountId-00002","AwsAccountAlias-00002","2023-10-01","2023-11-01","$0","$0","$0","$0","0","$0","0" 27 | "AwsAccountId-00002","AwsAccountAlias-00002","2023-11-01","2023-12-01","$0","$0","$0","$0","0","$0","0" 28 | "AwsAccountId-00002","AwsAccountAlias-00002","2023-12-01","2024-01-01","$0","$0","$0","$0","0","$0","0" 29 | "AwsAccountId-00002","AwsAccountAlias-00002","2024-01-01","2024-02-01","$0","$0","$0","$0","0","$0","0" 30 | "AwsAccountId-00002","AwsAccountAlias-00002","2024-02-01","2024-03-01","$0","$0","$0","$0","0","$0","0" 31 | "AwsAccountId-00002","AwsAccountAlias-00002","2024-03-01","2024-04-01","$0","$0","$0","$0","0","$0","0" 32 | "AwsAccountId-00002","AwsAccountAlias-00002","2024-04-01","2024-05-01","$0","$0","$0","$0","0","$0","0" 33 | "AwsAccountId-00002","AwsAccountAlias-00002","2024-05-01","2024-06-01","$0","$0","$0","$0","0","$0","0" 34 | "AwsAccountId-00002","AwsAccountAlias-00002","2024-06-01","2024-07-01","$0","$0","$0","$0","0","$0","0" 35 | "AwsAccountId-00002","AwsAccountAlias-00002","2024-07-01","2024-08-01","$0","$0","$0","$0","0","$0","0" 36 | "AwsAccountId-00002","AwsAccountAlias-00002","2024-08-01","2024-09-01","$0","$0","$0","$0","0","$0","0" 37 | "AwsAccountId-00002","AwsAccountAlias-00002","2024-09-01","2024-10-01","$0","$0","$0","$0","0","$0","0" 38 | -------------------------------------------------------------------------------- /M365/README.md: -------------------------------------------------------------------------------- 1 | ## Requirements 2 | 3 | * `PowerShell >= 5.1` for PowerShell Gallery. 4 | * Microsoft user permissions to run this script: Global Reader and Reports Reader 5 | 6 | * There are two ways users can authenticate to Exchange Online: 7 | * 1. 
App access: 8 | * Follow https://learn.microsoft.com/en-us/powershell/exchange/app-only-auth-powershell-v2?view=exchange-ps to create an app registration to be used for this script. 9 | * The API permissions required are: 'Reports.Read.All', 'User.Read.All', and 'Group.Read.All' from Microsoft Graph and `Exchange.ManageAsApp` from Office 365 Exchange Online. 10 | * To run this script successfully, you will need: 11 | a. Tenant ID. 12 | b. Client ID (App ID) of the app created above. 13 | c. Client Secret created on the app above. 14 | 15 | * 2. User access: 16 | * Login through the admin user account when prompted on the browser. 17 | 18 | 19 | ## Installation 20 | 21 | 1. Download the [Get-RubrikM365SizingInfo.ps1](https://github.com/rubrikinc/microsoft-365-sizing/archive/refs/heads/main.zip) PowerShell script to your local machine 22 | 2. Install the `Microsoft.Graph.Reports` and `ExchangeOnlineManagement` modules from the PowerShell Gallery 23 | 24 | ```powershell 25 | Install-Module Microsoft.Graph.Reports, Microsoft.Graph.Groups, ExchangeOnlineManagement 26 | ``` 27 | 28 | ## Usage 29 | 30 | 1. Open a PowerShell terminal and navigate to the folder/directory where you previously downloaded the [Get-RubrikM365SizingInfo.ps1](https://github.com/rubrikinc/microsoft-365-sizing/blob/main/Get-RubrikM365SizingInfo.ps1) file. 31 | 32 | 2. Run the script. 33 | 34 | ``` 35 | ./Get-RubrikM365SizingInfo.ps1 36 | ``` 37 | 38 | > NOTE - If you receive a PowerShell execution policy error message you can run the following command: 39 | 40 | ```powershell 41 | Set-ExecutionPolicy -Scope Process -ExecutionPolicy Bypass 42 | ``` 43 | 44 | 3. Authenticate and acknowledge report access permissions in the browser window/tab that appears. This will occur twice during the script execution. 45 | 46 | > Note: There is a known issue with the Microsoft authentication process that may result in an error message during the initial authentication process. 
If this occurs, re-run the script and the error will no longer show. 47 | 48 | 4. The script will run and the results will be written to a html file in the directory in which it was run. 49 | 50 | ``` 51 | .\RubrikMS365Sizing.html 52 | ``` 53 | 54 | ## Options 55 | 56 | If you want to run the script with app access, use the following: 57 | ``` 58 | ./Get-RubrikM365SizingInfo.ps1 -UseAppAccess $true 59 | ``` 60 | 61 | If you want to run the script against a single AD Group, use the following: 62 | ``` 63 | ./Get-RubrikM365SizingInfo.ps1 -ADGroup "RubrikEmployees" 64 | ``` 65 | 66 | The script will try to gather In Place Archive sizes for each mailbox. However, to do so, the script needs to query each mailbox user's information which can timeout for larger environments. If that's the case, you can skip gathering In Place Archives with the following: 67 | ``` 68 | ./Get-RubrikM365SizingInfo.ps1 -SkipArchiveMailbox $true 69 | ``` 70 | 71 | The script will try to gather stats for the Recoverable Items folder. This can also take awhile and timeout in larger environments. You can skip this by using: 72 | ``` 73 | ./Get-RubrikM365SizingInfo.ps1 -SkipRecoverableItems $true 74 | ``` 75 | 76 | The script will calculate annual growth rates for 10%, 20%, and 30% annual growth rates. You can change the 30% to a custom value such as 40% by using the following flag: 77 | ``` 78 | ./Get-RubrikM365SizingInfo.ps1 -AnnualGrowth 40 79 | ``` 80 | 81 | 82 | 83 | ## What information does the script access? 84 | 85 | The majority of the information collected is directly from the Microsoft 365 [Usage reports](https://docs.microsoft.com/en-us/microsoft-365/admin/activity-reports/activity-reports?view=o365-worldwide) that are found in the admin center. 86 | 87 | 88 | 89 | # Microsoft 365 Sizing PowerShell Script 90 | 91 | 92 | ``` 93 | ./Get-RubrikM365SizingInfo.ps1 94 | [INFO] Starting the Rubrik Microsoft 365 sizing script (v5.0). 
95 | [INFO] Connecting to the Microsoft Graph API using 'Reports.Read.All', 'User.Read.All', and 'Group.Read.All' permissions. 96 | [INFO] Retrieving usage info for ... 97 | - Exchange 98 | - Usage report for Exchange output to: .\getMailboxUsageDetail.csv 99 | [INFO] Retrieving usage info for ... 100 | - OneDrive 101 | - Usage report for OneDrive output to: .\getOneDriveUsageAccountDetail.csv 102 | [INFO] Retrieving usage info for ... 103 | - SharePoint 104 | - Usage report for SharePoint output to: .\getSharePointSiteUsageDetail.csv 105 | [INFO] Retrieving historical usage reports 106 | [INFO] Current usage data and historical reports may differ pending deletions 107 | [INFO] OneDrive usage: 108 | - Current usage (calculated with per-user stats): 1043.92 GB 109 | - Usage on 2024-03-05: 90.76 GB 110 | - Usage on 2023-09-08: 976.86 GB 111 | - Growth over 180 days: 67.06 GB 112 | - Growth annualized per year: 135.98 GB, 13% 113 | [INFO] SharePoint usage: 114 | - Current usage (calculated with per-user stats): 30.06 GB 115 | - Usage on 2024-03-05: 30.06 GB 116 | - Usage on 2023-09-08: 21.27 GB 117 | - Growth over 180 days: 8.79 GB 118 | - Growth annualized per year: 17.82 GB, 59% 119 | [INFO] Exchange usage: 120 | - Current usage (calculated with per-user stats): 0.81 GB 121 | - Usage on 2024-03-05: 0.32 GB 122 | - Usage on 2023-09-08: 1.57 GB 123 | - Growth over 180 days: -0.76 GB 124 | - Growth annualized per year: -1.54 GB, -190% 125 | [NOTE] If the growth looks odd, try using a different period (parameter: -Period 7, 30, 90, 180) days 126 | [INFO] Calculating the forecasted total storage need for Rubrik. 127 | [INFO] Disconnecting from the Microsoft Graph API. 128 | Now gathering In Place Archive usage 129 | This may take awhile since stats need to be gathered per user 130 | Progress will be written as they are gathered 131 | [INFO] Switching to the Microsoft Exchange Online Module for more detailed reporting capabilities. 
132 | [INFO] Retrieving all Exchange Mailbox In-Place Archive sizing 133 | [INFO] Found 4 mailboxes with In Place Archives 134 | [0 / 4] Processing mailboxes ... 135 | [INFO] Finished gathering stats on mailboxes with In Place Archive 136 | [INFO] Total # of mailboxes with In Place Archive: 4 137 | [INFO] Total size of mailboxes with In Place Archive: 0.01 GB 138 | [INFO] Total # of items of mailboxes with In Place Archive: 74 139 | [INFO] Disconnecting from the Microsoft Exchange Online Module 140 | 141 | M365 Sizing information has been written to /home/Rubrik-M365-Sizing-2024-03-07.html 142 | 143 | ``` 144 | 145 | 146 | ## Example Output 147 | 148 | ![image](https://user-images.githubusercontent.com/51362633/190453033-94379a84-8678-4592-9d9b-2b1dad96a521.png) 149 | 150 | 151 | 152 | 153 | -------------------------------------------------------------------------------- /MSSQL/RubrikSQLProfile-DBInfo.sql: -------------------------------------------------------------------------------- 1 | /********************************************************** 2 | Rubrik SQL Server profile queries. 3 | DBInfo 4 | *********************************************************/ 5 | 6 | /********************************************************** 7 | Get what Enterprise Features are enabled 8 | *********************************************************/ 9 | IF OBJECT_ID('tempdb.dbo.##enterprise_features') IS NOT NULL 10 | DROP TABLE ##enterprise_features 11 | 12 | CREATE TABLE ##enterprise_features( 13 | ServerName SYSNAME, 14 | dbid SYSNAME, 15 | dbname SYSNAME, 16 | feature_name VARCHAR(100), 17 | feature_id INT 18 | ) 19 | EXEC sp_MSforeachdb 20 | N' USE [?] 
21 | -- IF (SELECT COUNT(*) FROM sys.dm_db_persisted_sku_features) > 0 22 | -- BEGIN 23 | INSERT INTO ##enterprise_features 24 | SELECT @@SERVERNAME, dbid=DB_ID(), dbname=DB_NAME(),feature_name,feature_id 25 | FROM sys.dm_db_persisted_sku_features 26 | -- END '; 27 | 28 | /********************************************************** 29 | Get how many database files each database has 30 | *********************************************************/ 31 | IF OBJECT_ID('tempdb.dbo.##database_files') IS NOT NULL 32 | DROP TABLE ##database_files 33 | 34 | CREATE TABLE ##database_files( 35 | ServerName SYSNAME, 36 | database_id SYSNAME, 37 | DatabaseName NVARCHAR(50), 38 | NumberOfFiles INT 39 | ) 40 | INSERT INTO ##database_files 41 | SELECT @@servername AS ServerName, 42 | database_id, 43 | DB_NAME(database_id) as DatabaseName, 44 | COUNT(database_id) as NumberOfFiles 45 | FROM sys.master_files 46 | GROUP BY database_id; 47 | /********************************************************** 48 | Get Availability Info 49 | *********************************************************/ 50 | IF OBJECT_ID('tempdb.dbo.##AG_Info') IS NOT NULL 51 | DROP TABLE ##AG_Info 52 | 53 | CREATE TABLE ##AG_Info( 54 | ServerName SYSNAME, 55 | DatabaseName NVARCHAR(50), 56 | AG_Name NVARCHAR(50), 57 | ) 58 | IF (SELECT cast(left(cast(serverproperty('productversion') AS VARCHAR), 4) AS DECIMAL(5, 1))) >= 11 59 | BEGIN 60 | INSERT INTO ##AG_Info 61 | SELECT @@servername AS ServerName 62 | , db_name(drs.database_id) AS dbname 63 | , ag_name 64 | FROM sys.dm_hadr_database_replica_states drs 65 | JOIN sys.dm_hadr_name_id_map map 66 | ON drs.group_id = map.ag_id 67 | END 68 | ELSE 69 | BEGIN 70 | INSERT INTO ##AG_Info 71 | SELECT @@servername AS ServerName 72 | , null as dbname 73 | , null as ag_name 74 | END; 75 | /********************************************************** 76 | Create the output for the server 77 | *********************************************************/ 78 | 79 | WITH LogBackupInfo 80 | 
AS 81 | ( 82 | SELECT database_name 83 | ,AVG(DATEDIFF(ss,backup_start_date,backup_finish_date)/1.0) AS AverageLogBackupTime 84 | ,SUM(backup_size/1024.0/1024.0) AS LogBackupTotalMB 85 | FROM msdb.dbo.backupset 86 | WHERE type = 'L' 87 | AND backup_finish_date > DATEADD(dd,-7,GETDATE()) 88 | GROUP BY database_name 89 | ), 90 | FullBackupInfo 91 | AS 92 | ( 93 | SELECT database_name 94 | ,AVG(backup_size/1024.0/1024.0) AS AverageBackupSizeMB 95 | ,AVG(DATEDIFF(ss,backup_start_date,backup_finish_date)/1.0) AS AverageBackupTime 96 | FROM msdb.dbo.backupset 97 | WHERE type = 'D' 98 | GROUP BY database_name 99 | ), 100 | LogBackupInterval 101 | AS 102 | ( 103 | SELECT a.database_name, a.backup_start_date, ISNULL( b.PrevBkpDate, a.backup_start_date ) PreviousBackupStartDate, DATEDIFF(mi,ISNULL( b.PrevBkpDate, a.backup_start_date ), a.backup_start_date) BackupInterval 104 | FROM msdb.dbo.backupset a 105 | OUTER APPLY ( SELECT TOP 1 backup_start_date AS PrevBkpDate 106 | FROM msdb.dbo.backupset bb WHERE bb.database_guid = a.database_guid 107 | AND bb.type = a.type AND bb.backup_start_date < a.backup_start_date and bb.backup_start_date > DATEADD(dd,-7,GETDATE()) ORDER BY bb.backup_start_date DESC) b 108 | WHERE type = 'L' 109 | AND backup_start_date > DATEADD(dd,-7,GETDATE()) 110 | ), 111 | EnterpriseFeatures 112 | AS 113 | ( 114 | SELECT dbname AS 'DatabaseName' 115 | ,[ChangeCapture] 116 | ,[ColumnStoreIndex] 117 | ,[Compression] 118 | ,[MultipleFSContainers] 119 | ,[InMemoryOLTP] 120 | ,[Partitioning] 121 | ,[TransparentDatabaseEncryption] 122 | FROM 123 | (SELECT dbname, feature_name FROM ##enterprise_features) e 124 | PIVOT 125 | ( COUNT(feature_name) FOR feature_name IN ([ChangeCapture], [ColumnStoreIndex], [Compression], [MultipleFSContainers],[InMemoryOLTP],[Partitioning],[TransparentDatabaseEncryption]) ) 126 | AS PVT 127 | ), 128 | DBInfo 129 | AS 130 | ( 131 | select 132 | db.name 133 | ,convert(bigint,sum(mf.size/128.0)) DBTotalSizeMB 134 | 135 | FROM 
sys.databases db 136 | JOIN sys.master_files mf ON db.database_id = mf.database_id 137 | group by db.name 138 | ), 139 | DBFiles 140 | AS 141 | ( 142 | SELECT * FROM ##database_files 143 | ), 144 | AGInfo 145 | AS( 146 | SELECT * FROM ##AG_Info 147 | ) 148 | 149 | SELECT 150 | @@SERVERNAME AS ServerName 151 | ,SERVERPROPERTY('ProductVersion') AS SQLVersion 152 | ,db.name 153 | ,db.recovery_model_desc 154 | ,ISNULL(lbi.LogBackupTotalMB,0) AS SevenDayLogBackupMB 155 | ,ISNULL(fbi.AverageBackupSizeMB,0) AS AverageFullMB 156 | ,ISNULL(fbi.AverageBackupTime,0) AS AverageFullTimeSec 157 | ,ISNULL(lbi.AverageLogBackupTime,0) AS AverageLogTimeSec 158 | ,DBInfo.DBTotalSizeMB 159 | ,AVG(lbii.BackupInterval) AS AverageLogBackupInterval 160 | ,ISNULL(ef.ChangeCapture,0) AS ChangeCapture 161 | ,ISNULL(ef.ColumnStoreIndex,0) AS ColumnStoreIndex 162 | ,ISNULL(ef.[Compression],0) AS Compression 163 | ,ISNULL(ef.[MultipleFSContainers],0) AS FILESTREAM 164 | ,ISNULL(ef.[InMemoryOLTP], 0) AS InMemoryOLTP 165 | ,ISNULL(ef.[Partitioning],0) AS Partitioning 166 | ,ISNULL(ef.[TransparentDatabaseEncryption],0) as TransparentDatabaseEncryption 167 | ,DBFiles.NumberOfFiles 168 | ,AG_Name 169 | FROM sys.databases db 170 | JOIN DBInfo ON db.name = DBInfo.name 171 | LEFT OUTER JOIN LogBackupInfo lbi ON db.name = lbi.database_name 172 | LEFT OUTER JOIN FullBackupInfo fbi ON db.name = fbi.database_name 173 | LEFT OUTER JOIN LogBackupInterval lbii ON db.name = lbii.database_name 174 | LEFT OUTER JOIN EnterpriseFeatures ef ON db.name = ef.DatabaseName 175 | LEFT OUTER JOIN AGInfo agi on db.name = agi.DatabaseName 176 | JOIN DBFiles ON db.name = DBFiles.DatabaseName 177 | WHERE db.database_id != 2 178 | GROUP BY db.name 179 | ,db.recovery_model_desc 180 | ,DBInfo.DBTotalSizeMB 181 | ,ISNULL(lbi.LogBackupTotalMB,0) 182 | ,ISNULL(fbi.AverageBackupSizeMB,0) 183 | ,ISNULL(fbi.AverageBackupTime,0) 184 | ,ISNULL(lbi.AverageLogBackupTime,0) 185 | ,ISNULL(ef.ChangeCapture,0) 186 | 
,ISNULL(ef.[ColumnStoreIndex],0) 187 | ,ISNULL(ef.[Compression],0) 188 | ,ISNULL(ef.[MultipleFSContainers],0) 189 | ,ISNULL(ef.[InMemoryOLTP],0) 190 | ,ISNULL(ef.[Partitioning],0) 191 | ,ISNULL(ef.[TransparentDatabaseEncryption],0) 192 | ,DBFiles.NumberOfFiles 193 | ,AG_Name 194 | ORDER BY name 195 | -------------------------------------------------------------------------------- /IDENTITY/README.md: -------------------------------------------------------------------------------- 1 | # Rubrik Identity Auditing Scripts for Licensing 2 | 3 | ## Overview 4 | 5 | This repository contains a suite of PowerShell scripts designed specifically for Rubrik's customers to audit their user identities for licensing purposes. The scripts connect to both on-premises Active Directory (AD) and Microsoft Entra ID (formerly Azure Active Directory) to count the number of unique human identities. This data is then used to ensure fair and accurate licensing of Rubrik's products. 6 | 7 | The primary goal of these scripts is to distinguish between human users and non-human accounts (e.g., service accounts, applications) to avoid over-licensing. The scripts generate detailed reports that can be shared with Rubrik. 8 | 9 | ## Scripts 10 | 11 | This repository contains two main scripts: 12 | 13 | 1. **`Get-AdHumanIdentity.ps1`**: For auditing on-premises Active Directory. 14 | 2. **`Get-EntraHumanIdentity.ps1`**: For auditing Microsoft Entra ID. 15 | 16 | --- 17 | 18 | ## `Get-AdHumanIdentity.ps1` 19 | 20 | This script is a Rubrik utility for counting human identities in a customer's Active Directory (AD) environment. The data collected is used for licensing Rubrik's products. The script identifies and categorizes all user and service accounts to determine the number of unique human users. 21 | 22 | ### Features 23 | 24 | - **Multi-Domain Auditing**: Scans a single domain, a list of specified domains, or all domains in the current forest. 
25 | - **Account Classification**: Categorizes user accounts based on their activity status to help differentiate between human and non-human accounts: 26 | - **Active**: Users who have logged in within the last 180 days. 27 | - **Inactive**: Users who have not logged in for more than 180 days. 28 | - **Never Logged In**: Accounts that have never recorded a logon event. 29 | - **Service Account Detection**: Identifies different types of non-human accounts to exclude them from the human identity count: 30 | - Managed Service Accounts (MSA) 31 | - Group Managed Service Accounts (gMSA) 32 | - Accounts with the `PasswordNeverExpires` flag set. 33 | - Accounts matching custom naming patterns (e.g., `*svc*`, `*_bot`). 34 | - **Flexible Reporting**: Generates reports in two modes: 35 | - `UserPerOU`: A detailed breakdown of accounts per Organizational Unit (OU). 36 | - `Summary`: A high-level summary of accounts per domain. 37 | - **CSV Export**: Automatically exports the audit results to a timestamped CSV file that can be shared with Rubrik. 38 | - **Logging**: Creates a detailed log file for each execution, capturing all actions and potential errors. 39 | 40 | ### Prerequisites 41 | 42 | - **PowerShell Version**: 5.1 or later. 43 | - **Active Directory Module**: The `ActiveDirectory` PowerShell module must be installed. This is typically included with the Remote Server Administration Tools (RSAT) for Active Directory Domain Services. 44 | - **Permissions**: The user running the script must have sufficient permissions to read user and service account information from the target Active Directory domains. 
45 | 46 | ### Parameters 47 | 48 | | Parameter | Description | Required | Default Value | 49 | | ----------------------------- | --------------------------------------------------------------------------------------------------------------------------------------| -------- | ------------- | 50 | | `SpecificDomains` | An array of fully qualified domain names to audit (e.g., `"corp.domain.local"`). If omitted, all domains in the forest are audited. | No | (All domains in the forest) | 51 | | `UserServiceAccountNamesLike` | An array of wildcard patterns to identify service accounts by name (e.g., `"*svc*"`, `"*_bot*"`). | No | (None) | 52 | | `Mode` | The reporting mode. Can be `UserPerOU` for a detailed report or `Summary` for a domain-level summary. | Yes | `UserPerOU` | 53 | 54 | ### Usage Examples 55 | 56 | **Example 1: Audit all domains in the forest with a detailed per-OU report.** 57 | 58 | ```powershell 59 | .\Get-AdHumanIdentity.ps1 -Mode UserPerOU 60 | ``` 61 | 62 | **Example 2: Audit a specific domain and identify service accounts by name, with a summary report.** 63 | 64 | ```powershell 65 | .\Get-AdHumanIdentity.ps1 -SpecificDomains "corp.domain.local" -UserServiceAccountNamesLike "*svc*", "*_bot*" -Mode Summary 66 | ``` 67 | 68 | ### Output 69 | 70 | - **CSV Report**: A CSV file named `UserAudit__.csv` is created in the `ADReports` directory. 71 | - **Log File**: A log file named `AD_Audit_.log` is created in the `ADReports` directory. 72 | 73 | --- 74 | 75 | ## `Get-EntraHumanIdentity.ps1` 76 | 77 | This script is a Rubrik utility for counting human identities in a customer's Entra ID tenant. The data collected is used for licensing Rubrik's products. The script connects to the Microsoft Graph API to identify and categorize all user accounts, service principals, and applications to determine the number of unique human users. 
78 | 79 | ### Features 80 | 81 | - **Microsoft Graph Integration**: Connects to Microsoft Graph with the necessary permissions to read identity data. 82 | - **User Activity Analysis**: Identifies inactive users based on a configurable number of days of inactivity to help differentiate between active and dormant human users. 83 | - **Service Account Identification**: Flags potential service accounts based on naming patterns in their User Principal Name (UPN) to exclude them from the human identity count. 84 | - **Application and Service Principal Ownership**: Optionally performs a deep scan to count the number of applications and service principals owned by each user, which helps in distinguishing human from non-human accounts. 85 | - **Comprehensive Reporting**: Generates reports in two modes: 86 | - `Full`: A detailed per-user report. 87 | - `Summary`: An aggregated report by domain. 88 | - **Multiple Export Formats**: Exports reports in both CSV and a user-friendly HTML format, which can be shared with Rubrik. 89 | - **Automated Module Installation**: Checks for and installs the required Microsoft Graph PowerShell modules if they are not already present. 90 | - **Logging**: Creates a detailed log file for each execution. 91 | 92 | ### Prerequisites 93 | 94 | - **PowerShell Version**: 7.0 or later. 95 | - **Microsoft Graph Modules**: The script requires the following PowerShell modules: 96 | - `Microsoft.Graph.Users` 97 | - `Microsoft.Graph.Applications` 98 | - `Microsoft.Graph.Identity.DirectoryManagement` 99 | The script will attempt to install these modules automatically if they are missing. 100 | - **Permissions**: The user running the script must have permissions to grant consent for the required Microsoft Graph API scopes (`User.Read.All`, `Directory.Read.All`, `Application.Read.All`, `AuditLog.Read.All`). This may require an administrator account. 
101 | 102 | ### Parameters 103 | 104 | | Parameter | Description | Required | Default Value | 105 | | ----------------------------- | ---------------------------------------------------------------------------------------------------------- | -------- | ------------- | 106 | | `UserServiceAccountNamesLike` | An array of patterns to identify service accounts by their UPN (e.g., `"svc-"`, `"sa-"`). | No | (None) | 107 | | `Mode` | The reporting mode. Can be `Full` for a detailed report or `Summary` for an aggregated report. | Yes | `Full` | 108 | | `DaysInactive` | The number of days of inactivity to use when flagging users as inactive. | No | `180` | 109 | | `CheckOwnership` | A switch parameter that, when present, enables the time-consuming check for application and service principal ownership. | No | (Not present) | 110 | 111 | ### Usage Examples 112 | 113 | **Example 1: Generate a full report with ownership analysis, for users inactive for 90 days.** 114 | 115 | ```powershell 116 | .\Get-EntraHumanIdentity.ps1 -Mode Full -DaysInactive 90 -CheckOwnership 117 | ``` 118 | 119 | **Example 2: Generate a summary report, identifying service accounts with "svc-" in their UPN.** 120 | 121 | ```powershell 122 | .\Get-EntraHumanIdentity.ps1 -Mode Summary -UserServiceAccountNamesLike "svc-" 123 | ``` 124 | 125 | ### Output 126 | 127 | - **CSV Reports**: CSV files are created in the `EntraReports` directory (e.g., `Full_ByUser_.csv`, `Full_ByDomain_.csv`). 128 | - **HTML Report**: A single HTML file summarizing the audit is created in the `EntraReports` directory (e.g., `Full_Report_.html`). 129 | - **Log File**: A log file named `EntraAudit_.log` is created in the `EntraReports` directory. 130 | 131 | --- 132 | 133 | ## How to Use 134 | 135 | 1. **Download the scripts:** Download the scripts from this repository to a machine that has access to your identity systems (Active Directory or Entra ID). 136 | 2. **Open a PowerShell terminal.** 137 | 3. 
**Run the desired script with the appropriate parameters.** Make sure you meet the prerequisites for the script you are running. 138 | 4. **Share the generated reports with your Rubrik representative.** 139 | 140 | ## Contributing 141 | 142 | These scripts are provided by Rubrik for licensing purposes. For support, please contact your Rubrik representative. 143 | 144 | ## License 145 | 146 | This project is licensed under the MIT License. See the `LICENSE` file for details. 147 | -------------------------------------------------------------------------------- /VMWARE/Get-VMwareDiskStats.ps1: -------------------------------------------------------------------------------- 1 | #requires -module VMware.PowerCLI 2 | # https://build.rubrik.com 3 | 4 | <# 5 | .SYNOPSIS 6 | Pulls VMware VM and VMDK write throughput disk stats for Rubrik sizing. 7 | 8 | .DESCRIPTION 9 | The Get-VMwareDiskStats.ps1 script pulls VM and VMDK write throughput disk stats for Rubrik sizing. 10 | Requires PowerCLI and a vCenter read-only user. 11 | 12 | .NOTES 13 | Written by Steven Tong for community usage 14 | GitHub: stevenctong 15 | Date: 10/20/21 16 | 17 | vCenter stats collection should be configured for at least Level 2, duration 5 min, saved for 5 days. 18 | If there is not enough space in vCenter to save it for 5 days then run this script multiple times to collect the data. 19 | Make sure that the data gathering covers the busiest days for the VMs. 20 | To configure stats collection, click on your vCenter -> Configure -> General -> Edit 21 | 22 | $highKBps and $lowKBps are set to highlight any VMDKs that we need to take a closer look at for sizing. 23 | 24 | This script will attempt to gather 5 min stats for a list of VMs. 
25 | Three different results files may be created: 26 | 1) VM level stats - for each 5 min time period, contains the sum of all VM write throughput 27 | 2) VMDK level stats - a list of any VMDK that has a write throughput greater than $lowKBps 28 | 3) Raw stats that are gathered - if the # of rows are too large, the output will be split by each day 29 | 30 | By default, the script will gather stats for all VMs listed in vCenter. 31 | You can also pass in a CSV with a list of VMs to gather stats for. 32 | The list of VMs should have a column "Name" with a list of VMs to gather stats for. 33 | 34 | A list of VMs in vCenter can also be generated with this script which can be edited and then passed back in. 35 | 36 | If you are using Windows Powershell (not Core) you can also store vCenter credentials using: 37 | - New-VICredentialStoreItem 38 | 39 | .EXAMPLE 40 | ./Get-VMwareDiskStats.ps1 41 | Prompts for vCenter server & login and gets stats for all VMs. 42 | 43 | ./Get-VMwareDiskStats.ps1 -server 44 | Prompts for vCenter username & password and gets stats on all VMs. 45 | 46 | ./Get-VMwareDiskStats.ps1 -server -username -password 47 | Pass your vCenter credentials as parameters and get stats on all VMs. 48 | 49 | ./Get-VMwareDiskStats.ps1 -outputVMfile 50 | Outputs a list of VMs from vCenter and exits script. Prompts for vCenter info. 51 | 52 | ./Get-VMwareDiskStats.ps1 -importVMfile 53 | Imports a list of VMs to gather stats on. VMs should be under a "Name" column. Prompts for vCenter info. 
54 | 55 | #> 56 | 57 | param ( 58 | [CmdletBinding()] 59 | 60 | # vCenter server hostname or IP address 61 | [Parameter(Mandatory=$false)] 62 | [string]$server = '', 63 | 64 | # vCenter username 65 | [Parameter(Mandatory=$false)] 66 | [string]$user = '', 67 | 68 | # vCenter password 69 | [Parameter(Mandatory=$false)] 70 | [string]$password = '', 71 | 72 | # Specify a filename just to output a list of VMs to a CSV file 73 | [Parameter(Mandatory=$false)] 74 | [string]$outputVMfile = '', 75 | 76 | # Specify a filename with a list of VMs to gather stats on using the "Name" column 77 | [Parameter(Mandatory=$false)] 78 | [string]$importVMfile = $null 79 | ) 80 | 81 | Import-Module VMware.PowerCLI 82 | 83 | # Base filename to output the stats to 84 | $vmCSVoutput = "./vm_level_disk_stats-" 85 | $vmdkCSVoutput = "./vmdk_level_disk_stats-" 86 | $rawCSVoutput = "./vm_raw_disk_stats-" 87 | 88 | # List of stats to gather 89 | $statList = @('virtualDisk.write.average') 90 | # Max sustained KBps to highlight for a VMDK 91 | $highKBps = 50000 92 | # Warning sustained KBps to highlight for a VMDK 93 | $lowKBps = 40000 94 | 95 | ### Begin - PowerCLI authentication ### 96 | if (!$server) { $server = Read-Host "vCenter hostname or IP" } 97 | 98 | if (!$user) { 99 | Connect-VIServer -server $server 100 | } else { 101 | Connect-VIServer -server $server -user $user -password $password 102 | } 103 | ### End - PowerCLI authentication 104 | 105 | $date = Get-Date 106 | 107 | # Get a list of VMs, output to the specified CSV file, and exit script 108 | if ($outputVMfile -ne '') 109 | { 110 | Get-VM | Export-Csv -NoTypeInformation -Path $outputVMfile 111 | exit 112 | } 113 | 114 | # If a CSV with a list of VMs is specified, import it. 115 | # Otherwise, get a list of all VMs from vCenter to run the stats collection against.
116 | if ($importVMfile -ne '') 117 | { 118 | Write-Host "`nImporting VM list CSV file: $importVMfile`n" -foregroundcolor Green 119 | $vmList = Import-CSV $importVMfile | Sort-Object 120 | } else { 121 | $vmList = Get-VM | Sort-Object 122 | } 123 | 124 | # Initialize arrays to hold the gathered data in 125 | $vmDataArray = @() 126 | $vmdkDataArray = @() 127 | 128 | # For each VM, gather stats (5-min interval samples from vCenter) 129 | foreach ($vm in $vmList) 130 | { 131 | Write-Host "Getting stats for VM: $($vm.Name)" 132 | # Loop through each stat we want to gather 133 | foreach ($stat in $statList) 134 | { 135 | $data = Get-Stat -entity $vm.Name -stat $stat -interval 5 136 | 137 | # If there is no "Instance" (disk) value then add it to the VM level array 138 | $vmDataArray += $data | Where Instance -eq '' 139 | 140 | # If there is "Instance" (disk) value then add it to the VMDK level array 141 | $vmdkDataArray += $data | Where Instance -ne '' 142 | } # foreach ($stat in $statList) 143 | } # foreach ($vm in $vmList) 144 | 145 | # Initialize array to hold calculated results in 146 | $vmResults = @() 147 | 148 | # For VM level stats, group everything by Timestamp 149 | $vmGroup = $vmDataArray | Group-Object -Property Timestamp 150 | 151 | # For VM level stats, sum up the values for each Timestamp 152 | foreach ($i in $vmGroup) 153 | { 154 | $KBpsPerTimestamp = [PSCustomObject] @{ 155 | KBps = ($i.group | Measure-Object -Property Value -Sum).Sum 156 | Timestamp = $i.name 157 | MetricId = $i.group[0].MetricId 158 | } 159 | $vmResults += $KBpsPerTimestamp 160 | } # foreach ($i in $vmGroup) 161 | 162 | # For each VMDK level stat, build a list of VMDKs whose Value are greater than $highKBps 163 | $highVMDKs = $vmdkDataArray | Where Value -ge $highKBps | Sort-Object -Property Value -Descending | 164 | Select @{n='KBps'; e={$_.Value}}, @{n='VM'; e={$_.Entity}}, 'Instance', 'Timestamp', 'MetricId' 165 | 166 | # For each VMDK level stat, build a list of VMDKs whose Value are greater than $lowKBps 167 |
$lowVMDKs = $vmdkDataArray | Where { $_.value -ge $lowKBps -and $_.value -lt $highKBps } | 168 | Sort-Object -Property Value -Descending | Select @{n='KBps'; e={$_.Value}}, @{n='VM'; e={$_.Entity}}, 'Instance', 'Timestamp', 'MetricId' 169 | 170 | Write-Host "" 171 | Write-Host "These VMDKs had sustained write throughput that crossed the low threshold of $lowKBps KBps:" -foregroundcolor green 172 | $lowVMDKs | Format-Table 173 | 174 | Write-Host "These VMDKs had sustained write throughput that crossed the high threshold of $highKBps KBps:" -foregroundcolor green 175 | $highVMDKs | Format-Table 176 | 177 | Write-Host "Top 5 time periods that had the highest sustained write throughput:" -foregroundcolor green 178 | $vmResults | Sort-Object KBps -Desc | Select -First 5 | Format-Table 179 | 180 | Write-Host 181 | Write-Host "Total times a VMDK crossed the low threshold of $lowKBps KBps: $($lowVMDKs.count)" 182 | Write-Host "Total times a VMDK crossed the high threshold of $highKBps KBps: $($highVMDKs.count)" 183 | Write-Host "" 184 | Write-Host "Number of unique VMs that crossed the low threshold of $lowKBps KBps: $(($lowVMDKs.VM | 185 | Sort-Object | Unique).count)" -foregroundcolor green 186 | Write-Host "Number of unique VMs that crossed the high threshold of $highKBps KBps: $(($highVMDKs.VM | 187 | Sort-Object | Unique).count)" -foregroundcolor green 188 | Write-Host "Highest sustained write throughput: $(($vmResults.KBps | Measure -Maximum).maximum) KBps" -foregroundcolor green 189 | Write-Host "Average sustained write throughput: $((($vmResults.KBps | Measure -Average).average).ToString("#.###")) KBps" -foregroundcolor green 190 | Write-Host 191 | 192 | if ($vmResults.count -gt 0) { 193 | $vmResults | Export-CSV -NoTypeInformation -Path $vmCSVoutput$($date.ToString("yyyy-MM-dd_HHmm")).csv 194 | Write-Host "VM level results output to: $vmCSVoutput$($date.ToString("yyyy-MM-dd_HHmm")).csv" -foregroundcolor green 195 | } else { 196 | Write-Host "No VM level
results to output" -foregroundcolor green 197 | } 198 | 199 | # Combine VMDKs that are above $lowKBps and $highKBps into one and output to a CSV file if there are results 200 | $highVMDKs += $lowVMDKs 201 | if ($highVMDKs.count -gt 0) { 202 | $highVMDKs | Export-CSV -NoTypeInformation -Path $vmdkCSVoutput$($date.ToString("yyyy-MM-dd_HHmm")).csv 203 | Write-Host "VMDK level results output to: $vmdkCSVoutput$($date.ToString("yyyy-MM-dd_HHmm")).csv" -foregroundcolor green 204 | } else { 205 | Write-Host "No VMDK throughput higher than low threshold to output" -foregroundcolor green 206 | } 207 | 208 | # Combine the raw results for the VM summary results and VMDK results 209 | $vmDataArray += $vmdkDataArray 210 | 211 | # Excel supports 1M rows. If the raw data is less than a large number, output raw results to a single file 212 | # Otherwise, split up outputting the raw data by date 213 | if ($vmDataArray.count -eq 0) { 214 | Write-Host "No raw data captured to output" -foregroundcolor green 215 | } elseif ($vmDataArray.count -lt 800000) 216 | { 217 | $vmDataArray | Select 'Timestamp', 'Value', 'Entity', 'Instance', 'MetricId', 'Unit', 'IntervalSecs' | 218 | Export-CSV -NoTypeInformation -Path $rawCSVoutput$($date.ToString("yyyy-MM-dd_HHmm")).csv 219 | Write-Host "Raw data captured output to: $rawCSVoutput$($date.ToString("yyyy-MM-dd_HHmm")).csv" -foregroundcolor green 220 | } else { 221 | Write-Host "Splitting raw data output by date since number of rows was very large" 222 | for ($days = 0; $days -gt -7; $days--) { 223 | $dateComp = $date.date.adddays($days) 224 | 225 | if (($vmDataArray | Where-object { $_.timestamp.date -eq $dateComp.date }).count -gt 0) { 226 | $vmDataArray | Where-object { $_.timestamp.date -eq $dateComp.date } | 227 | Select 'Timestamp', 'Value', 'Entity', 'Instance', 'MetricId', 'Unit', 'IntervalSecs' | 228 | Export-CSV -NoTypeInformation -Path $rawCSVoutput$($dateComp.ToString("MM-dd"))_$($date.ToString("yyyy-MM-dd_HHmm")).csv 229 | Write-Host 
"Raw data captured output to: $rawCSVoutput$($dateComp.ToString("MM-dd"))_$($date.ToString("yyyy-MM-dd_HHmm")).csv" -foregroundcolor green 230 | } 231 | } 232 | } 233 | -------------------------------------------------------------------------------- /ORACLE/rbkDataCollection_11g.sql: -------------------------------------------------------------------------------- 1 | REM Oracle Data Collection Script 2 | 3 | -- connect to the system schema 4 | --conn SYSTEM@$1 5 | 6 | -- create temporary table to hold all collected 7 | 8 | create global temporary table rubrikDataCollection 9 | ( 10 | con_id number, 11 | conName varchar2(128), 12 | dbSizeMB number, 13 | allocated_dbSizeMB number, 14 | biggestBigfileMB number, 15 | dailyChangeRate number, 16 | dailyRedoSize number, 17 | datafileCount number, 18 | hostName varchar2(64), 19 | instName varchar2(16), 20 | dbVersion varchar2(17), 21 | -- dbEdition varchar2(7), 22 | -- changing dbEdition size to support v$instance.version size 23 | dbEdition varchar2(100), 24 | platformName varchar2(101), 25 | dbName varchar2(9), 26 | dbUniqueName varchar2(30), 27 | dbID varchar2(200), 28 | flashbackEnabled varchar2(18), 29 | archiveLogEnabled varchar2(12), 30 | spfile varchar2(200), 31 | patchLevel varchar2(100), 32 | cpuCount number, 33 | blockSize number, 34 | racEnabled varchar2(20), 35 | sgaMaxSize number, 36 | sgaTarget number, 37 | pgaAggregateTarget number, 38 | physMemory number, 39 | dNFSenabled varchar2(20), 40 | GoldenGate varchar2(20), 41 | exadataEnabled varchar2(20), 42 | bctEnabled varchar2(20), 43 | LogArchiveConfig varchar2(200), 44 | ArchiveLagTarget number, 45 | tablespaceCount number, 46 | encryptedTablespaceCount number, 47 | encryptedDataSizeMB number, 48 | bigfileTablespaceCount number, 49 | bigfileDataSizeMB number, 50 | logfileCount number, 51 | tempfileCount number 52 | ) 53 | on commit preserve rows; 54 | 55 | 56 | insert into rubrikDataCollection 57 | ( 58 | conName, 59 | hostName, 60 | instName, 61 | 
dbVersion, 62 | platformName, 63 | dbName, 64 | dbUniqueName, 65 | dbID, 66 | flashbackEnabled, 67 | archiveLogEnabled 68 | ) 69 | select db.name, 70 | inst.host_name, 71 | inst.instance_name, 72 | inst.version, 73 | db.platform_name, 74 | db.name, 75 | db.db_unique_name, 76 | db.dbid, 77 | db.flashback_on, 78 | db.log_mode 79 | from v$instance inst, 80 | v$database db 81 | / 82 | 83 | 84 | -- to be improved 85 | -- dbEdition (EE, SE) info 86 | UPDATE rubrikDataCollection rbk 87 | SET dbEdition = (select * from v$version where ROWNUM = 1) 88 | WHERE instName = (select instance_name from v$instance) 89 | and hostName= (select host_name from v$instance); 90 | 91 | UPDATE rubrikDataCollection rbk 92 | SET spfile = (select decode(count(*), 0, 'NO', 'YES') from v$parameter where name='spfile') 93 | WHERE instName = (select instance_name from v$instance) 94 | and hostName= (select host_name from v$instance); 95 | 96 | UPDATE rubrikDataCollection rbk 97 | SET patchLevel = (select * from (select comments from DBA_REGISTRY_HISTORY where ACTION_TIME is not null order by action_time desc) where ROWNUM = 1) 98 | WHERE instName = (select instance_name from v$instance) 99 | and hostName= (select host_name from v$instance); 100 | 101 | UPDATE rubrikDataCollection rbk 102 | SET cpuCount = (SELECT value from v$parameter where name='cpu_count') 103 | WHERE instName = (select instance_name from v$instance) 104 | and hostName= (select host_name from v$instance); 105 | 106 | UPDATE rubrikDataCollection rbk 107 | SET blockSize = (SELECT value from v$parameter where name='db_block_size') 108 | WHERE instName = (select instance_name from v$instance) 109 | and hostName= (select host_name from v$instance); 110 | 111 | UPDATE rubrikDataCollection rbk 112 | SET racEnabled = (SELECT value from v$parameter where name='cluster_database') 113 | WHERE instName = (select instance_name from v$instance) 114 | and hostName= (select host_name from v$instance); 115 | 116 | UPDATE rubrikDataCollection 
rbk 117 | SET sgaMaxSize = (SELECT value from v$parameter where name='sga_max_size') 118 | WHERE instName = (select instance_name from v$instance) 119 | and hostName= (select host_name from v$instance); 120 | 121 | UPDATE rubrikDataCollection rbk 122 | SET sgaTarget = (SELECT value from v$parameter where name='sga_target') 123 | WHERE instName = (select instance_name from v$instance) 124 | and hostName= (select host_name from v$instance); 125 | 126 | UPDATE rubrikDataCollection rbk 127 | SET pgaAggregateTarget = (SELECT value from v$parameter where name='pga_aggregate_target') 128 | WHERE instName = (select instance_name from v$instance) 129 | and hostName= (select host_name from v$instance); 130 | 131 | UPDATE rubrikDataCollection rbk 132 | SET physMemory = (SELECT max(value) from dba_hist_osstat where stat_name = 'PHYSICAL_MEMORY_BYTES') 133 | WHERE instName = (select instance_name from v$instance) 134 | and hostName= (select host_name from v$instance); 135 | 136 | UPDATE rubrikDataCollection rbk 137 | SET dNFSenabled = (select decode(count(*), 0, 'No', 'Yes') from v$dnfs_servers) 138 | WHERE instName = (select instance_name from v$instance) 139 | and hostName= (select host_name from v$instance); 140 | 141 | UPDATE rubrikDataCollection rbk 142 | SET dbSizeMB = (select sum(bytes)/1024/1024 bytes from dba_segments) 143 | WHERE instName = (select instance_name from v$instance) 144 | and hostName= (select host_name from v$instance); 145 | 146 | UPDATE rubrikDataCollection rbk 147 | SET allocated_dbSizeMB = (select sum(bytes)/1024/1024 BYTES from v$datafile) 148 | WHERE instName = (select instance_name from v$instance) 149 | and hostName= (select host_name from v$instance); 150 | 151 | UPDATE rubrikDataCollection rbk 152 | SET GoldenGate = (select decode(count(*), 0, 'No', 'Yes') from v$archive_dest where status = 'VALID' and target = 'STANDBY') 153 | WHERE instName = (select instance_name from v$instance) 154 | and hostName= (select host_name from v$instance); 155 | 
156 | UPDATE rubrikDataCollection rbk 157 | SET exadataEnabled = (select decode(count(*), 0, 'No', 'Yes') from v$cell) 158 | WHERE instName = (select instance_name from v$instance) 159 | and hostName= (select host_name from v$instance); 160 | 161 | UPDATE rubrikDataCollection rbk 162 | SET bctEnabled = (select status from v$block_change_tracking) 163 | WHERE instName = (select instance_name from v$instance) 164 | and hostName= (select host_name from v$instance); 165 | 166 | UPDATE rubrikDataCollection rbk 167 | SET LogArchiveConfig = (SELECT value from v$parameter where name='log_archive_config') 168 | WHERE instName = (select instance_name from v$instance) 169 | and hostName= (select host_name from v$instance); 170 | 171 | UPDATE rubrikDataCollection rbk 172 | SET LogArchiveConfig = 'NO' 173 | WHERE instName = (select instance_name from v$instance) 174 | and hostName= (select host_name from v$instance) 175 | and LogArchiveConfig is null; 176 | 177 | UPDATE rubrikDataCollection rbk 178 | SET ArchiveLagTarget = (SELECT value from v$parameter where name='archive_lag_target') 179 | WHERE instName = (select instance_name from v$instance) 180 | and hostName= (select host_name from v$instance); 181 | 182 | UPDATE rubrikDataCollection rbk 183 | SET tablespaceCount = (select count(*) from v$tablespace) 184 | WHERE instName = (select instance_name from v$instance) 185 | and hostName= (select host_name from v$instance); 186 | 187 | UPDATE rubrikDataCollection rbk 188 | SET encryptedTablespaceCount = (select count(*) from dba_tablespaces where encrypted='YES') 189 | WHERE instName = (select instance_name from v$instance) 190 | and hostName= (select host_name from v$instance); 191 | 192 | UPDATE rubrikDataCollection rbk 193 | SET encryptedDataSizeMB = (select sum(bytes/1024/1024) from (select sum(bytes) bytes from dba_data_files dbf, dba_tablespaces tbsp where dbf.tablespace_name=tbsp.tablespace_name and tbsp.encrypted='YES')) 194 | WHERE instName = (select instance_name from 
v$instance) 195 | and hostName= (select host_name from v$instance); 196 | 197 | UPDATE rubrikDataCollection rbk 198 | SET encryptedDataSizeMB = 0 199 | WHERE instName = (select instance_name from v$instance) 200 | and hostName= (select host_name from v$instance) 201 | and encryptedDataSizeMB is null; 202 | 203 | UPDATE rubrikDataCollection rbk 204 | SET bigfileTablespaceCount = (select count(*) from dba_tablespaces where bigfile='YES') 205 | WHERE instName = (select instance_name from v$instance) 206 | and hostName= (select host_name from v$instance); 207 | 208 | UPDATE rubrikDataCollection rbk 209 | SET biggestBigfileMB = (select sum(bytes/1024/1024) from (select max(bytes) bytes from dba_data_files dbf, dba_tablespaces tbsp where dbf.tablespace_name=tbsp.tablespace_name and tbsp.bigfile='YES')) 210 | WHERE instName = (select instance_name from v$instance) 211 | and hostName= (select host_name from v$instance); 212 | 213 | UPDATE rubrikDataCollection rbk 214 | SET biggestBigfileMB = 0 215 | WHERE instName = (select instance_name from v$instance) 216 | and hostName= (select host_name from v$instance) 217 | and biggestBigfileMB is null; 218 | 219 | UPDATE rubrikDataCollection rbk 220 | SET bigfileDataSizeMB = (select sum(bytes/1024/1024) from (select sum(bytes) bytes from dba_data_files dbf, dba_tablespaces tbsp where dbf.tablespace_name=tbsp.tablespace_name and tbsp.bigfile='YES')) 221 | WHERE instName = (select instance_name from v$instance) 222 | and hostName= (select host_name from v$instance); 223 | 224 | UPDATE rubrikDataCollection rbk 225 | SET bigfileDataSizeMB = 0 226 | WHERE instName = (select instance_name from v$instance) 227 | and hostName= (select host_name from v$instance) 228 | and bigfileDataSizeMB is null; 229 | 230 | -- 20220310 smcelhinney removing division by 100 from dailyChangeRate as it negatively skews change rate 231 | -- 20230321 updated change rate calculations to leverage cdb_segments to determine actual space USED instead of ALLOCATED - 
smcelhinney 232 | UPDATE rubrikDataCollection rbk 233 | SET dailyChangeRate = (select dailyChangeRate from (select round((avg(redo_size)/sum(sgmt.bytes)),8) dailyChangeRate from dba_segments sgmt, (select trunc(completion_time) rundate, sum(blocks*block_size) redo_size from v$archived_log where first_time > sysdate - 7 group by trunc(completion_time)))) 234 | WHERE instName = (select instance_name from v$instance) 235 | and hostName= (select host_name from v$instance); 236 | 237 | UPDATE rubrikDataCollection rbk 238 | SET datafileCount = (select count(*) from v$datafile) 239 | WHERE instName = (select instance_name from v$instance) 240 | and hostName= (select host_name from v$instance); 241 | 242 | UPDATE rubrikDataCollection rbk 243 | SET logfileCount = (select count(*) from v$logfile) 244 | WHERE instName = (select instance_name from v$instance) 245 | and hostName= (select host_name from v$instance); 246 | 247 | -- review: removed a second, byte-identical logfileCount UPDATE that appeared here (copy/paste duplicate; it repeated the statement above with no effect on results) 251 | 252 | UPDATE rubrikDataCollection rbk 253 | SET tempfileCount = (select count(*) from v$tempfile) 254 | WHERE instName = (select instance_name from v$instance) 255 | and hostName= (select host_name from v$instance); 256 | 257 | -- 20230322 - updating query to return dailyRedoSize in MB - smcelhinney 258 | UPDATE rubrikDataCollection rbk 259 | SET dailyRedoSize = (select dailyRedoSize from (select avg(redo_size/1024/1024) dailyRedoSize from (select trunc(completion_time) rundate, sum(blocks*block_size) redo_size from v$archived_log where first_time > sysdate - 7 group by trunc(completion_time)))) 260 | WHERE instName = (select instance_name from v$instance) 261 | and hostName= (select host_name from v$instance); 262 | 263 | UPDATE rubrikDataCollection rbk 264 | SET dailyRedoSize = 0 265 | WHERE instName = (select instance_name from v$instance
266 | and hostName= (select host_name from v$instance) 267 | and dailyRedoSize is null; 268 | 269 | 270 | -- update temp table with con_name for recorded con_id 271 | update rubrikDataCollection rbk set con_id=0 where con_id is null; 272 | 273 | commit; 274 | 275 | -- format data collected for json output 276 | set linesize 32000 277 | set colsep ,, 278 | set headsep off 279 | set head off 280 | set trimspool on 281 | set trimout on 282 | set feedback off 283 | set pagesize 0 284 | set wrap off 285 | 286 | spool rbkDiscovery.csv append 287 | 288 | -- 20230322 - reordering query output to logically group data - smcelhinney 289 | -- 20240827 - changing column separator to prevent data shift due to DG setting in LogArchiveConfig - smcelhinney 290 | select con_id ||',,'|| 291 | conName ||',,'|| 292 | dbSizeMB ||',,'|| 293 | allocated_dbSizeMB ||',,'|| 294 | dailyChangeRate ||',,'|| 295 | dailyRedoSize ||',,'|| 296 | datafileCount ||',,'|| 297 | tablespaceCount ||',,'|| 298 | encryptedTablespaceCount ||',,'|| 299 | encryptedDataSizeMB ||',,'|| 300 | biggestBigfileMB ||',,'|| 301 | bigfileTablespaceCount ||',,'|| 302 | bigfileDataSizeMB ||',,'|| 303 | blockSize ||',,'|| 304 | hostName ||',,'|| 305 | instName ||',,'|| 306 | dbVersion ||',,'|| 307 | dbEdition ||',,'|| 308 | platformName ||',,'|| 309 | dbName ||',,'|| 310 | dbUniqueName ||',,'|| 311 | dbID ||',,'|| 312 | flashbackEnabled ||',,'|| 313 | archiveLogEnabled ||',,'|| 314 | spfile ||',,'|| 315 | patchLevel ||',,'|| 316 | cpuCount ||',,'|| 317 | racEnabled ||',,'|| 318 | sgaMaxSize ||',,'|| 319 | sgaTarget ||',,'|| 320 | pgaAggregateTarget ||',,'|| 321 | physMemory ||',,'|| 322 | dNFSenabled ||',,'|| 323 | GoldenGate ||',,'|| 324 | exadataEnabled ||',,'|| 325 | bctEnabled ||',,'|| 326 | LogArchiveConfig ||',,'|| 327 | ArchiveLagTarget ||',,'|| 328 | logfileCount ||',,'|| 329 | tempfileCount 330 | from rubrikDataCollection; 331 | 332 | spool off 333 | 334 | truncate table rubrikDataCollection; 335 | 336 | drop 
table rubrikDataCollection; 337 | 338 | exit; 339 | -------------------------------------------------------------------------------- /CLOUD/README.md: -------------------------------------------------------------------------------- 1 | # Cloud Sizing Scripts README 2 | 3 | This contains information about Rubrik scripts for collecting sizing information for resources across AWS, Azure, and GCP. Below you will find detailed instructions on setting up prerequisites, running the scripts, and understanding their functionalities, including the anonymization feature. 4 | 5 | ## Table of Contents 6 | 7 | 1. [Introduction](#introduction) 8 | 2. [AWS](#aws) 9 | - [Prerequisites](#aws-prerequisites) 10 | - [Running the Script](#running-the-aws-script) 11 | 3. [Azure](#azure) 12 | - [Prerequisites](#azure-prerequisites) 13 | - [Running the Script](#running-the-azure-script) 14 | 4. [GCP](#gcp) 15 | - [Prerequisites](#gcp-prerequisites) 16 | - [Running the Script](#running-the-gcp-script) 17 | 5. [Anonymization Feature](#anonymization-feature) 18 | 6. [FAQ](#faq) 19 | 20 | --- 21 | 22 | ## Introduction 23 | 24 | This repository contains scripts designed to collect and report on cloud resources across AWS, Azure, and GCP. These scripts help gather essential sizing data which will be used for scaling and pricing Rubrik solutions. 25 | 26 | --- 27 | 28 | ## AWS 29 | 30 | ### Authentication 31 | 32 | 1. Use the local AWS profile (IAM user) to login to just one account. 33 | 1. Use a list of local AWS profiles to login to and query multiple accounts. 34 | 1. Use a cross account role with a list of AWS accounts that you provide to assume role into each account. 35 | 1. Use a cross account role with an AWS Org and have the script automatically discover all of your accounts and query each one. 36 | 1. Have the script query AWS SSO for a list of accounts. Each account will be accessed via the AWS SSO parameter set that is specified.
37 | 38 | 39 | 40 | ### AWS Prerequisites 41 | 42 | To run the AWS sizing script, ensure you have the following: 43 | 44 | - PowerShell 7.4.5 or higher 45 | - AWS PowerShell modules installed: 46 | - An AWS account with the necessary permissions. 47 | - The following AWS permissions are required to run the script: 48 | ```json 49 | { 50 | "Version": "2012-10-17", 51 | "Statement": [ 52 | { 53 | "Sid": "VisualEditor0", 54 | "Effect": "Allow", 55 | "Action": [ 56 | "backup:ListBackupPlans", 57 | "backup:ListBackupSelections", 58 | "backup:GetBackupPlan", 59 | "backup:GetBackupSelection", 60 | "ce:GetCostAndUsage", 61 | "cloudwatch:GetMetricStatistics", 62 | "cloudwatch:ListMetrics", 63 | "dynamodb:ListTables", 64 | "dynamodb:DescribeTable", 65 | "ec2:DescribeInstances", 66 | "ec2:DescribeRegions", 67 | "ec2:DescribeVolumes", 68 | "eks:DescribeCluster", 69 | "eks:ListClusters", 70 | "eks:ListNodegroups", 71 | "elasticfilesystem:DescribeFileSystems", 72 | "fsx:DescribeFileSystems", 73 | "fsx:DescribeVolumes", 74 | "iam:ListAccountAliases", 75 | "kms:ListKeys", 76 | "organizations:ListAccounts", 77 | "rds:DescribeDBClusters", 78 | "rds:DescribeDBInstances", 79 | "s3:GetBucketLocation", 80 | "s3:ListAllMyBuckets", 81 | "s3:GetBucketTagging", 82 | "s3:ListStorageLensConfigurations", 83 | "s3:GetStorageLensConfiguration", 84 | "secretsmanager:ListSecrets", 85 | "sts:AssumeRole", 86 | "sqs:ListQueues" 87 | ], 88 | "Resource": "*" 89 | } 90 | ] 91 | } 92 | ``` 93 | - These permissions can be installed in a cross account role by using the [Get-AWSSizingInfo-Permissions.cft](Get-AWSSizingInfo-Permissions.cft) CloudFormation template. This cross account role can be installed in multiple AWS accounts by using a [CloudFormation Stack Set](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/what-is-cfnstacksets.html). 94 | 95 | ### Running the AWS Script 96 | 97 | There are two options for running the AWS sizing script. 
It can run from the AWS Cloud Shell (easiest) or from a local laptop or server (more difficult). For very large environments where the script may run longer than 20-30 minutes, running the script on a laptop or server may be necessary. This is due to the Cloud Shell's default inactivity timeout. 98 | 99 | To run the script from the AWS Cloud Shell do the following: 100 | 101 | 1. Open [AWS Cloud Shell](https://docs.aws.amazon.com/cloudshell/latest/userguide/welcome.html) in an AWS account with a profile to run the script. 102 | 1. Start PowerShell by running: 103 | ```shell 104 | pwsh 105 | ``` 106 | 107 | To run the script from a local laptop or server do the following: 108 | 109 | 1. Verify that PowerShell v7.4.5 or higher is installed. 110 | 1. Install the AWS modules for PowerShell with the following command: 111 | ```powershell 112 | Install-Module AWS.Tools.Common,AWS.Tools.EC2,AWS.Tools.S3,AWS.Tools.RDS,AWS.Tools.SecurityToken,AWS.Tools.Organizations,AWS.Tools.IdentityManagement,AWS.Tools.CloudWatch,AWS.Tools.ElasticFileSystem,AWS.Tools.SSO,AWS.Tools.SSOOIDC,AWS.Tools.FSX,AWS.Tools.Backup,AWS.Tools.CostExplorer,AWS.Tools.DynamoDBv2,AWS.Tools.SQS,AWS.Tools.SecretsManager,AWS.Tools.KeyManagementService,AWS.Tools.EKS 113 | 114 | ``` 115 | 1. Ensure AWS credentials are set up by using the `Set-AWSCredential` command. For example: 116 | ```powershell 117 | Set-AWSCredential -AccessKey 'YourAccessKey' -SecretKey 'YourSecretKey' -Region 'YourRegion' 118 | ``` 119 | 120 | In both cases run the sizing script with the appropriate options and send the data back to Rubrik. 121 | 1. Execute the script: 122 | ```powershell 123 | .\Get-AWSSizingInfo.ps1 124 | ``` 125 | 1. The script will output a summary to the console and create a zip file with CSV and JSON files, along with a LOG of the console output. 126 | 1. Please download the ZIP file and send it to your Rubrik representative. 
127 | 128 | ### Aurora DB Processing 129 | 130 | Aurora databases are processed at the cluster level rather than the instance level. This is because Aurora storage is allocated at the cluster level, and Rubrik protects Aurora clusters (not individual instances). As a result, multiple Aurora instances belonging to the same cluster will appear as a single cluster entry in the output. 131 | 132 | ### S3 Storage Lens for Current Version Storage Metrics 133 | 134 | The script can collect **CurrentVersionStorageBytes** metrics for S3 buckets, which shows the size of current (non-versioned) objects per storage class. This data is useful for understanding how much storage is used by current versions vs. previous versions in versioned buckets. 135 | 136 | **To enable this feature:** 137 | 138 | 1. **Create an S3 Storage Lens configuration** in your AWS account: 139 | - Go to **S3** → **Storage Lens** → **Dashboards** → **Create dashboard** 140 | - Give it a name (e.g., `sizing-dashboard`) 141 | - Under **Metrics export**, enable **CloudWatch publishing** 142 | - Save the configuration 143 | 144 | 2. 
**Wait for metrics to be published:** 145 | - Storage Lens metrics are published **once per day** 146 | - It can take **up to 48 hours** for the first metrics to appear in CloudWatch after enabling 147 | 148 | **Cost considerations:** 149 | - S3 Storage Lens **free metrics** are available at no additional cost 150 | - **CloudWatch publishing** of Storage Lens metrics incurs CloudWatch metrics charges: 151 | - CloudWatch custom metrics: ~$0.30 per metric per month (first 10,000 metrics) 152 | - Storage Lens publishes multiple metrics per bucket per storage class 153 | - For large environments with many buckets, this can add up 154 | - Review [AWS CloudWatch pricing](https://aws.amazon.com/cloudwatch/pricing/) and [S3 Storage Lens pricing](https://aws.amazon.com/s3/pricing/) for current rates 155 | 156 | If Storage Lens with CloudWatch publishing is not configured, the script will continue to run but these fields will not be populated. 157 | 158 | ### Troubleshooting 159 | 160 | #### Explicit deny in a service control policy 161 | 162 | - Problem: 163 | 164 | When running the script against AWS SSO or an AWS Org the following type of error may occur: 165 | 166 | ``` 167 | Failed to get Backup Plans Info for region us-east-2 in account 123456789012 168 | Error: User: arn:aws:sts::123456789012:assumed-role/AWSReservedSSO_AdministratorAccess_1234567890abcdef/firstname.lastname@company.com is not authorized to perform: backup:ListBackupPlans with an explicit deny in a service control policy 169 | ``` 170 | 171 | - Solution: 172 | 173 | An AWS service control policy is in place that prevents running commands in the region that is listed. Use the -Regions parameter to restrict the script to only run in regions that are supported by the organization. 174 | 175 | 176 | #### Invalid grant provided 177 | 178 | - Problem: 179 | 180 | When using AWS SSO the following error may occur while authorizing the script in SSO: 181 | 182 | ``` 183 | Allow access to your data? 
invalid_grant 184 | Invalid grant provided 185 | ``` 186 | 187 | - Solution: 188 | 189 | AWS SSO is not being accessed in the correct region. By default, the script uses `us-east-1` to communicate with AWS SSO. To look up the proper region for AWS SSO select `Access Keys` next to the Parameter Set that is being used. The next screen will specify a region where the access keys can be used. This is the region for the AWS SSO. Next specify the `-SSORegion` flag and use the region that was discovered. 190 | 191 | --- 192 | 193 | ## Azure 194 | 195 | ### Azure Prerequisites 196 | 197 | To run the Azure sizing script, ensure you have the following: 198 | 199 | - Azure AD account with "Reader" and "Reader and Data Access" roles on each subscription. 200 | - PowerShell 7 installed if running locally. 201 | - Required Azure PowerShell modules installed: 202 | ```powershell 203 | Install-Module Az.Accounts,Az.Compute,Az.Storage,Az.Sql,Az.SqlVirtualMachine,Az.ResourceGraph,Az.Monitor,Az.Resources,Az.RecoveryServices,Az.CostManagement,Az.CosmosDB 204 | ``` 205 | 206 | ### Running the Azure Script 207 | 208 | 1. **From Azure Cloud Shell (preferred):** 209 | - Login to the Azure portal and open [Azure Cloud Shell](https://learn.microsoft.com/en-us/azure/cloud-shell/get-started/classic?source=recommendations&tabs=azurecli). 210 | - Install the necessary module: 211 | ```powershell 212 | Install-Module Az.CostManagement 213 | ``` 214 | - Upload and run the script: 215 | ```powershell 216 | .\Get-AzureSizingInfo.ps1 217 | ``` 218 | 219 | 2. **From a local system:** 220 | - Install PowerShell 7 and necessary Azure modules as mentioned above. 221 | - Login to Azure: 222 | ```powershell 223 | Connect-AzAccount 224 | ``` 225 | - Run the script: 226 | ```powershell 227 | .\Get-AzureSizingInfo.ps1 228 | ``` 229 | 230 | 3. The script will output a summary to the console and create a zip file with CSV and JSON files, along with a LOG of the console output.
Please download the ZIP file and send it to your Rubrik representative. 231 | 232 | --- 233 | 234 | ## GCP 235 | 236 | ### GCP Prerequisites 237 | 238 | To run the GCP sizing script, ensure you have the following: 239 | 240 | - GCP account with necessary IAM permissions: "compute.instances.list", "compute.disks.get", "resourcemanager.projects.get". 241 | - GCP Cloud SDK installed or use GCP Cloud Shell. 242 | 243 | ### Running the GCP Script 244 | 245 | 1. **From GCP Cloud Shell:** 246 | - Login and initialize [GCP Cloud Shell](https://cloud.google.com/shell): 247 | ```shell 248 | gcloud init 249 | ``` 250 | 251 | 2. **Using Cloud Tools for PowerShell:** 252 | - Login to GCP: 253 | ```powershell 254 | gcloud auth list 255 | gcloud config list 256 | ``` 257 | - Run the script: 258 | ```powershell 259 | .\Get-GCPSizingInfo.ps1 260 | ``` 261 | 262 | 3. The script will output a summary to the console and create a zip file with a CSV file, along with a LOG of the console output. Please download the ZIP file and send it to your Rubrik representative. 263 | 264 | --- 265 | 266 | ## Anonymization Feature 267 | 268 | The anonymization feature allows you to anonymize specific fields in the output to protect sensitive information. You can use the flags on any/all the 3 AWS/Azure/GCP scripts. 269 | 270 | - Use the tag `-Anonymize`.
Fields anonymized by default are as follows: 271 | 272 | - **AWS:** "AwsAccountId", "AwsAccountAlias", "BucketName", "Name", "InstanceId", "VolumeId", "RDSInstance", "DBInstanceIdentifier", "FileSystemId", "FileSystemDNSName", "FileSystemOwnerId", "OwnerId", "RuleId", "RuleName", "BackupPlanArn", "BackupPlanId", "VersionId", "RequestId" 273 | - **GCP:** "Name", "Project", "VMName", "DiskName", "Id", "DiskEncryptionKey" 274 | - **Azure:** "SubscriptionId", "Subscription", "Tenant", "Name", "ResourceGroup", "VirtualMachineId", "PolicyId", "ProtectionPolicyName", "Id", "SourceResourceId", "ContainerName", "FriendlyName", "ServerName", "ParentName", "ProtectedItemDataSourceId", "StorageAccount", "Database", "Server", "ElasticPool", "ManagedInstance", "DatabaseID", "vmID" 275 | 276 | - To customize anonymization: 277 | - Anonymize additional fields: `-AnonymizeFields "NewField1,NewField2"` 278 | - To not anonymize certain fields: `-NotAnonymizeFields "Name,Id"` 279 | 280 | - A CSV file corresponding each anonymized key to value is outputted when you run any of the 3 scripts. Note this will not be contained in the ZIP file, and is only outside the ZIP file. This will help you correspond the numbers outputted to the resources, even if you choose to send anonymized data to Rubrik. 281 | 282 | - The output log will also not be in the ZIP; the output log will be created outside the ZIP, and one can manually 'clean/sanitize' sensitive information from that log before sending it to one's Rubrik representative. 283 | 284 | --- 285 | 286 | ## FAQ 287 | 288 | ### How do I get started with AWS Organizations and AWS SSO? 289 | 290 | For detailed instructions on setting up AWS Organizations and AWS SSO, refer to official AWS documentation. 291 | 292 | ### What IAM permissions are required for running the scripts? 293 | 294 | Ensure the respective IAM permissions as outlined in the script prerequisites section for each cloud provider. 
295 | 296 | ### How do I run the script with custom settings, such as using AWS SSO or querying certain regions? 297 | 298 | One can read the detailed parameter list and how to interact with them in the documentation at the top of each script. There are also examples provided of how to use these parameters to customize your data query. 299 | 300 | ### How can I verify my current cloud context? 301 | 302 | - **GCP:** 303 | ```shell 304 | gcloud auth list 305 | gcloud config list 306 | ``` 307 | - **Azure:** 308 | ```powershell 309 | Connect-AzAccount 310 | ``` 311 | - **AWS:** 312 | ```powershell 313 | Set-AWSCredential -AccessKey 'YourAccessKey' -SecretKey 'YourSecretKey' -Region 'YourRegion' 314 | ``` 315 | 316 | For any further queries or issues, refer to the detailed documentation at the top of each script or contact your Rubrik representative. 317 | -------------------------------------------------------------------------------- /IDENTITY/Get-AdHumanIdentity.ps1: -------------------------------------------------------------------------------- 1 | <# 2 | .SYNOPSIS 3 | This script is a Rubrik utility for counting human identities in a customer's Active Directory (AD) environment. The data collected is used for licensing Rubrik's products. The script identifies and categorizes all user and service accounts to determine the number of unique human users. 4 | 5 | .DESCRIPTION 6 | The Get-AdHumanIdentity.ps1 script is a specialized tool for Rubrik's customers to generate a report of their Active Directory identities for licensing purposes. The script connects to the customer's AD environment to query for user and service accounts, and then categorizes them to accurately count the number of human identities. 7 | 8 | The primary goal of this script is to provide an accurate count of human users to ensure fair and accurate licensing of Rubrik's products. The script distinguishes between human users and non-human accounts (e.g., service accounts) to avoid over-licensing. 
9 | 10 | The script gathers the following information to assist in the identity counting process: 11 | - **Account Activity**: Determines if accounts are active, inactive, or have never been used, based on their last logon timestamp. This helps in excluding dormant accounts from the count of active users. 12 | - **Service Account Types**: Identifies various types of non-human accounts, including: 13 | - Managed Service Accounts (MSAs) 14 | - Group Managed Service Accounts (gMSAs) 15 | - Accounts with passwords set to never expire. 16 | - Accounts matching specific naming patterns (e.g., "svc_*"). 17 | - **Reporting Granularity**: Offers two levels of reporting to provide flexibility in how the data is presented: 18 | - **UserPerOU**: A detailed report with counts broken down by each Organizational Unit (OU). 19 | - **Summary**: A high-level report with aggregated counts for each domain. 20 | 21 | The script generates a CSV report that can be shared with Rubrik for licensing purposes. 22 | 23 | .PARAMETER SpecificDomains 24 | This is an optional parameter that allows you to specify which Active Directory domains to audit. If you do not use this parameter, the script will automatically discover and audit all domains in the current AD forest. 25 | 26 | To use this parameter, provide a list of fully qualified domain names (FQDNs). 27 | Example: -SpecificDomains "corp.example.com", "dev.example.com" 28 | 29 | .PARAMETER UserServiceAccountNamesLike 30 | This is an optional parameter that allows you to identify service accounts based on their names. You can provide a list of wildcard patterns, and any user account with a name matching one of these patterns will be flagged as a service account in the report. 31 | 32 | This is useful for identifying service accounts that are not formally registered as MSAs or gMSAs. 
33 | Example: -UserServiceAccountNamesLike "*svc*", "*_bot", "testuser*" 34 | 35 | .PARAMETER Mode 36 | This is a required parameter that controls the level of detail in the final report. You must choose one of the following two modes: 37 | - 'UserPerOU': This mode provides a very detailed report, with a separate entry for each Organizational Unit (OU). This is the recommended mode for a granular analysis. 38 | - 'Summary': This mode provides a high-level overview, with a single entry for each domain, showing the total counts for all account types. 39 | 40 | The default value is 'UserPerOU'. 41 | 42 | .EXAMPLE 43 | Example 1: Perform a detailed audit of the entire forest and identify service accounts by name. 44 | 45 | .\Get-AdHumanIdentity.ps1 -UserServiceAccountNamesLike "*svc*", "*_app" -Mode UserPerOU 46 | 47 | This command will: 48 | - Scan all domains in the current AD forest. 49 | - Flag any user account with a name containing "svc" or "_app" as a service account. 50 | - Generate a detailed report with account counts for each OU. 51 | - Save the report to a CSV file in the .\ADReports directory. 52 | 53 | .EXAMPLE 54 | Example 2: Perform a summary audit of a single, specific domain. 55 | 56 | .\Get-AdHumanIdentity.ps1 -SpecificDomains "corp.example.com" -Mode Summary 57 | 58 | This command will: 59 | - Connect only to the "corp.example.com" domain. 60 | - Generate a high-level summary report for that domain. 61 | - Save the report to a CSV file in the .\ADReports directory. 62 | 63 | .NOTES 64 | - **Prerequisites**: The computer running this script must have the Active Directory module for PowerShell installed. This is part of the Remote Server Administration Tools (RSAT). 65 | - **Permissions**: The user account running this script must have read permissions for user and service account objects in the target Active Directory domains. 66 | - **Execution Policy**: You may need to adjust the PowerShell execution policy to run this script. 
You can do this by running "Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope Process". 67 | - **Culture Settings**: The script temporarily sets the culture to 'en-US' to ensure that dates and times are parsed correctly. This change is reverted at the end of the script. 68 | #> 69 | 70 | 71 | param ( 72 | [string[]]$UserServiceAccountNamesLike = @(), 73 | [string[]]$SpecificDomains, 74 | [ValidateSet("UserPerOU", "Summary")] 75 | [string]$Mode = "UserPerOU" 76 | ) 77 | 78 | # === Logging Setup === 79 | $timestamp = Get-Date -Format "yyyyMMdd_HHmmss" 80 | $outputPath = ".\ADReports" 81 | if (-not (Test-Path $outputPath)) { New-Item -Path $outputPath -ItemType Directory | Out-Null } 82 | 83 | $logPath = Join-Path $outputPath "AD_Audit_$timestamp.log" 84 | function Write-Log { 85 | param ( 86 | [string]$Message, 87 | [string]$Level = "INFO" 88 | ) 89 | $formatted = "[{0}] [{1}] {2}" -f (Get-Date -Format "yyyy-MM-dd HH:mm:ss"), $Level, $Message 90 | Add-Content -Path $logPath -Value $formatted 91 | Write-Host $Message 92 | } 93 | 94 | function Initialize-Prerequisites { 95 | $requiredPSVersion = [Version]"5.1" 96 | $moduleName = "ActiveDirectory" 97 | 98 | if ($PSVersionTable.PSVersion -lt $requiredPSVersion) { 99 | Write-Log "PowerShell $requiredPSVersion or higher is required. Current version: $($PSVersionTable.PSVersion)" 100 | exit 101 | } 102 | 103 | try { 104 | if (-not (Get-Module -ListAvailable -Name $moduleName)) { 105 | Write-Log "Required module '$moduleName' not found. Please install RSAT: Active Directory Tools." 106 | exit 107 | } 108 | Import-Module $moduleName -ErrorAction Stop 109 | } catch { 110 | Write-Log "Failed to import '$moduleName'. Ensure it's installed and accessible. 
$_" 111 | exit 112 | } 113 | 114 | # Culture preservation 115 | $script:OriginalCulture = [System.Globalization.CultureInfo]::CurrentCulture 116 | $script:OriginalUICulture = [System.Globalization.CultureInfo]::CurrentUICulture 117 | 118 | [System.Threading.Thread]::CurrentThread.CurrentCulture = 'en-US' 119 | [System.Threading.Thread]::CurrentThread.CurrentUICulture = 'en-US' 120 | 121 | Write-Log "Prerequisites validated. Environment initialized." -ForegroundColor Green 122 | } 123 | 124 | Initialize-Prerequisites 125 | 126 | # ===================== 127 | # Helper Functions 128 | # ===================== 129 | 130 | function Get-OUFromDN { 131 | param ([string]$dn) 132 | ($dn -split '(? sysdate - 7 group by trunc(completion_time), con_id) group by sgmt.con_id) where con_id=rbk.con_id) 265 | WHERE instName = (select instance_name from v$instance) 266 | and hostName= (select host_name from v$instance) 267 | and con_id=rbk.con_id; 268 | 269 | -- v$datafile is container-aware (no need for container clause) 270 | UPDATE rubrikDataCollection rbk 271 | SET datafileCount = (select count(*) from v$datafile where con_id=rbk.con_id group by con_id) 272 | WHERE instName = (select instance_name from v$instance) 273 | and hostName= (select host_name from v$instance) 274 | and con_id=rbk.con_id; 275 | 276 | -- v$logfile is container-aware (no need for container clause) 277 | UPDATE rubrikDataCollection rbk 278 | SET logfileCount = (select count(*) from v$logfile where con_id=rbk.con_id group by con_id) 279 | WHERE instName = (select instance_name from v$instance) 280 | and hostName= (select host_name from v$instance) 281 | and con_id=rbk.con_id; 282 | 283 | -- as Multitenant instance won't have con_id=0, the result will be add into the root container (con_id=1) 284 | UPDATE rubrikDataCollection rbk 285 | SET logfileCount = (SELECT SUM(total) 286 | FROM ( 287 | select count(*) total from v$logfile where con_id=0 288 | UNION ALL 289 | select count(*) total from v$logfile where 
con_id=1 290 | )) 291 | WHERE instName = (select instance_name from v$instance) 292 | and hostName= (select host_name from v$instance) 293 | and rbk.con_id=1; 294 | 295 | UPDATE rubrikDataCollection rbk 296 | SET logfileCount = 0 297 | WHERE instName = (select instance_name from v$instance) 298 | and hostName= (select host_name from v$instance) 299 | and logfileCount is null; 300 | 301 | -- v$tempfile is container-aware (no need for container clause) 302 | UPDATE rubrikDataCollection rbk 303 | SET tempfileCount = (select count(*) from v$tempfile where con_id=rbk.con_id group by con_id) 304 | WHERE instName = (select instance_name from v$instance) 305 | and hostName= (select host_name from v$instance) 306 | and con_id=rbk.con_id; 307 | 308 | -- as Multitenant instance won't have con_id=0, the result will be add into the root container (con_id=1) 309 | UPDATE rubrikDataCollection rbk 310 | SET tempfileCount = (SELECT SUM(total) 311 | FROM ( 312 | select count(*) total from v$tempfile where con_id=0 313 | UNION ALL 314 | select count(*) total from v$tempfile where con_id=1 315 | )) 316 | WHERE instName = (select instance_name from v$instance) 317 | and hostName= (select host_name from v$instance) 318 | and rbk.con_id=1; 319 | 320 | UPDATE rubrikDataCollection rbk 321 | SET tempfileCount = 0 322 | WHERE instName = (select instance_name from v$instance) 323 | and hostName= (select host_name from v$instance) 324 | and tempfileCount is null; 325 | 326 | -- v$archived_log is container-aware (no need for container clause) 327 | -- 20230322 - updating query to return dailyRedoSize in MB - smcelhinney 328 | UPDATE rubrikDataCollection rbk 329 | SET dailyRedoSize = (select dailyRedoSize from (select con_id, avg(redo_size/1024/1024) dailyRedoSize from (select con_id, trunc(completion_time) rundate, sum(blocks*block_size) redo_size from v$archived_log where first_time > sysdate - 7 group by trunc(completion_time), con_id) group by con_id) where con_id=rbk.con_id) 330 | WHERE 
instName = (select instance_name from v$instance) 331 | and hostName= (select host_name from v$instance) 332 | and con_id=rbk.con_id; 333 | 334 | -- as Multitenant instance won't have con_id=0, the result will be add into the root container (con_id=1) 335 | -- 20230322 - updating query to return dailyRedoSize in MB - smcelhinney 336 | UPDATE rubrikDataCollection rbk 337 | SET dailyRedoSize = (select dailyRedoSize from (select con_id, avg(redo_size/1024/1024) dailyRedoSize from (select con_id, trunc(completion_time) rundate, sum(blocks*block_size) redo_size from v$archived_log where first_time > sysdate - 7 group by trunc(completion_time), con_id) group by con_id) where con_id=0) 338 | WHERE instName = (select instance_name from v$instance) 339 | and hostName= (select host_name from v$instance) 340 | and rbk.con_id=1; 341 | 342 | UPDATE rubrikDataCollection rbk 343 | SET dailyRedoSize = 0 344 | WHERE instName = (select instance_name from v$instance) 345 | and hostName= (select host_name from v$instance) 346 | and dailyRedoSize is null; 347 | 348 | -- update temp table with con_name for recorded con_id 349 | update rubrikDataCollection rbk set con_id=0 where con_id is null; 350 | -- update root container and pdb seed names to include cdb database name 351 | update rubrikDataCollection rbk set conName=(select name ||'.CDB$ROOT'from v$database) where conName='CDB$ROOT'; 352 | update rubrikDataCollection rbk set conName=(select name ||'.PDB$SEED'from v$database) where conName='PDB$SEED'; 353 | -- update remaining pdbs to append CDB name so pdb/cdb relationships are not lost in the csv 354 | update rubrikDataCollection rbk set conName=(select name ||'.' 
from v$database)||conName where con_id>2; 355 | 356 | commit; 357 | 358 | -- format data collected for csv output 359 | --set markup csv on 360 | set linesize 32000 361 | set colsep ,, 362 | set headsep off 363 | set head off 364 | set trimspool on 365 | set trimout on 366 | set feedback off 367 | set pagesize 0 368 | set wrap off 369 | 370 | spool rbkDiscovery.csv append 371 | 372 | -- select * from rubrikDataCollection; 373 | -- 20230322 - reordering query output to logically group data - smcelhinney 374 | -- 20240827 - changing column seperator to prevent data shift due to DG setting in LogArchiveConfig - smcelhinney 375 | select con_id ||',,'|| 376 | conName ||',,'|| 377 | dbSizeMB ||',,'|| 378 | allocated_dbSizeMB ||',,'|| 379 | dailyChangeRate ||',,'|| 380 | dailyRedoSize ||',,'|| 381 | datafileCount ||',,'|| 382 | tablespaceCount ||',,'|| 383 | encryptedTablespaceCount ||',,'|| 384 | encryptedDataSizeMB ||',,'|| 385 | biggestBigfileMB ||',,'|| 386 | bigfileTablespaceCount ||',,'|| 387 | bigfileDataSizeMB ||',,'|| 388 | blockSize ||',,'|| 389 | hostName ||',,'|| 390 | instName ||',,'|| 391 | dbVersion ||',,'|| 392 | dbEdition ||',,'|| 393 | platformName ||',,'|| 394 | dbName ||',,'|| 395 | dbUniqueName ||',,'|| 396 | dbID ||',,'|| 397 | flashbackEnabled ||',,'|| 398 | archiveLogEnabled ||',,'|| 399 | spfile ||',,'|| 400 | patchLevel ||',,'|| 401 | cpuCount ||',,'|| 402 | racEnabled ||',,'|| 403 | sgaMaxSize ||',,'|| 404 | sgaTarget ||',,'|| 405 | pgaAggregateTarget ||',,'|| 406 | physMemory ||',,'|| 407 | dNFSenabled ||',,'|| 408 | GoldenGate ||',,'|| 409 | exadataEnabled ||',,'|| 410 | bctEnabled ||',,'|| 411 | LogArchiveConfig ||',,'|| 412 | ArchiveLagTarget ||',,'|| 413 | logfileCount ||',,'|| 414 | tempfileCount 415 | from rubrikDataCollection; 416 | 417 | spool off 418 | 419 | truncate table rubrikDataCollection; 420 | 421 | drop table rubrikDataCollection; 422 | 423 | exit; 424 | 
--------------------------------------------------------------------------------
/ORACLE/rbkDataCollection_12c.sql:
--------------------------------------------------------------------------------
REM Oracle Data Collection Script

-- connect to the system schema
--conn SYSTEM@$1

-- Create a global temporary table to hold all collected data.
-- One row is seeded per container; the UPDATE statements below fill in one
-- metric column at a time, always scoped to the current instance/host.

create global temporary table rubrikDataCollection
(
con_id number,
conName varchar2(128),
dbSizeMB number,
allocated_dbSizeMB number,
biggestBigfileMB number,
dailyChangeRate number,
dailyRedoSize number,
datafileCount number,
hostName varchar2(64),
instName varchar2(16),
dbVersion varchar2(17),
-- dbEdition varchar2(7),
-- updating dbEdition to support larger entries in v$instance.version
dbEdition varchar2(100),
platformName varchar2(101),
dbName varchar2(9),
dbUniqueName varchar2(30),
dbID varchar2(200),
flashbackEnabled varchar2(18),
archiveLogEnabled varchar2(12),
spfile varchar2(200),
patchLevel varchar2(100),
cpuCount number,
blockSize number,
racEnabled varchar2(20),
sgaMaxSize number,
sgaTarget number,
pgaAggregateTarget number,
physMemory number,
dNFSenabled varchar2(20),
GoldenGate varchar2(20),
exadataEnabled varchar2(20),
bctEnabled varchar2(20),
LogArchiveConfig varchar2(200),
ArchiveLagTarget number,
tablespaceCount number,
encryptedTablespaceCount number,
encryptedDataSizeMB number,
bigfileTablespaceCount number,
bigfileDataSizeMB number,
logfileCount number,
tempfileCount number
)
on commit preserve rows;

-- Seed one row per container with the instance/database identity columns.
-- v$instance and v$database are single-row views, so this is effectively
-- "one row per v$containers entry".
insert into rubrikDataCollection
(
con_id,
conName,
hostName,
instName,
dbVersion,
dbEdition,
platformName,
dbName,
dbUniqueName,
dbID,
flashbackEnabled,
archiveLogEnabled
)
select
cont.con_id,
cont.name,
inst.host_name,
inst.instance_name,
inst.version,
inst.edition,
db.platform_name,
db.name,
db.db_unique_name,
cont.dbid,
db.flashback_on,
db.log_mode
from v$instance inst,
v$database db,
v$containers cont
/

-- NOTE(review): v$parameter normally contains an 'spfile' row even when its
-- value is null (pfile startup), so count(*) may report 'YES' either way --
-- confirm whether "value is not null" was intended.
UPDATE rubrikDataCollection rbk
SET spfile = (select decode(count(*), 0, 'NO', 'YES') from v$parameter where name='spfile')
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance);

-- result is the latest patch
UPDATE rubrikDataCollection rbk
SET patchLevel = (select * from (select description from dba_registry_sqlpatch order by ACTION_TIME desc) where ROWNUM = 1)
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance);

UPDATE rubrikDataCollection rbk
SET cpuCount = (SELECT value from v$parameter where name='cpu_count')
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance);

UPDATE rubrikDataCollection rbk
SET blockSize = (SELECT value from v$parameter where name='db_block_size')
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance);

UPDATE rubrikDataCollection rbk
SET racEnabled = (SELECT value from v$parameter where name='cluster_database')
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance);

UPDATE rubrikDataCollection rbk
SET sgaMaxSize = (SELECT value from v$parameter where name='sga_max_size')
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance);

UPDATE rubrikDataCollection rbk
SET sgaTarget = (SELECT value from v$parameter where name='sga_target')
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance);

UPDATE rubrikDataCollection rbk
SET pgaAggregateTarget = (SELECT value from v$parameter where name='pga_aggregate_target')
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance);

UPDATE rubrikDataCollection rbk
SET physMemory = (SELECT max(value) from dba_hist_osstat where stat_name = 'PHYSICAL_MEMORY_BYTES')
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance);

UPDATE rubrikDataCollection rbk
SET dNFSenabled = (select decode(count(*), 0, 'NO', 'YES') from v$dnfs_servers)
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance);

-- cdb_segments can use the containers clause.
-- this query returns USED space per container
UPDATE rubrikDataCollection rbk
SET dbSizeMB = (select sum(bytes)/1024/1024 bytes from containers(cdb_segments) where con_id=rbk.con_id)
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance)
and con_id=rbk.con_id;

-- 20230322 - updated query to use containers clause
-- this query returns the total space ALLOCATED to each container
UPDATE rubrikDataCollection rbk
SET allocated_dbSizeMB = (select sum(bytes/1024/1024) bytes from containers(v$datafile) where con_id=rbk.con_id)
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance)
and con_id=rbk.con_id;


-- v$archive_dest is container-aware
-- if con_id is 0, it means the entire CDB
-- NOTE(review): despite the column name, this detects a VALID standby archive
-- destination (Data Guard), not GoldenGate replication -- confirm naming.
UPDATE rubrikDataCollection rbk
SET GoldenGate = (select decode(count(*), 0, 'NO', 'YES') from v$archive_dest where status = 'VALID' and target = 'STANDBY' and con_id=0)
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance);

-- same check, but per container
UPDATE rubrikDataCollection rbk
SET GoldenGate = (select decode(count(*), 0, 'NO', 'YES') from v$archive_dest where status = 'VALID' and target = 'STANDBY' and con_id=rbk.con_id)
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance)
and con_id=rbk.con_id;

-- gv$cell is container-aware
-- if con_id is 0, it means the entire CDB
UPDATE rubrikDataCollection rbk
SET exadataEnabled = (select decode(count(*), 0, 'NO', 'YES') from v$cell where con_id=0)
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance);

-- same check, but per container
UPDATE rubrikDataCollection rbk
SET exadataEnabled = (select decode(count(*), 0, 'NO', 'YES') from v$cell where con_id=rbk.con_id)
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance)
and con_id=rbk.con_id;

-- v$block_change_tracking is container-aware
-- for now, BCT is allowed only in CDB
UPDATE rubrikDataCollection rbk
SET bctEnabled = (select status from v$block_change_tracking)
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance);

UPDATE rubrikDataCollection rbk
SET LogArchiveConfig = (SELECT value from v$parameter where name='log_archive_config')
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance);

-- explicitly set LogArchiveConfig if the previous query returned null
UPDATE rubrikDataCollection rbk
SET LogArchiveConfig = 'NO'
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance)
and LogArchiveConfig is null;

UPDATE rubrikDataCollection rbk
SET ArchiveLagTarget = (SELECT value from v$parameter where name='archive_lag_target')
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance);

-- v$tablespace is container-aware
UPDATE rubrikDataCollection rbk
SET tablespaceCount = (select count(*) from v$tablespace where con_id=rbk.con_id group by con_id)
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance)
and con_id=rbk.con_id;

-- containers clause works on dba_tablespaces
UPDATE rubrikDataCollection rbk
SET encryptedTablespaceCount = (select count(*) from containers(dba_tablespaces) where encrypted='YES' and con_id=rbk.con_id)
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance)
and con_id=rbk.con_id;

-- containers clause works on dba_data_files and dba_tablespaces
UPDATE rubrikDataCollection rbk
SET encryptedDataSizeMB = (select sum(bytes/1024/1024) from (select dbf.con_id,sum(bytes) bytes from containers(dba_data_files) dbf, containers(dba_tablespaces) tbsp where dbf.tablespace_name=tbsp.tablespace_name and dbf.con_id=tbsp.con_id and tbsp.encrypted='YES' group by dbf.con_id) where con_id=rbk.con_id)
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance)
and con_id=rbk.con_id;

-- explicitly set encryptedDataSizeMB if the previous query returned null
UPDATE rubrikDataCollection rbk
SET encryptedDataSizeMB = 0
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance)
and encryptedDataSizeMB is null;

UPDATE rubrikDataCollection rbk
SET bigfileTablespaceCount = (select count(*) from containers(dba_tablespaces) where bigfile='YES' and con_id=rbk.con_id)
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance)
and con_id=rbk.con_id;

-- containers clause works on dba_data_files and dba_tablespaces
UPDATE rubrikDataCollection rbk
SET biggestBigfileMB = (select sum(bytes/1024/1024) from (select dbf.con_id, max(bytes) bytes from containers(dba_data_files) dbf, containers(dba_tablespaces) tbsp where dbf.tablespace_name=tbsp.tablespace_name and dbf.con_id=tbsp.con_id and tbsp.bigfile='YES' group by dbf.con_id) where con_id=rbk.con_id)
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance)
and con_id=rbk.con_id;

-- explicitly set biggestBigfileMB if the previous query returned null
UPDATE rubrikDataCollection rbk
SET biggestBigfileMB = 0
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance)
and biggestBigfileMB is null;

UPDATE rubrikDataCollection rbk
SET bigfileDataSizeMB = (select sum(bytes/1024/1024) from (select dbf.con_id, sum(bytes) bytes from containers(dba_data_files) dbf, containers(dba_tablespaces) tbsp where dbf.tablespace_name=tbsp.tablespace_name and dbf.con_id=tbsp.con_id and tbsp.bigfile='YES' group by dbf.con_id) where con_id=rbk.con_id)
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance)
and con_id=rbk.con_id;

-- explicitly set bigfileDataSizeMB if the previous query returned null
UPDATE rubrikDataCollection rbk
SET bigfileDataSizeMB = 0
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance)
and bigfileDataSizeMB is null;

-- v$datafile and v$archived_log are container-aware (no need for container clause)
-- 20220310 smcelhinney removing division by 100 from dailyChangeRate as it negatively skews change rate
-- 20230321 updated change rate calculations to leverage cdb_segments to determine actual space USED instead of ALLOCATED - smcelhinney
-- FIX: the inline view previously selected "dbf.con_id", but no alias "dbf"
-- exists in its FROM clause (only "sgmt" and an unaliased inline view), which
-- raises ORA-00904 and left dailyChangeRate null (then forced to 0 below).
-- It now selects sgmt.con_id, matching the GROUP BY.
-- NOTE(review): the archived-log inline view is not joined to sgmt on con_id,
-- so redo is averaged across all containers against each container's segment
-- bytes -- confirm this cross-join is intended.
UPDATE rubrikDataCollection rbk
SET dailyChangeRate = (select dailyChangeRate from (select sgmt.con_id, round((avg(redo_size)/sum(sgmt.bytes)),8) dailyChangeRate from containers(cdb_segments) sgmt, (select con_id, trunc(completion_time) rundate, sum(blocks*block_size) redo_size from containers(v$archived_log) where first_time > sysdate - 7 group by trunc(completion_time), con_id) group by sgmt.con_id) where con_id=rbk.con_id)
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance)
and con_id=rbk.con_id;

-- v$datafile is container-aware (no need for container clause)
UPDATE rubrikDataCollection rbk
SET datafileCount = (select count(*) from v$datafile where con_id=rbk.con_id group by con_id)
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance)
and con_id=rbk.con_id;

-- v$logfile is container-aware (no need for container clause)
UPDATE rubrikDataCollection rbk
SET logfileCount = (select count(*) from v$logfile where con_id=rbk.con_id group by con_id)
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance)
and con_id=rbk.con_id;

-- as a Multitenant instance won't have con_id=0, the result will be added into the root container (con_id=1)
UPDATE rubrikDataCollection rbk
SET logfileCount = (SELECT SUM(total)
FROM (
select count(*) total from v$logfile where con_id=0
UNION ALL
select count(*) total from v$logfile where con_id=1
))
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance)
and rbk.con_id=1;

-- explicitly set logfileCount if the previous queries returned null
UPDATE rubrikDataCollection rbk
SET logfileCount = 0
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance)
and logfileCount is null;

-- v$tempfile is container-aware (no need for container clause)
UPDATE rubrikDataCollection rbk
SET tempfileCount = (select count(*) from v$tempfile where con_id=rbk.con_id group by con_id)
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance)
and con_id=rbk.con_id;

-- as a Multitenant instance won't have con_id=0, the result will be added into the root container (con_id=1)
UPDATE rubrikDataCollection rbk
SET tempfileCount = (SELECT SUM(total)
FROM (
select count(*) total from v$tempfile where con_id=0
UNION ALL
select count(*) total from v$tempfile where con_id=1
))
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance)
and rbk.con_id=1;

-- explicitly set tempfileCount if the previous queries returned null
UPDATE rubrikDataCollection rbk
SET tempfileCount = 0
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance)
and tempfileCount is null;

-- v$archived_log is container-aware (no need for container clause)
-- 20230322 - updating query to return dailyRedoSize in MB - smcelhinney
UPDATE rubrikDataCollection rbk
SET dailyRedoSize = (select dailyRedoSize from (select con_id, avg(redo_size/1024/1024) dailyRedoSize from (select con_id, trunc(completion_time) rundate, sum(blocks*block_size) redo_size from v$archived_log where first_time > sysdate - 7 group by trunc(completion_time), con_id) group by con_id) where con_id=rbk.con_id)
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance)
and con_id=rbk.con_id;

-- as a Multitenant instance won't have con_id=0, the result will be added into the root container (con_id=1)
-- 20230322 - updating query to return dailyRedoSize in MB - smcelhinney
UPDATE rubrikDataCollection rbk
SET dailyRedoSize = (select dailyRedoSize from (select con_id, avg(redo_size/1024/1024) dailyRedoSize from (select con_id, trunc(completion_time) rundate, sum(blocks*block_size) redo_size from v$archived_log where first_time > sysdate - 7 group by trunc(completion_time), con_id) group by con_id) where con_id=0)
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance)
and rbk.con_id=1;

-- explicitly set dailyRedoSize if the previous queries returned null
UPDATE rubrikDataCollection rbk
SET dailyRedoSize = 0
WHERE instName = (select instance_name from v$instance)
and hostName= (select host_name from v$instance)
and dailyRedoSize is null;

-- normalize any null con_id before name rewriting
update rubrikDataCollection rbk set con_id=0 where con_id is null;
-- update root container and pdb seed names to include cdb database name
update rubrikDataCollection rbk set conName=(select name ||'.CDB$ROOT' from v$database) where conName='CDB$ROOT';
update rubrikDataCollection rbk set conName=(select name ||'.PDB$SEED' from v$database) where conName='PDB$SEED';
-- update remaining pdbs to append CDB name so pdb/cdb relationships are not lost in the csv
update rubrikDataCollection rbk set conName=(select name ||'.' from v$database)||conName where con_id>2;

commit;

-- format data collected for csv output
--set markup csv on
set linesize 32000
set colsep ,,
set headsep off
set head off
set trimspool on
set trimout on
set feedback off
set pagesize 0
set wrap off

spool rbkDiscovery.csv append

-- select * from rubrikDataCollection;
-- 20230322 - reordering query output to logically group data - smcelhinney
-- 20240827 - changing column separator to prevent data shift due to DG setting in LogArchiveConfig - smcelhinney
select con_id ||',,'||
conName ||',,'||
dbSizeMB ||',,'||
allocated_dbSizeMB ||',,'||
dailyChangeRate ||',,'||
dailyRedoSize ||',,'||
datafileCount ||',,'||
tablespaceCount ||',,'||
encryptedTablespaceCount ||',,'||
encryptedDataSizeMB ||',,'||
biggestBigfileMB ||',,'||
bigfileTablespaceCount ||',,'||
bigfileDataSizeMB ||',,'||
blockSize ||',,'||
hostName ||',,'||
instName ||',,'||
dbVersion ||',,'||
dbEdition ||',,'||
platformName ||',,'||
dbName ||',,'||
dbUniqueName ||',,'||
dbID ||',,'||
flashbackEnabled ||',,'||
archiveLogEnabled ||',,'||
spfile ||',,'||
patchLevel ||',,'||
cpuCount ||',,'||
racEnabled ||',,'||
sgaMaxSize ||',,'||
sgaTarget ||',,'||
pgaAggregateTarget ||',,'||
physMemory ||',,'||
dNFSenabled ||',,'||
GoldenGate ||',,'||
exadataEnabled ||',,'||
bctEnabled ||',,'||
LogArchiveConfig ||',,'||
ArchiveLagTarget ||',,'||
logfileCount ||',,'||
tempfileCount
from rubrikDataCollection;

spool off

truncate table rubrikDataCollection;

drop table rubrikDataCollection;

exit;
-------------------------------------------------------------------------------- /ORACLE/rbkDataCollection.sql: -------------------------------------------------------------------------------- 1 | REM Oracle Data Collection Script 2 | 3 | -- connect to the system schema 4 | --conn SYSTEM@$1 5 | 6 | -- create private temporary table to hold all collected 7 | 8 | create global temporary table rubrikDataCollection 9 | ( 10 | con_id number, 11 | conName varchar2(128), 12 | dbSizeMB number, 13 | allocated_dbSizeMB number, 14 | biggestBigfileMB number, 15 | dailyChangeRate number, 16 | dailyRedoSize number, 17 | datafileCount number, 18 | hostName varchar2(64), 19 | instName varchar2(16), 20 | dbVersion varchar2(17), 21 | -- dbEdition varchar2(7), 22 | -- updating dbEdition to support larger settings in v$instance.version 23 | dbEdition varchar2(100), 24 | platformName varchar2(101), 25 | dbName varchar2(9), 26 | dbUniqueName varchar2(30), 27 | dbID varchar2(200), 28 | flashbackEnabled varchar2(18), 29 | archiveLogEnabled varchar2(12), 30 | spfile varchar2(200), 31 | patchLevel varchar2(100), 32 | cpuCount number, 33 | blockSize number, 34 | racEnabled varchar2(20), 35 | sgaMaxSize number, 36 | sgaTarget number, 37 | pgaAggregateTarget number, 38 | physMemory number, 39 | dNFSenabled varchar2(20), 40 | GoldenGate varchar2(20), 41 | exadataEnabled varchar2(20), 42 | bctEnabled varchar2(20), 43 | LogArchiveConfig varchar2(200), 44 | ArchiveLagTarget number, 45 | tablespaceCount number, 46 | encryptedTablespaceCount number, 47 | encryptedDataSizeMB number, 48 | bigfileTablespaceCount number, 49 | bigfileDataSizeMB number, 50 | logfileCount number, 51 | tempfileCount number 52 | ) 53 | on commit preserve rows; 54 | 55 | insert into rubrikDataCollection 56 | ( 57 | con_id, 58 | conName, 59 | hostName, 60 | instName, 61 | dbVersion, 62 | dbEdition, 63 | platformName, 64 | dbName, 65 | dbUniqueName, 66 | dbID, 67 | flashbackEnabled, 68 | archiveLogEnabled 69 | ) 70 | select 
cont.con_id, 71 | cont.name, 72 | inst.host_name, 73 | inst.instance_name, 74 | inst.version_full, 75 | inst.edition, 76 | db.platform_name, 77 | db.name, 78 | db.db_unique_name, 79 | cont.dbid, 80 | db.flashback_on, 81 | db.log_mode 82 | from v$instance inst, 83 | v$database db, 84 | v$containers cont 85 | / 86 | 87 | UPDATE rubrikDataCollection rbk 88 | SET spfile = (select decode(count(*), 0, 'NO', 'YES') from v$parameter where name='spfile') 89 | WHERE instName = (select instance_name from v$instance) 90 | and hostName= (select host_name from v$instance); 91 | 92 | -- result is the latest patch 93 | UPDATE rubrikDataCollection rbk 94 | SET patchLevel = (select * from (select description from dba_registry_sqlpatch order by TARGET_BUILD_TIMESTAMP desc) where ROWNUM = 1) 95 | WHERE instName = (select instance_name from v$instance) 96 | and hostName= (select host_name from v$instance); 97 | 98 | UPDATE rubrikDataCollection rbk 99 | SET cpuCount = (SELECT value from v$parameter where name='cpu_count') 100 | WHERE instName = (select instance_name from v$instance) 101 | and hostName= (select host_name from v$instance); 102 | 103 | UPDATE rubrikDataCollection rbk 104 | SET blockSize = (SELECT value from v$parameter where name='db_block_size') 105 | WHERE instName = (select instance_name from v$instance) 106 | and hostName= (select host_name from v$instance); 107 | 108 | UPDATE rubrikDataCollection rbk 109 | SET racEnabled = (SELECT value from v$parameter where name='cluster_database') 110 | WHERE instName = (select instance_name from v$instance) 111 | and hostName= (select host_name from v$instance); 112 | 113 | UPDATE rubrikDataCollection rbk 114 | SET sgaMaxSize = (SELECT value from v$parameter where name='sga_max_size') 115 | WHERE instName = (select instance_name from v$instance) 116 | and hostName= (select host_name from v$instance); 117 | 118 | UPDATE rubrikDataCollection rbk 119 | SET sgaTarget = (SELECT value from v$parameter where name='sga_target') 120 | 
WHERE instName = (select instance_name from v$instance)
  and hostName = (select host_name from v$instance);

UPDATE rubrikDataCollection rbk
   SET pgaAggregateTarget = (SELECT value from v$parameter where name='pga_aggregate_target')
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance);

UPDATE rubrikDataCollection rbk
   SET physMemory = (SELECT max(value) from dba_hist_osstat where stat_name = 'PHYSICAL_MEMORY_BYTES')
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance);

UPDATE rubrikDataCollection rbk
   SET dNFSenabled = (select decode(count(*), 0, 'NO', 'YES') from v$dnfs_servers)
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance);

-- cdb_segments can use the containers clause.
-- this query returns USED space per container
UPDATE rubrikDataCollection rbk
   SET dbSizeMB = (select sum(bytes)/1024/1024 MB from containers(cdb_segments) where con_id=rbk.con_id)
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance)
   and con_id = rbk.con_id;

-- v$datafile can use the containers clause
-- this query returns the total space ALLOCATED to each container
UPDATE rubrikDataCollection rbk
   SET allocated_dbSizeMB = (select sum(bytes/1024/1024) bytes from containers(v$datafile) where con_id=rbk.con_id)
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance)
   and con_id = rbk.con_id;

-- v$archive_dest is container-aware
-- if con_id is 0, it means the entire CDB
UPDATE rubrikDataCollection rbk
   SET GoldenGate = (select decode(count(*), 0, 'NO', 'YES')
                       from v$archive_dest
                      where status = 'VALID' and target = 'STANDBY' and con_id=0)
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance);

-- this query addresses GoldenGate for each container
UPDATE rubrikDataCollection rbk
   SET GoldenGate = (select decode(count(*), 0, 'NO', 'YES')
                       from v$archive_dest
                      where status = 'VALID' and target = 'STANDBY' and con_id=rbk.con_id)
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance)
   and con_id = rbk.con_id;

-- gv$cell is container-aware
-- if con_id is 0, it means the entire CDB
UPDATE rubrikDataCollection rbk
   SET exadataEnabled = (select decode(count(*), 0, 'NO', 'YES') from v$cell where con_id=0)
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance);

-- this query addresses Exadata for each container (overkill)
UPDATE rubrikDataCollection rbk
   SET exadataEnabled = (select decode(count(*), 0, 'NO', 'YES') from v$cell where con_id=rbk.con_id)
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance)
   and con_id = rbk.con_id;

-- v$block_change_tracking is container-aware
-- for now, BCT is allowed only in CDB
UPDATE rubrikDataCollection rbk
   SET bctEnabled = (select status from v$block_change_tracking)
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance);

UPDATE rubrikDataCollection rbk
   SET LogArchiveConfig = (SELECT value from v$parameter where name='log_archive_config')
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance);

-- Need to explicitly set a value for LogArchiveConfig if the previous query returns a null value
UPDATE rubrikDataCollection rbk
   SET LogArchiveConfig = 'NO'
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance)
   and LogArchiveConfig is null;

UPDATE rubrikDataCollection rbk
   SET ArchiveLagTarget = (SELECT value from v$parameter where name='archive_lag_target')
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance);

-- v$tablespace is container-aware
UPDATE rubrikDataCollection rbk
   SET tablespaceCount = (select count(*) from v$tablespace where con_id=rbk.con_id group by con_id)
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance)
   and con_id = rbk.con_id;

-- containers clause works on dba_tablespaces
UPDATE rubrikDataCollection rbk
   SET encryptedTablespaceCount = (select count(*) from containers(dba_tablespaces)
                                    where encrypted='YES' and con_id=rbk.con_id)
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance)
   and con_id = rbk.con_id;

-- containers clause works on dba_data_files and dba_tablespaces
UPDATE rubrikDataCollection rbk
   SET encryptedDataSizeMB = (select sum(bytes/1024/1024)
                                from (select dbf.con_id, sum(bytes) bytes
                                        from containers(dba_data_files) dbf,
                                             containers(dba_tablespaces) tbsp
                                       where dbf.tablespace_name = tbsp.tablespace_name
                                         and dbf.con_id = tbsp.con_id
                                         and tbsp.encrypted = 'YES'
                                       group by dbf.con_id)
                               where con_id=rbk.con_id)
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance)
   and con_id = rbk.con_id;

-- need to explicitly set encryptedDataSizeMB if the previous query returns a null value
UPDATE rubrikDataCollection rbk
   SET encryptedDataSizeMB = 0
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance)
   and encryptedDataSizeMB is null;

UPDATE rubrikDataCollection rbk
   SET bigfileTablespaceCount = (select count(*) from containers(dba_tablespaces)
                                  where bigfile='YES' and con_id=rbk.con_id)
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance)
   and con_id = rbk.con_id;

-- containers clause works on dba_data_files and dba_tablespaces
UPDATE rubrikDataCollection rbk
   SET biggestBigfileMB = (select sum(bytes/1024/1024)
                             from (select dbf.con_id, max(bytes) bytes
                                     from containers(dba_data_files) dbf,
                                          containers(dba_tablespaces) tbsp
                                    where dbf.tablespace_name = tbsp.tablespace_name
                                      and dbf.con_id = tbsp.con_id
                                      and tbsp.bigfile = 'YES'
                                    group by dbf.con_id)
                            where con_id=rbk.con_id)
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance)
   and con_id = rbk.con_id;

-- need to explicitly set biggestBigfileMB if the previous query returns a null value
UPDATE rubrikDataCollection rbk
   SET biggestBigfileMB = 0
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance)
   and biggestBigfileMB is null;

UPDATE rubrikDataCollection rbk
   SET bigfileDataSizeMB = (select sum(bytes/1024/1024)
                              from (select dbf.con_id, sum(bytes) bytes
                                      from containers(dba_data_files) dbf,
                                           containers(dba_tablespaces) tbsp
                                     where dbf.tablespace_name = tbsp.tablespace_name
                                       and dbf.con_id = tbsp.con_id
                                       and tbsp.bigfile = 'YES'
                                     group by dbf.con_id)
                             where con_id=rbk.con_id)
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance)
   and con_id = rbk.con_id;

-- need to explicitly set the bigfileDataSizeMB if the previous query returns a null value
UPDATE rubrikDataCollection rbk
   SET bigfileDataSizeMB = 0
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance)
   and bigfileDataSizeMB is null;

-- v$datafile and v$archived_log are container-aware (no need for container clause)
-- 20220310 removed division by 100 from change rate calc as it is skewing change rate down smcelhinney
-- 20230321 updated change rate calculations to leverage cdb_segments to determine actual space USED instead of ALLOCATED - smcelhinney
-- NOTE(review): the inline redo-size view below is not joined to sgmt on con_id, so this is a
-- cross join; verify whether a "sgmt.con_id = <inline view>.con_id" predicate was intended.
UPDATE rubrikDataCollection rbk
   SET dailyChangeRate = (select dailyChangeRate
                            from (select sgmt.con_id,
                                         round((avg(redo_size)/sum(sgmt.bytes)),8) dailyChangeRate
                                    from containers(cdb_segments) sgmt,
                                         (select con_id, trunc(completion_time) rundate,
                                                 sum(blocks*block_size) redo_size
                                            from containers(v$archived_log)
                                           where first_time > sysdate - 7
                                           group by trunc(completion_time), con_id)
                                   group by sgmt.con_id)
                           where con_id=rbk.con_id)
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance)
   and con_id = rbk.con_id;

-- v$datafile is container-aware (no need for container clause)
UPDATE rubrikDataCollection rbk
   SET datafileCount = (select count(*) from v$datafile where con_id=rbk.con_id group by con_id)
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance)
   and con_id = rbk.con_id;

-- v$logfile is container-aware (no need for container clause)
UPDATE rubrikDataCollection rbk
   SET logfileCount = (select count(*) from v$logfile where con_id=rbk.con_id group by con_id)
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance)
   and con_id = rbk.con_id;

-- as a Multitenant instance won't have con_id=0, the result will be added into the root container (con_id=1)
UPDATE rubrikDataCollection rbk
   SET logfileCount = (SELECT SUM(total)
                         FROM (select count(*) total from v$logfile where con_id=0
                               UNION ALL
                               select count(*) total from v$logfile where con_id=1))
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance)
   and rbk.con_id = 1;

UPDATE rubrikDataCollection rbk
   SET logfileCount = 0
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance)
   and logfileCount is null;

-- v$tempfile is container-aware (no need for container clause)
UPDATE rubrikDataCollection rbk
   SET tempfileCount = (select count(*) from v$tempfile where con_id=rbk.con_id group by con_id)
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance)
   and con_id = rbk.con_id;

-- as a Multitenant instance won't have con_id=0, the result will be added into the root container (con_id=1)
UPDATE rubrikDataCollection rbk
   SET tempfileCount = (SELECT SUM(total)
                          FROM (select count(*) total from v$tempfile where con_id=0
                                UNION ALL
                                select count(*) total from v$tempfile where con_id=1))
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance)
   and rbk.con_id = 1;

-- need to explicitly set tempfileCount if the previous query returns a null value
UPDATE rubrikDataCollection rbk
   SET tempfileCount = 0
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance)
   and tempfileCount is null;

-- v$archived_log is container-aware (no need for container clause)
-- 20230322 - updating query to return dailyRedoSize in MB - smcelhinney
UPDATE rubrikDataCollection rbk
   SET dailyRedoSize = (select dailyRedoSize
                          from (select con_id, avg(redo_size/1024/1024) dailyRedoSize
                                  from (select con_id, trunc(completion_time) rundate,
                                               sum(blocks*block_size) redo_size
                                          from v$archived_log
                                         where first_time > sysdate - 7
                                         group by trunc(completion_time), con_id)
                                 group by con_id)
                         where con_id=rbk.con_id)
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance)
   and con_id = rbk.con_id;

-- as a Multitenant instance won't have con_id=0, the result will be added into the root container (con_id=1)
-- 20230322 - updating query to return dailyRedoSize in MB - smcelhinney
UPDATE rubrikDataCollection rbk
   SET dailyRedoSize = (select dailyRedoSize
                          from (select con_id, avg(redo_size/1024/1024) dailyRedoSize
                                  from (select con_id, trunc(completion_time) rundate,
                                               sum(blocks*block_size) redo_size
                                          from v$archived_log
                                         where first_time > sysdate - 7
                                         group by trunc(completion_time), con_id)
                                 group by con_id)
                         where con_id=0)
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance)
   and rbk.con_id = 1;

-- need to explicitly set dailyRedoSize if the previous query returns a null value
UPDATE rubrikDataCollection rbk
   SET dailyRedoSize = 0
 WHERE instName = (select instance_name from v$instance)
   and hostName = (select host_name from v$instance)
   and dailyRedoSize is null;

-- rows collected without a container id belong to the whole instance; record them as con_id 0
update rubrikDataCollection rbk set con_id=0 where con_id is null;
-- update root container and pdb seed names to include the cdb database name
update rubrikDataCollection rbk set conName=(select name ||'.CDB$ROOT' from v$database) where conName='CDB$ROOT';
update rubrikDataCollection rbk set conName=(select name ||'.PDB$SEED' from v$database) where conName='PDB$SEED';
-- update remaining pdbs to append the CDB name so pdb/cdb relationships are not lost in the csv
update rubrikDataCollection rbk set conName=(select name ||'.' from v$database)||conName where con_id>2;

commit;

-- format data collected for csv output
-- set markup csv on
set linesize 32000
set colsep ,,
set headsep off
set head off
set trimspool on
set trimout on
set feedback off
set pagesize 0
set wrap off

spool rbkDiscovery.csv append

-- select * from rubrikDataCollection;
-- 20230322 - reordering query output to logically group data - smcelhinney
-- 20240827 - changing column separator to prevent data shift due to DG setting in LogArchiveConfig - smcelhinney
select con_id                   ||',,'||
       conName                  ||',,'||
       dbSizeMB                 ||',,'||
       allocated_dbSizeMB       ||',,'||
       dailyChangeRate          ||',,'||
       dailyRedoSize            ||',,'||
       datafileCount            ||',,'||
       tablespaceCount          ||',,'||
       encryptedTablespaceCount ||',,'||
       encryptedDataSizeMB      ||',,'||
       biggestBigfileMB         ||',,'||
       bigfileTablespaceCount   ||',,'||
       bigfileDataSizeMB        ||',,'||
       blockSize                ||',,'||
       hostName                 ||',,'||
       instName                 ||',,'||
       dbVersion                ||',,'||
       dbEdition                ||',,'||
       platformName             ||',,'||
       dbName                   ||',,'||
       dbUniqueName             ||',,'||
       dbID                     ||',,'||
       flashbackEnabled         ||',,'||
       archiveLogEnabled        ||',,'||
       spfile                   ||',,'||
       patchLevel               ||',,'||
       cpuCount                 ||',,'||
       racEnabled               ||',,'||
       sgaMaxSize               ||',,'||
       sgaTarget                ||',,'||
       pgaAggregateTarget       ||',,'||
       physMemory               ||',,'||
       dNFSenabled              ||',,'||
       GoldenGate               ||',,'||
       exadataEnabled           ||',,'||
       bctEnabled               ||',,'||
       LogArchiveConfig         ||',,'||
       ArchiveLagTarget         ||',,'||
       logfileCount             ||',,'||
       tempfileCount
  from rubrikDataCollection;

spool off

truncate table rubrikDataCollection;

drop table rubrikDataCollection;

exit;
-------------------------------------------------------------------------------- /CLOUD/EXAMPLES/aws-backup-plans-info-example.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "AdvancedBackupSettings": [], 4 | "BackupPlan": { 5 | "AdvancedBackupSettings": [], 6 | "BackupPlanName": "BackupPlanName-00000", 7 | "Rules": [ 8 | { 9 | "CompletionWindowMinutes": 10080, 10 | "CopyActions": [], 11 | "EnableContinuousBackup": false, 12 | "Lifecycle": { 13 | "DeleteAfterDays": 35, 14 | "MoveToColdStorageAfterDays": 0, 15 | "OptInToArchiveForSupportedResources": false 16 | }, 17 | "RecoveryPointTags": {}, 18 | "RuleId": "RuleId-00000", 19 | "RuleName": "RuleName-00000", 20 | "ScheduleExpression": "cron(0 5 ? * * *)", 21 | "ScheduleExpressionTimezone": "Etc/UTC", 22 | "StartWindowMinutes": 480, 23 | "TargetBackupVaultName": "TargetBackupVaultName-00000" 24 | } 25 | ] 26 | }, 27 | "BackupPlanArn": "BackupPlanArn-00000", 28 | "BackupPlanId": "BackupPlanId-00000", 29 | "CreationDate": "2022-06-06T23:33:40.022Z", 30 | "CreatorRequestId": "CreatorRequestId-00000", 31 | "DeletionDate": "0001-01-01T00:00:00", 32 | "LastExecutionDate": "2024-10-02T05:43:06.3719999Z", 33 | "VersionId": "VersionId-00000", 34 | "ResponseMetadata": { 35 | "RequestId": "RequestId-00000", 36 | "Metadata": {}, 37 | "ChecksumAlgorithm": 0, 38 | "ChecksumValidationStatus": 0 39 | }, 40 | "ContentLength": 1004, 41 | "HttpStatusCode": 200, 42 | "Resources": [] 43 | }, 44 | { 45 | "AdvancedBackupSettings": [], 46 | "BackupPlan": { 47 | "AdvancedBackupSettings": [], 48 | "BackupPlanName": "BackupPlanName-00001", 49 | "Rules": [ 50 | { 51 | "CompletionWindowMinutes": 10080, 52 | "CopyActions": [], 53 | "EnableContinuousBackup": false, 54 | "Lifecycle": { 55 | "DeleteAfterDays": 35, 56 | "MoveToColdStorageAfterDays": 0, 57 | "OptInToArchiveForSupportedResources": false 58 | }, 59 | "RecoveryPointTags": {}, 60 | "RuleId": "RuleId-00001", 61 | "RuleName": 
"RuleName-00001", 62 | "ScheduleExpression": "cron(0 5 ? * * *)", 63 | "ScheduleExpressionTimezone": "Etc/UTC", 64 | "StartWindowMinutes": 480, 65 | "TargetBackupVaultName": "TargetBackupVaultName-00001" 66 | } 67 | ] 68 | }, 69 | "BackupPlanArn": "BackupPlanArn-00001", 70 | "BackupPlanId": "BackupPlanId-00001", 71 | "CreationDate": "2023-02-02T15:36:57.6909999Z", 72 | "CreatorRequestId": null, 73 | "DeletionDate": "0001-01-01T00:00:00", 74 | "LastExecutionDate": "2024-10-01T07:05:31.052Z", 75 | "VersionId": "VersionId-00001", 76 | "ResponseMetadata": { 77 | "RequestId": "RequestId-00001", 78 | "Metadata": {}, 79 | "ChecksumAlgorithm": 0, 80 | "ChecksumValidationStatus": 0 81 | }, 82 | "ContentLength": 883, 83 | "HttpStatusCode": 200, 84 | "Resources": [] 85 | }, 86 | { 87 | "AdvancedBackupSettings": [], 88 | "BackupPlan": { 89 | "AdvancedBackupSettings": [], 90 | "BackupPlanName": "BackupPlanName-00002", 91 | "Rules": [ 92 | { 93 | "CompletionWindowMinutes": 10080, 94 | "CopyActions": [], 95 | "EnableContinuousBackup": false, 96 | "Lifecycle": { 97 | "DeleteAfterDays": 35, 98 | "MoveToColdStorageAfterDays": 0, 99 | "OptInToArchiveForSupportedResources": false 100 | }, 101 | "RecoveryPointTags": {}, 102 | "RuleId": "RuleId-00002", 103 | "RuleName": "RuleName-00001", 104 | "ScheduleExpression": "cron(0 5 ? 
* * *)", 105 | "ScheduleExpressionTimezone": "Etc/UTC", 106 | "StartWindowMinutes": 480, 107 | "TargetBackupVaultName": "TargetBackupVaultName-00001" 108 | } 109 | ] 110 | }, 111 | "BackupPlanArn": "BackupPlanArn-00002", 112 | "BackupPlanId": "BackupPlanId-00002", 113 | "CreationDate": "2023-04-27T16:43:22.5239999Z", 114 | "CreatorRequestId": null, 115 | "DeletionDate": "0001-01-01T00:00:00", 116 | "LastExecutionDate": "2024-10-02T05:09:24.1659998Z", 117 | "VersionId": "VersionId-00002", 118 | "ResponseMetadata": { 119 | "RequestId": "RequestId-00002", 120 | "Metadata": {}, 121 | "ChecksumAlgorithm": 0, 122 | "ChecksumValidationStatus": 0 123 | }, 124 | "ContentLength": 893, 125 | "HttpStatusCode": 200, 126 | "Resources": [ 127 | "Resources-00000" 128 | ] 129 | }, 130 | { 131 | "AdvancedBackupSettings": [], 132 | "BackupPlan": { 133 | "AdvancedBackupSettings": [], 134 | "BackupPlanName": "BackupPlanName-00003", 135 | "Rules": [ 136 | { 137 | "CompletionWindowMinutes": 10080, 138 | "CopyActions": [], 139 | "EnableContinuousBackup": false, 140 | "Lifecycle": { 141 | "DeleteAfterDays": 12775, 142 | "MoveToColdStorageAfterDays": 0, 143 | "OptInToArchiveForSupportedResources": false 144 | }, 145 | "RecoveryPointTags": {}, 146 | "RuleId": "RuleId-00003", 147 | "RuleName": "RuleName-00001", 148 | "ScheduleExpression": "cron(0 5 ? * * *)", 149 | "ScheduleExpressionTimezone": "America/Los_Angeles", 150 | "StartWindowMinutes": 480, 151 | "TargetBackupVaultName": "TargetBackupVaultName-00001" 152 | }, 153 | { 154 | "CompletionWindowMinutes": 10080, 155 | "CopyActions": [], 156 | "EnableContinuousBackup": false, 157 | "Lifecycle": { 158 | "DeleteAfterDays": 90, 159 | "MoveToColdStorageAfterDays": 0, 160 | "OptInToArchiveForSupportedResources": false 161 | }, 162 | "RecoveryPointTags": {}, 163 | "RuleId": "RuleId-00004", 164 | "RuleName": "RuleName-00002", 165 | "ScheduleExpression": "cron(0 5 ? 
* 7 *)", 166 | "ScheduleExpressionTimezone": "America/Los_Angeles", 167 | "StartWindowMinutes": 480, 168 | "TargetBackupVaultName": "TargetBackupVaultName-00001" 169 | }, 170 | { 171 | "CompletionWindowMinutes": 10080, 172 | "CopyActions": [], 173 | "EnableContinuousBackup": false, 174 | "Lifecycle": { 175 | "DeleteAfterDays": 2555, 176 | "MoveToColdStorageAfterDays": 90, 177 | "OptInToArchiveForSupportedResources": true 178 | }, 179 | "RecoveryPointTags": {}, 180 | "RuleId": "RuleId-00005", 181 | "RuleName": "RuleName-00003", 182 | "ScheduleExpression": "cron(0 5 1 * ? *)", 183 | "ScheduleExpressionTimezone": "America/Los_Angeles", 184 | "StartWindowMinutes": 480, 185 | "TargetBackupVaultName": "TargetBackupVaultName-00001" 186 | }, 187 | { 188 | "CompletionWindowMinutes": 10080, 189 | "CopyActions": [], 190 | "EnableContinuousBackup": false, 191 | "Lifecycle": { 192 | "DeleteAfterDays": 30, 193 | "MoveToColdStorageAfterDays": 0, 194 | "OptInToArchiveForSupportedResources": false 195 | }, 196 | "RecoveryPointTags": {}, 197 | "RuleId": "RuleId-00006", 198 | "RuleName": "RuleName-00004", 199 | "ScheduleExpression": "cron(0 1 ? 
* * *)", 200 | "ScheduleExpressionTimezone": "America/Los_Angeles", 201 | "StartWindowMinutes": 480, 202 | "TargetBackupVaultName": "TargetBackupVaultName-00002" 203 | } 204 | ] 205 | }, 206 | "BackupPlanArn": "BackupPlanArn-00003", 207 | "BackupPlanId": "BackupPlanId-00003", 208 | "CreationDate": "2024-01-30T06:12:18.572Z", 209 | "CreatorRequestId": null, 210 | "DeletionDate": "0001-01-01T00:00:00", 211 | "LastExecutionDate": "2024-10-01T12:43:27.108Z", 212 | "VersionId": "VersionId-00003", 213 | "ResponseMetadata": { 214 | "RequestId": "RequestId-00003", 215 | "Metadata": {}, 216 | "ChecksumAlgorithm": 0, 217 | "ChecksumValidationStatus": 0 218 | }, 219 | "ContentLength": 2250, 220 | "HttpStatusCode": 200, 221 | "Resources": [ 222 | "Resources-00001" 223 | ] 224 | }, 225 | { 226 | "AdvancedBackupSettings": [], 227 | "BackupPlan": { 228 | "AdvancedBackupSettings": [], 229 | "BackupPlanName": "BackupPlanName-00004", 230 | "Rules": [ 231 | { 232 | "CompletionWindowMinutes": 10080, 233 | "CopyActions": [ 234 | { 235 | "DestinationBackupVaultArn": "DestinationBackupVaultArn-00000", 236 | "Lifecycle": { 237 | "DeleteAfterDays": 60, 238 | "MoveToColdStorageAfterDays": 0, 239 | "OptInToArchiveForSupportedResources": false 240 | } 241 | } 242 | ], 243 | "EnableContinuousBackup": false, 244 | "Lifecycle": { 245 | "DeleteAfterDays": 60, 246 | "MoveToColdStorageAfterDays": 0, 247 | "OptInToArchiveForSupportedResources": false 248 | }, 249 | "RecoveryPointTags": {}, 250 | "RuleId": "RuleId-00007", 251 | "RuleName": "RuleName-00005", 252 | "ScheduleExpression": "cron(30 0 1 * ? 
*)", 253 | "ScheduleExpressionTimezone": "Etc/UTC", 254 | "StartWindowMinutes": 480, 255 | "TargetBackupVaultName": "TargetBackupVaultName-00003" 256 | } 257 | ] 258 | }, 259 | "BackupPlanArn": "BackupPlanArn-00004", 260 | "BackupPlanId": "BackupPlanId-00004", 261 | "CreationDate": "2024-03-26T20:34:49.677Z", 262 | "CreatorRequestId": null, 263 | "DeletionDate": "0001-01-01T00:00:00", 264 | "LastExecutionDate": "2024-10-01T01:16:20.1779999Z", 265 | "VersionId": "VersionId-00004", 266 | "ResponseMetadata": { 267 | "RequestId": "RequestId-00004", 268 | "Metadata": {}, 269 | "ChecksumAlgorithm": 0, 270 | "ChecksumValidationStatus": 0 271 | }, 272 | "ContentLength": 1121, 273 | "HttpStatusCode": 200, 274 | "Resources": [ 275 | "Resources-00002" 276 | ] 277 | }, 278 | { 279 | "AdvancedBackupSettings": [], 280 | "BackupPlan": { 281 | "AdvancedBackupSettings": [], 282 | "BackupPlanName": "BackupPlanName-00005", 283 | "Rules": [ 284 | { 285 | "CompletionWindowMinutes": 10080, 286 | "CopyActions": [], 287 | "EnableContinuousBackup": false, 288 | "Lifecycle": { 289 | "DeleteAfterDays": 35, 290 | "MoveToColdStorageAfterDays": 0, 291 | "OptInToArchiveForSupportedResources": false 292 | }, 293 | "RecoveryPointTags": {}, 294 | "RuleId": "RuleId-00008", 295 | "RuleName": "RuleName-00002", 296 | "ScheduleExpression": "cron(30 0 ? 
* 3 *)", 297 | "ScheduleExpressionTimezone": "America/Los_Angeles", 298 | "StartWindowMinutes": 480, 299 | "TargetBackupVaultName": "TargetBackupVaultName-00001" 300 | } 301 | ] 302 | }, 303 | "BackupPlanArn": "BackupPlanArn-00005", 304 | "BackupPlanId": "BackupPlanId-00005", 305 | "CreationDate": "2024-07-23T21:47:45.8199999Z", 306 | "CreatorRequestId": null, 307 | "DeletionDate": "0001-01-01T00:00:00", 308 | "LastExecutionDate": "2024-10-01T09:49:18.467Z", 309 | "VersionId": "VersionId-00005", 310 | "ResponseMetadata": { 311 | "RequestId": "RequestId-00005", 312 | "Metadata": {}, 313 | "ChecksumAlgorithm": 0, 314 | "ChecksumValidationStatus": 0 315 | }, 316 | "ContentLength": 894, 317 | "HttpStatusCode": 200, 318 | "Resources": [ 319 | "Resources-00003", 320 | "Resources-00004" 321 | ] 322 | }, 323 | { 324 | "AdvancedBackupSettings": [], 325 | "BackupPlan": { 326 | "AdvancedBackupSettings": [], 327 | "BackupPlanName": "BackupPlanName-00006", 328 | "Rules": [ 329 | { 330 | "CompletionWindowMinutes": 10080, 331 | "CopyActions": [], 332 | "EnableContinuousBackup": false, 333 | "Lifecycle": { 334 | "DeleteAfterDays": 35, 335 | "MoveToColdStorageAfterDays": 0, 336 | "OptInToArchiveForSupportedResources": false 337 | }, 338 | "RecoveryPointTags": {}, 339 | "RuleId": "RuleId-00009", 340 | "RuleName": "RuleName-00001", 341 | "ScheduleExpression": "cron(0 5 ? 
* * *)", 342 | "ScheduleExpressionTimezone": "Etc/UTC", 343 | "StartWindowMinutes": 480, 344 | "TargetBackupVaultName": "TargetBackupVaultName-00001" 345 | } 346 | ] 347 | }, 348 | "BackupPlanArn": "BackupPlanArn-00006", 349 | "BackupPlanId": "BackupPlanId-00006", 350 | "CreationDate": "2021-02-18T16:36:59.434Z", 351 | "CreatorRequestId": null, 352 | "DeletionDate": "0001-01-01T00:00:00", 353 | "LastExecutionDate": "2024-10-02T06:31:01.263Z", 354 | "VersionId": "VersionId-00006", 355 | "ResponseMetadata": { 356 | "RequestId": "RequestId-00006", 357 | "Metadata": {}, 358 | "ChecksumAlgorithm": 0, 359 | "ChecksumValidationStatus": 0 360 | }, 361 | "ContentLength": 890, 362 | "HttpStatusCode": 200, 363 | "Resources": [] 364 | }, 365 | { 366 | "AdvancedBackupSettings": [], 367 | "BackupPlan": { 368 | "AdvancedBackupSettings": [], 369 | "BackupPlanName": "BackupPlanName-00007", 370 | "Rules": [ 371 | { 372 | "CompletionWindowMinutes": 10080, 373 | "CopyActions": [], 374 | "EnableContinuousBackup": false, 375 | "Lifecycle": { 376 | "DeleteAfterDays": 35, 377 | "MoveToColdStorageAfterDays": 0, 378 | "OptInToArchiveForSupportedResources": false 379 | }, 380 | "RecoveryPointTags": {}, 381 | "RuleId": "RuleId-00000", 382 | "RuleName": "RuleName-00001", 383 | "ScheduleExpression": "cron(0 5 ? * * *)", 384 | "ScheduleExpressionTimezone": "Etc/UTC", 385 | "StartWindowMinutes": 480, 386 | "TargetBackupVaultName": "TargetBackupVaultName-00001" 387 | }, 388 | { 389 | "CompletionWindowMinutes": 10080, 390 | "CopyActions": [], 391 | "EnableContinuousBackup": false, 392 | "Lifecycle": { 393 | "DeleteAfterDays": 365, 394 | "MoveToColdStorageAfterDays": 30, 395 | "OptInToArchiveForSupportedResources": false 396 | }, 397 | "RecoveryPointTags": {}, 398 | "RuleId": "RuleId-00001", 399 | "RuleName": "RuleName-00006", 400 | "ScheduleExpression": "cron(0 5 1 * ? 
*)", 401 | "ScheduleExpressionTimezone": "Etc/UTC", 402 | "StartWindowMinutes": 480, 403 | "TargetBackupVaultName": "TargetBackupVaultName-00001" 404 | } 405 | ] 406 | }, 407 | "BackupPlanArn": "BackupPlanArn-00007", 408 | "BackupPlanId": "BackupPlanId-00007", 409 | "CreationDate": "2021-04-01T19:27:10.8829998Z", 410 | "CreatorRequestId": null, 411 | "DeletionDate": "0001-01-01T00:00:00", 412 | "LastExecutionDate": "2024-10-01T06:43:19.3859999Z", 413 | "VersionId": "VersionId-00007", 414 | "ResponseMetadata": { 415 | "RequestId": "RequestId-00007", 416 | "Metadata": {}, 417 | "ChecksumAlgorithm": 0, 418 | "ChecksumValidationStatus": 0 419 | }, 420 | "ContentLength": 1308, 421 | "HttpStatusCode": 200, 422 | "Resources": [] 423 | }, 424 | { 425 | "AdvancedBackupSettings": [], 426 | "BackupPlan": { 427 | "AdvancedBackupSettings": [], 428 | "BackupPlanName": "BackupPlanName-00000", 429 | "Rules": [ 430 | { 431 | "CompletionWindowMinutes": 10080, 432 | "CopyActions": [], 433 | "EnableContinuousBackup": false, 434 | "Lifecycle": { 435 | "DeleteAfterDays": 35, 436 | "MoveToColdStorageAfterDays": 0, 437 | "OptInToArchiveForSupportedResources": false 438 | }, 439 | "RecoveryPointTags": {}, 440 | "RuleId": "RuleId-00002", 441 | "RuleName": "RuleName-00000", 442 | "ScheduleExpression": "cron(0 5 ? 
* * *)", 443 | "ScheduleExpressionTimezone": "Etc/UTC", 444 | "StartWindowMinutes": 480, 445 | "TargetBackupVaultName": "TargetBackupVaultName-00000" 446 | } 447 | ] 448 | }, 449 | "BackupPlanArn": "BackupPlanArn-00008", 450 | "BackupPlanId": "BackupPlanId-00000", 451 | "CreationDate": "2022-06-03T21:05:53.8239998Z", 452 | "CreatorRequestId": "CreatorRequestId-00001", 453 | "DeletionDate": "0001-01-01T00:00:00", 454 | "LastExecutionDate": "2024-10-02T06:07:16.4709999Z", 455 | "VersionId": "VersionId-00008", 456 | "ResponseMetadata": { 457 | "RequestId": "RequestId-00008", 458 | "Metadata": {}, 459 | "ChecksumAlgorithm": 0, 460 | "ChecksumValidationStatus": 0 461 | }, 462 | "ContentLength": 1004, 463 | "HttpStatusCode": 200, 464 | "Resources": [] 465 | }, 466 | { 467 | "AdvancedBackupSettings": [], 468 | "BackupPlan": { 469 | "AdvancedBackupSettings": [], 470 | "BackupPlanName": "BackupPlanName-00008", 471 | "Rules": [ 472 | { 473 | "CompletionWindowMinutes": 120, 474 | "CopyActions": [ 475 | { 476 | "DestinationBackupVaultArn": "DestinationBackupVaultArn-00000", 477 | "Lifecycle": { 478 | "DeleteAfterDays": 35, 479 | "MoveToColdStorageAfterDays": 0, 480 | "OptInToArchiveForSupportedResources": false 481 | } 482 | } 483 | ], 484 | "EnableContinuousBackup": false, 485 | "Lifecycle": { 486 | "DeleteAfterDays": 35, 487 | "MoveToColdStorageAfterDays": 0, 488 | "OptInToArchiveForSupportedResources": false 489 | }, 490 | "RecoveryPointTags": {}, 491 | "RuleId": "RuleId-00003", 492 | "RuleName": "RuleName-00007", 493 | "ScheduleExpression": "cron(45 16 ? 
* * *)", 494 | "ScheduleExpressionTimezone": "America/Los_Angeles", 495 | "StartWindowMinutes": 60, 496 | "TargetBackupVaultName": "TargetBackupVaultName-00001" 497 | } 498 | ] 499 | }, 500 | "BackupPlanArn": "BackupPlanArn-00009", 501 | "BackupPlanId": "BackupPlanId-00008", 502 | "CreationDate": "2024-02-23T00:41:54.97Z", 503 | "CreatorRequestId": null, 504 | "DeletionDate": "0001-01-01T00:00:00", 505 | "LastExecutionDate": "2024-10-01T23:47:11.3259999Z", 506 | "VersionId": "VersionId-00009", 507 | "ResponseMetadata": { 508 | "RequestId": "RequestId-00009", 509 | "Metadata": {}, 510 | "ChecksumAlgorithm": 0, 511 | "ChecksumValidationStatus": 0 512 | }, 513 | "ContentLength": 1083, 514 | "HttpStatusCode": 200, 515 | "Resources": [ 516 | "Resources-00005" 517 | ] 518 | }, 519 | { 520 | "AdvancedBackupSettings": [], 521 | "BackupPlan": { 522 | "AdvancedBackupSettings": [], 523 | "BackupPlanName": "BackupPlanName-00009", 524 | "Rules": [ 525 | { 526 | "CompletionWindowMinutes": 10080, 527 | "CopyActions": [], 528 | "EnableContinuousBackup": false, 529 | "Lifecycle": { 530 | "DeleteAfterDays": 2, 531 | "MoveToColdStorageAfterDays": 0, 532 | "OptInToArchiveForSupportedResources": false 533 | }, 534 | "RecoveryPointTags": {}, 535 | "RuleId": "RuleId-00004", 536 | "RuleName": "BackupPlanName-00009", 537 | "ScheduleExpression": "cron(30 0 ? 
* * *)", 538 | "ScheduleExpressionTimezone": "America/Chicago", 539 | "StartWindowMinutes": 240, 540 | "TargetBackupVaultName": "TargetBackupVaultName-00001" 541 | } 542 | ] 543 | }, 544 | "BackupPlanArn": "BackupPlanArn-00000", 545 | "BackupPlanId": "BackupPlanId-00009", 546 | "CreationDate": "2024-08-20T16:09:01.9579999Z", 547 | "CreatorRequestId": null, 548 | "DeletionDate": "0001-01-01T00:00:00", 549 | "LastExecutionDate": "2024-10-02T06:11:01.4619998Z", 550 | "VersionId": "VersionId-00000", 551 | "ResponseMetadata": { 552 | "RequestId": "RequestId-00000", 553 | "Metadata": {}, 554 | "ChecksumAlgorithm": 0, 555 | "ChecksumValidationStatus": 0 556 | }, 557 | "ContentLength": 897, 558 | "HttpStatusCode": 200, 559 | "Resources": [ 560 | "Resources-00006" 561 | ] 562 | } 563 | ] 564 | --------------------------------------------------------------------------------