├── .gitignore ├── LICENSE ├── README.md ├── autorestore.py ├── autorestore_check.sql ├── backup.netapp.sample.cfg ├── backup.py ├── backup.sample.cfg ├── backup.softnas.sample.cfg ├── backupcommon.py ├── certs └── README.md ├── dbinfo.py ├── exec_all.py ├── get_oracle_version.sh ├── huawei.py ├── huaweidoradocredentials.cfg.sample ├── logs.sample ├── autorestore-full-validation-20161108T224851-orcl.log ├── autorestore-no-validation-20161108T224722-orcl.log ├── orcl_20161108T215324_config.log └── orcl_20161108T215335_imagecopywithsnap.log ├── netapp.py ├── netappcredentials.cfg.sample ├── oraexec.py ├── report.py ├── reporttemplate.cfg ├── restore.py ├── restorecommon.py ├── restoretemplate.cfg ├── rmantemplate.cfg ├── softnas.py ├── softnascredentials.cfg.sample ├── tns.sample ├── sqlnet.ora └── tnsnames.ora ├── zfscredentials.cfg.sample ├── zfssa.py ├── zsnapper.py └── zvolume.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | wallet* 3 | *demo* 4 | *credentials.cfg 5 | backup.cfg 6 | .ftpconfig 7 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2016 Unibet Group 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /autorestore.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python2 2 | 3 | import os, sys 4 | from datetime import datetime, date, timedelta 5 | from backupcommon import BackupLock, BackupLogger, info, debug, error, exception, Configuration, BackupTemplate, scriptpath 6 | from random import randint 7 | from oraexec import OracleExec 8 | from restorecommon import RestoreDB 9 | from tempfile import mkstemp, TemporaryFile 10 | 11 | def printhelp(): 12 | print "Usage: autorestore.py [config]" 13 | print " [!][config] is optional, if missed then action is performed on all databases in config file. If it starts with !, then specified config is excluded." 14 | print " [config] could either be database unique name to be restored or on of the repository actions:" 15 | print " --createcatalog" 16 | print " --listvalidationdates" 17 | sys.exit(2) 18 | 19 | if len(sys.argv) not in [2,3]: 20 | printhelp() 21 | 22 | if os.geteuid() == 0: 23 | print "No, I will not run as root." 
24 | sys.exit(0) 25 | 26 | if (not os.getenv('AUTORESTORE_SAFE_SANDBOX')) or (os.environ['AUTORESTORE_SAFE_SANDBOX'] != 'TRUE'): 27 | print "THIS AUTORESTORE PROCESS CAN BE VERY DANGEROUS IF THIS HOST HAS ACCESS TO PRODUCTION DATABASE FILESYSTEM/STORAGE." 28 | print "THE RESTORE PROCESS CAN OVERWRITE OR DELETE FILES ON THEIR ORIGINAL CONTROL FILE LOCATIONS!" 29 | print "RUN IT ONLY ON A HOST THAT IS COMPLETELY SANDBOXED FROM PRODUCTION DATABASE ENVIRONMENT." 30 | print "TO CONTINUE, SET ENVIRONMENT VARIABLE AUTORESTORE_SAFE_SANDBOX TO VALUE TRUE (CASE SENSITIVE)." 31 | print "" 32 | sys.exit(3) 33 | 34 | Configuration.init('autorestore', configfilename=sys.argv[1], additionaldefaults={'customverifydate': 'select max(time_dp) from sys.smon_scn_time','autorestoreenabled': '1', 35 | 'autorestoreinstancenumber': '1', 'autorestorethread': '1'}) 36 | oexec = OracleExec(oraclehome=Configuration.get('oraclehome', 'generic'), tnspath=os.path.join(scriptpath(), Configuration.get('tnsadmin', 'generic'))) 37 | restoretemplate = BackupTemplate('restoretemplate.cfg') 38 | 39 | exitstatus = 0 40 | 41 | # System actions 42 | 43 | # Clean destination directory 44 | def cleantarget(restoredest): 45 | debug("ACTION: Cleaning destination directory %s" % restoredest) 46 | for root, dirs, files in os.walk(restoredest, topdown=False): 47 | for name in files: 48 | os.remove(os.path.join(root, name)) 49 | for name in dirs: 50 | os.rmdir(os.path.join(root, name)) 51 | 52 | def validationdate(database): 53 | tmpsection = Configuration.defaultsection 54 | Configuration.defaultsection = database 55 | validatemodulus = int(Configuration.get('autorestoremodulus', 'autorestore')) 56 | Configuration.defaultsection = tmpsection 57 | # 58 | days_since_epoch = (datetime.utcnow() - datetime(1970,1,1)).days 59 | try: 60 | hashstring = Configuration.get('stringforvalidationmod', database) 61 | except: 62 | hashstring = database 63 | # 64 | mod1 = days_since_epoch % validatemodulus 65 | mod2 = 
hash(hashstring) % validatemodulus 66 | validatecorruption = mod1 == mod2 67 | days_to_next_validation = (mod2-mod1) if mod2 > mod1 else (validatemodulus-(mod1-mod2)) 68 | next_validation = date.today() + timedelta(days=days_to_next_validation) 69 | return (validatecorruption, days_to_next_validation, next_validation) 70 | 71 | def runrestore(database): 72 | global exitstatus 73 | # 74 | Configuration.defaultsection = database 75 | # 76 | restoredest = Configuration.get('autorestoredestination','autorestore') 77 | mountdest = Configuration.get('autorestoremountpoint','autorestore') 78 | logdir = Configuration.get('autorestorelogdir','autorestore') 79 | Configuration.substitutions.update({ 80 | 'logdir': logdir, 81 | 'autorestorecatalog': Configuration.get('autorestorecatalog','autorestore') 82 | }) 83 | if restoredest is None or not os.path.exists(restoredest) or not os.path.isdir(restoredest): 84 | print "Restore directory %s not found or is not a proper directory" % restoredest 85 | sys.exit(2) 86 | if mountdest is None or not os.path.exists(mountdest) or not os.path.isdir(mountdest): 87 | print "Clone mount directory %s not found or is not a proper directory" % mountdest 88 | sys.exit(2) 89 | # 90 | validatechance = int(Configuration.get('autorestorevalidatechance', 'autorestore')) 91 | validatemodulus = int(Configuration.get('autorestoremodulus', 'autorestore')) 92 | # Reinitialize logging 93 | BackupLogger.init(os.path.join(logdir, "%s-%s.log" % (datetime.now().strftime('%Y%m%dT%H%M%S'), database)), database) 94 | BackupLogger.clean() 95 | # 96 | restore = RestoreDB(database) 97 | restore.set_mount_path(mountdest) 98 | restore.set_restore_path(restoredest) 99 | # 100 | info("Logfile: %s" % BackupLogger.logfile) 101 | Configuration.substitutions.update({ 102 | 'logfile': BackupLogger.logfile 103 | }) 104 | cleantarget(restoredest) 105 | # 106 | success = False 107 | # 108 | if validatemodulus > 0: 109 | # Validation based on modulus 110 | validationinfo = 
validationdate(database) 111 | validatecorruption = validationinfo[0] 112 | if not validatecorruption: 113 | debug("Next database validation in %d days: %s" % ( validationinfo[1], validationinfo[2] )) 114 | else: 115 | # Validation based on random 116 | validatecorruption = (validatechance > 0) and (randint(1, validatechance) == validatechance) 117 | if validatecorruption: 118 | debug("Database will be validated during this restore session") 119 | # Start restore 120 | try: 121 | restore.run() 122 | restore.verify() 123 | if validatecorruption: 124 | restore.blockcheck() 125 | success = True 126 | except: 127 | exitstatus = 1 128 | exception("Error happened, but we can continue with the next database.") 129 | finally: 130 | restore.cleanup() 131 | # Log result to catalog 132 | Configuration.substitutions.update({ 133 | 'log_dbname': database, 134 | 'log_start': restore.starttime.strftime('%Y-%m-%d %H-%M-%S'), 135 | 'log_stop': restore.endtime.strftime('%Y-%m-%d %H-%M-%S'), 136 | 'log_success': '1' if success else '0', 137 | 'log_diff': restore.verifyseconds, 138 | 'log_snapid': restore.sourcesnapid, 139 | 'log_validated': '1' if validatecorruption else '0' 140 | }) 141 | debug('Logging the result to catalog.') 142 | try: 143 | oexec.sqlldr(Configuration.get('autorestorecatalog','autorestore'), restoretemplate.get('sqlldrlog')) 144 | except: 145 | debug("Sending the logfile to catalog failed.") 146 | try: 147 | oexec.sqlplus(restoretemplate.get('insertlog'), silent=False) 148 | except: 149 | debug("Logging the result to catalog failed.") 150 | # Finish up 151 | info("Restore %s, elapsed time: %s" % ('successful' if success else 'failed', restore.endtime-restore.starttime)) 152 | # Run ADRCI to clean up diag 153 | adrage = int(Configuration.get('logretention','generic'))*1440 154 | f1 = mkstemp(suffix=".adi") 155 | ftmp = os.fdopen(f1[0], "w") 156 | ftmp.write("set base %s\n" % logdir) 157 | ftmp.write("show homes\n") 158 | ftmp.close() 159 | f2 = 
mkstemp(suffix=".adi") 160 | ftmp2 = os.fdopen(f2[0], "w") 161 | ftmp2.write("set base %s\n" % logdir) 162 | with TemporaryFile() as f: 163 | try: 164 | oexec.adrci(f1[1], f) 165 | f.seek(0,0) 166 | output = f.read() 167 | startreading = False 168 | for line in output.splitlines(): 169 | if line.startswith('ADR Homes:'): 170 | startreading = True 171 | elif startreading: 172 | ftmp2.write("set home %s\n" % line.strip()) 173 | ftmp2.write("purge -age %d\n" % adrage) 174 | ftmp2.close() 175 | oexec.adrci(f2[1], f) 176 | except: 177 | print "Executing ADRCI failed." 178 | finally: 179 | os.unlink(f1[1]) 180 | os.unlink(f2[1]) 181 | # 182 | BackupLogger.close(True) 183 | 184 | # UI 185 | 186 | def loopdatabases(): 187 | excludelist = ['generic','rman','zfssa','autorestore','netapp','huaweidorado'] 188 | sections = Configuration.sections() 189 | if action is not None: 190 | if action[0] == "!": 191 | excludelist.append(action[1:]) 192 | else: 193 | sections = [action] 194 | for configname in sections: 195 | if configname not in excludelist: 196 | if Configuration.get('autorestoreenabled', configname) == '1': 197 | yield configname 198 | 199 | action = None 200 | if len(sys.argv) == 3: 201 | action = sys.argv[2] 202 | 203 | if action == '--listvalidationdates': 204 | # This action does not need a lock 205 | if int(Configuration.get('autorestoremodulus', 'autorestore')) > 0: 206 | action = None 207 | for configname in loopdatabases(): 208 | validationinfo = validationdate(configname) 209 | print "%s: %s (in %d days)" % (configname, validationinfo[2], validationinfo[1]) 210 | else: 211 | validatechance = int(Configuration.get('autorestorevalidatechance', 'autorestore')) 212 | if validatechance > 0: 213 | print "Validation is based on chance, probability 1/%d" % validatechance 214 | else: 215 | print "Database validation is not turned on" 216 | else: 217 | if action is not None and action.startswith('--'): 218 | if action == '--createcatalog': 219 | 
BackupLogger.init(os.path.join(Configuration.get('autorestorelogdir','autorestore'), "%s-config.log" % (datetime.now().strftime('%Y%m%dT%H%M%S'))), 'config') 220 | info("Logfile: %s" % BackupLogger.logfile) 221 | Configuration.substitutions.update({'logfile': BackupLogger.logfile}) 222 | oexec.sqlplus(restoretemplate.get('createcatalog'), silent=False) 223 | else: 224 | # Loop through all sections 225 | for configname in loopdatabases(): 226 | Configuration.defaultsection = configname 227 | lock = BackupLock(Configuration.get('autorestorelogdir','autorestore')) 228 | try: 229 | runrestore(configname) 230 | finally: 231 | lock.release() 232 | 233 | print "Exitstatus is %d" % exitstatus 234 | sys.exit(exitstatus) 235 | -------------------------------------------------------------------------------- /autorestore_check.sql: -------------------------------------------------------------------------------- 1 | -- Sample procedures and queries to automatically monitor autorestore status (from Nagios for example) 2 | 3 | FUNCTION autorestore_failed(p_msg OUT VARCHAR2) RETURN NUMBER IS 4 | p_dbs VARCHAR2(300); 5 | p_return NUMBER:= 0; -- Return OK 6 | BEGIN 7 | SELECT listagg(db_unique_name, ', ') within group (order by db_unique_name) INTO p_dbs FROM ( 8 | SELECT db_unique_name, success, start_time, rank() over(partition by db_unique_name order by start_time desc) rnk FROM autorestore.restoreaction WHERE start_time > sysdate-30) 9 | WHERE rnk = 1 AND success = 0; 10 | IF p_dbs IS NOT NULL THEN 11 | p_msg:= 'Databases where the last autorestore failed: '||p_dbs; 12 | p_return:= 2; -- Return CRITICAL 13 | END IF; 14 | RETURN p_return; 15 | END; 16 | 17 | FUNCTION autorestore_runtime(p_msg OUT VARCHAR2) RETURN NUMBER IS 18 | p_cnt NUMBER; 19 | p_dbs VARCHAR2(300); 20 | p_return NUMBER:= 0; -- Return OK 21 | BEGIN 22 | SELECT count(*), listagg(db_unique_name, ', ') within group (order by db_unique_name) INTO p_cnt, p_dbs FROM ( 23 | SELECT DISTINCT db_unique_name FROM 
autorestore.restoreaction WHERE start_time > sysdate-30
    MINUS
    SELECT db_unique_name FROM autorestore.restoreaction WHERE start_time >= sysdate-2);
  IF p_cnt > 0 THEN
    p_msg:= 'The following databases have not been restored within 2 days: '||p_dbs;
    p_return:= 2; -- Return CRITICAL
  END IF;
  RETURN p_return;
END;
--------------------------------------------------------------------------------
/backup.netapp.sample.cfg:
--------------------------------------------------------------------------------
[generic]
# After how many days the backup logs will be deleted
logretention: 20
# Root directory for backups, backups will be placed under $backupdest/dbsectionname
backupdest: /nfs/backup
# Warning and critical levels for checking last snapshot age (in hours), warning is exit code 1 and critical is exit code 2
warningsnapage: 26
criticalsnapage: 32
# Directory where tnsnames.ora and sqlnet.ora are located
tnsadmin: tns.sample
# OS user that executes the backup
osuser: oracle
# Oracle home
oraclehome: /u01/app/oracle/product/12.1.0.2/db
# Maxlockwait in minutes
maxlockwait: 30
# Python module name and class name that implement the storage snapshot/cloning functions (extending class SnapHandler in module backupcommon)
snappermodule: netapp
snapperclass: Netapp

[netapp]
# filer is the netapp administrative hostname that accepts API calls
filer: 10.10.10.11
# mounthost is used for constructing the NFS mount command; it is for display purposes only
mounthost: 10.10.10.12
volumeprefix: oc1_
cacert: certificatefile.cer

[rman]
catalog: /@rman

[autorestore]
# The directory to be used for restore tests. Must be empty, everything under it is destroyed.
# NB! This directory WILL BE CLEANED.
autorestoredestination: /nfs/autorestore/dest
# Clone name
autorestoreclonename: autorestore
# Place to mount the clone. This MUST be already described in /etc/fstab as mountable by user (with options fg,noauto,user).
# For example:
# zfssa.example.com:/export/demo-backup/autorestore /nfs/autorestore/mnt nfs rw,fg,soft,nointr,rsize=32768,wsize=32768,tcp,vers=3,timeo=600,noauto,user 0 0
autorestoremountpoint: /nfs/autorestore/mnt
# Restore will be done using the latest snapshot that is at least this many hours old
autorestoresnapage: 0
# Autorestore log files
autorestorelogdir: /nfs/autorestore/log
# Maximum difference between auto restore target date and customverifydate in minutes
autorestoremaxtoleranceminutes: 5
# Autorestorecatalog connect string
# First create user manually in the database:
# create user autorestore identified by complexpassword;
# grant create session,create table,create sequence,create procedure,create view,create job to autorestore;
# alter user autorestore quota unlimited on users;
# Then add it to password wallet and run ./autorestore.py configfilename --createcatalog
autorestorecatalog: /@autorestore
# Chance of doing a full database validation after restore, the larger the integer, the less likely it will be. Set to 0 to disable, set to 1 to always validate. 7 means 1/7 chance.
autorestorevalidatechance: 0
# Modulus of doing a full database validation. Validation is done if: mod(current day, autorestoremodulus) == mod(hash(database unique name), autorestoremodulus).
# This guarantees that validation for each database is done after every x days consistently. 0 means disable.
# Set either autorestorevalidatechance or autorestoremodulus to 0, they will not work concurrently, modulus is preferred.
60 | autorestoremodulus: 30 61 | 62 | ## Databases 63 | 64 | [cdb1] 65 | dbid: 888398667 66 | recoverywindow: 2 67 | parallel: 4 68 | snapexpirationdays: 31 69 | snapexpirationmonths: 6 70 | registercatalog: true 71 | hasdataguard: false 72 | schedulebackup: FREQ=DAILY;BYHOUR=10;BYMINUTE=0 73 | schedulearchlog: FREQ=HOURLY;BYHOUR=21 74 | customverifydate: select max(d) from (select cast(max(createtime) as date) d from exampleapp.transactions union all select current_dt from dbauser.autorestore_ping_timestamp) 75 | 76 | [olddb] 77 | dbid: 888398334 78 | recoverywindow: 2 79 | parallel: 4 80 | snapexpirationdays: 14 81 | snapexpirationmonths: 12 82 | registercatalog: true 83 | hasdataguard: false 84 | schedulebackup: FREQ=DAILY;BYHOUR=5;BYMINUTE=0 85 | schedulearchlog: FREQ=HOURLY 86 | genericoraclehome: /u01/app/oracle/product/11.2.0.4/db 87 | zfssapool: oldpool-0 88 | zfssaproject: databases-11g-backup 89 | customverifydate: select max(d) from (select cast(max(createtime) as date) d from exampleapp2.log union all select current_dt from dbauser.autorestore_ping_timestamp) 90 | autorestorethread: 3 91 | -------------------------------------------------------------------------------- /backup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python2 2 | 3 | import os, ConfigParser, sys 4 | from datetime import datetime, timedelta 5 | from subprocess import Popen, PIPE, STDOUT 6 | from tempfile import TemporaryFile 7 | from backupcommon import BackupLock, BackupLogger, info, debug, error, exception, scriptpath, Configuration, BackupTemplate, create_snapshot_class 8 | from oraexec import OracleExec 9 | 10 | # Check command line arguments 11 | uioptions = ['config','setschedule','backupimagecopy','report','validatebackup','generaterestore','imagecopywithsnap','missingarchlog'] 12 | 13 | def printhelp(): 14 | print "Usage: backup.py <%s>" % '|'.join(uioptions) 15 | sys.exit(2) 16 | 17 | if len(sys.argv) != 3: 18 | 
printhelp() 19 | else: 20 | scriptaction = sys.argv[2].lower() 21 | if not scriptaction in uioptions: 22 | printhelp() 23 | 24 | # Check environment 25 | if (scriptaction == 'setschedule') and (os.getenv('OSPASSWORD') is None): 26 | print "Environment variable OSPASSWORD must be set." 27 | sys.exit(2) 28 | 29 | # Directory where the executable script is located 30 | scriptpath = scriptpath() 31 | 32 | # Read configuration 33 | configsection = sys.argv[1] 34 | Configuration.init(configsection, additionaldefaults={'primarytns': configsection}) 35 | 36 | # Database specific configuration 37 | if Configuration.get('backupdestshared', 'generic').upper() == 'TRUE': 38 | backupdest = os.path.join(Configuration.get('backupdest', 'generic'), configsection) 39 | else: 40 | backupdest = Configuration.get('backupdest', 'generic') 41 | archdir = os.path.join(backupdest, 'archivelog') 42 | hasdataguard = Configuration.get('hasdataguard').upper() == 'TRUE' 43 | dosnapshot = Configuration.get('dosnapshot').upper() == 'TRUE' 44 | gimanaged = Configuration.get('gimanaged').upper() == 'TRUE' 45 | registercatalog = Configuration.get('registercatalog').upper() == 'TRUE' 46 | 47 | # Log file for this session 48 | logdir = os.path.join(backupdest, 'backup_logs') 49 | logfile = os.path.join(logdir, "%s_%s_%s.log" % (configsection, datetime.now().strftime('%Y%m%dT%H%M%S'), scriptaction) ) 50 | print "Log file for this session: %s" % logfile 51 | BackupLogger.init(logfile, configsection) 52 | BackupLogger.clean() 53 | 54 | # Oracle environment variables 55 | oraexec = OracleExec(Configuration.get('oraclehome', 'generic'), os.path.join(scriptpath, Configuration.get('tnsadmin', 'generic'))) 56 | 57 | # Prepare a dictionary of all possible template substitutions 58 | Configuration.substitutions.update({ 'recoverywindow': Configuration.get('recoverywindow'), 59 | 'parallel': Configuration.get('parallel'), 60 | 'backupdest': backupdest, 61 | 'archdir': archdir, 62 | 'catalogconnect': 
Configuration.get('catalog', 'rman'), 63 | 'configname': configsection, 64 | 'osuser': Configuration.get('osuser', 'generic'), 65 | 'ospassword': os.getenv('OSPASSWORD'), 66 | 'scriptpath': scriptpath, 67 | 'schedulebackup': Configuration.get('schedulebackup'), 68 | 'schedulearchlog': Configuration.get('schedulearchlog'), 69 | 'dbid': int(Configuration.get('dbid')), 70 | 'oraclehome': oraexec.oraclehome, 71 | 'tnspath': oraexec.tnspath, 72 | 'logfile': logfile, 73 | 'backupjobenabled': 'true' if Configuration.get('backupjobenabled').upper() == 'TRUE' else 'false', 74 | 'sectionsize': "section size %s" % Configuration.get('sectionsize', 'rman') if Configuration.get('sectionsize', 'rman') else '' 75 | }) 76 | 77 | # Read RMAN templates 78 | rmantemplateconfig = BackupTemplate('rmantemplate.cfg') 79 | 80 | # Initialize snapshot class 81 | snap = create_snapshot_class(configsection) 82 | 83 | # Execute RMAN with script as input 84 | def exec_rman(rmanscript): 85 | # Modify rman script with common headers 86 | finalscript = rmantemplateconfig.get('header') 87 | if registercatalog: 88 | finalscript+= "\n%s" % rmantemplateconfig.get('headercatalog') 89 | finalscript+= "\n%s" % rmanscript 90 | finalscript+= "\n%s" % rmantemplateconfig.get('footer') 91 | # print finalscript 92 | oraexec.rman(finalscript) 93 | 94 | # Execute sqlplus with a given script 95 | def exec_sqlplus(sqlplusscript, silent=False, header=True, primary=False): 96 | global configsection 97 | Configuration.substitutions['sqlplusconnection'] = '/@%s as sysdba' % (Configuration.get('primarytns') if primary else configsection) 98 | script = "" 99 | if header: 100 | script+= "%s\n" % rmantemplateconfig.get('sqlplusheader') 101 | script+= "%s\n" % sqlplusscript 102 | if header: 103 | script+= "%s\n" % rmantemplateconfig.get('sqlplusfooter') 104 | return oraexec.sqlplus(script, silent) 105 | 106 | ############## 107 | # User actions 108 | ############## 109 | 110 | def configure(): 111 | rmanscript = '' 112 | # 
def backup(level):
    # Translate the requested backup level into the matching RMAN template
    # entry; any unrecognised level falls back to a full backup.
    suffix_by_level = {
        '1c': 'cumulative',
        '1d': 'diff',
        'arch': 'archivelog',
        'imagecopy': 'imagecopy',
    }
    entry = 'backup' + suffix_by_level.get(level, 'full')
    # Execute the backup commands inside a run block
    rmanscript = "run {\n%s\n%s\n}\n" % (rmantemplateconfig.get(entry), rmantemplateconfig.get('backupfooter'))
    exec_rman(rmanscript)
def delete_expired_datafilecopy():
    # Ask the database (via sqlplus) for expired datafile copies; matching
    # output lines carry a 'DELETECOPY: ' prefix followed by the RMAN
    # command to run. Strip the prefix and execute the commands, if any.
    marker = 'DELETECOPY: '
    output = exec_sqlplus(rmantemplateconfig.get('deletedatafilecopy'), silent=True)
    commands = [line.strip()[len(marker):]
                for line in output.splitlines()
                if line.startswith(marker)]
    if commands:
        info("- Deleting expired datafile copies")
        exec_rman("".join("%s\n" % cmd for cmd in commands))
def setschedule():
    # Create the backup scheduler user (if needed) and (re)create the
    # database scheduler jobs for backups and archivelog backups.
    # Detect if we are running from a CDB and get the common user prefix
    output = exec_sqlplus(rmantemplateconfig.get('cdbdetect'), silent=True)
    commonprefix = ""
    for line in output.splitlines():
        # BUG FIX: the comparison used the archaic '<>' operator, which was
        # removed in Python 3 and is inconsistent with '!=' used elsewhere
        # in this codebase (e.g. dbinfo.py).
        if line.startswith('CDB-DETECT:') and line.strip() != 'CDB-DETECT: NO':
            commonprefix = line.strip()[12:]
    Configuration.substitutions.update({'scheduleuserprefix': commonprefix})
    #
    script = "%s\n" % rmantemplateconfig.get('createuser')
    script+= "%s\n" % rmantemplateconfig.get('dropschedule')
    script+= "%s\n" % rmantemplateconfig.get('createschedule')
    exec_sqlplus(script)
the backup 12 | osuser: oracle 13 | # Oracle home 14 | oraclehome: /u01/app/oracle/product/12.1.0.2/db 15 | # Maxlockwait in minutes 16 | maxlockwait: 30 17 | # Python module name and class name that implement the storage snapshot/cloning functions (extending class SnapHandler in module backupcommon) 18 | snappermodule: zfssa 19 | snapperclass: ZFSSA 20 | 21 | [zfssa] 22 | url: https://zfssa.example.com:215 23 | pool: disk-pool1 24 | project: db-backup1 25 | 26 | [rman] 27 | catalog: /@rman 28 | 29 | [autorestore] 30 | # The directory to be used for restore tests. Must be empty, everything under it is destroyed. 31 | # NB! This directory WILL BE CLEANED. 32 | autorestoredestination: /nfs/autorestore/dest 33 | # Clone name 34 | autorestoreclonename: autorestore 35 | # Place to mount the clone. This MUST be already described in /etc/fstab as mountable by user (with options fg,noauto,user). 36 | # For example: 37 | # zfssa.example.com:/export/demo-backup/autorestore /nfs/autorestore/mnt nfs rw,fg,soft,nointr,rsize=32768,wsize=32768,tcp,vers=3,timeo=600,noauto,user 0 0 38 | autorestoremountpoint: /nfs/autorestore/mnt 39 | # Restore will be done using the latest snapshot that is at least this many hours old 40 | autorestoresnapage: 0 41 | # Autorestore log files 42 | autorestorelogdir: /nfs/autorestore/log 43 | # Maximum difference between auto restore target date and customverifydate in hours 44 | autorestoremaxtoleranceminutes: 5 45 | # Autorestorecatalog connect string 46 | # First create user manually in the database: 47 | # create user autorestore identified by complexpassword; 48 | # grant create session,create table,create sequence,create procedure,create view,create job to autorestore; 49 | # alter user autorestore quota unlimited on users; 50 | # Then add it to password wallet and run ./autorestore.py configfilename --createcatalog 51 | autorestorecatalog: /@autorestore 52 | # Chance of doing a full database validation after restore, the larger the integer, the 
This guarantees that validation for each database is done after every x days consistently. 0 means disable. 56 | # Set either autorestorevalidatechance or autorestoremodulus to 0, they will not work concurrently, modulus is preferred.
under $backupdest/dbsectionname 5 | backupdest: /nfs/backup 6 | # Warning and critical levels for checking last snapshot age (in hours), warning is exit code 1 and critical is exit code 2 7 | warningsnapage: 26 8 | criticalsnapage: 32 9 | # Directory where tnsnames.ora and sqlnet.ora are located 10 | tnsadmin: tns.sample 11 | # OS user that executes the backup 12 | osuser: oracle 13 | # Oracle home 14 | oraclehome: /u01/app/oracle/product/12.1.0.2/db 15 | # Maxlockwait in minutes 16 | maxlockwait: 30 17 | # Python module name and class name that implement the storage snapshot/cloning functions (extending class SnapHandler in module backupcommon) 18 | snappermodule: softnas 19 | snapperclass: SoftNAS 20 | 21 | [softnas] 22 | serveraddress: 52.29.252.93 23 | pool: pool1 24 | 25 | [rman] 26 | catalog: /@rman 27 | 28 | [autorestore] 29 | # The directory to be used for restore tests. Must be empty, everything under it is destroyed. 30 | # NB! This directory WILL BE CLEANED. 31 | autorestoredestination: /nfs/autorestore/dest 32 | # Clone name 33 | autorestoreclonename: autorestore 34 | # Place to mount the clone. This MUST be already described in /etc/fstab as mountable by user (with options fg,noauto,user). 
This guarantees that validation for each database is done after every x days consistently. 0 means disable. 55 | # Set either autorestorevalidatechance or autorestoremodulus to 0, they will not work concurrently, modulus is preferred.
def size2str(size):
    # Render a byte count as a short human readable string (B/kB/MB/GB/TB),
    # rounding to the nearest whole unit.
    sint = int(size)
    for divisor, unit in ((1099511627776, 'TB'),
                          (1073741824, 'GB'),
                          (1048576, 'MB'),
                          (1024, 'kB')):
        if sint >= divisor:
            return "%d%s" % (round(sint / divisor), unit)
    return "%dB" % sint
class BackupTemplate(object):
    # Loads a template configuration file (section [template]) located next
    # to the scripts and resolves entries using the shared substitution
    # dictionary maintained in Configuration.substitutions.

    def __init__(self, filename):
        path = os.path.join(scriptpath(), filename)
        self._configpath = path
        if not os.path.isfile(path):
            raise Exception('templatefilenotfound', "Template file %s not found" % path)
        parser = SafeConfigParser()
        parser.read(path)
        self._config = parser

    def get(self, entry):
        # Fetch the raw template entry and expand its $placeholders; a
        # missing placeholder raises KeyError from Template.substitute.
        raw = self._config.get('template', entry)
        return Template(raw).substitute(Configuration.substitutions)
class BackupLock(object):
    # File-based mutex guarding a backup destination directory. A unique
    # temp file is created and then hard-linked to backup.lck: the hard
    # link either succeeds atomically (lock acquired) or fails (somebody
    # else holds the lock).

    def _createlock(self):
        # Single acquisition attempt; returns True when the lock was taken.
        if not os.path.exists(self._lockfile):
            try:
                os.link(self._tmplockfile, self._lockfile)
                return True
            except OSError:
                # BUG FIX: was a bare 'except:', which also swallowed
                # SystemExit/KeyboardInterrupt. os.link signals a lost
                # race via OSError; anything else should propagate.
                info("Getting lock %s failed!" % self._lockfile)
                return False
        else:
            info("Locked! File %s exists." % self._lockfile)
            return False

    def __init__(self, lockdir, maxlockwait=30):
        # lockdir: directory that holds backup.lck.
        # maxlockwait: minutes to keep retrying (5 s interval) before
        # giving up with exit code 2.
        self._lockfile = os.path.join(lockdir, 'backup.lck')
        tmpf,self._tmplockfile = mkstemp(suffix='.lck', dir=lockdir)
        # Record some information about the locker for troubleshooting
        os.write(tmpf, "%s\n%s\n%d" % (os.uname(), datetime.now().strftime('%Y-%m-%d %H:%M:%S'), os.getpid()) )
        os.close(tmpf)
        # Try getting a lock until maxlockwait expires
        lockstart = datetime.now()
        locksuccess = False
        while (datetime.now() - lockstart < timedelta(minutes=maxlockwait)):
            if self._createlock():
                locksuccess = True
                break
            else:
                sleep(5)
        if not locksuccess:
            error("Did not manage to get the lock in time.")
            sys.exit(2)

    def release(self):
        # Remove both the lock file and the private temp file; safe to call
        # even when the lock was never successfully acquired.
        if os.path.exists(self._lockfile):
            os.remove(self._lockfile)
        if os.path.exists(self._tmplockfile):
            os.remove(self._tmplockfile)
s["numclones"]) 253 | 254 | def clone2str(self, s): 255 | # Convert clone information to a nice string value 256 | return "%s [%s] [mount point: %s]" % (s["clonename"], s["origin"], s["mountpoint"]) 257 | 258 | @abstractmethod 259 | def clone(self, snapid, clonename): 260 | pass 261 | 262 | @abstractmethod 263 | def dropclone(self, cloneid): 264 | pass 265 | 266 | @abstractmethod 267 | def filesystem_info(self, filesystemname=None): 268 | # Must return dict with the following information about a (cloned) volume/filesystem 269 | # origin - parent volume name of the clone 270 | # clonename - name of the clone volume 271 | # mountpoint - storage system mount path for this volume 272 | pass 273 | 274 | @abstractmethod 275 | def listclones(self): 276 | # Array of dicts that lists all clones of the parent volume 277 | # origin - parent volume name of the clone 278 | # clonename - name of the clone volume 279 | # mountpoint - storage system mount path for this volume 280 | pass 281 | 282 | @abstractmethod 283 | def mountstring(self, filesystemname): 284 | # Outputs string on how to mount the volume/filesystem 285 | # For example: 286 | # 10.10.10.10:/clonename 287 | pass 288 | 289 | @abstractmethod 290 | def createvolume(self): 291 | # Creates new volume for storing backups 292 | pass 293 | 294 | def clean(self): 295 | max_age_days = int(Configuration.get('snapexpirationdays')) 296 | max_age_months = int(Configuration.get('snapexpirationmonths')) 297 | sorted_snaps = self.listsnapshots(sortbycreation=True) 298 | output = [] 299 | number_of_snaps = len(sorted_snaps) 300 | for idx, snapstruct in enumerate(sorted_snaps): 301 | s = self.getsnapinfo(snapstruct) 302 | d = s["creation"] 303 | age = datetime.utcnow() - d 304 | status = "valid" 305 | drop_allowed = False 306 | dropped = False 307 | # Check snap expiration 308 | if age > timedelta(days=max_age_days): 309 | if age > timedelta(days=max_age_months*31): 310 | # Drop is allowed if monthly expiration has also passed 311 
| drop_allowed = True 312 | else: 313 | if idx+1 < number_of_snaps: 314 | # The last snap of each month is retained 315 | previnfo = self.getsnapinfo(sorted_snaps[idx+1]) 316 | drop_allowed = str(s["creation"])[0:7] == str(previnfo["creation"])[0:7] 317 | if drop_allowed and s["numclones"] != 0: 318 | status = "has a clone" 319 | drop_allowed = False 320 | # Do the actual drop 321 | if drop_allowed: 322 | try: 323 | self.dropsnap(s["id"]) 324 | dropped = True 325 | status = "dropped" 326 | except: 327 | status = "DROP FAILED" 328 | yield {'snapid': s["id"], 'dropped': dropped, 'status': status, 'infostring': "%s %s" % (self.snap2str(s), status)} 329 | 330 | def autoclone(self): 331 | # Returns source snap id 332 | maxsnapage = timedelta(hours = int(Configuration.get('autorestoresnapage', 'autorestore')), minutes=0 ) 333 | # Find the snap for cloning 334 | sorted_snaps = self.listsnapshots(sortbycreation=True, sortreverse=True) 335 | sourcesnap = None 336 | for idx, snaprecord in enumerate(sorted_snaps): 337 | s = self.getsnapinfo(snaprecord) 338 | d = s["creation"] 339 | age = datetime.utcnow() - d 340 | if age >= maxsnapage: 341 | sourcesnap = s["id"] 342 | break 343 | if sourcesnap is None: 344 | raise Exception('snap','Suitable snapshot not found for cloning.') 345 | else: 346 | # Clone the snap 347 | debug("Snapshot id for autoclone: %s" % sourcesnap) 348 | self.clone(sourcesnap, Configuration.get('autorestoreclonename', 'autorestore')) 349 | return sourcesnap 350 | 351 | def dropautoclone(self): 352 | self.dropclone(Configuration.get('autorestoreclonename', 'autorestore')) 353 | 354 | # Finds the correct snapshot to clone based on restore target time 355 | # Targettime must be in UTC 356 | def search_recovery_snapid(self, targettime): 357 | sorted_snaps = self.listsnapshots(sortbycreation=True, sortreverse=False) 358 | sourcesnap = None 359 | for idx, snaprecord in enumerate(sorted_snaps): 360 | s = self.getsnapinfo(snaprecord) 361 | if 
s['creation'].replace(tzinfo=None) >= targettime.replace(tzinfo=None): 362 | sourcesnap = s['id'] 363 | break 364 | return sourcesnap 365 | 366 | # Class for outputting some UI elements, like prompts 367 | class UIElement(object): 368 | 369 | def __init__(self): 370 | pass 371 | 372 | def _is_dir_writable(self, path): 373 | try: 374 | f = TemporaryFile(dir = path) 375 | f.close() 376 | except OSError as e: 377 | if e.errno == errno.EACCES: 378 | return False 379 | e.filename = path 380 | raise 381 | return True 382 | 383 | def ask_directory(self, question, demand_empty=True, demand_writable=True): 384 | path = None 385 | while True: 386 | answer = raw_input("%s " % question) 387 | if answer is None or answer.strip() == "": 388 | print "Answer is required" 389 | continue 390 | path = answer.strip() 391 | if not os.path.exists(path) or not os.path.isdir(path): 392 | print "Specified path does not exist or is not directory" 393 | continue 394 | if demand_writable and not self._is_dir_writable(path): 395 | print "Specified path is not writable" 396 | continue 397 | if demand_empty and os.listdir(path): 398 | print "Specified path must be empty" 399 | continue 400 | break 401 | return path 402 | 403 | def ask_yn(self, question): 404 | answer = None 405 | while True: 406 | answer = raw_input("%s? 
(y/n) " % question) 407 | answer = answer.strip().upper() 408 | if answer not in ['Y','N']: 409 | print "Invalid input" 410 | continue 411 | break 412 | return answer 413 | 414 | def ask_timestamp(self, question): 415 | dt = None 416 | while True: 417 | answer = raw_input("%s: (yyyy-mm-dd hh24:mi:ss) " % question) 418 | answer = answer.strip() 419 | try: 420 | dt = datetime.strptime(answer, "%Y-%m-%d %H:%M:%S") 421 | except ValueError as e: 422 | print "Input does not match required format" 423 | continue 424 | break 425 | return dt 426 | 427 | def ask_string(self, question, maxlength=None, onlyalnum=False): 428 | answer = None 429 | while True: 430 | answer = raw_input("%s " % question) 431 | answer = answer.strip() 432 | if maxlength is not None and len(answer) > maxlength: 433 | print "Max %d characters allowed" % maxlength 434 | continue 435 | if onlyalnum and not answer.isalnum(): 436 | print "Only alphanumeric characters allowed" % maxlength 437 | continue 438 | break 439 | return answer 440 | 441 | def ask_size(self, question): 442 | answer = None 443 | while True: 444 | answer = raw_input("%s (suffix with unit M, G or T): " % question) 445 | answer = answer.strip().upper() 446 | if answer[-1:] not in ['M','G','T']: 447 | print "Suffix your input with unit M, G or T" 448 | continue 449 | if not answer[:-1].isdigit(): 450 | print "%s is not an integer" % answer[:-1] 451 | continue 452 | break 453 | return answer 454 | -------------------------------------------------------------------------------- /certs/README.md: -------------------------------------------------------------------------------- 1 | Here you place CA certificates for Netapp storage. 
#!/usr/bin/python2
"""dbinfo.py -- print the Grid Infrastructure (srvctl) configuration of a
database and emit the 'srvctl add service' commands needed to recreate its
services on another cluster.

Requires exactly one argument: the backup.cfg configuration section name,
which is also used as the srvctl database name."""

import os, sys
from subprocess import Popen, PIPE
from backupcommon import Configuration, scriptpath

def printhelp():
    # Spell out the expected argument (the original message ended abruptly).
    print("Usage: dbinfo.py <configuration section>")
    sys.exit(2)

def version_tuple(versionstring):
    """Parse a dotted Oracle version string into a tuple of ints.

    Enables numeric comparison: as plain strings '9.2' >= '11.1'
    lexicographically, which is wrong. Non-numeric trailing pieces
    (and the trailing newline from Popen output) are ignored."""
    parts = []
    for piece in versionstring.strip().split('.'):
        if not piece.isdigit():
            break
        parts.append(int(piece))
    return tuple(parts)

if len(sys.argv) != 2:
    printhelp()

# Read configuration
configsection = sys.argv[1]
Configuration.init(configsection)
gimanaged = Configuration.get('gimanaged').upper() == 'TRUE'

if not gimanaged:
    print("gimanaged option is not set to TRUE for this database")
    sys.exit(2)

# Set oracle home; ORACLE_SID must not leak into the srvctl calls below.
oraclehome = Configuration.get('oraclehome', 'generic')
if os.environ.get('ORACLE_SID'):
    del os.environ['ORACLE_SID']
os.environ['ORACLE_HOME'] = oraclehome
os.environ['NLS_DATE_FORMAT'] = 'yyyy-mm-dd hh24:mi:ss'

# Get software version (helper script greps the version out of the oracle binary)
p = Popen([os.path.join(scriptpath(), "get_oracle_version.sh"), oraclehome], stdout=PIPE, stderr=None, stdin=None)
oracleversion, dataerr = p.communicate()
# BUG FIX: compare versions numerically, not as strings.
if version_tuple(oracleversion) < (11, 1):
    print("Detected Oracle software version %s is too old" % oracleversion)
    sys.exit(1)

# srvctl config database
print("== Database configuration ==")
p = Popen([os.path.join(oraclehome, 'bin', 'srvctl'), 'config', 'database', '-d', configsection], stdout=PIPE, stderr=None, stdin=None)
datain, dataerr = p.communicate()
print(datain)

def printserviceinfo(si):
    """Print the 'srvctl add service' command recreating service dict *si*.

    Uses pre-12.1 single-letter options or 12.1+ long options depending on
    the detected software version. Disabled/empty services are skipped."""
    if len(si) > 0 and si['enabled']:
        if version_tuple(oracleversion) < (12, 1):
            s = "srvctl add service -d %s -s %s -j %s -B %s -r %s" % (configsection, si.get('name',''), si.get('clb',''), si.get('rlb',''), si.get('preferred',''))
            # BUG FIX: use .get -- 'available'/'edition' keys are only present
            # when srvctl printed those lines (the 12.1 branch already did this).
            if si.get('available', None):
                s += " -a %s" % si['available']
            print(s)
            if si.get('edition', None):
                print("srvctl add service -d %s -s %s -t %s" % (configsection, si.get('name',''), si.get('edition','')))
        else:
            s = "srvctl add service -database %s -service %s -preferred %s" % (configsection, si.get('name',''), si.get('preferred',''))
            if si.get('available', None):
                s += " -available %s" % si['available']
            if si.get('edition', None):
                s += " -edition %s" % si['edition']
            if si.get('pluggable', None):
                s += " -pdb %s" % si['pluggable']
            print(s)

# srvctl config service: parse the 'Key: value' lines into one dict per service
print("== Service configuration ==")
p = Popen([os.path.join(oraclehome, 'bin', 'srvctl'), 'config', 'service', '-d', configsection], stdout=PIPE, stderr=None, stdin=None)
datain, dataerr = p.communicate()
print(datain)
print("== Service configuration parsed ==")
currentservice = {}
for line in datain.splitlines():
    s = line.split(': ')
    if s[0] == 'Service name':
        # A new service starts: flush the previous one first.
        printserviceinfo(currentservice)
        currentservice = {'name': s[1], 'enabled': False}
    elif s[0] == 'Edition':
        currentservice['edition'] = s[1]
    elif s[0] == 'Preferred instances':
        currentservice['preferred'] = s[1]
    elif s[0] == 'Available instances':
        currentservice['available'] = s[1]
    elif s[0] == 'Service is enabled':
        currentservice['enabled'] = True
    elif s[0] == 'Connection Load Balancing Goal':
        currentservice['clb'] = s[1]
    elif s[0] == 'Runtime Load Balancing Goal':
        currentservice['rlb'] = s[1]
    elif s[0] == 'Pluggable database name':
        currentservice['pluggable'] = s[1]
printserviceinfo(currentservice)
#!/usr/bin/python2
"""exec_all.py -- run backup.py with the given action against every database
section in the configuration file, except the infrastructure sections and an
optional user-supplied exclude list."""

import os, sys
from subprocess import check_call
from backupcommon import scriptpath, Configuration

def printhelp():
    # First argument is the backup.py action, forwarded verbatim.
    print("Usage: exec_all.py <action> [comma separated exclude list]")
    sys.exit(2)

if len(sys.argv) not in (2, 3):
    printhelp()

# Directory where the executable script is located.
# BUG FIX: original assigned to a local named 'scriptpath', shadowing the
# imported function it had just called.
basedir = scriptpath()

# First argument is the action name passed through to backup.py
# (the original called it 'configsection', which was misleading).
action = sys.argv[1]
Configuration.init()

if len(sys.argv) == 3:
    excludelist = sys.argv[2].split(",")
else:
    excludelist = []
# Non-database sections of the config file are always excluded.
excludelist.extend(['generic', 'rman', 'zfssa', 'autorestore', 'netapp'])

# Loop through all sections and execute backup.py for each database
for dbname in Configuration.sections():
    if dbname not in excludelist:
        print("--- DATABASE: %s ---" % dbname)
        check_call([os.path.join(basedir, 'backup.py'), dbname, action])
import requests, json, operator, urllib, os, pprint
from backupcommon import SnapHandler, Configuration, scriptpath, size2str, info, error, debug
from ConfigParser import SafeConfigParser
from datetime import datetime, timedelta
from urlparse import urlparse

class HuaweiDoradoHttp(object):
    """Thin REST client for the Huawei Dorado 'deviceManager' API.

    Handles login (iBaseToken + session cookies), transparent re-login after
    the session ages past _logout_time, and error checking on every call."""
    _timeout = 300  # HTTP call timeout in seconds
    _verify = False  # TLS certificate verification disabled by default
    _logout_time = timedelta(minutes=15)
    server_address = None

    def __init__(self, baseurl, username, password):
        self._configuredurl = baseurl
        self._username = username
        self._password = password
        self._session = None
        # BUG FIX: _header and _lastlogin were *class* attributes that were
        # mutated per instance, leaking tokens/state between instances.
        self._header = {'Content-Type': 'application/json'}
        self._lastlogin = None
        if not self._verify:
            # Best effort: silence the InsecureRequestWarning on requests
            # versions that expose it; older versions just raise AttributeError.
            try:
                requestwarning = __import__('requests.packages.urllib3.exceptions', globals(), locals(), ['InsecureRequestWarning'])
                requestwarningclass = getattr(requestwarning, 'InsecureRequestWarning')
                requests.packages.urllib3.disable_warnings(requestwarningclass)
            except AttributeError:
                pass
        self._login()

    def _checkerror(self, r, url, method):
        """Raise if the HTTP status is not 200 or the JSON 'error' element
        reports a positive error code."""
        debug("Return code: %d" % r.status_code)
        if r.status_code != 200:
            raise Exception("HuaweiDoradoHttp", "API call to storage failed. Response code: %d" % r.status_code)
        j = r.json()
        if j['error'].get('code', 0) > 0:
            raise Exception("HuaweiDoradoHttp", "API call to storage failed. Error element: %s" % str(j['error']))

    def _login(self):
        # Authenticating to storage; a requests session stores session cookies.
        # The returned iBaseToken must accompany every subsequent call and the
        # device id becomes part of the base URL.
        if self._session is None:
            self._session = requests.Session()
        url = "%s/xxxxx/sessions" % self._configuredurl
        r = self._session.post(url,
            headers={'Content-Type': 'application/json'}, verify=self._verify, timeout=self._timeout,
            json={'username': self._username, 'password': self._password, 'scope': 0})
        self._checkerror(r, url, "post")
        self._header.update({
            'iBaseToken': r.json()['data']['iBaseToken']
        })
        self._deviceid = r.json()['data']['deviceid']
        self._baseurl = "%s/%s" % (self._configuredurl, urllib.quote_plus(self._deviceid))
        self._lastlogin = datetime.utcnow()

    def _checklogin(self):
        # Re-login if there is no session yet or the previous login timed out
        if self._session is None or datetime.utcnow() - self._lastlogin > self._logout_time:
            self._session = None
            self._login()

    def post(self, url, payload):
        """POST *payload* as JSON to <baseurl>/<url>; return (status, json)."""
        self._checklogin()
        debug("Sending POST to %s" % url)
        r = self._session.post("%s/%s" % (self._baseurl, url), headers=self._header, verify=self._verify, json=payload, timeout=self._timeout)
        self._checkerror(r, url, "post")
        return r.status_code, r.json()

    def get(self, url, return_json=True, payload={}):
        """GET <baseurl>/<url> with *payload* as query params; return the JSON
        body (or None when return_json is False). payload is read-only here,
        so the mutable default is safe."""
        self._checklogin()
        debug("Sending GET to %s" % url)
        r = self._session.get("%s/%s" % (self._baseurl, url), headers=self._header, verify=self._verify, timeout=self._timeout, params=payload)
        self._checkerror(r, url, "get")
        return r.json() if return_json else None

    def delete(self, url, payload={}):
        """DELETE <baseurl>/<url> with *payload* as query params; return status."""
        self._checklogin()
        debug("Sending DELETE to %s" % url)
        r = self._session.delete("%s/%s" % (self._baseurl, url), headers=self._header, verify=self._verify, timeout=self._timeout, params=payload)
        self._checkerror(r, url, "delete")
        return r.status_code

class HuaweiDorado(SnapHandler):
    """SnapHandler implementation for Huawei Dorado filesystems: snapshots,
    clones and NFS shares inside a configured vstore."""
    _exceptionbase = "huaweidorado"

    def __init__(self, configname):
        # BUG FIX: _vid/_fid were class attributes; cache them per instance.
        self._vid = None
        self._fid = None
        # Get credentials, in order of preference:
        if 'DBBACKUPSUITE_HUAWEI_USER' in os.environ and 'DBBACKUPSUITE_HUAWEI_PASSWORD' in os.environ:
            # 1. From environment variables
            zfsauth = (os.environ['DBBACKUPSUITE_HUAWEI_USER'], os.environ['DBBACKUPSUITE_HUAWEI_PASSWORD'])
        elif os.path.isfile(os.path.join(os.path.expanduser("~"), '.dbbackupsuite_storage.cfg')):
            # 2. From $HOME/.dbbackupsuite_storage.cfg
            zfscredconfig = SafeConfigParser()
            zfscredconfig.read(os.path.join(os.path.expanduser("~"), '.dbbackupsuite_storage.cfg'))
            zfsauth = (zfscredconfig.get('huaweicredentials','user'), zfscredconfig.get('huaweicredentials','password'))
        else:
            # 3. From huaweidoradocredentials.cfg next to the scripts
            credfilename = os.path.join(scriptpath(), 'huaweidoradocredentials.cfg')
            if not os.path.isfile(credfilename):
                raise Exception(self._exceptionbase, "Configuration file %s not found" % credfilename)
            zfscredconfig = SafeConfigParser()
            zfscredconfig.read(credfilename)
            zfsauth = (zfscredconfig.get('credentials','user'), zfscredconfig.get('credentials','password'))
        #
        baseurl = "%s/deviceManager/rest" % Configuration.get('url', 'huaweidorado')
        self._filesystem = configname
        self._vstore = Configuration.get('vstore', 'huaweidorado')
        #
        self._http = HuaweiDoradoHttp(baseurl, zfsauth[0], zfsauth[1])
        super(HuaweiDorado, self).__init__(configname)

    def _vstoreid(self):
        """Return (and cache) the vstore id matching the configured vstore name."""
        if self._vid is None:
            r = self._http.get("vstore")
            for v in r['data']:
                if v['NAME'].upper() == self._vstore.upper():
                    self._vid = v['ID']
        return self._vid

    def _fsid(self, fsname=None):
        """Return the filesystem id for *fsname*, or for the configured
        filesystem (cached) when fsname is None. Falls through to the cached
        id when no match is found, preserving the original behavior."""
        if self._fid is None or fsname is not None:
            n = fsname if fsname is not None else self._filesystem
            r = self._http.get(url="filesystem", payload={'vstoreid': self._vstoreid(), 'filter': "NAME:%s" % n})
            for v in r['data']:
                if v['NAME'].upper() == n.upper():
                    if fsname is None:
                        self._fid = v['ID']
                    else:
                        return v['ID']
        return self._fid

    def snap(self):
        """Create a timestamped snapshot of the configured filesystem and
        return its name."""
        snapname = "%s-%s" % (self._filesystem, datetime.now().strftime('%Y%m%dT%H%M%S'))
        r, j = self._http.post("fssnapshot", payload={'NAME': snapname, 'PARENTID': self._fsid(), 'PARENTTYPE': '40'})
        return snapname

    def dropsnap(self, snapid):
        """Delete the snapshot whose public name is *snapid* (no-op if absent)."""
        for snap in self.listsnapshots():
            if snap['id'] == snapid:
                self._http.delete("fssnapshot/%s" % snap['internal_id'])
                break

    def _getclones(self, snapname=None):
        """List clone filesystems of the configured filesystem, optionally
        restricted to clones of snapshot *snapname*."""
        clones = []
        # DIDN't WORK: 'filter': "ISCLONEFS:true" -- filter client-side instead
        r = self._http.get("filesystem", payload={'vstoreId': self._vstoreid()})
        for rx in r['data']:
            if rx['ISCLONEFS'] == 'true' and rx['PARENTFILESYSTEMNAME'].upper() == self._filesystem.upper() and (snapname is None or rx['PARENTSNAPSHOTNAME'].upper() == snapname.upper()):
                clones.append({'internal_id': rx['ID'], 'clonename': rx['NAME'], 'origin': rx['PARENTSNAPSHOTNAME'], 'mountpoint': self._get_share(rx['NAME'])['path']})
        return clones

    def getsnapinfo(self, snapstruct):
        """Return snapshot info dict for *snapstruct* (as produced by
        listsnapshots), adding the clone count."""
        clones = self._getclones(snapstruct['id'])
        return {
            'id': snapstruct['id'],
            'intrnal_id': snapstruct['internal_id'],  # legacy typo key, kept for compatibility
            'internal_id': snapstruct['internal_id'],
            'creation': snapstruct['creation'],
            'numclones': len(clones),
            'space_total': snapstruct['space_total'],  # Not supported yet by huawei
            'space_unique': snapstruct['space_unique']  # Not supported yet by huawei
        }

    def listsnapshots(self, sortbycreation=False, sortreverse=False):
        """List snapshots of the configured filesystem; optionally sorted by
        creation time."""
        r = self._http.get("fssnapshot", payload={'PARENTID': self._fsid()})
        snaps = []
        for snap in r['data']:
            snaps.append({
                'id': snap['NAME'],
                'internal_id': snap['ID'],
                'creation': datetime.utcfromtimestamp(int(snap['utcTimeStamp'])),
                'numclones': 0,  # Need to make an extra call
                'space_total': -1, 'space_unique': -1  # Not supported yet by Huawei
            })
        if not sortbycreation:
            return snaps
        return sorted(snaps, key=operator.itemgetter('creation'), reverse=sortreverse)

    def _get_share(self, filesystemname=None):
        """Return {'id', 'path'} of the first NFS share of the filesystem,
        or None when the filesystem has no share."""
        fid = self._fsid(filesystemname)
        if fid is None:
            raise Exception(self._exceptionbase, "Can't find filesystem")
        r = self._http.get("NFSHARE", payload={'vstoreId': self._vstoreid(), 'filter': "FSID:%s" % fid})
        for rx in r['data']:
            return {'id': rx['ID'], 'path': rx['SHAREPATH']}

    def filesystem_info(self, filesystemname=None):
        """Return info for *filesystemname* (or the configured filesystem)."""
        # BUG FIX: the original wrote
        #   "NAME:%s" % filesystemname if filesystemname is not None else self._filesystem
        # where % binds tighter than the conditional, so with filesystemname=None
        # the filter lost its "NAME:" prefix. Parenthesize the conditional.
        name = filesystemname if filesystemname is not None else self._filesystem
        r = self._http.get("filesystem", payload={'vstoreId': self._vstoreid(), 'filter': "NAME:%s" % name})
        rx = r['data']  # NOTE(review): treated as a dict here but as a list elsewhere -- verify against the API
        return {'internal_id': rx['ID'], 'clonename': rx['NAME'], 'origin': rx['PARENTSNAPSHOTNAME'], 'mountpoint': self._get_share(rx['NAME'])}

    def listclones(self):
        """List all clones of the configured filesystem."""
        return self._getclones()

    def mountstring(self, filesystemname):
        """Return the 'host:/path' NFS mount string for *filesystemname*."""
        return "%s:%s" % (Configuration.get('mounthost', 'huaweidorado'), self._get_share(filesystemname)['path'])

    def clone(self, snapid, clonename):
        """Clone snapshot *snapid* into filesystem *clonename*, create its NFS
        share and authorize the configured restore client."""
        snap_internal_id = None
        for snap in self.listsnapshots():
            if snap['id'] == snapid:
                snap_internal_id = snap['internal_id']
                break
        if snap_internal_id is None:
            raise Exception(self._exceptionbase, "Snapshot for cloning not found")
        # Create clone
        r, j = self._http.post("filesystem",
            payload={'NAME': clonename, 'ISCLONEFS': 'true', 'PARENTFILESYSTEMID': self._fsid(), 'PARENTSNAPSHOTID': snap_internal_id, 'vstoreId': self._vstoreid()})
        clone_fs_id = j['data']['ID']
        # Create NFS share
        r, j = self._http.post("NFSHARE",
            payload={'SHAREPATH': "/%s/" % clonename, 'FSID': clone_fs_id, 'vstoreId': self._vstoreid()})
        clone_share_id = j['data']['ID']
        # Add NFS clients (read-write, async, squash all users to anonymous)
        r, j = self._http.post("NFS_SHARE_AUTH_CLIENT",
            payload={'NAME': Configuration.get('restorenfsclient', 'huaweidorado'), 'PARENTID': clone_share_id, 'vstoreId': self._vstoreid(), 'ACCESSVAL': 1, 'SYNC': 0, 'ALLSQUASH': 1, 'ROOTSQUASH': 1})

    def dropclone(self, cloneid):
        """Drop clone filesystem named *cloneid*: delete its NFS shares, then
        the filesystem itself. Raises when the clone does not exist."""
        # Search for filesystem
        clone_fs_id = None
        r = self._http.get("filesystem", payload={'vstoreId': self._vstoreid()})
        for rx in r['data']:
            if rx['NAME'] == cloneid:
                clone_fs_id = rx['ID']
                break
        if clone_fs_id is None:
            raise Exception(self._exceptionbase, "Clone filesystem not found")
        # Search for shares and delete them
        r = self._http.get("NFSHARE", payload={'vstoreId': self._vstoreid(), 'filter': "FSID:%s" % clone_fs_id})
        for rx in r['data']:
            self._http.delete("NFSHARE/%s" % rx['ID'], payload={'vstoreId': self._vstoreid()})
        # Delete filesystem
        self._http.delete("filesystem/%s" % clone_fs_id, payload={'vstoreId': self._vstoreid()})

    def createvolume(self):
        """Create the configured filesystem (dedup+compression enabled), its
        NFS share, and authorize the configured restore client."""
        r, j = self._http.post("filesystem",
            payload={
                'NAME': self._filesystem
                , 'vstoreId': self._vstoreid()
                , 'CAPACITY': 1000000000
                , 'SECTORSIZE': 8192
                , 'ENABLECOMPRESSION': 'true'
                , 'unixPermissions': '770'
                , 'ISSHOWSNAPDIR': 'false'
                , 'PARENTID': '0'
                , 'AUTODELSNAPSHOTENABLE': 'false'
                , 'ENABLEDEDUP': 'true'
                , 'COMPRESSION': '1'
            })
        clone_fs_id = j['data']['ID']
        # Create NFS share
        r, j = self._http.post("NFSHARE",
            payload={'SHAREPATH': "/%s/" % self._filesystem, 'FSID': clone_fs_id, 'vstoreId': self._vstoreid()})
        clone_share_id = j['data']['ID']
        # Add NFS clients
        r, j = self._http.post("NFS_SHARE_AUTH_CLIENT",
            payload={'NAME': Configuration.get('restorenfsclient', 'huaweidorado'), 'PARENTID': clone_share_id, 'vstoreId': self._vstoreid(), 'ACCESSVAL': 1, 'SYNC': 0, 'ALLSQUASH': 1, 'ROOTSQUASH': 1})
/nfs/autorestore/log/20161108T224722-orcl.log 3 | 2016-11-08 22:47:22,404 DEBUG orcl ACTION: Cleaning destination directory /nfs/autorestore/dest 4 | 2016-11-08 22:47:22,405 DEBUG orcl Sending GET to pools/pool1/projects/demo-backup/filesystems/orcl/snapshots 5 | 2016-11-08 22:47:22,450 DEBUG orcl Return code: 200 6 | 2016-11-08 22:47:22,453 DEBUG orcl Snapshot id for autoclone: orcl-20161108T223946 7 | 2016-11-08 22:47:22,453 DEBUG orcl Sending PUT to pools/pool1/projects/demo-backup/filesystems/orcl/snapshots/orcl-20161108T223946/clone 8 | 2016-11-08 22:47:23,593 DEBUG orcl Return code: 201 9 | 2016-11-08 22:47:23,628 DEBUG orcl Next database validation in 2 days: 2016-11-10 10 | 2016-11-08 22:47:23,634 DEBUG orcl ACTION: Generated init file /nfs/autorestore/dest/init.ora 11 | *.compatible='12.1.0.2.0' 12 | *.control_files='/nfs/autorestore/dest/restore.cf' 13 | *.db_block_size=8192 14 | *.db_create_file_dest='/nfs/autorestore/dest' 15 | *.db_files=200 16 | *.db_name='orcl' 17 | *.filesystemio_options='SETALL' 18 | *.pga_aggregate_target=2G 19 | *.processes=300 20 | *.remote_login_passwordfile='EXCLUSIVE' 21 | *.sga_max_size=3G 22 | *.sga_target=3G 23 | *.undo_management='AUTO' 24 | *.undo_tablespace='UNDOTBS1' 25 | *.job_queue_processes=0 26 | *.diagnostic_dest='/nfs/autorestore/log' 27 | *.instance_number=1 28 | *.thread=1 29 | 2016-11-08 22:47:23,634 DEBUG orcl ACTION: startup nomount 30 | 2016-11-08 22:47:23,635 DEBUG orcl SQL*Plus execution starts 31 | SQL> conn / as sysdba 32 | Connected to an idle instance. 33 | SQL> whenever sqlerror exit failure 34 | SQL> startup nomount pfile='/nfs/autorestore/dest/init.ora' 35 | ORACLE instance started. 
36 | Total System Global Area 3221225472 bytes 37 | Fixed Size 2929552 bytes 38 | Variable Size 654314608 bytes 39 | Database Buffers 2550136832 bytes 40 | Redo Buffers 13844480 bytes 41 | SQL> spool off 42 | 2016-11-08 22:47:32,217 DEBUG orcl SQL*Plus execution successful 43 | 2016-11-08 22:47:32,284 DEBUG orcl ACTION: mount database and catalog files 44 | 2016-11-08 22:47:32,353 DEBUG orcl RMAN execution starts 45 | 46 | Recovery Manager: Release 12.1.0.2.0 - Production on Tue Nov 8 22:47:33 2016 47 | 48 | Copyright (c) 1982, 2014, Oracle and/or its affiliates. All rights reserved. 49 | 50 | RMAN> 51 | echo set on 52 | 53 | RMAN> connect target * 54 | connected to target database: ORCL (not mounted) 55 | 56 | RMAN> run { 57 | 2> allocate channel c1 device type disk; 58 | 3> restore controlfile from '/nfs/autorestore/mnt/after_backup_controlfile.cf'; 59 | 4> } 60 | using target database control file instead of recovery catalog 61 | allocated channel: c1 62 | channel c1: SID=22 device type=DISK 63 | 64 | Starting restore at 2016-11-08 22:47:37 65 | 66 | channel c1: restoring control file 67 | channel c1: restore complete, elapsed time: 00:00:01 68 | output file name=/nfs/autorestore/dest/restore.cf 69 | Finished restore at 2016-11-08 22:47:38 70 | released channel: c1 71 | 72 | 73 | RMAN> alter database mount; 74 | Statement processed 75 | 76 | 77 | RMAN> change archivelog all uncatalog; 78 | uncataloged archived log 79 | archived log file name=/u01/app/oracle/oradata/ORCL/archivelog/2016_11_08/o1_mf_1_90_d24gpx44_.arc RECID=132 STAMP=927409901 80 | uncataloged archived log 81 | archived log file name=/nfs/backup/orcl/archivelog/1_90_905892048.dbf RECID=133 STAMP=927409901 82 | uncataloged archived log 83 | archived log file name=/u01/app/oracle/oradata/ORCL/archivelog/2016_11_08/o1_mf_1_91_d24gtjx4_.arc RECID=134 STAMP=927410016 84 | uncataloged archived log 85 | archived log file name=/nfs/backup/orcl/archivelog/1_91_905892048.dbf RECID=135 STAMP=927410016 86 | 
uncataloged archived log 87 | archived log file name=/u01/app/oracle/oradata/ORCL/archivelog/2016_11_08/o1_mf_1_92_d24gv3fd_.arc RECID=136 STAMP=927410035 88 | uncataloged archived log 89 | archived log file name=/nfs/backup/orcl/archivelog/1_92_905892048.dbf RECID=137 STAMP=927410035 90 | uncataloged archived log 91 | archived log file name=/u01/app/oracle/oradata/ORCL/archivelog/2016_11_08/o1_mf_1_93_d24kcmtc_.arc RECID=138 STAMP=927412612 92 | uncataloged archived log 93 | archived log file name=/nfs/backup/orcl/archivelog/1_93_905892048.dbf RECID=139 STAMP=927412612 94 | Uncataloged 8 objects 95 | 96 | 97 | 98 | RMAN> change backup device type disk unavailable; 99 | changed backup piece unavailable 100 | backup piece handle=/nfs/backup/orcl/c-1433672784-20161020-01 RECID=139 STAMP=925734725 101 | changed backup piece unavailable 102 | backup piece handle=/nfs/backup/orcl/c-1433672784-20161108-00 RECID=144 STAMP=927409888 103 | changed backup piece unavailable 104 | backup piece handle=/nfs/backup/orcl/c-1433672784-20161108-01 RECID=147 STAMP=927409897 105 | changed backup piece unavailable 106 | backup piece handle=/nfs/backup/orcl/c-1433672784-20161108-02 RECID=152 STAMP=927410025 107 | changed backup piece unavailable 108 | backup piece handle=/nfs/backup/orcl/c-1433672784-20161108-03 RECID=155 STAMP=927410031 109 | changed backup piece unavailable 110 | backup piece handle=/nfs/backup/orcl/c-1433672784-20161108-04 RECID=160 STAMP=927412621 111 | changed backup piece unavailable 112 | backup piece handle=/nfs/backup/orcl/6orkecch_1_1 RECID=161 STAMP=927412625 113 | Changed 7 objects to UNAVAILABLE status 114 | 115 | 116 | 117 | RMAN> change copy of database uncatalog; 118 | uncataloged datafile copy 119 | datafile copy file name=/nfs/backup/orcl/data_D-ORCL_I-1433672784_TS-SYSTEM_FNO-1_3tr6dcnn RECID=99 STAMP=927412620 120 | uncataloged datafile copy 121 | datafile copy file name=/nfs/backup/orcl/data_D-ORCL_I-1433672784_TS-SYSAUX_FNO-2_3ur6dcnn RECID=100 
STAMP=927412620 122 | uncataloged datafile copy 123 | datafile copy file name=/nfs/backup/orcl/data_D-ORCL_I-1433672784_TS-UNDOTBS1_FNO-3_3vr6dcog RECID=101 STAMP=927412620 124 | uncataloged datafile copy 125 | datafile copy file name=/nfs/backup/orcl/data_D-ORCL_I-1433672784_TS-USERS_FNO-4_40r6dcok RECID=98 STAMP=927412620 126 | Uncataloged 4 objects 127 | 128 | 129 | 130 | RMAN> catalog start with '/nfs/autorestore/mnt/' noprompt; 131 | searching for all files that match the pattern /nfs/autorestore/mnt/ 132 | 133 | List of Files Unknown to the Database 134 | ===================================== 135 | File Name: /nfs/autorestore/mnt/c-1433672784-20161020-01 136 | File Name: /nfs/autorestore/mnt/snapcf_orcl.f 137 | File Name: /nfs/autorestore/mnt/c-1433672784-20161108-01 138 | File Name: /nfs/autorestore/mnt/c-1433672784-20161108-05 139 | File Name: /nfs/autorestore/mnt/autorestore.cfg 140 | File Name: /nfs/autorestore/mnt/backup.lck 141 | File Name: /nfs/autorestore/mnt/after_backup_controlfile.cf 142 | File Name: /nfs/autorestore/mnt/tmpkj89mU.lck 143 | File Name: /nfs/autorestore/mnt/c-1433672784-20161108-02 144 | File Name: /nfs/autorestore/mnt/data_D-ORCL_I-1433672784_TS-SYSAUX_FNO-2_3ur6dcnn 145 | File Name: /nfs/autorestore/mnt/c-1433672784-20161108-00 146 | File Name: /nfs/autorestore/mnt/6orkecch_1_1 147 | File Name: /nfs/autorestore/mnt/backup_logs/orcl_20161108T223650_imagecopywithsnap.log 148 | File Name: /nfs/autorestore/mnt/backup_logs/orcl_20161108T215108_imagecopywithsnap.log 149 | File Name: /nfs/autorestore/mnt/backup_logs/orcl_20161108T223945_imagecopywithsnap.log 150 | File Name: /nfs/autorestore/mnt/backup_logs/orcl_20161020T123144_imagecopywithsnap.log 151 | File Name: /nfs/autorestore/mnt/backup_logs/orcl_20161020T123026_config.log 152 | File Name: /nfs/autorestore/mnt/backup_logs/orcl_20161108T215335_imagecopywithsnap.log 153 | File Name: /nfs/autorestore/mnt/backup_logs/orcl_20161108T215324_config.log 154 | File Name: 
/nfs/autorestore/mnt/c-1433672784-20161108-03 155 | File Name: /nfs/autorestore/mnt/data_D-ORCL_I-1433672784_TS-SYSTEM_FNO-1_3tr6dcnn 156 | File Name: /nfs/autorestore/mnt/data_D-ORCL_I-1433672784_TS-UNDOTBS1_FNO-3_3vr6dcog 157 | File Name: /nfs/autorestore/mnt/data_D-ORCL_I-1433672784_TS-USERS_FNO-4_40r6dcok 158 | File Name: /nfs/autorestore/mnt/c-1433672784-20161108-04 159 | File Name: /nfs/autorestore/mnt/archivelog/1_90_905892048.dbf 160 | File Name: /nfs/autorestore/mnt/archivelog/1_95_905892048.dbf 161 | File Name: /nfs/autorestore/mnt/archivelog/1_92_905892048.dbf 162 | File Name: /nfs/autorestore/mnt/archivelog/1_93_905892048.dbf 163 | File Name: /nfs/autorestore/mnt/archivelog/1_94_905892048.dbf 164 | File Name: /nfs/autorestore/mnt/archivelog/1_91_905892048.dbf 165 | cataloging files... 166 | cataloging done 167 | 168 | List of Cataloged Files 169 | ======================= 170 | File Name: /nfs/autorestore/mnt/c-1433672784-20161020-01 171 | File Name: /nfs/autorestore/mnt/snapcf_orcl.f 172 | File Name: /nfs/autorestore/mnt/c-1433672784-20161108-01 173 | File Name: /nfs/autorestore/mnt/c-1433672784-20161108-05 174 | File Name: /nfs/autorestore/mnt/after_backup_controlfile.cf 175 | File Name: /nfs/autorestore/mnt/c-1433672784-20161108-02 176 | File Name: /nfs/autorestore/mnt/data_D-ORCL_I-1433672784_TS-SYSAUX_FNO-2_3ur6dcnn 177 | File Name: /nfs/autorestore/mnt/c-1433672784-20161108-00 178 | File Name: /nfs/autorestore/mnt/6orkecch_1_1 179 | File Name: /nfs/autorestore/mnt/c-1433672784-20161108-03 180 | File Name: /nfs/autorestore/mnt/data_D-ORCL_I-1433672784_TS-SYSTEM_FNO-1_3tr6dcnn 181 | File Name: /nfs/autorestore/mnt/data_D-ORCL_I-1433672784_TS-UNDOTBS1_FNO-3_3vr6dcog 182 | File Name: /nfs/autorestore/mnt/data_D-ORCL_I-1433672784_TS-USERS_FNO-4_40r6dcok 183 | File Name: /nfs/autorestore/mnt/c-1433672784-20161108-04 184 | File Name: /nfs/autorestore/mnt/archivelog/1_90_905892048.dbf 185 | File Name: /nfs/autorestore/mnt/archivelog/1_95_905892048.dbf 186 
| File Name: /nfs/autorestore/mnt/archivelog/1_92_905892048.dbf 187 | File Name: /nfs/autorestore/mnt/archivelog/1_93_905892048.dbf 188 | File Name: /nfs/autorestore/mnt/archivelog/1_94_905892048.dbf 189 | File Name: /nfs/autorestore/mnt/archivelog/1_91_905892048.dbf 190 | 191 | List of Files Which Were Not Cataloged 192 | ======================================= 193 | File Name: /nfs/autorestore/mnt/autorestore.cfg 194 | RMAN-07517: Reason: The file header is corrupted 195 | File Name: /nfs/autorestore/mnt/backup.lck 196 | RMAN-07517: Reason: The file header is corrupted 197 | File Name: /nfs/autorestore/mnt/tmpkj89mU.lck 198 | RMAN-07517: Reason: The file header is corrupted 199 | File Name: /nfs/autorestore/mnt/backup_logs/orcl_20161108T223650_imagecopywithsnap.log 200 | RMAN-07517: Reason: The file header is corrupted 201 | File Name: /nfs/autorestore/mnt/backup_logs/orcl_20161108T215108_imagecopywithsnap.log 202 | RMAN-07517: Reason: The file header is corrupted 203 | File Name: /nfs/autorestore/mnt/backup_logs/orcl_20161108T223945_imagecopywithsnap.log 204 | RMAN-07517: Reason: The file header is corrupted 205 | File Name: /nfs/autorestore/mnt/backup_logs/orcl_20161020T123144_imagecopywithsnap.log 206 | RMAN-07517: Reason: The file header is corrupted 207 | File Name: /nfs/autorestore/mnt/backup_logs/orcl_20161020T123026_config.log 208 | RMAN-07517: Reason: The file header is corrupted 209 | File Name: /nfs/autorestore/mnt/backup_logs/orcl_20161108T215335_imagecopywithsnap.log 210 | RMAN-07517: Reason: The file header is corrupted 211 | File Name: /nfs/autorestore/mnt/backup_logs/orcl_20161108T215324_config.log 212 | RMAN-07517: Reason: The file header is corrupted 213 | 214 | 215 | RMAN> sql "alter database flashback off"; 216 | sql statement: alter database flashback off 217 | 218 | 219 | RMAN> exit 220 | 221 | 222 | Recovery Manager complete. 
223 | 2016-11-08 22:47:45,993 DEBUG orcl RMAN execution time 0:00:13.609732 224 | 2016-11-08 22:47:45,997 DEBUG orcl RMAN execution successful 225 | 2016-11-08 22:47:45,998 DEBUG orcl ACTION: disable block change tracking 226 | 2016-11-08 22:47:46,001 DEBUG orcl SQL*Plus execution starts 227 | SQL> conn / as sysdba 228 | Connected. 229 | SQL> whenever sqlerror exit failure 230 | SQL> alter database rename file '/u01/app/oracle/oradata/ORCL/changetracking/o1_mf_cn8pwrjs_.chg' to '/nfs/autorestore/dest/bct.bct'; 231 | 232 | Database altered. 233 | 234 | Elapsed: 00:00:00.07 235 | SQL> alter database disable block change tracking; 236 | 237 | Database altered. 238 | 239 | Elapsed: 00:00:00.02 240 | SQL> select * from V$BLOCK_CHANGE_TRACKING; 241 | DISABLED 242 | 243 | 244 | 245 | 246 | Elapsed: 00:00:00.01 247 | SQL> spool off 248 | 2016-11-08 22:47:46,268 DEBUG orcl SQL*Plus execution successful 249 | 2016-11-08 22:47:46,270 DEBUG orcl ACTION: create missing datafiles 250 | 2016-11-08 22:47:46,273 DEBUG orcl SQL*Plus execution starts 251 | RENAMEDF-switch database to copy; 252 | 253 | PL/SQL procedure successfully completed. 254 | 255 | Elapsed: 00:00:00.07 256 | 257 | PL/SQL procedure successfully completed. 258 | 259 | Elapsed: 00:00:00.02 260 | 2016-11-08 22:47:46,412 DEBUG orcl SQL*Plus execution successful 261 | 2016-11-08 22:47:46,417 DEBUG orcl ACTION: switch and recover 262 | 2016-11-08 22:47:46,419 DEBUG orcl RMAN execution starts 263 | 264 | Recovery Manager: Release 12.1.0.2.0 - Production on Tue Nov 8 22:47:46 2016 265 | 266 | Copyright (c) 1982, 2014, Oracle and/or its affiliates. All rights reserved. 
267 | 268 | RMAN> 269 | echo set on 270 | 271 | RMAN> connect target * 272 | connected to target database: ORCL (DBID=1433672784, not open) 273 | 274 | RMAN> switch database to copy; 275 | using target database control file instead of recovery catalog 276 | datafile 1 switched to datafile copy "/nfs/autorestore/mnt/data_D-ORCL_I-1433672784_TS-SYSTEM_FNO-1_3tr6dcnn" 277 | datafile 2 switched to datafile copy "/nfs/autorestore/mnt/data_D-ORCL_I-1433672784_TS-SYSAUX_FNO-2_3ur6dcnn" 278 | datafile 3 switched to datafile copy "/nfs/autorestore/mnt/data_D-ORCL_I-1433672784_TS-UNDOTBS1_FNO-3_3vr6dcog" 279 | datafile 4 switched to datafile copy "/nfs/autorestore/mnt/data_D-ORCL_I-1433672784_TS-USERS_FNO-4_40r6dcok" 280 | 281 | 282 | RMAN> 283 | 284 | RMAN> run { 285 | 2> allocate channel c1 device type disk; 286 | 3> set newname for database to new; 287 | 4> set until time "to_date('2016-11-08 22:39:45', 'yyyy-mm-dd hh24:mi:ss')"; 288 | 5> recover database; 289 | 6> } 290 | allocated channel: c1 291 | channel c1: SID=22 device type=DISK 292 | 293 | executing command: SET NEWNAME 294 | 295 | executing command: SET until clause 296 | 297 | Starting recover at 2016-11-08 22:47:47 298 | 299 | starting media recovery 300 | 301 | archived log for thread 1 with sequence 94 is already on disk as file /nfs/autorestore/mnt/archivelog/1_94_905892048.dbf 302 | archived log for thread 1 with sequence 95 is already on disk as file /nfs/autorestore/mnt/archivelog/1_95_905892048.dbf 303 | archived log file name=/nfs/autorestore/mnt/archivelog/1_94_905892048.dbf thread=1 sequence=94 304 | archived log file name=/nfs/autorestore/mnt/archivelog/1_95_905892048.dbf thread=1 sequence=95 305 | media recovery complete, elapsed time: 00:00:00 306 | Finished recover at 2016-11-08 22:47:48 307 | released channel: c1 308 | 309 | 310 | RMAN> exit 311 | 312 | 313 | Recovery Manager complete. 
314 | 2016-11-08 22:47:48,960 DEBUG orcl RMAN execution time 0:00:02.531046 315 | 2016-11-08 22:47:48,961 DEBUG orcl RMAN execution successful 316 | 2016-11-08 22:47:48,961 DEBUG orcl ACTION: opening database to verify the result 317 | 2016-11-08 22:47:48,964 DEBUG orcl SQL*Plus execution starts 318 | alter database open read only 319 | 320 | Database altered. 321 | 322 | Elapsed: 00:00:01.29 323 | Set time formatting 324 | 325 | Session altered. 326 | 327 | Elapsed: 00:00:00.00 328 | 329 | Session altered. 330 | 331 | Elapsed: 00:00:00.00 332 | Query database timestamps 333 | CURRENT DATABASE SCN: 804125 334 | LAST ROW IN SCN TO TIME MAP: 2016-11-08 21:37:23 335 | MIN/MAX TIME FROM ARCHLOGS: 2016-11-08 22:37:12-2016-11-08 22:39:46 336 | CUSTOM VERIFICATION TIME: 2016-11-08 22:39:35 337 | 338 | Elapsed: 00:00:00.30 339 | Datafiles where online_status not in ('SYSTEM','ONLINE') or status <> 'AVAILABLE' 340 | 341 | no rows selected 342 | 343 | Elapsed: 00:00:00.10 344 | 2016-11-08 22:47:50,758 DEBUG orcl SQL*Plus execution successful 345 | 2016-11-08 22:47:50,900 DEBUG orcl Expected time: 2016-11-08 22:39:45 346 | 2016-11-08 22:47:50,901 DEBUG orcl Verified time: 2016-11-08 22:39:35 347 | 2016-11-08 22:47:50,901 DEBUG orcl VERIFY: Time difference 0:00:10 348 | 2016-11-08 22:47:50,901 DEBUG orcl ACTION: shutdown 349 | 2016-11-08 22:47:50,910 DEBUG orcl SQL*Plus execution starts 350 | SQL> conn / as sysdba 351 | Connected. 352 | SQL> shutdown immediate 353 | Database closed. 354 | Database dismounted. 355 | ORACLE instance shut down. 
356 | SQL> spool off 357 | 2016-11-08 22:48:07,954 DEBUG orcl SQL*Plus execution successful 358 | 2016-11-08 22:48:08,013 DEBUG orcl Sending GET to pools/pool1/projects/demo-backup/filesystems/autorestore 359 | 2016-11-08 22:48:08,169 DEBUG orcl Return code: 200 360 | 2016-11-08 22:48:08,170 DEBUG orcl Sending DELETE to pools/pool1/projects/demo-backup/filesystems/autorestore 361 | 2016-11-08 22:48:08,999 DEBUG orcl Return code: 204 362 | 2016-11-08 22:48:09,000 DEBUG orcl Logging the result to catalog. 363 | 2016-11-08 22:48:09,001 DEBUG orcl SQLLDR execution starts 364 | 2016-11-08 22:48:09,082 ERROR orcl SQLLDR exited with code 1 365 | 2016-11-08 22:48:09,084 DEBUG orcl SQL*Plus execution starts 366 | ERROR: 367 | ORA-12545: Connect failed because target host or object does not exist 368 | 369 | 370 | 2016-11-08 22:48:09,116 ERROR orcl SQL*Plus exited with code 1 371 | 2016-11-08 22:48:09,116 ERROR orcl Logging the result to catalog failed. 372 | Traceback (most recent call last): 373 | File "./autorestore.py", line 265, in runrestore 374 | exec_sqlplus(restoretemplate.get('insertlog'), False, returnoutput=True) 375 | File "./autorestore.py", line 105, in exec_sqlplus 376 | return OracleExec.sqlplus(finalscript, silent=returnoutput) 377 | File "/home/oracle/oracle-imagecopy-backup/backupcommon.py", line 269, in sqlplus 378 | raise Exception('sqlplus', "sqlplus exited with code %d" % p.returncode) 379 | Exception: ('sqlplus', 'sqlplus exited with code 1') 380 | 2016-11-08 22:48:09,119 INFO orcl Restore successful, elapsed time: 0:00:46.689587 381 | -------------------------------------------------------------------------------- /logs.sample/orcl_20161108T215324_config.log: -------------------------------------------------------------------------------- 1 | 2016-11-08 21:53:24,140 DEBUG orcl Oracle home: /u01/app/oracle/product/12.1.0.2/db 2 | 2016-11-08 21:53:24,193 INFO orcl Configuration file: /home/oracle/oracle-imagecopy-backup/backup.cfg 3 | 2016-11-08 
21:53:24,202 INFO orcl Running RMAN configuration 4 | 2016-11-08 21:53:24,203 DEBUG orcl RMAN execution starts 5 | 6 | Recovery Manager: Release 12.1.0.2.0 - Production on Tue Nov 8 21:53:24 2016 7 | 8 | Copyright (c) 1982, 2014, Oracle and/or its affiliates. All rights reserved. 9 | 10 | RMAN> 11 | echo set on 12 | 13 | RMAN> CONNECT TARGET * 14 | connected to target database: ORCL (DBID=1433672784) 15 | 16 | RMAN> 17 | 18 | RMAN> CONFIGURE ARCHIVELOG DELETION POLICY TO NONE; 19 | using target database control file instead of recovery catalog 20 | old RMAN configuration parameters: 21 | CONFIGURE ARCHIVELOG DELETION POLICY TO NONE; 22 | new RMAN configuration parameters: 23 | CONFIGURE ARCHIVELOG DELETION POLICY TO NONE; 24 | new RMAN configuration parameters are successfully stored 25 | 26 | 27 | RMAN> CONFIGURE RETENTION POLICY TO RECOVERY WINDOW OF 2 DAYS; 28 | old RMAN configuration parameters: 29 | CONFIGURE RETENTION POLICY TO RECOVERY WINDOW OF 2 DAYS; 30 | new RMAN configuration parameters: 31 | CONFIGURE RETENTION POLICY TO RECOVERY WINDOW OF 2 DAYS; 32 | new RMAN configuration parameters are successfully stored 33 | 34 | 35 | RMAN> CONFIGURE BACKUP OPTIMIZATION ON; 36 | old RMAN configuration parameters: 37 | CONFIGURE BACKUP OPTIMIZATION ON; 38 | new RMAN configuration parameters: 39 | CONFIGURE BACKUP OPTIMIZATION ON; 40 | new RMAN configuration parameters are successfully stored 41 | 42 | 43 | RMAN> CONFIGURE DEFAULT DEVICE TYPE TO DISK; 44 | old RMAN configuration parameters: 45 | CONFIGURE DEFAULT DEVICE TYPE TO DISK; 46 | new RMAN configuration parameters: 47 | CONFIGURE DEFAULT DEVICE TYPE TO DISK; 48 | new RMAN configuration parameters are successfully stored 49 | 50 | 51 | RMAN> CONFIGURE CONTROLFILE AUTOBACKUP ON; 52 | old RMAN configuration parameters: 53 | CONFIGURE CONTROLFILE AUTOBACKUP ON; 54 | new RMAN configuration parameters: 55 | CONFIGURE CONTROLFILE AUTOBACKUP ON; 56 | new RMAN configuration parameters are successfully stored 57 | 58 
| 59 | RMAN> CONFIGURE DEVICE TYPE DISK PARALLELISM 4 BACKUP TYPE TO BACKUPSET; 60 | old RMAN configuration parameters: 61 | CONFIGURE DEVICE TYPE DISK PARALLELISM 4 BACKUP TYPE TO BACKUPSET; 62 | new RMAN configuration parameters: 63 | CONFIGURE DEVICE TYPE DISK PARALLELISM 4 BACKUP TYPE TO BACKUPSET; 64 | new RMAN configuration parameters are successfully stored 65 | 66 | 67 | RMAN> CONFIGURE CHANNEL DEVICE TYPE DISK FORMAT '/nfs/backup/orcl/%U'; 68 | old RMAN configuration parameters: 69 | CONFIGURE CHANNEL DEVICE TYPE DISK FORMAT '/nfs/backup/orcl/%U'; 70 | new RMAN configuration parameters: 71 | CONFIGURE CHANNEL DEVICE TYPE DISK FORMAT '/nfs/backup/orcl/%U'; 72 | new RMAN configuration parameters are successfully stored 73 | 74 | 75 | RMAN> CONFIGURE CONTROLFILE AUTOBACKUP FORMAT FOR DEVICE TYPE DISK TO '/nfs/backup/orcl/%F'; 76 | old RMAN configuration parameters: 77 | CONFIGURE CONTROLFILE AUTOBACKUP FORMAT FOR DEVICE TYPE DISK TO '/nfs/backup/orcl/%F'; 78 | new RMAN configuration parameters: 79 | CONFIGURE CONTROLFILE AUTOBACKUP FORMAT FOR DEVICE TYPE DISK TO '/nfs/backup/orcl/%F'; 80 | new RMAN configuration parameters are successfully stored 81 | 82 | 83 | RMAN> CONFIGURE SNAPSHOT CONTROLFILE NAME TO '/nfs/backup/orcl/snapcf_orcl.f'; 84 | old RMAN configuration parameters: 85 | CONFIGURE SNAPSHOT CONTROLFILE NAME TO '/nfs/backup/orcl/snapcf_orcl.f'; 86 | new RMAN configuration parameters: 87 | CONFIGURE SNAPSHOT CONTROLFILE NAME TO '/nfs/backup/orcl/snapcf_orcl.f'; 88 | new RMAN configuration parameters are successfully stored 89 | 90 | 91 | RMAN> EXIT 92 | 93 | 94 | Recovery Manager complete. 
95 | 2016-11-08 21:53:27,304 DEBUG orcl RMAN execution time 0:00:03.096035 96 | 2016-11-08 21:53:27,305 DEBUG orcl RMAN execution successful 97 | 2016-11-08 21:53:27,305 INFO orcl Running additional configuration from SQL*Plus 98 | 2016-11-08 21:53:27,306 DEBUG orcl SQL*Plus execution starts 99 | SQL> select instance_name, host_name, version, status, database_status from v$instance; 100 | 101 | INSTANCE_NAME HOST_NAME VERSION STATUS DATABASE_STATUS 102 | ---------------- ------------------------ ---------- ------------ ----------------- 103 | orcl backup 12.1.0.2.0 OPEN ACTIVE 104 | 105 | 1 row selected. 106 | 107 | Elapsed: 00:00:00.01 108 | SQL> set pages 0 109 | SQL> select user from dual; 110 | SYS 111 | 112 | 1 row selected. 113 | 114 | Elapsed: 00:00:00.01 115 | SQL> -- Block change tracking 116 | SQL> declare 117 | 2 i number; 118 | 3 j number; 119 | 4 begin 120 | 5 select count(*) into i from V$BLOCK_CHANGE_TRACKING where status='DISABLED'; 121 | 6 select count(*) into j from v$version where banner like '%Enterprise Edition%'; 122 | 7 if (i = 1) and (j > 0) then 123 | 8 execute immediate 'alter database enable block change tracking'; 124 | 9 else 125 | 10 dbms_output.put_line('BCT already enabled or database is not EE'); 126 | 11 end if; 127 | 12 end; 128 | 13 / 129 | BCT already enabled or database is not EE 130 | 131 | PL/SQL procedure successfully completed. 
132 | 133 | Elapsed: 00:00:00.00 134 | SQL> -- Archivelog destination 135 | SQL> declare 136 | 2 cnt number; 137 | 3 s varchar2(60); 138 | 4 l varchar2(100):= '/nfs/backup/orcl/archivelog/'; 139 | 5 function min_param return varchar2 is 140 | 6 v varchar2(50); 141 | 7 begin 142 | 8 select min(name) into v from v$parameter where name like 'log\_archive\_dest\__' escape '\' and value is null; 143 | 9 return v; 144 | 10 end; 145 | 11 begin 146 | 12 select count(*) into cnt from v$parameter where name like 'log\_archive\_dest\__' escape '\' and upper(value) like 'LOCATION%=%USE_DB_RECOVERY_FILE_DEST%'; 147 | 13 if cnt=0 then 148 | 14 s:= min_param; 149 | 15 dbms_output.put_line('Setting '||s||' to USE_DB_RECOVERY_FILE_DEST'); 150 | 16 execute immediate 'alter system set '||s||q'[='LOCATION=USE_DB_RECOVERY_FILE_DEST MANDATORY' scope=both]'; 151 | 17 execute immediate 'alter system set '||replace(s, 'log_archive_dest_', 'log_archive_dest_state_')||'=enable scope=both'; 152 | 18 end if; 153 | 19 select count(*) into cnt from v$parameter where name like 'log\_archive\_dest\__' escape '\' and value like '%'||l||'%'; 154 | 20 if cnt=0 then 155 | 21 s:= min_param; 156 | 22 dbms_output.put_line('Setting '||s||' to '||l); 157 | 23 l:= q'['LOCATION=]'||l||q'[ VALID_FOR=(ONLINE_LOGFILE,PRIMARY_ROLE)']'; 158 | 24 execute immediate 'alter system set '||s||'='||l||' scope=both'; 159 | 25 execute immediate 'alter system set '||replace(s, 'log_archive_dest_', 'log_archive_dest_state_')||'=enable scope=both'; 160 | 26 end if; 161 | 27 end; 162 | 28 / 163 | 164 | PL/SQL procedure successfully completed. 
165 | 166 | Elapsed: 00:00:00.02 167 | SQL> spool off 168 | 2016-11-08 21:53:27,561 DEBUG orcl SQL*Plus execution successful 169 | -------------------------------------------------------------------------------- /logs.sample/orcl_20161108T215335_imagecopywithsnap.log: -------------------------------------------------------------------------------- 1 | 2016-11-08 21:53:35,592 DEBUG orcl Oracle home: /u01/app/oracle/product/12.1.0.2/db 2 | 2016-11-08 21:53:35,642 INFO orcl Configuration file: /home/oracle/oracle-imagecopy-backup/backup.cfg 3 | 2016-11-08 21:53:35,650 INFO orcl Check if there are missing archivelogs 4 | 2016-11-08 21:53:35,651 DEBUG orcl SQL*Plus execution starts 5 | 6 | INSTANCE_NAME HOST_NAME VERSION STATUS DATABASE_STATUS 7 | ---------------- ------------------------ ---------- ------------ ----------------- 8 | orcl backup 12.1.0.2.0 OPEN ACTIVE 9 | 10 | 1 row selected. 11 | 12 | Elapsed: 00:00:00.00 13 | SYS 14 | 15 | 1 row selected. 16 | 17 | Elapsed: 00:00:00.00 18 | 19 | no rows selected 20 | 21 | Elapsed: 00:00:00.03 22 | 2016-11-08 21:53:35,810 DEBUG orcl SQL*Plus execution successful 23 | 2016-11-08 21:53:35,811 INFO orcl Switch current log 24 | 2016-11-08 21:53:35,812 DEBUG orcl SQL*Plus execution starts 25 | 26 | INSTANCE_NAME HOST_NAME VERSION STATUS DATABASE_STATUS 27 | ---------------- ------------------------ ---------- ------------ ----------------- 28 | orcl backup 12.1.0.2.0 OPEN ACTIVE 29 | 30 | 1 row selected. 31 | 32 | Elapsed: 00:00:00.00 33 | SYS 34 | 35 | 1 row selected. 36 | 37 | Elapsed: 00:00:00.00 38 | CURRENT DATABASE TIME: 2016-11-08 21:53:35 39 | CURRENT DATABASE SCN: 799175 40 | BCT FILE: /u01/app/oracle/oradata/ORCL/changetracking/o1_mf_cn8pwrjs_.chg 41 | 42 | 3 rows selected. 43 | 44 | Elapsed: 00:00:00.01 45 | 46 | PL/SQL procedure successfully completed. 47 | 48 | Elapsed: 00:00:01.00 49 | 50 | System altered. 
51 | 52 | Elapsed: 00:00:00.06 53 | 2016-11-08 21:53:36,952 DEBUG orcl SQL*Plus execution successful 54 | 2016-11-08 21:53:36,956 INFO orcl Snap the current backup area 55 | 2016-11-08 21:53:36,957 DEBUG orcl Sending POST to pools/pool1/projects/demo-backup/filesystems/orcl/snapshots 56 | 2016-11-08 21:53:37,019 DEBUG orcl Return code: 201 57 | 2016-11-08 21:53:37,020 DEBUG orcl Created snapshot: orcl-20161108T215336 58 | 2016-11-08 21:53:37,020 INFO orcl Checking for expired datafile copies 59 | 2016-11-08 21:53:37,021 DEBUG orcl SQL*Plus execution starts 60 | 61 | INSTANCE_NAME HOST_NAME VERSION STATUS DATABASE_STATUS 62 | ---------------- ------------------------ ---------- ------------ ----------------- 63 | orcl backup 12.1.0.2.0 OPEN ACTIVE 64 | 65 | 1 row selected. 66 | 67 | Elapsed: 00:00:00.00 68 | SYS 69 | 70 | 1 row selected. 71 | 72 | Elapsed: 00:00:00.00 73 | 74 | no rows selected 75 | 76 | Elapsed: 00:00:00.02 77 | 2016-11-08 21:53:37,112 DEBUG orcl SQL*Plus execution successful 78 | 2016-11-08 21:53:37,113 INFO orcl Refresh imagecopy 79 | 2016-11-08 21:53:37,114 DEBUG orcl RMAN execution starts 80 | 81 | Recovery Manager: Release 12.1.0.2.0 - Production on Tue Nov 8 21:53:37 2016 82 | 83 | Copyright (c) 1982, 2014, Oracle and/or its affiliates. All rights reserved. 
84 | 85 | RMAN> 86 | echo set on 87 | 88 | RMAN> CONNECT TARGET * 89 | connected to target database: ORCL (DBID=1433672784) 90 | 91 | RMAN> run { 92 | 2> backup incremental level 1 for recover of copy with tag 'image_copy_backup' database; 93 | 3> recover copy of database with tag 'image_copy_backup'; 94 | 4> delete noprompt backupset tag 'image_copy_backup'; 95 | 5> delete noprompt force archivelog until time 'sysdate-2'; 96 | 6> backup spfile tag 'image_copy_backup'; 97 | 7> backup current controlfile format '/nfs/backup/orcl/after_backup_controlfile.cf' reuse tag 'image_copy_backup'; 98 | 8> delete noprompt obsolete recovery window of 2 days; 99 | 9> show all; 100 | 10> } 101 | Starting backup at 2016-11-08 21:53:37 102 | using target database control file instead of recovery catalog 103 | allocated channel: ORA_DISK_1 104 | channel ORA_DISK_1: SID=46 device type=DISK 105 | allocated channel: ORA_DISK_2 106 | channel ORA_DISK_2: SID=51 device type=DISK 107 | allocated channel: ORA_DISK_3 108 | channel ORA_DISK_3: SID=48 device type=DISK 109 | allocated channel: ORA_DISK_4 110 | channel ORA_DISK_4: SID=60 device type=DISK 111 | channel ORA_DISK_1: starting incremental level 1 datafile backup set 112 | channel ORA_DISK_1: specifying datafile(s) in backup set 113 | input datafile file number=00001 name=/u01/app/oracle/oradata/ORCL/datafile/o1_mf_system_cfvpb0o0_.dbf 114 | channel ORA_DISK_1: starting piece 1 at 2016-11-08 21:53:39 115 | channel ORA_DISK_2: starting incremental level 1 datafile backup set 116 | channel ORA_DISK_2: specifying datafile(s) in backup set 117 | input datafile file number=00002 name=/u01/app/oracle/oradata/ORCL/datafile/o1_mf_sysaux_cfvpb3ph_.dbf 118 | channel ORA_DISK_2: starting piece 1 at 2016-11-08 21:53:39 119 | channel ORA_DISK_3: starting incremental level 1 datafile backup set 120 | channel ORA_DISK_3: specifying datafile(s) in backup set 121 | input datafile file number=00003 
name=/u01/app/oracle/oradata/ORCL/datafile/o1_mf_undotbs1_cfvpb5hx_.dbf 122 | channel ORA_DISK_3: starting piece 1 at 2016-11-08 21:53:39 123 | channel ORA_DISK_4: starting incremental level 1 datafile backup set 124 | channel ORA_DISK_4: specifying datafile(s) in backup set 125 | input datafile file number=00004 name=/u01/app/oracle/oradata/ORCL/datafile/o1_mf_users_cfvpbkmx_.dbf 126 | channel ORA_DISK_4: starting piece 1 at 2016-11-08 21:53:39 127 | channel ORA_DISK_1: finished piece 1 at 2016-11-08 21:53:42 128 | piece handle=/nfs/backup/orcl/6brke9r3_1_1 tag=IMAGE_COPY_BACKUP comment=NONE 129 | channel ORA_DISK_1: backup set complete, elapsed time: 00:00:03 130 | channel ORA_DISK_2: finished piece 1 at 2016-11-08 21:53:42 131 | piece handle=/nfs/backup/orcl/6crke9r3_1_1 tag=IMAGE_COPY_BACKUP comment=NONE 132 | channel ORA_DISK_2: backup set complete, elapsed time: 00:00:03 133 | channel ORA_DISK_3: finished piece 1 at 2016-11-08 21:53:42 134 | piece handle=/nfs/backup/orcl/6drke9r3_1_1 tag=IMAGE_COPY_BACKUP comment=NONE 135 | channel ORA_DISK_3: backup set complete, elapsed time: 00:00:03 136 | channel ORA_DISK_4: finished piece 1 at 2016-11-08 21:53:42 137 | piece handle=/nfs/backup/orcl/6erke9r3_1_1 tag=IMAGE_COPY_BACKUP comment=NONE 138 | channel ORA_DISK_4: backup set complete, elapsed time: 00:00:03 139 | Finished backup at 2016-11-08 21:53:42 140 | 141 | Starting recover at 2016-11-08 21:53:43 142 | using channel ORA_DISK_1 143 | using channel ORA_DISK_2 144 | using channel ORA_DISK_3 145 | using channel ORA_DISK_4 146 | channel ORA_DISK_1: starting incremental datafile backup set restore 147 | channel ORA_DISK_1: specifying datafile copies to recover 148 | recovering datafile copy file number=00001 name=/nfs/backup/orcl/data_D-ORCL_I-1433672784_TS-SYSTEM_FNO-1_3tr6dcnn 149 | channel ORA_DISK_1: reading from backup piece /nfs/backup/orcl/6brke9r3_1_1 150 | channel ORA_DISK_2: starting incremental datafile backup set restore 151 | channel ORA_DISK_2: 
specifying datafile copies to recover 152 | recovering datafile copy file number=00002 name=/nfs/backup/orcl/data_D-ORCL_I-1433672784_TS-SYSAUX_FNO-2_3ur6dcnn 153 | channel ORA_DISK_2: reading from backup piece /nfs/backup/orcl/6crke9r3_1_1 154 | channel ORA_DISK_3: starting incremental datafile backup set restore 155 | channel ORA_DISK_3: specifying datafile copies to recover 156 | recovering datafile copy file number=00003 name=/nfs/backup/orcl/data_D-ORCL_I-1433672784_TS-UNDOTBS1_FNO-3_3vr6dcog 157 | channel ORA_DISK_3: reading from backup piece /nfs/backup/orcl/6drke9r3_1_1 158 | channel ORA_DISK_4: starting incremental datafile backup set restore 159 | channel ORA_DISK_4: specifying datafile copies to recover 160 | recovering datafile copy file number=00004 name=/nfs/backup/orcl/data_D-ORCL_I-1433672784_TS-USERS_FNO-4_40r6dcok 161 | channel ORA_DISK_4: reading from backup piece /nfs/backup/orcl/6erke9r3_1_1 162 | channel ORA_DISK_1: piece handle=/nfs/backup/orcl/6brke9r3_1_1 tag=IMAGE_COPY_BACKUP 163 | channel ORA_DISK_1: restored backup piece 1 164 | channel ORA_DISK_1: restore complete, elapsed time: 00:00:01 165 | channel ORA_DISK_2: piece handle=/nfs/backup/orcl/6crke9r3_1_1 tag=IMAGE_COPY_BACKUP 166 | channel ORA_DISK_2: restored backup piece 1 167 | channel ORA_DISK_2: restore complete, elapsed time: 00:00:01 168 | channel ORA_DISK_3: piece handle=/nfs/backup/orcl/6drke9r3_1_1 tag=IMAGE_COPY_BACKUP 169 | channel ORA_DISK_3: restored backup piece 1 170 | channel ORA_DISK_3: restore complete, elapsed time: 00:00:01 171 | channel ORA_DISK_4: piece handle=/nfs/backup/orcl/6erke9r3_1_1 tag=IMAGE_COPY_BACKUP 172 | channel ORA_DISK_4: restored backup piece 1 173 | channel ORA_DISK_4: restore complete, elapsed time: 00:00:01 174 | Finished recover at 2016-11-08 21:53:45 175 | 176 | Starting Control File and SPFILE Autobackup at 2016-11-08 21:53:45 177 | piece handle=/nfs/backup/orcl/c-1433672784-20161108-02 comment=NONE 178 | Finished Control File and SPFILE 
Autobackup at 2016-11-08 21:53:46 179 | 180 | using channel ORA_DISK_1 181 | using channel ORA_DISK_2 182 | using channel ORA_DISK_3 183 | using channel ORA_DISK_4 184 | 185 | List of Backup Pieces 186 | BP Key BS Key Pc# Cp# Status Device Type Piece Name 187 | ------- ------- --- --- ----------- ----------- ---------- 188 | 145 145 1 1 AVAILABLE DISK /nfs/backup/orcl/68rke9n5_1_1 189 | 146 146 1 1 AVAILABLE DISK /nfs/backup/orcl/after_backup_controlfile.cf 190 | 148 148 1 1 AVAILABLE DISK /nfs/backup/orcl/6drke9r3_1_1 191 | 149 149 1 1 AVAILABLE DISK /nfs/backup/orcl/6erke9r3_1_1 192 | 150 150 1 1 AVAILABLE DISK /nfs/backup/orcl/6crke9r3_1_1 193 | 151 151 1 1 AVAILABLE DISK /nfs/backup/orcl/6brke9r3_1_1 194 | deleted backup piece 195 | backup piece handle=/nfs/backup/orcl/68rke9n5_1_1 RECID=145 STAMP=927409893 196 | deleted backup piece 197 | backup piece handle=/nfs/backup/orcl/after_backup_controlfile.cf RECID=146 STAMP=927409895 198 | deleted backup piece 199 | backup piece handle=/nfs/backup/orcl/6drke9r3_1_1 RECID=148 STAMP=927410020 200 | deleted backup piece 201 | backup piece handle=/nfs/backup/orcl/6erke9r3_1_1 RECID=149 STAMP=927410020 202 | deleted backup piece 203 | backup piece handle=/nfs/backup/orcl/6crke9r3_1_1 RECID=150 STAMP=927410020 204 | deleted backup piece 205 | backup piece handle=/nfs/backup/orcl/6brke9r3_1_1 RECID=151 STAMP=927410020 206 | Deleted 6 objects 207 | 208 | 209 | released channel: ORA_DISK_1 210 | released channel: ORA_DISK_2 211 | released channel: ORA_DISK_3 212 | released channel: ORA_DISK_4 213 | allocated channel: ORA_DISK_1 214 | channel ORA_DISK_1: SID=46 device type=DISK 215 | allocated channel: ORA_DISK_2 216 | channel ORA_DISK_2: SID=51 device type=DISK 217 | allocated channel: ORA_DISK_3 218 | channel ORA_DISK_3: SID=48 device type=DISK 219 | allocated channel: ORA_DISK_4 220 | channel ORA_DISK_4: SID=60 device type=DISK 221 | specification does not match any archived log in the repository 222 | 223 | Starting 
backup at 2016-11-08 21:53:48 224 | using channel ORA_DISK_1 225 | using channel ORA_DISK_2 226 | using channel ORA_DISK_3 227 | using channel ORA_DISK_4 228 | channel ORA_DISK_1: starting full datafile backup set 229 | channel ORA_DISK_1: specifying datafile(s) in backup set 230 | including current SPFILE in backup set 231 | channel ORA_DISK_1: starting piece 1 at 2016-11-08 21:53:48 232 | channel ORA_DISK_1: finished piece 1 at 2016-11-08 21:53:49 233 | piece handle=/nfs/backup/orcl/6grke9rc_1_1 tag=IMAGE_COPY_BACKUP comment=NONE 234 | channel ORA_DISK_1: backup set complete, elapsed time: 00:00:01 235 | Finished backup at 2016-11-08 21:53:49 236 | 237 | Starting backup at 2016-11-08 21:53:49 238 | using channel ORA_DISK_1 239 | using channel ORA_DISK_2 240 | using channel ORA_DISK_3 241 | using channel ORA_DISK_4 242 | channel ORA_DISK_1: starting full datafile backup set 243 | channel ORA_DISK_1: specifying datafile(s) in backup set 244 | including current control file in backup set 245 | channel ORA_DISK_1: starting piece 1 at 2016-11-08 21:53:50 246 | channel ORA_DISK_1: finished piece 1 at 2016-11-08 21:53:51 247 | piece handle=/nfs/backup/orcl/after_backup_controlfile.cf tag=IMAGE_COPY_BACKUP comment=NONE 248 | channel ORA_DISK_1: backup set complete, elapsed time: 00:00:01 249 | Finished backup at 2016-11-08 21:53:51 250 | 251 | Starting Control File and SPFILE Autobackup at 2016-11-08 21:53:51 252 | piece handle=/nfs/backup/orcl/c-1433672784-20161108-03 comment=NONE 253 | Finished Control File and SPFILE Autobackup at 2016-11-08 21:53:52 254 | 255 | using channel ORA_DISK_1 256 | using channel ORA_DISK_2 257 | using channel ORA_DISK_3 258 | using channel ORA_DISK_4 259 | no obsolete backups found 260 | 261 | RMAN configuration parameters for database with db_unique_name ORCL are: 262 | CONFIGURE RETENTION POLICY TO RECOVERY WINDOW OF 2 DAYS; 263 | CONFIGURE BACKUP OPTIMIZATION ON; 264 | CONFIGURE DEFAULT DEVICE TYPE TO DISK; 265 | CONFIGURE CONTROLFILE 
AUTOBACKUP ON; 266 | CONFIGURE CONTROLFILE AUTOBACKUP FORMAT FOR DEVICE TYPE DISK TO '/nfs/backup/orcl/%F'; 267 | CONFIGURE DEVICE TYPE DISK PARALLELISM 4 BACKUP TYPE TO BACKUPSET; 268 | CONFIGURE DATAFILE BACKUP COPIES FOR DEVICE TYPE DISK TO 1; # default 269 | CONFIGURE ARCHIVELOG BACKUP COPIES FOR DEVICE TYPE DISK TO 1; # default 270 | CONFIGURE CHANNEL DEVICE TYPE DISK FORMAT '/nfs/backup/orcl/%U'; 271 | CONFIGURE MAXSETSIZE TO UNLIMITED; # default 272 | CONFIGURE ENCRYPTION FOR DATABASE OFF; # default 273 | CONFIGURE ENCRYPTION ALGORITHM 'AES128'; # default 274 | CONFIGURE COMPRESSION ALGORITHM 'BASIC' AS OF RELEASE 'DEFAULT' OPTIMIZE FOR LOAD TRUE ; # default 275 | CONFIGURE RMAN OUTPUT TO KEEP FOR 7 DAYS; # default 276 | CONFIGURE ARCHIVELOG DELETION POLICY TO NONE; 277 | CONFIGURE SNAPSHOT CONTROLFILE NAME TO '/nfs/backup/orcl/snapcf_orcl.f'; 278 | 279 | 280 | RMAN> 281 | 282 | RMAN> EXIT 283 | 284 | 285 | Recovery Manager complete. 286 | 2016-11-08 21:53:54,134 DEBUG orcl RMAN execution time 0:00:17.007102 287 | 2016-11-08 21:53:54,136 DEBUG orcl RMAN execution successful 288 | 2016-11-08 21:53:54,137 DEBUG orcl SQL*Plus execution starts 289 | SQL> select instance_name, host_name, version, status, database_status from v$instance; 290 | 291 | INSTANCE_NAME HOST_NAME VERSION STATUS DATABASE_STATUS 292 | ---------------- ------------------------ ---------- ------------ ----------------- 293 | orcl backup 12.1.0.2.0 OPEN ACTIVE 294 | 295 | 1 row selected. 296 | 297 | Elapsed: 00:00:00.01 298 | SQL> set pages 0 299 | SQL> select user from dual; 300 | SYS 301 | 302 | 1 row selected. 
303 | 304 | Elapsed: 00:00:00.00 305 | SQL> select 'CURRENT DATABASE TIME: '||to_char(sysdate, 'yyyy-mm-dd hh24:mi:ss') from dual union all 306 | 2 select 'CURRENT DATABASE SCN: '||to_char(current_scn) from v$database union all 307 | 3 select 'BCT FILE: '||filename from v$block_change_tracking; 308 | CURRENT DATABASE TIME: 2016-11-08 21:53:54 309 | CURRENT DATABASE SCN: 799393 310 | BCT FILE: /u01/app/oracle/oradata/ORCL/changetracking/o1_mf_cn8pwrjs_.chg 311 | 312 | 3 rows selected. 313 | 314 | Elapsed: 00:00:00.01 315 | SQL> exec dbms_lock.sleep(1); 316 | 317 | PL/SQL procedure successfully completed. 318 | 319 | Elapsed: 00:00:01.00 320 | SQL> alter system archive log current; 321 | 322 | System altered. 323 | 324 | Elapsed: 00:00:00.06 325 | SQL> spool off 326 | 2016-11-08 21:53:55,455 DEBUG orcl SQL*Plus execution successful 327 | 2016-11-08 21:53:55,456 INFO orcl Clean expired snapshots 328 | 2016-11-08 21:53:55,457 DEBUG orcl Sending GET to pools/pool1/projects/demo-backup/filesystems/orcl/snapshots 329 | 2016-11-08 21:53:55,589 DEBUG orcl Return code: 200 330 | 2016-11-08 21:53:55,592 DEBUG orcl orcl-20160524T155205 [2016-05-24 13:52:05 UTC] total=165MB unique=276kB clones=0 valid 331 | 2016-11-08 21:53:55,593 DEBUG orcl orcl-20161017T093914 [2016-10-17 07:39:14 UTC] total=177MB unique=138kB clones=0 valid 332 | 2016-11-08 21:53:55,593 DEBUG orcl orcl-20161017T100501 [2016-10-17 08:05:01 UTC] total=181MB unique=138kB clones=0 valid 333 | 2016-11-08 21:53:55,593 DEBUG orcl orcl-20161017T134819 [2016-10-17 11:48:20 UTC] total=175MB unique=146kB clones=0 valid 334 | 2016-11-08 21:53:55,594 DEBUG orcl orcl-20161017T134956 [2016-10-17 11:49:56 UTC] total=175MB unique=230kB clones=0 valid 335 | 2016-11-08 21:53:55,594 DEBUG orcl orcl-20161017T135058 [2016-10-17 11:50:58 UTC] total=175MB unique=862kB clones=0 valid 336 | 2016-11-08 21:53:55,594 DEBUG orcl orcl-20161020T123147 [2016-10-20 10:43:24 UTC] total=204MB unique=22MB clones=0 valid 337 | 2016-11-08 
21:53:55,595 DEBUG orcl orcl-20161108T215112 [2016-11-08 20:51:12 UTC] total=207MB unique=39MB clones=0 valid 338 | 2016-11-08 21:53:55,595 DEBUG orcl orcl-20161108T215336 [2016-11-08 20:53:36 UTC] total=176MB unique=292kB clones=0 valid 339 | 2016-11-08 21:53:58,719 DEBUG orcl == Database configuration == 340 | ****Unable to retrieve Oracle Clusterware home. 341 | Start Oracle Clusterware stack and try again. 342 | 343 | == Service configuration == 344 | ****Unable to retrieve Oracle Clusterware home. 345 | Start Oracle Clusterware stack and try again. 346 | 347 | == Service configuration parsed == 348 | 349 | 2016-11-08 21:53:58,720 INFO orcl Write database parameters for autorestore 350 | 2016-11-08 21:53:58,724 DEBUG orcl SQL*Plus execution starts 351 | 352 | INSTANCE_NAME HOST_NAME VERSION STATUS DATABASE_STATUS 353 | ---------------- ------------------------ ---------- ------------ ----------------- 354 | orcl backup 12.1.0.2.0 OPEN ACTIVE 355 | 356 | 1 row selected. 357 | 358 | Elapsed: 00:00:00.00 359 | SYS 360 | 361 | 1 row selected. 
362 | 363 | Elapsed: 00:00:00.00 364 | dbconfig-db_block_size: 8192 365 | dbconfig-compatible: 12.1.0.2.0 366 | dbconfig-db_files: 200 367 | dbconfig-undo_tablespace: UNDOTBS1 368 | dbconfig-db_name: orcl 369 | dbconfig-enable_pluggable_database: FALSE 370 | dbconfig-backup-finished: 2016-11-08 21:53:58 371 | Elapsed: 00:00:00.01 372 | 2016-11-08 21:53:58,803 DEBUG orcl SQL*Plus execution successful 373 | 2016-11-08 21:53:58,805 INFO orcl ------------ TOTAL ------------ 374 | 2016-11-08 21:53:58,806 INFO orcl Total execution time: 0:00:23.155297 375 | 2016-11-08 21:53:58,807 INFO orcl Execution started: 2016-11-08 21:53:35.650323 376 | 2016-11-08 21:53:58,808 INFO orcl Execution finished: 2016-11-08 21:53:58.805620 377 | -------------------------------------------------------------------------------- /netapp.py: -------------------------------------------------------------------------------- 1 | import os, operator 2 | from backupcommon import SnapHandler, Configuration, scriptpath, info, error, debug, UIElement 3 | from ConfigParser import SafeConfigParser, NoOptionError, NoSectionError 4 | from datetime import datetime, timedelta 5 | from NaServer import * 6 | from time import sleep 7 | 8 | class Netapp(SnapHandler): 9 | _exceptionbase = "netapp" 10 | _blocksize = 1024 11 | _filer = None 12 | _cacert = None 13 | _multivol = False 14 | 15 | def _read_netapp_config(self, attribute, zfscredconfig): 16 | # Try reading Netapp configuration first from the credentials file, then fail over to main configuration file 17 | value = None 18 | try: 19 | return zfscredconfig.get('netapp', attribute) 20 | except (NoOptionError, NoSectionError) as e: 21 | pass 22 | try: 23 | return Configuration.get(attribute, 'netapp') 24 | except (NoOptionError, NoSectionError) as e: 25 | raise NoOptionError("Attribute %s not found" % attribute) 26 | 27 | def __init__(self, configname): 28 | zfscredfilename = os.path.join(scriptpath(), 'netappcredentials.cfg') 29 | if not 
os.path.isfile(zfscredfilename): 30 | raise Exception(self._exceptionbase, "Configuration file %s not found" % zfscredfilename) 31 | # Authentication information 32 | zfscredconfig = SafeConfigParser() 33 | zfscredconfig.read(zfscredfilename) 34 | # 35 | self._filer = self._read_netapp_config('filer', zfscredconfig) 36 | self._srv = NaServer(self._filer, 1, 1) 37 | # Check if CA certificate validation is needed 38 | try: 39 | self._cacert = os.path.join(scriptpath(), 'certs', self._read_netapp_config('cacert', zfscredconfig)) 40 | except: 41 | self._cacert = None 42 | if self._cacert: 43 | self._srv.set_ca_certs(self._cacert) 44 | self._srv.set_server_cert_verification(True) 45 | self._srv.set_hostname_verification(False) 46 | # 47 | self._srv.set_admin_user(zfscredconfig.get('netappcredentials','user'), zfscredconfig.get('netappcredentials','password')) 48 | try: 49 | self._mounthost = self._read_netapp_config('mounthost', zfscredconfig) 50 | except: 51 | self._mounthost = self._filer 52 | # Get volume name 53 | self._volprefix = self._read_netapp_config('volumeprefix', zfscredconfig) 54 | self._volname = [] 55 | self._volname.append("%s%s" % (self._volprefix, configname)) 56 | try: 57 | for suffix in Configuration.get('additionalvolumesuffixes').split(','): 58 | self._volname.append("%s%s%s" % (self._volprefix, configname, suffix)) 59 | self._multivol = True 60 | except NoOptionError: 61 | pass 62 | debug("List of all volumes for this database: %s" % self._volname) 63 | super(Netapp, self).__init__(configname) 64 | 65 | def _check_netapp_error(self, output, errmsg): 66 | if output.results_errno() != 0: 67 | raise Exception(self._exceptionbase, "%s. 
%s" % (errmsg, output.results_reason())) 68 | 69 | def _volsize_to_num(self, volsize): 70 | unit = volsize[-1].lower() 71 | factor = 1 72 | if unit == "m": 73 | factor = 20 74 | elif unit == "g": 75 | factor = 30 76 | elif unit == "t": 77 | factor = 40 78 | elif unit == "p": 79 | factor = 50 80 | return round(float(volsize[:-1])*(2**factor)) 81 | 82 | def _get_volume_info(self, volname): 83 | debug("Querying info for volume: %s" % volname) 84 | elem = NaElement("volume-get-iter") 85 | elem.child_add_string("max-records", "1") 86 | # 87 | query = NaElement("query") 88 | query_vol = NaElement('volume-attributes') 89 | query_volume_id = NaElement("volume-id-attributes") 90 | query_volume_id.child_add_string("name", volname) 91 | query_vol.child_add(query_volume_id) 92 | query.child_add(query_vol) 93 | elem.child_add(query) 94 | # 95 | attr = NaElement("desired-attributes") 96 | attr_vol = NaElement('volume-attributes') 97 | exp = NaElement("volume-export-attributes") 98 | exp.child_add(NaElement("policy")) 99 | attr_vol.child_add(exp) 100 | id = NaElement("volume-id-attributes") 101 | id.child_add(NaElement("containing-aggregate-name")) 102 | id.child_add(NaElement("junction-path")) 103 | attr_vol.child_add(id) 104 | attr_vol.child_add(NaElement('volume-clone-attributes')) 105 | attr.child_add(attr_vol) 106 | elem.child_add(attr) 107 | # 108 | #print(elem.sprintf()) 109 | output = self._srv.invoke_elem(elem) 110 | #print(output.sprintf()) 111 | self._check_netapp_error(output, "Getting volume information failed") 112 | attrlist = output.child_get("attributes-list") 113 | info = {} 114 | if (attrlist is not None and attrlist): 115 | for ss in attrlist.children_get(): 116 | info['aggregate'] = ss.child_get('volume-id-attributes').child_get_string('containing-aggregate-name') 117 | info['mountpoint'] = ss.child_get('volume-id-attributes').child_get_string('junction-path') 118 | info['export-policy'] = ss.child_get('volume-export-attributes').child_get_string('policy') 
119 | cloneattr = ss.child_get('volume-clone-attributes') 120 | if cloneattr and cloneattr.child_get('volume-clone-parent-attributes'): 121 | info['origin'] = cloneattr.child_get('volume-clone-parent-attributes').child_get_string('name') 122 | else: 123 | info['origin'] = None 124 | return info 125 | 126 | def _dropvolume(self, volname): 127 | debug("Dropping volume: %s" % volname) 128 | output = self._srv.invoke("volume-unmount", "volume-name", volname) 129 | self._check_netapp_error(output, "Unmounting volume %s failed" % volname) 130 | sleep(10) 131 | output = self._srv.invoke("volume-offline", "name", volname) 132 | self._check_netapp_error(output, "Offlining volume %s failed" % volname) 133 | sleep(10) 134 | output = self._srv.invoke("volume-destroy", "name", volname) 135 | self._check_netapp_error(output, "Dropping volume %s failed" % volname) 136 | 137 | # Public interfaces 138 | def filesystem_info(self, filesystemname=None): 139 | sourceinfo = self._get_volume_info(filesystemname) 140 | info = {} 141 | info['origin'] = sourceinfo['origin'] 142 | info['clonename'] = filesystemname 143 | info['mountpoint'] = sourceinfo['mountpoint'] 144 | return info 145 | 146 | def listclones(self): 147 | # TODO: support multiple volumes - self._volname as array 148 | elem = NaElement("volume-clone-get-iter") 149 | elem.child_add_string("max-records", "50") 150 | query = NaElement("query") 151 | query.child_add_string("parent-volume", self._volname[0]) 152 | elem.child_add(query) 153 | # 154 | output = self._srv.invoke_elem(elem) 155 | self._check_netapp_error(output, "List clones failed") 156 | attrlist = output.child_get("attributes-list") 157 | if (attrlist is not None and attrlist): 158 | for ss in attrlist.children_get(): 159 | info = {} 160 | info['origin'] = ss.child_get_string('parent-volume') 161 | info['clonename'] = ss.child_get_string('volume') 162 | info['mountpoint'] = ss.child_get_string('junction-path') 163 | yield info 164 | 165 | def mountstring(self, 
filesystemname): 166 | info = self.filesystem_info(filesystemname) 167 | return "%s:%s" % (self._mounthost, info['mountpoint']) 168 | 169 | def snap(self): 170 | snapname = "%s_%s" % (self.configname, datetime.now().strftime('%Y%m%dT%H%M%S')) 171 | debug("Snapshot name: %s" % snapname) 172 | for vol in self._volname: 173 | debug("Snapshotting volume: %s" % vol) 174 | output = self._srv.invoke("snapshot-create", "volume", vol, "snapshot", snapname) 175 | self._check_netapp_error(output, "Creating snapshot failed") 176 | return snapname 177 | 178 | def dropsnap(self, snapid): 179 | for counter, vol in enumerate(self._volname): 180 | debug("Dropping snapshot on volume: %s" % vol) 181 | output = self._srv.invoke("snapshot-delete", "volume", vol, "snapshot", snapid) 182 | if counter == 0: 183 | # Only check netapp error on the first volume, because user may add volumes later and on the new volumes older snapshot ID-s do not exist 184 | self._check_netapp_error(output, "Failed to drop snapshot %s" % snapid) 185 | 186 | def getsnapinfo(self, snapstruct): 187 | return snapstruct 188 | 189 | def listsnapshots(self, sortbycreation=False, sortreverse=False): 190 | snapshots = [] 191 | for volname in self._volname: 192 | debug("Getting volume info for volume: %s" % volname) 193 | output = self._srv.invoke("volume-size", "volume", volname) 194 | self._check_netapp_error(output, "Failed to get volume size information") 195 | volsize = self._volsize_to_num(output.child_get_string("volume-size")) 196 | pct_limit = round(2147483648*100/(volsize/self._blocksize)) 197 | output = self._srv.invoke("snapshot-list-info", "volume", volname) 198 | self._check_netapp_error(output, "Failed to list snapshots") 199 | snapshotlist = output.child_get("snapshots") 200 | if (snapshotlist is not None and snapshotlist): 201 | for ss in snapshotlist.children_get(): 202 | snapinfo = {'id': ss.child_get_string("name"), 203 | 'creation': datetime.utcfromtimestamp(float(ss.child_get_int("access-time"))), 
204 | 'numclones': 1 if ss.child_get_string("busy") == "true" else 0, 205 | 'space_total': ss.child_get_int("cumulative-total")*self._blocksize if ss.child_get_int("cumulative-percentage-of-total-blocks") < pct_limit else round(volsize*ss.child_get_int("cumulative-percentage-of-total-blocks")/100), 206 | 'space_unique': ss.child_get_int("total")*self._blocksize if ss.child_get_int("percentage-of-total-blocks") < pct_limit else round(volsize*ss.child_get_int("percentage-of-total-blocks")/100) 207 | } 208 | try: 209 | existingitem = next(item for item in snapshots if item['id'] == snapinfo['id']) 210 | existingitem['space_total'] += snapinfo['space_total'] 211 | existingitem['space_unique'] += snapinfo['space_unique'] 212 | existingitem['numclones'] = max(existingitem['numclones'], snapinfo['numclones']) 213 | except StopIteration: 214 | snapshots.append(snapinfo) 215 | if not sortbycreation: 216 | return snapshots 217 | else: 218 | return sorted(snapshots, key=operator.itemgetter('creation'), reverse=sortreverse) 219 | 220 | def clone(self, snapid, clonename): 221 | # Create root namespace 222 | if self._multivol: 223 | debug("Multivolume mode enabled") 224 | junction_prefix = "/%s" % clonename 225 | # Query current volume to get aggregate and export_policy 226 | volinfo = self._get_volume_info(self._volname[0]) 227 | aggregate = volinfo['aggregate'] 228 | export_policy = volinfo['export-policy'] 229 | # Create new volume just for namespace root 230 | debug("Creating volume %s with junction path %s" % (clonename, junction_prefix)) 231 | output = self._srv.invoke( 232 | "volume-create", "volume", clonename, "containing-aggr-name", aggregate, "efficiency-policy", "default", "export-policy", export_policy, 233 | "group-id", os.getgid(), "user-id", os.getuid(), "unix-permissions", "0770", 234 | "junction-path", junction_prefix, "percentage-snapshot-reserve", 0, "size", "1G", "volume-state", "online") 235 | self._check_netapp_error(output, "Creating new root namespace 
volume failed") 236 | else: 237 | debug("Multivolume mode disabled") 238 | junction_prefix = "" 239 | # Create the clones 240 | for counter, vol in enumerate(self._volname): 241 | cname = clonename if not self._multivol else "%s%d" % (clonename, counter) 242 | debug("Cloning volume %s from snapshot %s as volume %s with junction path %s/%s" % (vol, snapid, cname, junction_prefix, cname)) 243 | output = self._srv.invoke("volume-clone-create", "parent-volume", vol, "parent-snapshot", snapid, "volume", cname) 244 | self._check_netapp_error(output, "Creating clone failed") 245 | output = self._srv.invoke("volume-mount", "junction-path", "%s/%s" % (junction_prefix, cname), "volume-name", cname) 246 | self._check_netapp_error(output, "Mounting clone failed") 247 | output = self._srv.invoke("volume-set-option", "option-name", "nosnapdir", "option-value", "on", "volume", cname) 248 | self._check_netapp_error(output, "Setting attribute on clone failed") 249 | 250 | def dropclone(self, cloneid): 251 | info = self.filesystem_info(cloneid if not self._multivol else cloneid + "0") 252 | if info['origin'] != self._volname[0]: 253 | raise Exception(self._exceptionbase, "This clone does not belong to parent %s" % self._volname) 254 | for counter, vol in enumerate(self._volname): 255 | self._dropvolume(cloneid if not self._multivol else "%s%d" % (cloneid, counter)) 256 | # Drop the root namespace 257 | if self._multivol: 258 | self._dropvolume(cloneid) 259 | 260 | def createvolume(self): 261 | uid = os.getuid() 262 | gid = os.getgid() 263 | permissions = "0770" 264 | # 265 | ui = UIElement() 266 | aggregate = ui.ask_string("Aggregate name:", 50) 267 | volume_size = ui.ask_size("Volume size:") 268 | path = ui.ask_string("Parent namespace:", 50) 269 | export_policy = ui.ask_string("Export policy:", 50) 270 | # 271 | output = self._srv.invoke( 272 | "volume-create", "volume", self._volname[0], "containing-aggr-name", aggregate, "efficiency-policy", "default", "export-policy", 
export_policy, 273 | "group-id", gid, "user-id", os.getuid(), "unix-permissions", permissions, 274 | "junction-path", os.path.join(path, self.configname), "percentage-snapshot-reserve", 0, "size", volume_size, "volume-state", "online") 275 | self._check_netapp_error(output, "Creating volume failed") 276 | # 277 | rootelem = NaElement("volume-modify-iter") 278 | attrelem1 = NaElement("attributes") 279 | attrelem = NaElement("volume-attributes") 280 | attrelem1.child_add(attrelem) 281 | queryelem1 = NaElement("query") 282 | queryelem = NaElement("volume-attributes") 283 | queryelem1.child_add(queryelem) 284 | volid = NaElement("volume-id-attributes") 285 | volid.child_add_string("name", self._volname[0]) 286 | queryelem.child_add(volid) 287 | snapattr = NaElement("volume-snapshot-attributes") 288 | snapattr.child_add_string("auto-snapshots-enabled", "false") 289 | snapattr.child_add_string("snapdir-access-enabled", "false") 290 | autosizeattr = NaElement("volume-autosize-attributes") 291 | autosizeattr.child_add_string("mode", "grow") 292 | attrelem.child_add(snapattr) 293 | attrelem.child_add(autosizeattr) 294 | rootelem.child_add(attrelem1) 295 | rootelem.child_add(queryelem1) 296 | rootelem.child_add_string("max-records", "1") 297 | output = self._srv.invoke_elem(rootelem) 298 | self._check_netapp_error(output, "Setting volume options failed.") 299 | print "Volume created. Please disable automatic snapshot creation through GUI, for some reason it does not work through API." 
class OracleExec(object):
    """Thin wrapper for running Oracle command line tools (rman, sqlplus,
    sqlldr, adrci) from a given ORACLE_HOME with a controlled environment."""

    oraclehome = None  # ORACLE_HOME path used to locate the binaries
    tnspath = None     # directory exported as TNS_ADMIN
    oraclesid = None   # optional ORACLE_SID for local connections

    def __init__(self, oraclehome, tnspath, sid=None):
        self.oraclehome = oraclehome
        self.tnspath = tnspath
        if sid is not None:
            self.oraclesid = sid
        debug("Oracle home: %s" % self.oraclehome)

    def _setenv(self):
        # Build the process environment for the child Oracle tools; ORACLE_SID
        # is removed when no SID was configured so sqlplus does not pick up a
        # stale one from the caller's environment.
        if self.oraclesid is None and os.environ.get('ORACLE_SID'):
            del os.environ['ORACLE_SID']
        if self.oraclesid is not None:
            os.environ['ORACLE_SID'] = self.oraclesid
        os.environ['ORACLE_HOME'] = self.oraclehome
        os.environ['NLS_DATE_FORMAT'] = 'yyyy-mm-dd hh24:mi:ss'
        os.environ['TNS_ADMIN'] = self.tnspath

    def rman(self, finalscript):
        """Feed finalscript to RMAN; raise on a non-zero exit code.

        RMAN appends its own output to the current BackupLogger logfile, so
        the logger is closed for the duration of the run.
        """
        self._setenv()
        debug("RMAN execution starts")
        BackupLogger.close()
        starttime = datetime.now()
        with TemporaryFile() as f:
            p = Popen([os.path.join(self.oraclehome, 'bin', 'rman'), "log", BackupLogger.logfile, "append"], stdout=f, stderr=f, stdin=PIPE)
            # Send the script to RMAN
            p.communicate(input=finalscript)
        endtime = datetime.now()
        BackupLogger.init()
        debug("RMAN execution time %s" % (endtime - starttime))
        # If RMAN exits with any code except 0, then there was some error
        if p.returncode != 0:
            error("RMAN execution failed with code %d" % p.returncode)
            raise Exception('rman', "RMAN exited with code %d" % p.returncode)
        else:
            debug("RMAN execution successful")

    def sqlplus(self, finalscript, silent=False):
        """Run finalscript through sqlplus /nolog.

        silent -- pass -S and return the captured output to the caller.
        Raises on a non-zero sqlplus exit code.
        """
        self._setenv()
        with TemporaryFile() as f:
            args = [os.path.join(self.oraclehome, 'bin', 'sqlplus')]
            if silent:
                args.append('-S')
            args.append('/nolog')
            debug("SQL*Plus execution starts")
            BackupLogger.close()
            p = Popen(args, stdout=f, stderr=f, stdin=PIPE)
            p.communicate(input=finalscript)
            BackupLogger.init()
            if p.returncode != 0:
                error("SQL*Plus exited with code %d" % p.returncode)
                raise Exception('sqlplus', "sqlplus exited with code %d" % p.returncode)
            else:
                debug("SQL*Plus execution successful")
            if silent:
                f.seek(0, 0)
                return f.read()

    def sqlldr(self, login, finalscript):
        """Run SQL*Loader with finalscript as the control file contents.

        login -- connect string passed as the first sqlldr argument.
        Raises on a non-zero sqlldr exit code.
        """
        self._setenv()
        debug("SQLLDR execution starts")
        # Write the control file and pre-create the log file via mkstemp
        f1 = mkstemp(suffix=".ctl")
        ftmp = os.fdopen(f1[0], "w")
        ftmp.write(finalscript)
        ftmp.close()
        f2 = mkstemp(suffix=".log")
        os.close(f2[0])
        try:
            with TemporaryFile() as f:
                p = Popen([os.path.join(self.oraclehome, 'bin', 'sqlldr'), login, "control=%s" % f1[1], "log=%s" % f2[1], "errors=0", "silent=all"], stdout=f, stderr=None, stdin=None)
                p.communicate()
                if p.returncode != 0:
                    error("SQLLDR exited with code %d" % p.returncode)
                    raise Exception('sqlldr', "sqlldr exited with code %d" % p.returncode)
                else:
                    debug("SQLLDR execution successful")
        finally:
            # BUGFIX: previously the temp control/log files were only removed on
            # the success path and leaked whenever sqlldr failed.
            os.unlink(f1[1])
            os.unlink(f2[1])

    def adrci(self, inputscriptfilename, outputfilehandle):
        """Run an adrci script file, writing its stdout to outputfilehandle."""
        self._setenv()
        p = Popen([os.path.join(self.oraclehome, 'bin', 'adrci'), "script=%s" % inputscriptfilename], stdout=outputfilehandle, stderr=None, stdin=None)
        p.wait()
        if p.returncode != 0:
            raise Exception('adrci', 'Exit code was not 0.')
#!/usr/bin/python2
"""Print a short backup/snapshot/autorestore status report for each
configured database section."""

import os, sys, json
from backupcommon import scriptpath, Configuration, BackupLogger, BackupTemplate, info, error, debug, exception, create_snapshot_class
from tempfile import mkstemp
from oraexec import OracleExec

def printhelp():
    print("Usage: report.py [comma separated list of databases]")
    sys.exit(2)

if len(sys.argv) not in [1, 2]:
    printhelp()

# Directory where the executable script is located
scriptpath = scriptpath()

# Read configuration; sqlplus output is appended to a temporary log file
logf = mkstemp(prefix='backupreport-', suffix='.log')
os.close(logf[0])
Configuration.init('generic')
BackupLogger.init(logf[1], 'reporting')
Configuration.substitutions.update( {'logfile': BackupLogger.logfile, 'autorestorecatalog': Configuration.get('autorestorecatalog', 'autorestore')} )
reporttemplate = BackupTemplate('reporttemplate.cfg')

def exec_sqlplus(oraexec, script, header = 'sqlplusheader'):
    """Run a sqlplus snippet wrapped in the report header/footer templates and
    yield the payload of every output line tagged with the 'OUTLOG: ' prefix."""
    finalscript = "%s\n%s\n%s" % (reporttemplate.get(header), script, reporttemplate.get('sqlplusfooter'))
    output = oraexec.sqlplus(finalscript, silent=True)
    for line in output.splitlines():
        if line.startswith('OUTLOG: '):
            yield(line.strip()[8:])

def process_database(dbname):
    """Collect and print scheduler-job, snapshot and autorestore status for a
    single database section."""
    Configuration.defaultsection = dbname
    Configuration.substitutions.update({'dbname': dbname})
    oraexec = OracleExec(oraclehome=Configuration.get('oraclehome', 'generic'), tnspath=os.path.join(scriptpath, Configuration.get('tnsadmin', 'generic')))
    # Read job status information from the database; each OUTLOG line is a
    # small JSON document tagged with its type and job name.
    jobinfo = {}
    for line in exec_sqlplus(oraexec, reporttemplate.get('jobstatus')):
        j = json.loads(line)
        if j["type"] == "job":
            if j["job_name"] == "ARCHLOGBACKUP_JOB":
                jobinfo["archlog"] = j
            elif j["job_name"] == "IMAGECOPY_JOB":
                jobinfo["imagecopy"] = j
        elif j["type"] == "exec":
            if j["job_name"] == "ARCHLOGBACKUP_JOB":
                jobinfo["archlogexec"] = j
            elif j["job_name"] == "IMAGECOPY_JOB":
                jobinfo["imagecopyexec"] = j
    # Read snapshot information (sorted by creation time, newest first)
    zfs = create_snapshot_class(dbname)
    snaps = zfs.listsnapshots(True, True)
    # Autorestore information; best effort, the catalog may be unreachable
    autorestoreinfo = None
    try:
        for line in exec_sqlplus(oraexec, reporttemplate.get('autorestorestatus'), 'sqlplusautorestoreheader'):
            autorestoreinfo = json.loads(line)
    except Exception:
        # BUGFIX: was a bare except, which also swallowed SystemExit/KeyboardInterrupt
        pass
    # Print output
    print("%s:" % dbname)
    try:
        print("  Backup job: %s, last: %s, duration: %s, last failure: %s" % (jobinfo['imagecopy']['state'], jobinfo['imagecopy']['last_start_date'], jobinfo['imagecopy']['last_run_duration'], jobinfo['imagecopyexec']['last_failed']))
        print("  Archivelog job: %s, last: %s, duration: %s, last failure: %s" % (jobinfo['archlog']['state'], jobinfo['archlog']['last_start_date'], jobinfo['archlog']['last_run_duration'], jobinfo['archlogexec']['last_failed']))
        if len(snaps) > 0:
            firstsnap = zfs.getsnapinfo(snaps[0])
            lastsnap = zfs.getsnapinfo(snaps[-1])
            print("  Snapshots: %d, latest: %s, oldest: %s" % (len(snaps), firstsnap["creation"], lastsnap["creation"]))
        else:
            print("  Snapshots: none")
        if autorestoreinfo is not None:
            print("  Last successful restore: %s, last restore failure: %s, last successful validation: %s, avg difference from target (s): %d, avg restore time (min): %d" % (autorestoreinfo["last_success"], autorestoreinfo["last_fail"], autorestoreinfo["last_validated"], autorestoreinfo["avgdiff"], autorestoreinfo["avgrestoremin"]))
    except Exception:
        # Missing keys mean the database did not answer fully; report and move on
        print("  Error getting information.")

excludelist = ['generic','rman','zfssa','autorestore']
includelist = []
if len(sys.argv) == 2:
    includelist = sys.argv[1].split(",")

# Loop through all configuration sections, skipping the non-database ones
for dbname in Configuration.sections():
    if dbname not in excludelist and (len(includelist) == 0 or dbname in includelist):
        process_database(dbname)
#!/usr/bin/python2
"""Interactive utility to restore a database clone to a point in time.

Must be run on an isolated system; it mounts a storage clone and starts a
database instance from it.
"""

import os, sys, errno, pytz
from datetime import datetime
from backupcommon import BackupLock, BackupLogger, info, debug, error, exception, Configuration, scriptpath, UIElement
from restorecommon import RestoreDB
from tempfile import TemporaryFile
from tzlocal import get_localzone

def printheader():
    print("This utility will start a copy of a database restored to a specified point in time.")
    print("THIS UTILITY SHOULD NOT BE RUN ON A SYSTEM WITH ACCESS TO PRODUCTION STORAGE!")
    print("THIS UTILITY IS NOT INTENDED FOR EMERGENCY PRODUCTION RESTORE.")
    print("")

def printhelp():
    # BUGFIX: the argument placeholders were missing from the usage text
    # (argv[1] is the configuration file, argv[2] the database section name)
    print("Usage: restore.py <configfile> <config/database name>")
    sys.exit(2)


def ask_user_input():
    """Interactively collect restore parameters into the module-level
    restoreparams dict; sets exitvalue=1 and returns early on user abort."""
    global restoreparams, exitvalue

    is_safe = ui.ask_yn("Is this system isolated with no access to production database storage")
    if is_safe != "Y":
        print("Exiting. Please execute this script in an isolated environment.")
        exitvalue = 1
        return
    restoreparams['mountpath'] = ui.ask_directory("Directory where to mount clone:", False)
    restoreparams['timepoint'] = ui.ask_timestamp("Restore database to time point")
    is_utc = ui.ask_yn("Was the timestamp in UTC (answer N for local time)")
    if is_utc == "Y":
        tz = pytz.utc
    else:
        tz = get_localzone()
    restoreparams['timepoint'] = tz.localize(restoreparams['timepoint'])
    restore.set_restore_target_time(restoreparams['timepoint'])
    restoreparams['sid'] = ui.ask_string("Target instance name:", 8, True)
    # Show a summary and let the user confirm before touching anything
    splitter = "######################################"
    print(splitter)
    print("")
    print("Database unique name: %s" % configname)
    print("Oracle home: %s" % Configuration.get("oraclehome", "generic"))
    print("Clone mount path: %s" % restoreparams['mountpath'])
    print("Target instance SID: %s" % restoreparams['sid'])
    print("Restore target time UTC: %s" % restoreparams['timepoint'].astimezone(pytz.utc))
    print("Restore target time local: %s" % restoreparams['timepoint'].astimezone(get_localzone()))
    print("Restored from snapshot: %s" % restore.sourcesnapid)
    #
    print("")
    is_ok = ui.ask_yn("Are these parameters correct")
    if is_ok != "Y":
        print("Exiting. Please execute this script again.")
        exitvalue = 1
        return
    print("")
    print(splitter)

def exec_restore():
    """Clone the snapshot, have the operator mount it, then run the restore.
    Always ends by printing manual cleanup instructions."""
    global exitvalue

    restore.clone(False)
    print("Please execute the following command as root to mount the backup volume:")
    print("")
    print("mount -t nfs -o rw,bg,hard,nointr,rsize=32768,wsize=32768,tcp,vers=3,timeo=600 %s %s" % (restore.mountstring, restoreparams['mountpath']))
    print("")
    while ui.ask_yn("Did you execute it") == "N":
        print("Please execute it then.")
    #
    debug("Oracle home: %s" % Configuration.get("oraclehome", "generic"))
    debug("Clone mount path: %s" % restoreparams['mountpath'])
    debug("Target instance SID: %s" % restoreparams['sid'])
    debug("Restore target time UTC: %s" % restoreparams['timepoint'].astimezone(pytz.utc))
    debug("Restore target time local: %s" % restoreparams['timepoint'].astimezone(get_localzone()))
    debug("Restored from snapshot: %s" % restore.sourcesnapid)
    info("Starting database restore")
    #
    try:
        restore.pit_restore(restoreparams['mountpath'], restoreparams['sid'])
        restore.verify(False)
        info("Database restore complete")
        info("SID: %s" % restoreparams['sid'])
        info("Requested target time: %s" % restoreparams['timepoint'].astimezone(get_localzone()))
        info("Verified restored database time: %s" % restore.verifytime)
        info("Difference from target: %s" % restore.verifydiff)
    except Exception:
        # BUGFIX: was a bare except, which also swallowed KeyboardInterrupt
        exception("Database restore failed")
        exitvalue = 1
    print("")
    print("Commands to clean up:")
    print("1. Shut down database instance %s" % restoreparams['sid'])
    print("2. Execute as root: umount %s" % restoreparams['mountpath'])
    print("3. Drop clone: BACKUPCONFIG=%s %s %s dropclone %s" % (os.path.basename(Configuration.configfilename),
        os.path.join(scriptpath(), 'zsnapper.py'), configname, restore.clonename))

# Main UI

exitvalue = 0
restoreparams = {}
printheader()
if len(sys.argv) != 3:
    printhelp()

if os.geteuid() == 0:
    print("No, I will not run as root.")
    sys.exit(0)

ui = UIElement()
configname = sys.argv[2]
Configuration.init(defaultsection=configname, configfilename=sys.argv[1], additionaldefaults={'customverifydate': 'select max(time_dp) from sys.smon_scn_time',
    'autorestoreenabled': '1', 'autorestoreinstancenumber': '1', 'autorestorethread': '1'})

BackupLogger.init('/tmp/restore_%s_%s.log' % (datetime.now().strftime('%Y%m%dT%H%M%S'), configname), configname)
BackupLogger.clean()
Configuration.substitutions.update({
    'logdir': '/tmp',
    'logfile': BackupLogger.logfile
})
print("Session log file: %s" % BackupLogger.logfile)

restore = RestoreDB(configname)

ask_user_input()
if exitvalue == 0:
    exec_restore()

sys.exit(exitvalue)
create_snapshot_class(configname) 31 | 32 | def set_mount_path(self, mountdest): 33 | if mountdest is None or not os.path.exists(mountdest) or not os.path.isdir(mountdest): 34 | raise Exception('restore', "Mount directory %s not found or is not a proper directory" % mountdest) 35 | self._mountdest = mountdest 36 | self._root_mountdest = mountdest 37 | 38 | def set_restore_path(self, restoredest): 39 | if restoredest is None or not os.path.exists(restoredest) or not os.path.isdir(restoredest): 40 | raise Exception('restore', "Restore directory %s not found or is not a proper directory" % restoredest) 41 | self._restoredest = restoredest 42 | 43 | def set_restore_target_time(self, targettime): 44 | if targettime.tzinfo is None: 45 | raise Exception('restore', 'set_restore_target_time expects a datetime object with time zone information') 46 | self.targettime = targettime 47 | self.sourcesnapid = self._snap.search_recovery_snapid(targettime.astimezone(pytz.utc)) 48 | if self.sourcesnapid is None: 49 | raise Exception('restore', 'Suitable snapshot not found. 
If requested time is after the latest backup was taken, please use zsnapper.py to create a new snapshot first.') 50 | 51 | # Helpers for executing Oracle commands 52 | def _exec_rman(self, commands): 53 | finalscript = "%s\n%s\n%s" % (self._restoretemplate.get('rmanheader'), commands, self._restoretemplate.get('rmanfooter')) 54 | self._exec.rman(finalscript) 55 | 56 | def _exec_sqlplus(self, commands, headers=True, returnoutput=False): 57 | if headers: 58 | finalscript = "%s\n%s\n%s" % (self._restoretemplate.get('sqlplusheader'), commands, self._restoretemplate.get('sqlplusfooter')) 59 | else: 60 | finalscript = commands 61 | return self._exec.sqlplus(finalscript, silent=returnoutput) 62 | 63 | # Restore actions 64 | def _createinitora(self): 65 | filename = self._initfile 66 | with open(filename, 'w') as f: 67 | contents = self._restoretemplate.get('autoinitora') 68 | if 'cdb' in Configuration.substitutions and Configuration.substitutions['cdb'].upper() == 'TRUE': 69 | contents+= self._restoretemplate.get('cdbinitora') 70 | debug("ACTION: Generated init file %s\n%s" % (filename, contents)) 71 | f.write(contents) 72 | 73 | def clone(self, autorestore=True): 74 | if autorestore: 75 | self.sourcesnapid = self._snap.autoclone() 76 | else: 77 | self.clonename = "restore_%s_%s" % (self._configname, datetime.now().strftime("%Y%m%d_%H%M%S")) 78 | self._snap.clone(self.sourcesnapid, self.clonename) 79 | self.mountstring = self._snap.mountstring(self.clonename) 80 | self._successful_clone = True 81 | 82 | def _mount(self): 83 | check_call(['mount', self._root_mountdest]) 84 | self._successful_mount = True 85 | 86 | def _unmount(self): 87 | check_call(['umount', self._root_mountdest]) 88 | 89 | def _set_parameters(self): 90 | # Detect if mountpath is actually a namespace containing multiple volumes 91 | orig_mount_dest = self._mountdest 92 | if not os.path.isfile(os.path.join(orig_mount_dest, 'autorestore.cfg')): 93 | location_correct = False 94 | for item in 
os.listdir(orig_mount_dest): 95 | item_full_path = os.path.join(orig_mount_dest, item) 96 | if os.path.isdir(item_full_path): 97 | self._mountpaths.append(item_full_path) 98 | if os.path.isfile(os.path.join(item_full_path, 'autorestore.cfg')): 99 | location_correct = True 100 | self._mountdest = item_full_path 101 | if not location_correct: 102 | raise Exception('restore', 'Mount path is not correct, autorestore.cfg was not found') 103 | else: 104 | self._mountpaths.append(self._mountdest) 105 | debug("All datafile mountpaths: %s" % self._mountpaths) 106 | debug("Main datafile mountpath: %s" % self._mountdest) 107 | debug("Location for temporary init.ora and other files: %s" % self._restoredest) 108 | # 109 | dbconfig = SafeConfigParser() 110 | dbconfig.read(os.path.join(self._mountdest, 'autorestore.cfg')) 111 | self._dbparams['dbname'] = dbconfig.get('dbparams','db_name') 112 | if self.targettime is None: 113 | self._dbparams['restoretarget'] = datetime.strptime(dbconfig.get('dbparams','lasttime'), '%Y-%m-%d %H:%M:%S') 114 | else: 115 | self._dbparams['restoretarget'] = self.targettime.astimezone(get_localzone()) 116 | self._dbparams['bctfile'] = dbconfig.get('dbparams','bctfile') 117 | catalogstatements = [] 118 | for item in self._mountpaths: 119 | catalogstatements.append("catalog start with '%s/archivelog/' noprompt;" % item) 120 | catalogstatements.append("catalog start with '%s/data_' noprompt;" % item) 121 | Configuration.substitutions.update({ 122 | 'db_name': self._dbparams['dbname'], 123 | 'db_compatible': dbconfig.get('dbparams','compatible'), 124 | 'db_files': dbconfig.get('dbparams','db_files'), 125 | 'db_undotbs': dbconfig.get('dbparams','undo_tablespace'), 126 | 'db_block_size': dbconfig.get('dbparams','db_block_size'), 127 | # 'lastscn': dbconfig.get('dbparams','lastscn'), 128 | 'lasttime': self._dbparams['restoretarget'].strftime('%Y-%m-%d %H:%M:%S'), 129 | 'dbid': Configuration.get('dbid', self._configname), 130 | 'instancenumber': 
Configuration.get('autorestoreinstancenumber', self._configname), 131 | 'thread': Configuration.get('autorestorethread', self._configname), 132 | 'pga_size': Configuration.get('pga_size', 'autorestore'), 133 | 'sga_size': Configuration.get('sga_size', 'autorestore'), 134 | 'backupfinishedtime': dbconfig.get('dbparams','backup-finished'), 135 | 'bctfile': self._dbparams['bctfile'], 136 | 'autorestoredestination': self._restoredest, 137 | 'mountdestination': self._mountdest, 138 | 'catalogstatements': "\n".join(catalogstatements) 139 | }) 140 | try: 141 | Configuration.substitutions.update({'cdb': dbconfig.get('dbparams','enable_pluggable_database')}) 142 | except: 143 | Configuration.substitutions.update({'cdb': 'FALSE'}) 144 | self._initfile = os.path.join(self._restoredest, 'init.ora') 145 | Configuration.substitutions.update({ 146 | 'initora': self._initfile, 147 | }) 148 | 149 | def _run_restore(self): 150 | debug('ACTION: startup nomount') 151 | self._exec_sqlplus(self._restoretemplate.get('startupnomount')) 152 | debug('ACTION: mount database and catalog files') 153 | self._exec_rman(self._restoretemplate.get('rmanmount')) 154 | self._exec_sqlplus(self._restoretemplate.get('clearlogs')) 155 | self._exec_rman(self._restoretemplate.get('rmancatalog')) 156 | if self._dbparams['bctfile']: 157 | debug('ACTION: disable block change tracking') 158 | self._exec_sqlplus(self._restoretemplate.get('disablebct')) 159 | debug('ACTION: create missing datafiles') 160 | output = self._exec_sqlplus(self._restoretemplate.get('switchdatafiles'), returnoutput=True) 161 | switchdfscript = "" 162 | for line in output.splitlines(): 163 | if line.startswith('RENAMEDF-'): 164 | switchdfscript+= "%s\n" % line.strip()[9:] 165 | debug('ACTION: switch and recover') 166 | self._exec_rman("%s\n%s" % (switchdfscript, self._restoretemplate.get('recoverdatafiles'))) 167 | 168 | # Orchestrator 169 | def pit_restore(self, mountpath, sid): 170 | self.set_mount_path(mountpath) 171 | 
self.set_restore_path(mkdtemp(prefix="restore", dir=self._mountdest)) 172 | # 173 | self._restoresid = sid 174 | self._set_parameters() 175 | self._createinitora() 176 | self._exec = OracleExec(oraclehome=Configuration.get('oraclehome', 'generic'), 177 | tnspath=os.path.join(scriptpath(), Configuration.get('tnsadmin', 'generic')), 178 | sid=sid) 179 | self._run_restore() 180 | 181 | def run(self): 182 | self.starttime = datetime.now() 183 | info("Starting to restore") 184 | # 185 | success = False 186 | self.clone() 187 | try: 188 | self._mount() 189 | except: 190 | self.cleanup() 191 | raise Exception('restore', 'Mount failed') 192 | self._set_parameters() 193 | self._createinitora() 194 | self._exec = OracleExec(oraclehome=Configuration.get('oraclehome', 'generic'), 195 | tnspath=os.path.join(scriptpath(), Configuration.get('tnsadmin', 'generic')), 196 | sid=self._dbparams['dbname']) 197 | # 198 | self._run_restore() 199 | 200 | def verify(self, tolerancechecking=True): 201 | debug('ACTION: opening database to verify the result') 202 | if tolerancechecking: 203 | maxtolerance = timedelta(minutes=int(Configuration.get('autorestoremaxtoleranceminutes','autorestore'))) 204 | Configuration.substitutions.update({ 205 | 'customverifydate': Configuration.get('customverifydate', self._configname), 206 | }) 207 | output = self._exec_sqlplus(self._restoretemplate.get('openandverify'), returnoutput=True) 208 | for line in output.splitlines(): 209 | if line.startswith('CUSTOM VERIFICATION TIME:'): 210 | self.verifytime = datetime.strptime(line.split(':', 1)[1].strip(), '%Y-%m-%d %H:%M:%S') 211 | if self.verifytime is None: 212 | raise Exception('restore', 'Reading verification time failed.') 213 | self.verifydiff = self._dbparams['restoretarget'].replace(tzinfo=None) - self.verifytime 214 | self.verifyseconds = int(self.verifydiff.seconds + self.verifydiff.days * 24 * 3600) 215 | debug("Expected time: %s" % self._dbparams['restoretarget']) 216 | debug("Verified time: %s" % 
self.verifytime) 217 | debug("VERIFY: Time difference %s" % self.verifydiff) 218 | if tolerancechecking and self.verifydiff > maxtolerance: 219 | raise Exception('restore', "Verification time difference %s is larger than allowed tolerance %s" % (verifydiff, maxtolerance)) 220 | 221 | def blockcheck(self): 222 | info("ACTION: Validating database for corruptions") 223 | # The following command will introduce some corruption to test database validation 224 | # check_call(['dd','if=/dev/urandom','of=/nfs/autorestore/mnt/data_D-ORCL_I-1373437895_TS-SOE_FNO-5_0sqov4pv','bs=8192','count=10','seek=200','conv=notrunc' ]) 225 | try: 226 | self._exec_rman(self._restoretemplate.get('validateblocks')) 227 | self._validatecorruption = True 228 | finally: 229 | self._exec_sqlplus(self._restoretemplate.get('showcorruptblocks')) 230 | 231 | def cleanup(self): 232 | try: 233 | debug('ACTION: In case instance is still running, aborting it') 234 | self._exec_sqlplus(self._restoretemplate.get('shutdownabort')) 235 | except: 236 | pass 237 | if self._successful_mount: 238 | try: 239 | self._unmount() 240 | except: 241 | exception("Error unmounting") 242 | if self._successful_clone: 243 | try: 244 | self._snap.dropautoclone() 245 | except: 246 | exception("Error dropping clone") 247 | self.endtime = datetime.now() 248 | -------------------------------------------------------------------------------- /restoretemplate.cfg: -------------------------------------------------------------------------------- 1 | [template] 2 | autoinitora: *.compatible='${db_compatible}' 3 | *.control_files='${autorestoredestination}/restore.cf' 4 | *.db_block_size=${db_block_size} 5 | *.db_create_file_dest='${mountdestination}' 6 | *.db_create_online_log_dest_1='${mountdestination}' 7 | *.db_files=${db_files} 8 | *.db_name='${db_name}' 9 | *.filesystemio_options='SETALL' 10 | *.pga_aggregate_target=${pga_size} 11 | *.processes=300 12 | *.remote_login_passwordfile='EXCLUSIVE' 13 | *.sga_max_size=${sga_size} 14 | 
*.sga_target=${sga_size} 15 | *.undo_management='AUTO' 16 | *.undo_tablespace='${db_undotbs}' 17 | *.job_queue_processes=0 18 | *.diagnostic_dest='${logdir}' 19 | *.instance_number=${instancenumber} 20 | *.thread=${thread} 21 | *.streams_pool_size=200M 22 | *.db_recovery_file_dest_size=500G 23 | *.db_recovery_file_dest='${mountdestination}' 24 | *._disk_sector_size_override=TRUE 25 | cdbinitora: 26 | *.enable_pluggable_database=${cdb} 27 | sqlplusheader: set pages 0 28 | set lines 200 29 | set timing on 30 | set echo on 31 | spool ${logfile} append 32 | conn / as sysdba 33 | sqlplusfooter: spool off 34 | exit 35 | startupnomount: whenever sqlerror exit failure 36 | startup nomount pfile='${initora}' 37 | shutdown: shutdown immediate 38 | openandverify: whenever sqlerror exit failure 39 | prompt alter database open read only; 40 | alter database open read only; 41 | -- Check times 42 | prompt Set time formatting 43 | alter session set nls_date_format='yyyy-mm-dd hh24:mi:ss'; 44 | alter session set nls_timestamp_format='yyyy-mm-dd hh24:mi:ss'; 45 | prompt Query database timestamps 46 | select 'CURRENT DATABASE SCN: '||to_char(current_scn) from v$$database UNION ALL 47 | select 'LAST ROW IN SCN TO TIME MAP: '||max(time_dp) from sys.smon_scn_time UNION ALL 48 | select 'MIN/MAX TIME FROM ARCHLOGS: '||min(first_time)||'-'||min(next_time) from v$$archived_log where (select current_scn from v$$database) between first_change# and next_change# UNION ALL 49 | select 'CUSTOM VERIFICATION TIME: '||(${customverifydate}) from dual; 50 | -- Check data files 51 | col file_name format a100 52 | col status format a15 53 | col online_status format a20 54 | prompt Datafiles where online_status not in ('SYSTEM','ONLINE') or status <> 'AVAILABLE' 55 | select file_name, status, online_status from dba_data_files where online_status not in ('SYSTEM','ONLINE') or status <> 'AVAILABLE'; 56 | /* 57 | declare 58 | i number; 59 | begin 60 | select count(*) into i from dba_data_files where 
online_status not in ('SYSTEM','ONLINE') or status <> 'AVAILABLE'; 61 | if i > 0 then 62 | raise_application_error(-20000, 'Some data files have invalid status.'); 63 | end if; 64 | end; 65 | / */ 66 | validateblocks: backup validate check logical database; 67 | showcorruptblocks: col corruption_type format a15 68 | set pages 100 69 | set lines 80 70 | select * from v$$database_block_corruption; 71 | 72 | rmanheader: set echo on 73 | connect target / 74 | rmanfooter: exit 75 | rmanmount: run { 76 | allocate channel c1 device type disk; 77 | restore controlfile from '${mountdestination}/after_backup_controlfile.cf'; 78 | } 79 | alter database mount; 80 | CONFIGURE SNAPSHOT CONTROLFILE NAME TO '${autorestoredestination}/snapcf.f'; 81 | clearlogs: begin 82 | for rec in (select group# from v$$log) loop 83 | -- Commenting it out since the created logfiles seem to confuse the restore 84 | -- execute immediate 'alter database clear logfile group '||rec.group#; 85 | null; 86 | end loop; 87 | end; 88 | / 89 | rmancatalog: change archivelog all uncatalog; 90 | change backup device type disk unavailable; 91 | change copy of database uncatalog; 92 | ${catalogstatements} 93 | sql "alter database flashback off"; 94 | switchdatafiles: set serverout on 95 | -- Switch datafiles 96 | -- Need to do it through RMAN, because alter database rename file will DELETE source OMF file!!!!!!!!!!! 
97 | declare 98 | cnt NUMBER; 99 | v_datafiles_needing_restore VARCHAR2(4000); 100 | begin 101 | -- Do we have some datafiles that do not have a copy and have to be created 102 | select count(*), listagg(to_char(file#), ',') within group (order by file#) INTO cnt, v_datafiles_needing_restore 103 | from (select file# from v$$datafile minus select file# from v$$datafile_copy where tag='IMAGE_COPY_BACKUP' and deleted='NO'); 104 | -- Create nonexisting datafiles 105 | IF cnt > 0 THEN 106 | dbms_output.put_line('RENAMEDF-run {'); 107 | dbms_output.put_line('RENAMEDF-set newname for database to new;'); 108 | dbms_output.put_line('RENAMEDF-restore datafile '||v_datafiles_needing_restore||';'); 109 | dbms_output.put_line('RENAMEDF-}'); 110 | END IF; 111 | -- Switch all datafiles 112 | dbms_output.put_line('RENAMEDF-switch database to copy;'); 113 | end; 114 | / 115 | recoverdatafiles: run { 116 | allocate channel c1 device type disk; 117 | set newname for database to new; 118 | set until time "to_date('${lasttime}', 'yyyy-mm-dd hh24:mi:ss')"; 119 | recover database; 120 | } 121 | disablebct: whenever sqlerror exit failure 122 | alter database rename file '${bctfile}' to '${autorestoredestination}/bct.bct'; 123 | alter database disable block change tracking; 124 | select * from V$$BLOCK_CHANGE_TRACKING; 125 | shutdownabort: shutdown abort 126 | createcatalog: set pages 0 127 | set lines 200 128 | set timing on 129 | set echo on 130 | whenever sqlerror exit failure 131 | spool ${logfile} append 132 | conn ${autorestorecatalog} 133 | create sequence restoreaction_seq; 134 | create table restoreaction ( 135 | id number not null primary key, 136 | db_unique_name varchar2(100) not null, 137 | start_time date not null, 138 | finish_time date not null, 139 | success number(1) check (success in (0,1)), 140 | logfile varchar2(500), 141 | logfilecontents clob, 142 | verificationtimediffseconds number, 143 | snapid varchar2(200), 144 | restore_target date, 145 | validated_corruption
number(1) check (validated_corruption in (0,1)), 146 | unique (db_unique_name, start_time, success) 147 | ) pctfree 0; 148 | create table restorelob (logfile varchar2(200) primary key, loglob clob) organization index; 149 | exec dbms_scheduler.create_job('LOG_RETENTION','PLSQL_BLOCK', 'DELETE FROM restoreaction WHERE start_time < SYSDATE-100;',repeat_interval=>'FREQ=daily', enabled=>true); 150 | exit 151 | sqlldrlog: LOAD DATA 152 | INFILE * 153 | APPEND 154 | INTO TABLE restorelob 155 | FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"' 156 | (logfile CHAR(200), loglob LOBFILE(logfile) TERMINATED BY EOF) 157 | BEGINDATA 158 | "${logfile}" 159 | insertlog: whenever sqlerror exit failure 160 | set echo on 161 | spool ${logfile} append 162 | conn ${autorestorecatalog} 163 | set serverout on 164 | declare 165 | v_dbname restoreaction.db_unique_name%%type:= '${log_dbname}'; 166 | v_starttime restoreaction.start_time%%type:= to_date('${log_start}', 'yyyy-mm-dd hh24:mi:ss'); 167 | v_stoptime restoreaction.finish_time%%type:= to_date('${log_stop}', 'yyyy-mm-dd hh24:mi:ss'); 168 | v_success restoreaction.success%%type:= ${log_success}; 169 | v_logfile restoreaction.logfile%%type:= '${logfile}'; 170 | v_diff restoreaction.verificationtimediffseconds%%type:= ${log_diff}; 171 | v_snap restoreaction.snapid%%type:= '${log_snapid}'; 172 | v_target restoreaction.restore_target%%type:= to_date('${lasttime}', 'yyyy-mm-dd hh24:mi:ss'); 173 | v_validated restoreaction.validated_corruption%%type:= ${log_validated}; 174 | p_log_id NUMBER; 175 | begin 176 | insert into restoreaction (id, db_unique_name, start_time, finish_time, success, logfile, verificationtimediffseconds, snapid, restore_target, validated_corruption) 177 | values (restoreaction_seq.nextval, v_dbname, v_starttime, v_stoptime, v_success, v_logfile, v_diff, v_snap, v_target, v_validated); 178 | p_log_id:= restoreaction_seq.currval; 179 | for rec in (select loglob from restorelob where logfile = v_logfile) loop 180 | 
update restoreaction set logfilecontents = rec.loglob where id = p_log_id; 181 | end loop; 182 | delete from restorelob where logfile = v_logfile; 183 | commit; 184 | dbms_output.put_line('LOG MESSAGE ID: '||to_char(p_log_id)); 185 | end; 186 | / 187 | exit 188 | -------------------------------------------------------------------------------- /rmantemplate.cfg: -------------------------------------------------------------------------------- 1 | [template] 2 | # This is always executed with RMAN scripts. Must include CONNECT TARGET clause 3 | header: SET ECHO ON 4 | CONNECT TARGET /@${configname} 5 | # This is added when database is registered in catalog, so connect to catalog here 6 | headercatalog: CONNECT CATALOG ${catalogconnect} 7 | # This is always executed with RMAN, the last line must be EXIT 8 | footer: EXIT 9 | # Backup commands, all these commands are run inside a run block 10 | backupimagecopy: backup ${sectionsize} incremental level 1 for recover of copy with tag 'image_copy_backup' database; 11 | recover copy of database with tag 'image_copy_backup'; 12 | delete noprompt backupset tag 'image_copy_backup'; 13 | delete noprompt force archivelog until time 'sysdate-${recoverywindow}'; 14 | backup spfile tag 'image_copy_backup'; 15 | backup current controlfile format '${backupdest}/after_backup_controlfile.cf' reuse tag 'image_copy_backup'; 16 | # Backupfooter is always executed with backup commands 17 | backupfooter: delete noprompt obsolete recovery window of ${recoverywindow} days; 18 | show all; 19 | # Configuration commands 20 | registerdatabase: REGISTER DATABASE; 21 | resynccatalog: RESYNC CATALOG; 22 | config: CONFIGURE RETENTION POLICY TO RECOVERY WINDOW OF ${recoverywindow} DAYS; 23 | CONFIGURE BACKUP OPTIMIZATION ON; 24 | CONFIGURE DEFAULT DEVICE TYPE TO DISK; 25 | CONFIGURE CONTROLFILE AUTOBACKUP ON; 26 | CONFIGURE DEVICE TYPE DISK PARALLELISM ${parallel} BACKUP TYPE TO BACKUPSET; 27 | CONFIGURE CHANNEL DEVICE TYPE DISK FORMAT 
'${backupdest}/%%U'; 28 | CONFIGURE CONTROLFILE AUTOBACKUP FORMAT FOR DEVICE TYPE DISK TO '${backupdest}/%%F'; 29 | CONFIGURE SNAPSHOT CONTROLFILE NAME TO '${backupdest}/snapcf_${configname}.f'; 30 | configdelalnodg: CONFIGURE ARCHIVELOG DELETION POLICY TO NONE; 31 | configdelaldg: CONFIGURE ARCHIVELOG DELETION POLICY TO APPLIED ON ALL STANDBY; 32 | # Report command 33 | report: REPORT NEED BACKUP RECOVERY WINDOW OF ${recoverywindow} DAYS; 34 | REPORT UNRECOVERABLE; 35 | RESTORE DATABASE PREVIEW; 36 | # This is executed to validate backup 37 | validatebackup: RESTORE DATABASE VALIDATE HEADER; 38 | RESTORE CONTROLFILE VALIDATE; 39 | RESTORE ARCHIVELOG ALL VALIDATE; 40 | RESTORE DATABASE VALIDATE; 41 | # Restore script 42 | headerrestore: export ORACLE_SID=${configname}1 # The last number is RAC instance number 43 | export TNS_ADMIN=${tnspath} 44 | rman target /@${configname} catalog ${catalogconnect} 45 | # RMAN commands follow 46 | fullrestore: set dbid ${dbid}; 47 | startup nomount; 48 | # Spfile 49 | restore spfile; 50 | startup force nomount; 51 | # Controlfile 52 | restore controlfile; 53 | alter database mount; 54 | sql "alter database disable block change tracking"; 55 | # Restore datafiles to diskgroup +DATA 56 | backup as copy format '+DATA' tag 'datafilerestore' copy of database from tag 'image_copy_backup'; 57 | switch database to copy; 58 | recover database; 59 | # Recover will end with an error (cannot find the latest online log), if online logs are not available, then just ignore it, this data will be lost 60 | alter database open resetlogs; 61 | shutdown immediate; 62 | restorefooter: 63 | == The following are OS command to put register database in GI 64 | == TODO 65 | allocatearchlogchannel: allocate channel d1 device type disk format '${archdir}/%%U'; 66 | 67 | # Scheduler 68 | dropschedule: begin 69 | begin 70 | DBMS_SCHEDULER.DROP_JOB( 71 | job_name=>'${scheduleuserprefix}BACKUPEXEC.ARCHLOGBACKUP_JOB,${scheduleuserprefix}BACKUPEXEC.IMAGECOPY_JOB', 
72 | force=>true, 73 | commit_semantics=>'ABSORB_ERRORS'); 74 | exception 75 | when others then null; 76 | end; 77 | begin 78 | DBMS_SCHEDULER.DROP_SCHEDULE( 79 | schedule_name=>'${scheduleuserprefix}BACKUPEXEC.IMAGECOPY_SCHEDULE,${scheduleuserprefix}BACKUPEXEC.ARCHLOGBACKUP_SCHEDULE', 80 | force=>true); 81 | exception 82 | when others then null; 83 | end; 84 | begin 85 | DBMS_SCHEDULER.DROP_PROGRAM( 86 | program_name=>'${scheduleuserprefix}BACKUPEXEC.IMAGECOPY_PRG,${scheduleuserprefix}BACKUPEXEC.ARCHLOGBACKUP_PRG', 87 | force=>true); 88 | exception 89 | when others then null; 90 | end; 91 | begin 92 | DBMS_SCHEDULER.DROP_CREDENTIAL( 93 | credential_name=>'${scheduleuserprefix}BACKUPEXEC.OSCRED', 94 | force => true); 95 | exception 96 | when others then null; 97 | end; 98 | begin 99 | DBMS_SCHEDULER.DROP_JOB_CLASS ( 100 | job_class_name=>'BACKUPEXEC_JOB', 101 | force=>true); 102 | exception 103 | when others then null; 104 | end; 105 | end; 106 | / 107 | 108 | createschedule: CREATE OR REPLACE PROCEDURE ${scheduleuserprefix}backupexec.check_archlog_copy IS 109 | i NUMBER; 110 | BEGIN 111 | -- Procedure automatically generated from backup.py 112 | -- Check if there are any archivelogs missing from the optional destination 113 | SELECT COUNT (*) INTO i 114 | FROM (SELECT thread#, sequence# 115 | FROM v$$archived_log l 116 | JOIN v$$archive_dest d ON l.dest_id = d.dest_id 117 | JOIN v$$archive_dest_status s ON d.dest_id = s.dest_id 118 | WHERE l.archived = 'YES' AND l.deleted = 'NO' AND l.status = 'A' AND UPPER (d.destination) = 'USE_DB_RECOVERY_FILE_DEST' AND d.status = 'VALID' AND s.TYPE = 'LOCAL' 119 | MINUS 120 | SELECT thread#, sequence# 121 | FROM v$$archived_log l 122 | WHERE l.archived = 'YES' AND l.deleted = 'NO' AND l.status = 'A' AND l.name LIKE '${archdir}%%'); 123 | IF i > 0 THEN 124 | DBMS_SCHEDULER.create_job (job_name => '${scheduleuserprefix}BACKUPEXEC.'||DBMS_SCHEDULER.generate_job_name('MISSINGARCH'), 125 | program_name => 
'${scheduleuserprefix}BACKUPEXEC.ARCHLOGBACKUP_PRG', 126 | job_class => 'BACKUPEXEC_JOB', 127 | enabled => TRUE, 128 | credential_name => '${scheduleuserprefix}BACKUPEXEC.OSCRED' 129 | ); 130 | END IF; 131 | END; 132 | / 133 | begin 134 | DBMS_SCHEDULER.CREATE_JOB_CLASS( 135 | job_class_name=>'BACKUPEXEC_JOB', 136 | logging_level=>DBMS_SCHEDULER.LOGGING_FULL, 137 | log_history=>60, 138 | comments=>'This class is used to execute RMAN backup jobs by BACKUPEXEC user.'); 139 | execute immediate 'grant execute on BACKUPEXEC_JOB to ${scheduleuserprefix}BACKUPEXEC'; 140 | -- 141 | DBMS_SCHEDULER.CREATE_CREDENTIAL( 142 | credential_name=>'${scheduleuserprefix}BACKUPEXEC.OSCRED', 143 | username=>'${osuser}', 144 | password=>'${ospassword}'); 145 | -- 146 | DBMS_SCHEDULER.CREATE_PROGRAM( 147 | program_name=>'${scheduleuserprefix}BACKUPEXEC.IMAGECOPY_PRG', 148 | program_type=>'EXECUTABLE', 149 | program_action=>'${scriptpath}/backup.py', 150 | number_of_arguments=>2, 151 | enabled=>false, 152 | comments=>'This program executes the image copy refresh.'); 153 | DBMS_SCHEDULER.define_program_argument( 154 | program_name=>'${scheduleuserprefix}BACKUPEXEC.IMAGECOPY_PRG', 155 | argument_position=>1, 156 | argument_name=>'configname', 157 | argument_type=>'VARCHAR2', 158 | default_value=>'${configname}'); 159 | DBMS_SCHEDULER.define_program_argument( 160 | program_name=>'${scheduleuserprefix}BACKUPEXEC.IMAGECOPY_PRG', 161 | argument_position=>2, 162 | argument_name=>'action', 163 | argument_type=>'VARCHAR2', 164 | default_value=>'imagecopywithsnap'); 165 | DBMS_SCHEDULER.ENABLE('${scheduleuserprefix}BACKUPEXEC.IMAGECOPY_PRG'); 166 | DBMS_SCHEDULER.CREATE_PROGRAM( 167 | program_name=>'${scheduleuserprefix}BACKUPEXEC.ARCHLOGBACKUP_PRG', 168 | program_type=>'EXECUTABLE', 169 | program_action=>'${scriptpath}/backup.py', 170 | number_of_arguments=>2, 171 | enabled=>false, 172 | comments=>'This program executes archivelog backup.'); 173 | DBMS_SCHEDULER.define_program_argument( 174 | 
program_name=>'${scheduleuserprefix}BACKUPEXEC.ARCHLOGBACKUP_PRG', 175 | argument_position=>1, 176 | argument_name=>'configname', 177 | argument_type=>'VARCHAR2', 178 | default_value=>'${configname}'); 179 | DBMS_SCHEDULER.define_program_argument( 180 | program_name=>'${scheduleuserprefix}BACKUPEXEC.ARCHLOGBACKUP_PRG', 181 | argument_position=>2, 182 | argument_name=>'action', 183 | argument_type=>'VARCHAR2', 184 | default_value=>'missingarchlog'); 185 | DBMS_SCHEDULER.ENABLE('${scheduleuserprefix}BACKUPEXEC.ARCHLOGBACKUP_PRG'); 186 | -- 187 | DBMS_SCHEDULER.CREATE_SCHEDULE( 188 | schedule_name=>'${scheduleuserprefix}BACKUPEXEC.IMAGECOPY_SCHEDULE', 189 | repeat_interval=>'${schedulebackup}', 190 | comments=>'Schedule for refreshing image copy.'); 191 | DBMS_SCHEDULER.CREATE_SCHEDULE( 192 | schedule_name=>'${scheduleuserprefix}BACKUPEXEC.ARCHLOGBACKUP_SCHEDULE', 193 | repeat_interval=>'${schedulearchlog}', 194 | comments=>'Schedule for archivelog backups.'); 195 | -- 196 | DBMS_SCHEDULER.CREATE_JOB( 197 | job_name=>'${scheduleuserprefix}BACKUPEXEC.IMAGECOPY_JOB', 198 | program_name=>'${scheduleuserprefix}BACKUPEXEC.IMAGECOPY_PRG', 199 | schedule_name=>'${scheduleuserprefix}BACKUPEXEC.IMAGECOPY_SCHEDULE', 200 | job_class=>'BACKUPEXEC_JOB', 201 | enabled=>${backupjobenabled}, 202 | comments=>'This job executes image copy refresh.', 203 | credential_name=>'${scheduleuserprefix}BACKUPEXEC.OSCRED'); 204 | DBMS_SCHEDULER.CREATE_JOB( 205 | job_name=>'${scheduleuserprefix}BACKUPEXEC.ARCHLOGBACKUP_JOB', 206 | job_type=>'STORED_PROCEDURE', 207 | job_action=>'${scheduleuserprefix}backupexec.check_archlog_copy', 208 | schedule_name=>'${scheduleuserprefix}BACKUPEXEC.ARCHLOGBACKUP_SCHEDULE', 209 | job_class=>'BACKUPEXEC_JOB', 210 | enabled=>true, 211 | comments=>'This job checks if there are any archivelogs that are missing in the backup set.'); 212 | end; 213 | / 214 | 215 | cdbdetect: declare 216 | p_cnt NUMBER; 217 | p_value varchar2(20); 218 | begin 219 | select 1 into p_cnt 
from dual where sys_context('USERENV','CDB_NAME') is not null and SYS_CONTEXT('USERENV','CON_NAME') = 'CDB$$ROOT'; 220 | select nvl(max(value), 'C##') into p_value from v$$parameter where name = 'common_user_prefix'; 221 | dbms_output.put_line('CDB-DETECT: '||p_value); 222 | exception 223 | when others then 224 | dbms_output.put_line('CDB-DETECT: NO'); 225 | end; 226 | / 227 | 228 | createuser: declare 229 | i number; 230 | begin 231 | select count(*) into i from dba_users where username = '${scheduleuserprefix}BACKUPEXEC'; 232 | if i = 0 then 233 | execute immediate 'create user ${scheduleuserprefix}backupexec identified by backupexec account lock'; 234 | execute immediate 'grant create job, create external job, create procedure, select any dictionary to ${scheduleuserprefix}backupexec'; 235 | end if; 236 | end; 237 | / 238 | 239 | sqlplusheader: whenever sqlerror exit failure 240 | conn ${sqlplusconnection} 241 | set feedback on 242 | set timing on 243 | set echo on 244 | set lines 300 245 | set serverout on 246 | col host_name format a24 247 | col version format a10 248 | spool ${logfile} append 249 | select instance_name, host_name, version, status, database_status from v$$instance; 250 | set pages 0 251 | select user from dual; 252 | 253 | sqlplusfooter: spool off 254 | exit; 255 | 256 | # Archivelog commands 257 | archivecurrentlogs: select 'CURRENT DATABASE TIME: '||to_char(sysdate, 'yyyy-mm-dd hh24:mi:ss') from dual union all 258 | select 'CURRENT DATABASE SCN: '||to_char(current_scn) from v$$database union all 259 | select 'BCT FILE: '||filename from v$$block_change_tracking; 260 | exec dbms_lock.sleep(1); 261 | alter system archive log current; 262 | -- Sleep below is to allow Data Guard standby time to archive the log also 263 | exec dbms_lock.sleep(10); 264 | archivelogmissing: select q'[BACKUP force as copy archivelog sequence ]'||sequence#||' thread '||thread#||';' from ( 265 | select thread#,sequence# from v$$archived_log l join v$$archive_dest d on 
l.dest_id=d.dest_id join v$$archive_dest_status s on d.dest_id=s.dest_id where l.archived='YES' and l.deleted='NO' and l.status = 'A' AND upper(d.destination)='USE_DB_RECOVERY_FILE_DEST' and d.status='VALID' and s.type='LOCAL' 266 | minus 267 | select thread#,sequence# from v$$archived_log l where l.archived='YES' and l.deleted='NO' and l.status = 'A' AND l.name like '${archdir}%%' 268 | order by 1,2 269 | ); 270 | 271 | # Delete datafilecopy 272 | deletedatafilecopy: select 'DELETECOPY: delete noprompt datafilecopy '''||name||''';' cmd 273 | from v$$datafile_copy where deleted='NO' and tag='IMAGE_COPY_BACKUP' and (file#,CREATION_CHANGE#) not in (select file#,CREATION_CHANGE# from v$$datafile); 274 | 275 | # SQL*Plus part of the configuration commands 276 | configfromsqlplus: -- Block change tracking 277 | declare 278 | i number; 279 | j number; 280 | begin 281 | select count(*) into i from V$$BLOCK_CHANGE_TRACKING where status='DISABLED'; 282 | select count(*) into j from v$$version where banner like '%%Enterprise Edition%%'; 283 | if (i = 1) and (j > 0) then 284 | execute immediate 'alter database enable block change tracking'; 285 | else 286 | dbms_output.put_line('BCT already enabled or database is not EE'); 287 | end if; 288 | end; 289 | / 290 | -- Archivelog destination 291 | declare 292 | cnt number; 293 | s varchar2(60); 294 | l varchar2(100):= '${archdir}/'; 295 | function min_param return varchar2 is 296 | v varchar2(50); 297 | begin 298 | select min(name) into v from v$$parameter where name like 'log\_archive\_dest\__' escape '\' and value is null; 299 | return v; 300 | end; 301 | begin 302 | select count(*) into cnt from v$$parameter where name like 'log\_archive\_dest\__' escape '\' and upper(value) like 'LOCATION%%=%%USE_DB_RECOVERY_FILE_DEST%%'; 303 | if cnt=0 then 304 | s:= min_param; 305 | dbms_output.put_line('Setting '||s||' to USE_DB_RECOVERY_FILE_DEST'); 306 | execute immediate 'alter system set '||s||q'[='LOCATION=USE_DB_RECOVERY_FILE_DEST 
MANDATORY' scope=both]'; 307 | execute immediate 'alter system set '||replace(s, 'log_archive_dest_', 'log_archive_dest_state_')||'=enable scope=both'; 308 | end if; 309 | select count(*) into cnt from v$$parameter where name like 'log\_archive\_dest\__' escape '\' and value like '%%'||l||'%%'; 310 | if cnt=0 then 311 | s:= min_param; 312 | dbms_output.put_line('Setting '||s||' to '||l); 313 | l:= q'['LOCATION=]'||l||q'[ VALID_FOR=(ONLINE_LOGFILE,PRIMARY_ROLE)']'; 314 | execute immediate 'alter system set '||s||'='||l||' scope=both'; 315 | execute immediate 'alter system set '||replace(s, 'log_archive_dest_', 'log_archive_dest_state_')||'=enable scope=both'; 316 | end if; 317 | end; 318 | / 319 | 320 | isdbregisteredincatalog: conn ${catalogconnect} 321 | set lines 100 322 | set pages 0 323 | spool ${logfile} append 324 | set echo on 325 | select user, sys_context('USERENV','DB_UNIQUE_NAME'), sys_context('USERENV','SERVER_HOST'), sys_context('USERENV','INSTANCE_NAME') from dual; 326 | select 'DATABASE IS REGISTERED IN RC' from rc_database where dbid = ${dbid}; 327 | spool off 328 | exit 329 | 330 | autorestoreparameters: whenever sqlerror exit failure 331 | set pages 0 332 | set lines 200 333 | set feedback off 334 | select 'dbconfig-'||name||': '||value from v$$parameter where name in ('db_name','undo_tablespace','compatible','db_block_size','db_files','enable_pluggable_database') 335 | union all 336 | select 'dbconfig-backup-finished: '||to_char(sysdate, 'yyyy-mm-dd hh24:mi:ss') from dual; 337 | exit 338 | -------------------------------------------------------------------------------- /softnas.py: -------------------------------------------------------------------------------- 1 | import requests, json, operator, urllib, os 2 | from backupcommon import SnapHandler, Configuration, scriptpath, info, error, debug 3 | from ConfigParser import SafeConfigParser 4 | from datetime import datetime 5 | 6 | class SoftNASHttp(object): 7 | _timeout = 300 # HTTP call timeout 
in seconds 8 | _cookies = {} 9 | 10 | def __init__(self, baseurl): 11 | self._baseurl = baseurl 12 | try: 13 | requestwarning = __import__('requests.packages.urllib3.exceptions', globals(), locals(), ['InsecureRequestWarning']) 14 | requestwarningclass = getattr(requestwarning, 'InsecureRequestWarning') 15 | requests.packages.urllib3.disable_warnings(requestwarningclass) 16 | except AttributeError: 17 | pass 18 | 19 | def post(self, url, payload): 20 | debug("Sending POST to %s" % url) 21 | r = requests.post("%s/%s" % (self._baseurl, url), cookies=self._cookies, verify=False, data=payload, timeout=self._timeout, allow_redirects=False) 22 | self._cookies = r.cookies 23 | debug("Return code: %d" % r.status_code) 24 | try: 25 | j = json.loads(r.text) 26 | except ValueError: 27 | j = {} 28 | return j, r.status_code 29 | 30 | def get(self, url): 31 | debug("Sending GET to %s" % url) 32 | r = requests.get("%s/%s" % (self._baseurl, url), cookies=self._cookies, verify=False, timeout=self._timeout, allow_redirects=False) 33 | debug("Return code: %d" % r.status_code) 34 | return r.status_code 35 | 36 | class SoftNAS(SnapHandler): 37 | _exceptionbase = "softnas" 38 | _serveraddress = None 39 | _pool = None 40 | _filesystem = None 41 | _username = None 42 | _password = None 43 | _http = None 44 | 45 | def __init__(self, configname): 46 | credfilename = os.path.join(scriptpath(), 'softnascredentials.cfg') 47 | if not os.path.isfile(credfilename): 48 | raise Exception(self._exceptionbase, "Configuration file %s not found" % credfilename) 49 | # Authentication information 50 | credconfig = SafeConfigParser() 51 | credconfig.read(credfilename) 52 | self._username = credconfig.get('credentials','user') 53 | self._password = credconfig.get('credentials','password') 54 | # 55 | self._serveraddress = Configuration.get('serveraddress', 'softnas') 56 | url = "https://%s/softnas" % self._serveraddress 57 | self._pool = Configuration.get('pool', 'softnas') 58 | self._filesystem = 
configname 59 | # 60 | self._http = SoftNASHttp(url) 61 | super(SoftNAS, self).__init__(configname) 62 | 63 | def _login(self): 64 | j,r = self._http.post('login.php', { 'username': self._username, 'password': self._password }) 65 | 66 | def _logout(self): 67 | r = self._http.get('logout.php') 68 | 69 | def _request(self, opcode, parameters={}, sendJSON=False): 70 | payload = {'opcode': opcode} 71 | payload.update(parameters) 72 | if sendJSON: 73 | payload = json.dumps(payload) 74 | self._login() 75 | try: 76 | j,r = self._http.post('snserver/snserv.php', payload) 77 | if not j['success']: 78 | raise Exception('softnas',"Request failed. Return code: %d, message: %s" % (r, j)) 79 | finally: 80 | self._logout() 81 | return j 82 | 83 | def _listvolumes(self): 84 | j = self._request('volumes', {'start': 0, 'limit': 10000, 'pool': self._pool}) 85 | volumes = [] 86 | for v in j['records']: 87 | if v['pool'] == self._pool: 88 | volumes.append(v) 89 | return volumes 90 | 91 | ### 92 | 93 | def snap(self): 94 | j = self._request('snapcommand', {'command': 'create', 'pool_name': self._pool, 'volume_name': self._filesystem} ) 95 | i = j['msg'].find('@') 96 | i2 = j['msg'].find("'", i) 97 | return j['msg'][i+1:i2] 98 | 99 | def dropsnap(self, snapid): 100 | j = self._request('snapcommand', {'command': 'delete', 'snapshots': "[%s]" % json.dumps({'snapshot_name': snapid, 'pool_name': self._pool, 'volume_name': self._filesystem}) } ) 101 | 102 | def listsnapshots(self, sortbycreation=False, sortreverse=False): 103 | j = self._request('snapshotlist', {'pool_name': "%s/%s" % (self._pool, self._filesystem)}) 104 | self._listvolumes() 105 | snapshots = [] 106 | for s in j['records']: 107 | snapshots.append( {'id': s['snapshot_name'], 108 | 'creation': datetime.utcfromtimestamp(float(s['creation'])), 109 | 'numclones': 0, 110 | 'space_total': s['refer'], 111 | 'space_unique': 0 112 | } ) 113 | if not sortbycreation: 114 | return snapshots 115 | else: 116 | return sorted(snapshots, 
key=operator.itemgetter('creation'), reverse=sortreverse) 117 | 118 | def filesystem_info(self, filesystemname=None): 119 | volumes = [] 120 | for v in self._listvolumes(): 121 | if filesystemname is None or v['vol_name'] == filesystemname: 122 | volumes.append({ 'origin': None, 'clonename': v['vol_name'], 'mountpoint': "%s:%s" % (self._serveraddress, v['vol_path']) }) 123 | return volumes 124 | 125 | def mountstring(self, filesystemname): 126 | mountpoint = "" 127 | for v in self.filesystem_info(filesystemname): 128 | mountpoint = v['mountpoint'] 129 | break 130 | return mountpoint 131 | 132 | def getsnapinfo(self, snapstruct): 133 | return snapstruct 134 | 135 | def dropclone(self, cloneid): 136 | self._request('deletevolume', {'vol_name': cloneid, 'pool': self._pool}, True) 137 | 138 | def createvolume(self): 139 | self._request('createvolume', {'vol_name': self._filesystem, 'pool': self._pool, 'vol_type': 'filesystem', 'provisioning': 'thin', 'exportNFS': 'on', 'compression': 'on'}) 140 | 141 | def listclones(self): 142 | raise Exception(self._exceptionbase, 'SoftNAS does not support naming clones, so cloning features are disabled.') 143 | 144 | def clone(self, snapid, clonename): 145 | raise Exception(self._exceptionbase, 'SoftNAS does not support naming clones, so cloning features are disabled.') 146 | -------------------------------------------------------------------------------- /softnascredentials.cfg.sample: -------------------------------------------------------------------------------- 1 | [credentials] 2 | user=softnas 3 | password=secretpassword 4 | -------------------------------------------------------------------------------- /tns.sample/sqlnet.ora: -------------------------------------------------------------------------------- 1 | WALLET_LOCATION = 2 | (SOURCE = 3 | (METHOD = FILE) 4 | (METHOD_DATA = 5 | (DIRECTORY = /home/oracle/oracle-imagecopy-backup/wallet.sample) 6 | ) 7 | ) 8 | 9 | SQLNET.WALLET_OVERRIDE = TRUE 10 | 
-------------------------------------------------------------------------------- /tns.sample/tnsnames.ora: -------------------------------------------------------------------------------- 1 | # RMAN catalog 2 | RMAN= 3 | (DESCRIPTION= 4 | (ADDRESS= 5 | (PROTOCOL=TCP) 6 | (HOST=dbhost.example.com) 7 | (PORT=1521) 8 | ) 9 | (CONNECT_DATA= 10 | (SERVICE_NAME=rman.sample.example.com) 11 | ) 12 | ) 13 | 14 | AUTORESTORE = 15 | (DESCRIPTION= 16 | (ADDRESS= 17 | (PROTOCOL=TCP) 18 | (HOST=dbhost.example.com) 19 | (PORT=1521) 20 | ) 21 | (CONNECT_DATA= 22 | (SERVICE_NAME=autorestorecatalog.sample.example.com) 23 | ) 24 | ) 25 | 26 | # Databases 27 | # Use a dedicated service for connecting! Then you get automatic load balancing between instances where the service is running 28 | # srvctl add service -database cdb1 -service backup_cdb1 -preferred cdb11,cdb12 -available cdb13 29 | 30 | CDB1 = 31 | (DESCRIPTION= 32 | (ADDRESS= 33 | (PROTOCOL=TCP) 34 | (HOST=dbhost.example.com) 35 | (PORT=1521) 36 | ) 37 | (CONNECT_DATA= 38 | (SERVICE_NAME=backup_cdb1.sample.example.com) 39 | ) 40 | ) 41 | 42 | OLDDB = 43 | (DESCRIPTION= 44 | (ADDRESS= 45 | (PROTOCOL=TCP) 46 | (HOST=dbhost.example.com) 47 | (PORT=1521) 48 | ) 49 | (CONNECT_DATA= 50 | (SERVICE_NAME=backup_olddb.sample.example.com) 51 | ) 52 | ) 53 | -------------------------------------------------------------------------------- /zfscredentials.cfg.sample: -------------------------------------------------------------------------------- 1 | [zfscredentials] 2 | zfsuser: zfs_login_username_here 3 | zfspassword: zfs_login_password_here 4 | -------------------------------------------------------------------------------- /zfssa.py: -------------------------------------------------------------------------------- 1 | import requests, json, operator, urllib, os 2 | from backupcommon import SnapHandler, Configuration, scriptpath, size2str, info, error, debug 3 | from ConfigParser import SafeConfigParser 4 | from datetime import datetime, 
timedelta 5 | from urlparse import urlparse 6 | 7 | class ZFSHttp(object): 8 | _jsonheader = {'Content-Type': 'application/json'} 9 | _timeout = 300 # HTTP call timeout in seconds 10 | server_address = None 11 | 12 | def __init__(self, baseurl, auth): 13 | self._baseurl = baseurl 14 | self._auth = auth 15 | up = urlparse(self._baseurl) 16 | if up.netloc.find(":") > -1: 17 | self.server_address = up.netloc.split(":", 1)[0] 18 | else: 19 | self.server_address = up.netloc 20 | try: 21 | requestwarning = __import__('requests.packages.urllib3.exceptions', globals(), locals(), ['InsecureRequestWarning']) 22 | requestwarningclass = getattr(requestwarning, 'InsecureRequestWarning') 23 | requests.packages.urllib3.disable_warnings(requestwarningclass) 24 | except AttributeError: 25 | pass 26 | 27 | def _array2url(self, urlarray): 28 | # Converts list of url components as a quoted URL 29 | return '/'.join(map(urllib.quote_plus, urlarray)) 30 | 31 | def get(self, urlarray, return_json=True): 32 | url = self._array2url(urlarray) 33 | debug("Sending GET to %s" % url) 34 | r = requests.get("%s/%s" % (self._baseurl, url), auth=self._auth, headers=self._jsonheader, verify=False, timeout=self._timeout) 35 | debug("Return code: %d" % r.status_code) 36 | if r.status_code != 200: 37 | error("GET to %s returned %d" % (url, r.status_code)) 38 | raise Exception('zfssareturncode',"GET request return code is not 200 (%s)" % r.status_code) 39 | if return_json: 40 | j = json.loads(r.text) 41 | return j 42 | else: 43 | return None 44 | 45 | def post(self, urlarray, payload): 46 | url = self._array2url(urlarray) 47 | debug("Sending POST to %s" % url) 48 | r = requests.post("%s/%s" % (self._baseurl, url), auth=self._auth, headers=self._jsonheader, verify=False, data=json.dumps(payload), timeout=self._timeout) 49 | debug("Return code: %d" % r.status_code) 50 | if r.status_code == 201: 51 | j = json.loads(r.text) 52 | else: 53 | error("POST to %s returned %d" % (url, r.status_code)) 54 | j = {} 55 
| return r.status_code, j 56 | 57 | def put(self, urlarray, payload): 58 | url = self._array2url(urlarray) 59 | debug("Sending PUT to %s" % url) 60 | r = requests.put("%s/%s" % (self._baseurl, url), auth=self._auth, headers=self._jsonheader, verify=False, data=json.dumps(payload), timeout=self._timeout) 61 | debug("Return code: %d" % r.status_code) 62 | if r.status_code == 201: 63 | j = json.loads(r.text) 64 | else: 65 | error("PUT to %s returned %d" % (url, r.status_code)) 66 | j = {} 67 | return r.status_code, j 68 | 69 | def delete(self, urlarray): 70 | url = self._array2url(urlarray) 71 | debug("Sending DELETE to %s" % url) 72 | r = requests.delete("%s/%s" % (self._baseurl, url), auth=self._auth, headers=self._jsonheader, verify=False, timeout=self._timeout) 73 | debug("Return code: %d" % r.status_code) 74 | return r.status_code 75 | 76 | class ZFSSA(SnapHandler): 77 | _exceptionbase = "zfssnap" 78 | 79 | def __init__(self, configname): 80 | zfscredfilename = os.path.join(scriptpath(), 'zfscredentials.cfg') 81 | if not os.path.isfile(zfscredfilename): 82 | raise Exception(self._exceptionbase, "Configuration file %s not found" % zfscredfilename) 83 | # Authentication information 84 | zfscredconfig = SafeConfigParser() 85 | zfscredconfig.read(zfscredfilename) 86 | zfsauth = (zfscredconfig.get('zfscredentials','zfsuser'), zfscredconfig.get('zfscredentials','zfspassword')) 87 | # 88 | zfssaurl = "%s/api/storage/v1" % Configuration.get('url', 'zfssa') 89 | self._pool = Configuration.get('pool', 'zfssa') 90 | self._project = Configuration.get('project', 'zfssa') 91 | self._filesystem = configname 92 | # 93 | self._http = ZFSHttp(zfssaurl, zfsauth) 94 | super(ZFSSA, self).__init__(configname) 95 | 96 | def str2date(self, zfsdate): 97 | # ZFS returned string to datetime object 98 | # 20150803T13:31:42 99 | # Result is in UTC! 
100 | d = datetime.strptime(zfsdate, '%Y%m%dT%H:%M:%S') 101 | return d 102 | 103 | # Public interfaces 104 | 105 | def filesystem_info(self, filesystemname=None): 106 | urlarray = ['pools', self._pool, 'projects', self._project, 'filesystems'] 107 | if filesystemname is not None: 108 | urlarray.append(filesystemname) 109 | j = self._http.get(urlarray) 110 | if filesystemname is None: 111 | return j["filesystems"] 112 | else: 113 | return j["filesystem"] 114 | 115 | def listclones(self): 116 | output = [] 117 | for s in self.filesystem_info(): 118 | if "origin" in s: 119 | origin = s["origin"] 120 | if origin["project"] == self._project and origin["share"] == self._filesystem: 121 | yield { 'clonename': s["name"], 'origin': origin["snapshot"], 'mountpoint': s["mountpoint"] } 122 | 123 | def mountstring(self, filesystemname): 124 | info = self.filesystem_info(filesystemname) 125 | return "%s:%s" % (self._http.server_address if self._http.server_address is not None else 'zfs_server_address', info['mountpoint']) 126 | 127 | 128 | def snap(self): 129 | snapname = "%s-%s" % (self._filesystem, datetime.now().strftime('%Y%m%dT%H%M%S')) 130 | payload = { 'name': snapname } 131 | r,j = self._http.post(['pools', self._pool, 'projects', self._project, 'filesystems', self._filesystem, 'snapshots'], payload) 132 | if r != 201: 133 | raise Exception(self._exceptionbase,"Creating snapshot failed with return code %d" % r) 134 | return snapname 135 | 136 | def dropsnap(self, snapid): 137 | ret_code = self._http.delete(['pools', self._pool, 'projects', self._project, 'filesystems', self._filesystem, 'snapshots', snapid]) 138 | if ret_code != 204: 139 | raise Exception(self._exceptionbase, "Failed to drop snapshot %s" % snapid) 140 | 141 | def getsnapinfo(self, snapstruct): 142 | s = snapstruct 143 | return {'id': s["name"], 'creation': self.str2date(s["creation"]), 'numclones': int(s["numclones"]), 144 | 'space_total': int(s["space_data"]), 'space_unique': int(s["space_unique"])} 145 
| 146 | def listsnapshots(self, sortbycreation=False, sortreverse=False): 147 | j = self._http.get(['pools', self._pool, 'projects', self._project, 'filesystems', self._filesystem, 'snapshots']) 148 | if not sortbycreation: 149 | return j["snapshots"] 150 | else: 151 | return sorted(j["snapshots"], key=operator.itemgetter('creation'), reverse=sortreverse) 152 | 153 | def clone(self, snapid, clonename): 154 | payload = { 'project': self._project, 'share': clonename } 155 | r,j = self._http.put(['pools', self._pool, 'projects', self._project, 'filesystems', self._filesystem, 'snapshots', snapid, 'clone'], payload) 156 | if r != 201: 157 | raise Exception(self._exceptionbase, "Creating clone failed with code %d" % r) 158 | # Remove compression from the clone 159 | # Do nothing if it errors 160 | # r,j = self._http.put(['pools', self._pool, 'projects', self._project, 'filesystems', clonename], { 'compression': 'off' } ) 161 | 162 | def dropclone(self, cloneid): 163 | j = self.filesystem_info(cloneid) 164 | if "origin" not in j: 165 | raise Exception(self._exceptionbase, 'Specified filesystem is not a clone.') 166 | origin = j["origin"] 167 | if origin["project"] != self._project or origin["share"] != self._filesystem: 168 | raise Excption(self._exceptionbase, "Specified filesystem is not cloned from share %s" % self._filesystem) 169 | r = self._http.delete(['pools', self._pool, 'projects', self._project, 'filesystems', cloneid]) 170 | if r != 204: 171 | error("Dropping clone failed. Return code: %d" % r) 172 | raise Exception(self._exceptionbase, "Dropping clone failed. 
Return code: %d" % r) 173 | 174 | def createvolume(self): 175 | payload = { 'name': self._filesystem } 176 | ret_code = self._http.post(['pools', self._pool, 'projects', self._project, 'filesystems'], payload) 177 | if ret_code != 201: 178 | raise Exception(self._exceptionbase, "Failed to create file system") 179 | -------------------------------------------------------------------------------- /zsnapper.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python2 2 | 3 | import sys 4 | from backupcommon import Configuration, create_snapshot_class 5 | from datetime import datetime, timedelta 6 | 7 | # Check command line arguments 8 | uioptions = ['list','clean','create','checkage','clone','dropclone','listclones','autoclone'] 9 | if (len(sys.argv) < 3) or (len(sys.argv) > 4) or (not sys.argv[2] in uioptions): 10 | print "Usage: zsnapper.py <%s> [name]" % '|'.join(uioptions) 11 | sys.exit(2) 12 | 13 | configsection = sys.argv[1] 14 | Configuration.init(configsection) 15 | 16 | zfs = create_snapshot_class(configsection) 17 | 18 | # Public use functions 19 | def checkage(): 20 | # Returns the latest snapshot age for nagios check 21 | exitcode = 0 22 | warning = timedelta(hours = int(Configuration.get('warningsnapage', 'generic'))) 23 | critical = timedelta(hours = int(Configuration.get('criticalsnapage', 'generic'))) 24 | try: 25 | snaps = zfs.listsnapshots() 26 | minage = None 27 | for s in snaps: 28 | d = zfs.str2date(s["creation"]) 29 | age = datetime.utcnow() - d 30 | if (minage is None) or (age < minage): 31 | minage = age 32 | s = "OK" 33 | if (minage is None) or (minage >= critical): 34 | exitcode = 2 35 | s = "CRITICAL" 36 | elif minage >= warning: 37 | exitcode = 1 38 | s = "WARNING" 39 | print "%s: The latest snapshot age %s" % (s, minage) 40 | except Exception as detail: 41 | print "Exception occured: %s" % detail 42 | exitcode = 3 43 | sys.exit(exitcode) 44 | 45 | def clone_snapshot(source=None, clone=None): 
    # (tail of clone_snapshot: resolve source/clone names, create the clone,
    # then print the mount instructions for the operator)
    if source is None:
        sourcename = sys.argv[3]
    else:
        sourcename = source
    if clone is None:
        # Default clone name: <source snapshot>_clone_<timestamp>
        clonename = "%s_clone_%s" % (sourcename, datetime.now().strftime('%Y%m%dT%H%M%S'))
    else:
        clonename = clone
    zfs.clone(sourcename, clonename)
    fs = zfs.filesystem_info(clonename)
    print "Clone created."
    print "Clone name: %s" % clonename
    print "Mount point: %s" % fs["mountpoint"]
    print "Mount command (execute as root and replace zfs ip address and mount directory):"
    print "mount -t nfs -o rw,bg,hard,nointr,rsize=32768,wsize=32768,tcp,vers=3,timeo=600 %s " % zfs.mountstring(clonename)

# Call the correct procedure based on parameters
# sys.argv[1] is the configuration section, sys.argv[2] the action,
# optional sys.argv[3] a snapshot/clone name for clone/dropclone.
if sys.argv[2] == 'clean':
    output = zfs.clean()
    for s in output:
        print s['infostring']
elif sys.argv[2] == 'create':
    snapname = zfs.snap()
    print "Snapshot created: %s" % snapname
elif sys.argv[2] == 'clone':
    clone_snapshot()
elif sys.argv[2] == 'checkage':
    checkage()
elif sys.argv[2] == 'dropclone':
    zfs.dropclone(sys.argv[3])
    print "Clone dropped."
elif sys.argv[2] == 'listclones':
    for s in zfs.listclones():
        print zfs.clone2str(s)
elif sys.argv[2] == 'autoclone':
    zfs.autoclone()
    print "Clone created."
else:
    # 'list' is the only uioptions entry left: print every snapshot
    snaps = zfs.listsnapshots()
    for s in snaps:
        print zfs.snap2str(zfs.getsnapinfo(s))
--------------------------------------------------------------------------------
/zvolume.py:
--------------------------------------------------------------------------------
#!/usr/bin/python2

# Thin CLI wrapper around SnapHandler.createvolume(): creates the backing
# volume for a configuration section on the configured storage appliance.
# NOTE(review): the usage string omits the leading config-section argument
# (actual invocation is: zvolume.py <configsection> create) — confirm intent.

import sys
from backupcommon import Configuration, create_snapshot_class
#from datetime import datetime, timedelta

# Check command line arguments
uioptions = ['create']
if (len(sys.argv) < 3) or (len(sys.argv) > 4) or (not sys.argv[2] in uioptions):
    print "Usage: zvolume.py <%s>" % '|'.join(uioptions)
    sys.exit(2)

configsection = sys.argv[1]
Configuration.init(configsection)

# create_snapshot_class picks the backend (zfssa/netapp/softnas/...) from config
storage = create_snapshot_class(configsection)

# Main UI
if sys.argv[2] == 'create':
    storage.createvolume()
--------------------------------------------------------------------------------