├── .gitignore ├── CONTRIBUTING.md ├── README.md ├── docs ├── backup_restore_using_ark_and_restic.md ├── cloudant.md ├── cloudant │ ├── AddRepo.png │ ├── BackupRepo.png │ ├── Icon.png │ ├── NewRepositories.png │ ├── Repositories.png │ ├── SelectRepo.png │ └── UserName.png ├── cloudant_original.md ├── components.md ├── entire.md ├── etcd.md ├── etcd_restore_multi.md ├── etcd_restore_single.md ├── etcd_workload.md ├── images │ └── ark │ │ ├── ark_completion.png │ │ ├── ark_flow.png │ │ ├── ct_quote.png │ │ ├── icos_create_bucket.png │ │ ├── icos_service_credentials.png │ │ ├── icp_client_config.png │ │ ├── icp_create_namespace.png │ │ ├── icp_create_pv.png │ │ └── icp_create_pvc.png ├── mariadb.md ├── mongodb.md ├── pvs.md ├── registry.md └── some.md ├── helm-charts └── icp-cloudant-backup │ ├── .helmignore │ ├── Chart.yaml │ ├── README.md │ ├── templates │ ├── _helpers.tpl │ ├── backup-cleanup-cronjob.yaml │ ├── backup-storage-pvc.yaml │ ├── icp-cloudant-backup-cronjob.yaml │ └── icp-cloudant-restore-job.yaml │ └── values.yaml ├── images ├── icp-backup-flow.png └── icp-backup-flow.xml ├── resources ├── cloudant_backup_pvc.yaml ├── icp-cloudant-backup-job.yaml ├── icp-cloudant-restore-job.yaml ├── icp-mariadb-backup-job.yaml ├── icp-mariadb-restore-job.yaml ├── icp-mongodb-mongodump-job.yaml ├── icp-mongodb-mongorestore-job.yaml ├── mariadb-backup-pvc.yaml ├── mongodump-pv.yaml └── mongodump-pvc.yaml ├── scripts ├── backupCloudant.sh ├── backupEtcd.sh ├── buildComponent.sh ├── buildPush.sh ├── cloudant │ ├── cloudant-db-node-port.yaml │ └── externalize-cloudantdb-service.sh ├── configureHelmCLI.sh ├── createCloudantPVC.sh ├── createConfigMaps.sh ├── createVolumes.sh ├── etcd.sh ├── mariadb │ └── ansible │ │ └── archive_mariadb_on_masters.yml ├── multimaster-etcd-restore.sh ├── purge_kubelet_pods.sh ├── pushComponent.sh ├── restoreCloudant.sh ├── restoreEtcd.sh ├── switchNamespace.sh └── volume_config │ ├── large.yaml │ ├── rwo-large.yaml │ ├── rwo.yaml │ ├── 
rwx-large.yaml │ └── rwx.yaml └── src ├── cloudant-backup ├── 1.0 │ ├── 01_install-kubectl.sh │ ├── 02_install-node9x.sh │ ├── 03_install-npm-latest.sh │ ├── 04_install-cloudant-utils.sh │ ├── 05_install-jq.sh │ ├── cloudant-backup.sh │ ├── cloudant-restore.sh │ ├── create-database.sh │ ├── delete-database.sh │ ├── externalize-cloudantdb-service.sh │ ├── get-database-names.sh │ ├── helperFunctions.sh │ ├── icp-client-config.sh │ └── set-namespace-kube-system.sh ├── 1.1 │ ├── 01_install-kubectl.sh │ ├── 02_install-node9x.sh │ ├── 03_install-npm-latest.sh │ ├── 04_install-cloudant-utils.sh │ ├── 05_install-jq.sh │ ├── cloudant-backup.sh │ ├── cloudant-restore.sh │ ├── create-database.sh │ ├── delete-database.sh │ ├── externalize-cloudantdb-service.sh │ ├── get-database-names.sh │ ├── helperFunctions.sh │ ├── icp-client-config.sh │ └── set-namespace-kube-system.sh ├── 2.0 │ ├── backup-cleanup.sh │ ├── cloudant-backup.sh │ ├── cloudant-restore.sh │ ├── create-database.sh │ ├── delete-database.sh │ ├── get-database-names.sh │ └── helper-functions.sh ├── Dockerfile-1.0 ├── Dockerfile-2.0 └── doc │ ├── ICP_cloudant_backup_and_restore.md │ └── ICP_cloudant_backup_and_restore.pdf ├── cloudant-ppa ├── build-ppa-archive.sh ├── manifest.json.tmpl └── manifest.yaml.tmpl └── mariadb-backup ├── 1.0 ├── backup-cleanup.sh ├── get-database-names.sh ├── helper-functions.sh ├── mariadb-backup.sh └── mariadb-restore.sh └── Dockerfile-1.0 /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | ## Contributing to IBM Cloud Architecture reference applications 2 | Anyone can contribute to IBM Cloud Architecture reference applications and their associated projects, whether you are an IBMer or not. 
3 | We welcome your collaboration & contributions happily, as our reference applications are meant to reflect your real world scenarios. 4 | There are multiple ways to contribute: report bugs and improvement suggestions, improve documentation, and contribute code. 5 | 6 | 7 | ## Bug reports, documentation changes, and feature requests 8 | 9 | If you would like to contribute your experience with an IBM Cloud Architecture project back to the project in the form of encountered bug reports, necessary documentation changes, or new feature requests, this can be done through the use of the repository's [**Issues**](#) list. 10 | 11 | Before opening a new issue, please reference the existing list to make sure a similar or duplicate item does not already exist. Otherwise, please be as explicit as possible when creating the new item and be sure to include the following: 12 | 13 | - **Bug reports** 14 | - Specific Project Version 15 | - Deployment environment 16 | - A minimal, but complete, setup of steps to recreate the problem 17 | - **Documentation changes** 18 | - URL to existing incorrect or incomplete documentation (either in the project's GitHub repo or external product documentation) 19 | - Updates required to correct current inconsistency 20 | - If possible, a link to a project fork, sample, or workflow to expose the gap in documentation. 21 | - **Feature requests** 22 | - Complete description of project feature request, including but not limited to, components of the existing project that are impacted, as well as additional components that may need to be created. 23 | - A minimal, but complete, setup of steps to recreate environment necessary to identify the new feature's current gap. 24 | 25 | The more explicit and thorough you are in opening GitHub Issues, the more efficient your interaction with the maintainers will be. 
When creating the GitHub Issue for your bug report, documentation change, or feature request, be sure to add as many relevant labels as necessary (that are defined for that specific project). These will vary by project, but will be helpful to the maintainers in quickly triaging your new GitHub issues. 26 | 27 | ## Code contributions 28 | 29 | We really value contributions, and to maximize the impact of code contributions, we request that any contributions follow the guidelines below. If you are new to open source contribution and would like some more pointers or guidance, you may want to check out [**Your First PR**](http://yourfirstpr.github.io/) and [**First Timers Only**](https://www.firsttimersonly.com/). These are a few projects that help on-board new contributors to the overall process. 30 | 31 | ### Coding and Pull Requests best practices 32 | - Please ensure you follow the coding standard and code formatting used throughout the existing code base. 33 | - This may vary project by project, but any specific diversion from normal language standards will be explicitly noted. 34 | - One feature / bug fix / documentation update per pull request 35 | - Always pull the latest changes from upstream and rebase before creating any pull request. 36 | - New pull requests should be created against the `integration` branch of the repository, if available. 37 | - This ensures new code is included in full-stack integration tests before being merged into the `master` branch 38 | - All new features must be accompanied by associated tests. 39 | - Make sure all tests pass locally before submitting a pull request. 40 | - Include tests with every feature enhancement, improve tests with every bug fix 41 | 42 | ### Github and git flow 43 | 44 | The internet is littered with guides and information on how to use and understand git. 
45 | However, here's a compact guide that follows the suggested workflow 46 | 47 | ![Github flow](https://ibm-cloud-architecture.github.io/assets/img/github_flow.png) 48 | 49 | 1. Fork the desired repo in github. 50 | 51 | 2. Clone your repo to your local computer. 52 | 53 | 3. Add the upstream repository 54 | 55 | Note: Guide for step 1-3 here: [forking a repo](https://help.github.com/articles/fork-a-repo/) 56 | 57 | 4. Create new development branch off the targeted upstream branch. This will often be `master`. 58 | 59 | ``` 60 | git checkout -b master 61 | ``` 62 | 63 | 5. Do your work: 64 | - Write your code 65 | - Write your tests 66 | - Pass your tests locally 67 | - Commit your intermediate changes as you go and as appropriate 68 | - Repeat until satisfied 69 | 70 | 6. Fetch latest upstream changes (in case other changes had been delivered upstream while you were developing your new feature). 71 | 72 | ``` 73 | git fetch upstream 74 | ``` 75 | 7. Rebase to the latest upstream changes, resolving any conflicts. This will 'replay' your local commits, one by one, after the changes delivered upstream while you were locally developing, letting you manually resolve any conflict. 76 | 77 | ``` 78 | git branch --set-upstream-to=upstream/master 79 | git rebase 80 | ``` 81 | Instructions on how to manually resolve a conflict and commit the new change or skip your local replayed commit will be presented on screen by the git CLI. 82 | 83 | 8. Push the changes to your repository 84 | 85 | ``` 86 | git push origin 87 | ``` 88 | 89 | 9. Create a pull request against the same targeted upstream branch. 90 | 91 | [Creating a pull request](https://help.github.com/articles/creating-a-pull-request/) 92 | 93 | Once the pull request has been reviewed, accepted and merged into the main github repository, you should synchronise your remote and local forked github repository `master` branch with the upstream master branch. To do so: 94 | 95 | 10. 
Pull to your local forked repository the latest changes upstream (that is, the pull request). 96 | 97 | ``` 98 | git pull upstream master 99 | ``` 100 | 101 | 11. Push those latest upstream changes pulled locally to your remote forked repository. 102 | 103 | ``` 104 | git push origin master 105 | ``` 106 | 107 | ### What happens next? 108 | - All pull requests will be automatically built and unit tested by travis-ci, when implemented by that specific project. 109 | - You can determine if a given project is enabled for travis-ci unit tests by the existence of a `.travis.yml` file in the root of the repository or branch. 110 | - When in use, all travis-ci unit tests must pass completely before any further review or discussion takes place. 111 | - The repository maintainer will then inspect the commit and, if accepted, will pull the code into the upstream branch. 112 | - Should a maintainer or reviewer ask for changes to be made to the pull request, these can be made locally and pushed to your forked repository and branch. 113 | - Commits passing this stage will make it into the next release cycle for the given project. 114 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Backup and Restore (BUR) of IBM Cloud Private 2 | 3 | ## Introduction 4 | 5 | In this document, we will describe how to back up and restore your IBM Cloud Private (ICP) environment. Since some of the components and processes have changed between versions, we have begun to denote versions in the affected steps. Currently we are providing backup processes, procedures and suggestions for all nodes except Vulnerability Advisor (check back, we will be adding it soon). 6 | 7 | 8 | ### General Guidance on ICP Backup 9 | 10 | Consider the backup and recovery procedures to best meet your resilience requirements.
Each implementation will have its own specific requirements and thus potentially its own procedures and best-practices. Possible recovery / failure scenarios should be rehearsed in your non-production environment to verify their validity. Each backup and recovery (BUR) solution will rely upon the enterprise for specific procedures and tooling to manage backups of the cluster nodes, their filesystems and persistent storage solution(s). 11 | 12 | When developing your plan, alongside the standard infrastructure failure scenarios, consider the following possible node failures: Boot, Worker, Proxy, Management, Master in single Master topology, Master in multi-Master topology. Consider failure of your shared storage / persistent storage solution. Also, consider the possibility of catastrophic failures such as multiple Masters and the entire cluster potentially including a DR declaration. 13 | 14 | Currently, since we do not require any data from **Worker Nodes** and **Proxy Nodes**, and we can simply recreate them from the command line, we **will not create backups of these nodes.** 15 | ### Notes About etcd 16 | 17 | ICP and Kubernetes rely heavily on etcd to store the Kubernetes and Calico configurations. According to the etcd documentation: (https://coreos.com/etcd/docs/latest/v2/admin_guide.html#disaster-recovery) 18 | 19 | > A user should avoid restarting an etcd member with a data directory from an out-of-date backup. Using an out-of-date data directory can lead to inconsistency as the member had agreed to store information via raft then re-joins saying it needs that information again. For maximum safety, if an etcd member suffers any sort of data corruption or loss, it must be removed from the cluster. Once removed the member can be re-added with an empty data directory. 20 | 21 | ### Notes About ICP Components 22 | 23 | In ICP there are several components that help maintain the state of Kubernetes and ICP components.
We have taken care to make special note of each of these component stores: 24 | 25 | * etcd 26 | * Docker Registry 27 | * Audit Logs 28 | * Cloudant (ICP 2.1.0.2 and before) 29 | * MongoDB (ICP 2.1.0.3 and after) 30 | * MariaDB 31 | * certificates 32 | 33 | Based upon these components we recommend the following flow: 34 | 35 | ![flow](images/icp-backup-flow.png) 36 | 37 | > It is important to note that you will leverage the same best-practices you use elsewhere in your datacenter. The special procedures for backup of ICP components are in addition to (and rely upon) these already proven techniques that must be in place. 38 | 39 | ## Backup and Recovery: Breaking it Down 40 | 41 | This guide segments the backup process into two logical super-steps: 42 | 43 | * Initial Backup: Backup of the entire cleanly installed environment post deployment of the initial solution topology. This will be used as a basis for certain recovery scenarios. 44 | 45 | * Steady State: Specialized backup of individual ICP components.
46 | 47 | ## Cluster Procedures 48 | 49 | [Backup and restore the entire environment](docs/entire.md) 50 | 51 | [Backup and restore ICP components](docs/components.md) 52 | 53 | 54 | ## Managing Persistent Volumes 55 | 56 | [Back up and restore the Persistent Volumes](docs/pvs.md) 57 | 58 | 59 | ## Backlog and Notes 60 | 61 | [Procedures on our backlog, participation is invited](docs/some.md) 62 | 63 | 64 | ## Additional information 65 | 66 | * [How to restore a master node deployed to AWS](https://github.ibm.com/jkwong/icp-aws-hertz/blob/master/MasterNodeRecovery.md) 67 | 68 | * [Everything you ever wanted to know about using etcd with Kubernetes v1.6 (but were afraid to ask)](https://www.mirantis.com/blog/everything-you-ever-wanted-to-know-about-using-etcd-with-kubernetes-v1-6-but-were-afraid-to-ask/) 69 | 70 | * [OpenShift Backup and Restore](https://docs.openshift.com/container-platform/3.5/admin_guide/backup_restore.html#etcd-backup) 71 | 72 | * [Kubernetes Backups](https://kubernetes.io/docs/getting-started-guides/ubuntu/backups/) 73 | 74 | * [Using utilities to recover Tectonic clusters](https://coreos.com/tectonic/docs/latest/troubleshooting/bootkube_recovery_tool.html) 75 | -------------------------------------------------------------------------------- /docs/cloudant.md: -------------------------------------------------------------------------------- 1 | # Backup and Restore the Cloudant Database in IBM Cloud Private (2.1.0.2 and Before) 2 | 3 | IBM Cloudant local datastore is used by IBM Cloud Private(ICP) to store information for OIDC service, metering service (IBM® Cloud Product Insights), Helm repository server, and Helm API server. It runs as a kubernetes Statefulset and mount to local host path. The StatefulSet is exposed as HeadLess service as “cloudantdb”. 
4 | There are 8 databases in ICP: 5 | 6 | * _users 7 | * helm_repos 8 | * metrics 9 | * metrics_app 10 | * platform-db 11 | * security-data 12 | * stats 13 | * tgz\_files\_icp 14 | 15 | In this page, we'll describe how to back up and restore the Cloudant local db in IBM Cloud Private. 16 | 17 | ## Flow 18 | 19 | Here is the sequence of steps we will run to back up and validate the Cloudant databases. 20 | 21 | * Add data to the Cloudant database 22 | * Define a Persistent Volume 23 | * Create a Persistent Volume Claim 24 | * Back up Cloudant database 25 | * Simulate a loss (by deleting the data we added above) 26 | * Restore the Cloudant database 27 | * Validate the data is back 28 | 29 | ## Add data to the Cloudant database 30 | 31 | Cloudant holds different kinds of data for ICP, such as Helm repositories and metrics. 32 | 33 | Run the following procedure to add a new Helm repository to ICP: 34 | 35 | * Log on to the ICP UI 36 | * Click *Manage → Helm Repositories* 37 | * Click *Add Repository* 38 | * Copy one of the existing Repositories URL to your buffer 39 | * Add a new repository 40 | - name *test_cloudant* 41 | - URL: the value copied in the buffer 42 | * Click Add 43 | 44 | You will see the new repository in the list 45 | 46 | ## Define a Persistent Volume 47 | 48 | We will store the Cloudant backup as a Kubernetes Persistent Volume Claim (PVC), so that you can use the same procedure defined to back up the PVCs 49 | 50 | So we need first to create a Persistent Volume (or a Storage Class Provider), if there is none. 
Follow the guidelines for your environment to create a Persistent Volume 52 | 53 | ## Create a Persistent Volume Claim 54 | 55 | Run the following procedure to back up Cloudant to the PVC (assuming you are in the directory `icp-backup/scripts`): 56 | 57 | ``` 58 | ./switchNamespace.sh kube-system 59 | ./createCloudantPVC.sh 60 | ``` 61 | 62 | You will see the following output: 63 | 64 | ``` 65 | patro:scripts edu$ ./createCloudantPVC.sh 66 | Creating Cloudant PVC... 67 | Error from server (NotFound): persistentvolumeclaims "cloudant-backup" not found 68 | persistentvolumeclaim "cloudant-backup" created 69 | ``` 70 | 71 | ## Back up Cloudant database 72 | 73 | Now we can back up the Cloudant database. Run the following procedure: 74 | 75 | ``` 76 | ./backupCloudant.sh 77 | ``` 78 | 79 | And you will see the following output: 80 | 81 | ``` 82 | patro:scripts edu$ ./backupCloudant.sh 83 | Deleting job 84 | Error from server (NotFound): jobs.batch "icp-cloudant-backup" not found 85 | Creating job 86 | job "icp-cloudant-backup" created 87 | Name: icp-cloudant-backup 88 | Namespace: default 89 | Image(s): patrocinio/icp-backup-cloudant-backup:latest 90 | Selector: controller-uid=31cc1adb-3439-11e8-aa21-067c83088870 91 | Parallelism: 1 92 | Completions: 1 93 | Start Time: Fri, 30 Mar 2018 18:41:28 +0200 94 | Labels: controller-uid=31cc1adb-3439-11e8-aa21-067c83088870 95 | job-name=icp-cloudant-backup 96 | Pods Statuses: 1 Running / 0 Succeeded / 0 Failed 97 | Volumes: 98 | cloudant-backup: 99 | Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace) 100 | ClaimName: cloudant-backup 101 | ReadOnly: false 102 | Events: 103 | FirstSeen LastSeen Count From SubObjectPath Type Reason Message 104 | --------- -------- ----- ---- ------------- -------- ------ ------- 105 | 1s 1s 1 {job-controller } Normal SuccessfulCreate Created pod: icp-cloudant-backup-g7hjx 106 | ``` 107 | 108 | You can see the output of the Pod by running the following
command: 109 | 110 | ``` 111 | kubectl logs -f <pod> 112 | ``` 113 | 114 | where `<pod>` is the Pod ID, as shown in the last line of the previous output. This Job takes a few minutes; at the end, you should see a message like this: 115 | 116 | ``` 117 | 2018-03-30T19:04:02.785Z couchbackup:backup Finished - Total document revisions written: 1 118 | ``` 119 | 120 | ## Simulate a loss 121 | 122 | Now we are going to simulate a loss in the Cloudant database. 123 | 124 | As we added the Helm repository *test_cloudant*, let's remove it, by following these steps in the ICP UI: 125 | 126 | * Click *Manage → Helm Repositories* 127 | * In the line containing *test_cloudant*, select the Action menu, and click *Delete* 128 | * In the confirmation dialog, click *Delete* again 129 | 130 | You will see that the *test_cloudant* repository disappears from the list. 131 | 132 | ## Restore the Cloudant database 133 | 134 | Let's now recover the Cloudant database from the backup. Run the following script to restore the Cloudant database: 135 | 136 | ``` 137 | ./restoreCloudant.sh 138 | ``` 139 | 140 | You will see the following output: 141 | 142 | ``` 143 | kubectl describe job $JOBpatro:scripts edu$ ./restoreCloudant.sh 144 | Deleting job 145 | Error from server (NotFound): jobs.batch "icp-cloudant-restore" not found 146 | Creating job 147 | job "icp-cloudant-restore" created 148 | Name: icp-cloudant-restore 149 | Namespace: kube-system 150 | Image(s): patrocinio/icp-backup-cloudant-backup:latest 151 | Selector: controller-uid=65f3c397-344e-11e8-aa21-067c83088870 152 | Parallelism: 1 153 | Completions: 1 154 | Start Time: Fri, 30 Mar 2018 21:13:15 +0200 155 | Labels: controller-uid=65f3c397-344e-11e8-aa21-067c83088870 156 | job-name=icp-cloudant-restore 157 | Pods Statuses: 1 Running / 0 Succeeded / 1 Failed 158 | Volumes: 159 | cloudant-backup: 160 | Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace) 161 | ClaimName: cloudant-backup 162 | ReadOnly: false
163 | Events: 164 | FirstSeen LastSeen Count From SubObjectPath Type Reason Message 165 | --------- -------- ----- ---- ------------- -------- ------ ------- 166 | 7s 7s 1 {job-controller } Normal SuccessfulCreate Created pod: icp-cloudant-restore-8bwq8 167 | 4s 4s 1 {job-controller } Normal SuccessfulCreate Created pod: icp-cloudant-restore-lpb4c 168 | ``` 169 | 170 | Next, look at the Kubernetes Pod by running the following command: `kubectl logs -f <pod>` where `<pod>` is the ID displayed in the last line of the previous output. 171 | 172 | ### Restoring a single Cloudant database 173 | 174 | Cloudant contains 8 databases, and some of them can have a huge amount of data. If you want to restore one single Cloudant database, you can run the following command: `./restoreCloudant.sh <database>` where `<database>` is one of the following databases: 175 | 176 | * _users 177 | * helm_repos 178 | * metrics 179 | * metrics_app 180 | * platform-db 181 | * security-data 182 | * stats 183 | * tgz\_files\_icp 184 | 185 | 186 | ## Validate the data is back 187 | 188 | Follow these steps to validate the data has been restored: 189 | 190 | * Log on to the ICP UI 191 | * Click *Manage → Helm Repositories* 192 | * Ensure the repository *test_cloudant* is back 193 | -------------------------------------------------------------------------------- /docs/cloudant/AddRepo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ibm-cloud-architecture/icp-backup/2056dea4f122e066eb3fcfa69f0f449a2b5b3970/docs/cloudant/AddRepo.png -------------------------------------------------------------------------------- /docs/cloudant/BackupRepo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ibm-cloud-architecture/icp-backup/2056dea4f122e066eb3fcfa69f0f449a2b5b3970/docs/cloudant/BackupRepo.png --------------------------------------------------------------------------------
/docs/cloudant/Icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ibm-cloud-architecture/icp-backup/2056dea4f122e066eb3fcfa69f0f449a2b5b3970/docs/cloudant/Icon.png -------------------------------------------------------------------------------- /docs/cloudant/NewRepositories.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ibm-cloud-architecture/icp-backup/2056dea4f122e066eb3fcfa69f0f449a2b5b3970/docs/cloudant/NewRepositories.png -------------------------------------------------------------------------------- /docs/cloudant/Repositories.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ibm-cloud-architecture/icp-backup/2056dea4f122e066eb3fcfa69f0f449a2b5b3970/docs/cloudant/Repositories.png -------------------------------------------------------------------------------- /docs/cloudant/SelectRepo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ibm-cloud-architecture/icp-backup/2056dea4f122e066eb3fcfa69f0f449a2b5b3970/docs/cloudant/SelectRepo.png -------------------------------------------------------------------------------- /docs/cloudant/UserName.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ibm-cloud-architecture/icp-backup/2056dea4f122e066eb3fcfa69f0f449a2b5b3970/docs/cloudant/UserName.png -------------------------------------------------------------------------------- /docs/components.md: -------------------------------------------------------------------------------- 1 | # Back up and restore ICP components 2 | 3 | The most important (and detailed) step in this scenario is performing backups of the ICP components. It is vital to perform these backups in the spirit of this guide.
Improperly taken backups may prove later to be useless. Useless backups may leave your cluster in a state that requires redeployment. 4 | 5 | [Back up and restore etcd](etcd.md) 6 | 7 | [Back up and restore Docker Registry](registry.md) 8 | 9 | [Back up and restore Cloudant 2.1.0.2 and earlier clusters only](cloudant.md) 10 | 11 | [Back up and restore MongoDB 2.1.0.3 and later clusters only](mongodb.md) 12 | 13 | These components must be backed up in the following order: 14 | 15 | * etcd 16 | * Docker Registry 17 | * Cloudant (2.1.0.2 and earlier) 18 | * MongoDB (2.1.0.3 and later) 19 | 20 | > When restoring our Master Node we will proceed in the opposite order. 21 | -------------------------------------------------------------------------------- /docs/entire.md: -------------------------------------------------------------------------------- 1 | # Backup and Restore an Entire ICP Topology 2 | 3 | As described in the introduction, we don't recommend using a traditional server backup alone to persist an ICP environment after the cluster is put into use (because of components such as the etcd datastore). 4 | 5 | However, a full environment backup might be required to quickly restore the environment (or a node / subset of nodes) to the initial state, with specific ICP component restores applied as documented at [Backup and Restore ICP Components](components.md). 6 | 7 | In order to follow all of the recommendations in this guide, it is assumed that you are able to have access to your cluster immediately post-install. 8 | 9 | ## Back up your ICP environment 10 | 11 | Here are the steps you should follow to take an initial backup of your ICP environment.
Keep in mind our guiding principle: **We do not take backups of nodes that we don't restore and just replace instead (Worker and Proxy).** 12 | 13 | ### Stop the ICP Servers (Virtual Machines) 14 | 15 | For the process used in this guide you do not need to stop the entire cluster all at once, but it is important to note a few items. When attempting to bring down an entire cluster always stop the Master nodes first, otherwise they will begin rescheduling and attempting to recover. This is not desired when you are attempting to achieve an organized steady state. Once the Masters have been stopped you are free to proceed in any order you please. For cluster restart proceed in reverse and bring the Masters up once all other nodes have resumed. 16 | 17 | > Follow other best practices; for example, if you are downing worker nodes for some reason you will want to use the drain command prior to taking them offline. 18 | 19 | Stop kubelet first; kubelet may attempt to restart Docker processes otherwise. 20 | 21 | ```sudo systemctl stop kubelet``` 22 | 23 | Next stop Docker: ```sudo systemctl stop docker``` 24 | 25 | Confirm that all processes are shutdown (be patient): ```top``` 26 | 27 | and that all related network ports are no longer in use: ```netstat -antp``` 28 | 29 | Once you have completed the other tasks for performing system maintenance or taking backups, to restart the cluster simply reboot the nodes (Masters Last). 30 | 31 | > Yes, some of the time you can actually start the processes explicitly as stated below, but this is a good opportunity to reaffirm that these systems will start on their own. Also, this team has seen much more consistent success via the shutdown -r now method. 32 | 33 | If you wish to restart without a reboot, start Docker first and then follow with kubelet: 34 | 35 | ```sudo start docker
``` 36 | 37 | Pause for a moment then: 38 | 39 | ```sudo start kubelet``` 40 | 41 | You can follow the logs for kubelet: ```sudo journalctl -e -u kubelet``` 42 | 43 | ### Taking an Infrastructure Level Backup of Your Cluster 44 | 45 | We recommend taking the backup immediately following the ICP installation. 46 | 47 | > In the case that you are performing an upgrade, post upgrade, follow this procedure for taking a cold backup once again. Retain both the post-upgrade and post-initial-install backups of the Master nodes. As a special note, if you have an HA cluster you should be able to accomplish the backup of the Master nodes without having an outage. Simply back them up one at a time. 48 | 49 | The tool to use for the backup depends on your hosting environment and accepted tools. 50 | 51 | * For a VMware environment, you can use VMware snapshot, Veeam, IBM Spectrum Protect, or any other approved snapshot tool that allows you to store this snapshot in perpetuity (forever). 52 | * For a public cloud environment, use the backup solution preferred by the cloud provider. 53 | * For any other environment, you can always use the storage-provided mechanism for backup, or other solution that allows you to accurately recreate the original state of the infrastructure and build. 54 | 55 | ### Validate your Backup 56 | 57 | No backup is **good** until we test it by using it to successfully restore our cluster (or component thereof). 58 | 59 | Follow these steps to validate your backup: 60 | 61 | * Destroy the node (or nodes) via whichever means fits your potential / expected scenario 62 | * Follow the provided steps to restore what was destroyed in the previous step 63 | * Verify the validity of whatever was destroyed and restored 64 | 65 | > The fact that an ICP node is running is a good thing, but that does not necessarily mean your restoration was successful. In your non-production environments perform steps that force workload mobility.
Verify that your Masters are able to behave like Masters, Proxies like Proxies, .... you get the idea. 66 | -------------------------------------------------------------------------------- /docs/etcd.md: -------------------------------------------------------------------------------- 1 | # Backup and Restore etcd in IBM Cloud Private 2 | 3 | The backup process is the same whether you're running single master or multi-Master configurations of ICP. In both cases the backup is always taken from a single node, to ensure consistency upon restore. In the case of restore to a multi-node cluster, any of the nodes may be restored from the same backup. 4 | 5 | To back up and restore etcd, you must be logged into one of the etcd nodes (this will be the master unless you deployed an external etcd cluster). Clone this GitHub repository to the node running etcd; you will use it when performing the steps in this guide. 6 | 7 | ``` 8 | git clone https://github.com/ibm-cloud-architecture/icp-backup.git 9 | cd icp-backup/scripts 10 | ``` 11 | Next define an environment variable **endpoint** that points to a node running etcd: 12 | 13 | ``` 14 | export endpoint=<etcd-node-IP-address> 15 | ``` 16 | If you are simply testing the backup and restore process it is useful to have data to verify the validity of your process. Follow [Create some workloads in ICP](etcd_workload.md) to create a data-trail to follow.
17 | 18 | ## Backup Procedure for etcd 19 | 20 | From the etcd node run the following command from the cloned GitHub repository above: 21 | 22 | ``` 23 | ./backupEtcd.sh 24 | ``` 25 | 26 | If successful, you should receive output resembling the following: 27 | 28 | ``` 29 | root@eduardo-icp:~/icp-backup/scripts# ./backupEtcd.sh 30 | Current Time : your-date-and-time.38 31 | Back up to the following file: /data/etcd.your-date-and-time.db 32 | Snapshot saved at /data/etcd.your-date-and-timedb 33 | ``` 34 | 35 | This command generates a file (your backup) in the master node `/tmp` directory, using the current timestamp. Copy this file (`/tmp/etcd.your-date-and-time.db`) to a safe location, outside the node and in a location that is subject to backup. This backup should be kept in perpetuity (forever). 36 | 37 | If you are ready to restore your datastore from this (or another) backup, proceed to the relevant topic: 38 | * [Restore etcd on single master environment](etcd_restore_single.md) 39 | * [Restore etcd on multi-master environment](etcd_restore_multi.md) 40 | -------------------------------------------------------------------------------- /docs/etcd_restore_multi.md: -------------------------------------------------------------------------------- 1 | # Restore etcd for Multi-Master Node ICP Topology 2 | 3 | In a multi master ICP environment you'll need to first restore a consistent cluster. This can either be done via restoring a single node, and then growing the cluster out to the desired size, or by restoring the entire cluster from the same backup copy all at once. In this topic we will describe how to perform the full cluster restore. 4 | 5 | To reduce the effort required we will use ansible where possible to execute commands on all master nodes simultaneously. It is assumed that the ansible commands are run from the boot node (normally master1) which holds the cluster configuration files from the initial installation. 
The configuration files are typically held in `/opt/ibm-cloud-private-x.x.x/cluster` (replace x.x.x with your ICP version e.g 3.1.1). Adjust commands accordingly if your installation uses a different directory. 6 | 7 | Define the following environment variable, according to your installation: `export CLUSTER_DIR=/opt/ibm-cloud-private-x.x.x/cluster` 8 | 9 | ## Prerequisites Ansible and jq 10 | 11 | Ensure that Ansible is installed on the boot node: `which ansible` If this command returns an empty response, install ansible on this node. 12 | 13 | All Master Nodes also require the `jq` json parsing tool. you can ensure this tool is installed with the following command: 14 | ``` 15 | ansible master -i $CLUSTER_DIR/hosts -e @$CLUSTER_DIR/config.yaml --private-key=$CLUSTER_DIR/ssh_key -m package -a "use=auto name=jq state=present" 16 | ``` 17 | 18 | ## Stop Kubernetes on ALL Master Nodes 19 | 20 | Before restoring the data, we need to stop the etcd Pod. To ensure cluster consistency we will also shut down all other pods managed by hyperkube. In most deployments (ones where we have separate management servers) we also need to shut these down. 
21 | ``` 22 | ansible master -i $CLUSTER_DIR/hosts -e @$CLUSTER_DIR/config.yaml --private-key=$CLUSTER_DIR/ssh_key -a "mkdir -p /etc/cfc/podbackup" 23 | 24 | ansible master -i $CLUSTER_DIR/hosts -e @$CLUSTER_DIR/config.yaml --private-key=$CLUSTER_DIR/ssh_key -m shell -a "mv /etc/cfc/pods/*.json /etc/cfc/podbackup" 25 | ``` 26 | 27 | Wait for etcd to be shut down on **all** nodes: 28 | ``` 29 | ansible master -i $CLUSTER_DIR/hosts -e @$CLUSTER_DIR/config.yaml --private-key=$CLUSTER_DIR/ssh_key -m wait_for -a "port=4001 state=stopped" 30 | ``` 31 | 32 | Once etcd has stopped, we will shut down kubelet running this command on all Master (and Management) nodes: 33 | 34 | ``` 35 | ansible master,management -i $CLUSTER_DIR/hosts -e @$CLUSTER_DIR/config.yaml --private-key=$CLUSTER_DIR/ssh_key -m service -a "name=kubelet state=stopped" 36 | ``` 37 | 38 | Once kubelet has stopped, restart the docker service to ensure all pods not managed by kubelet are shut down. 39 | ``` 40 | ansible master,management -i $CLUSTER_DIR/hosts -e @$CLUSTER_DIR/config.yaml --private-key=$CLUSTER_DIR/ssh_key -m service -a "name=docker state=restarted" 41 | ``` 42 | 43 | 44 | ## Purge, Copy and Restore etcd Data 45 | 46 | Next, **purge** the current etcd data on all Master Nodes: 47 | ``` 48 | ansible master -i $CLUSTER_DIR/hosts -e @$CLUSTER_DIR/config.yaml --private-key=$CLUSTER_DIR/ssh_key -m shell -a "rm -rf /var/lib/etcd /var/lib/etcd-wal/wal" 49 | ``` 50 | 51 | Copy etcd snapshot to all Master Nodes. Assuming you have the file `/tmp/etcd.your-date-and-time.db` in your environment, containing a backup of your etcd, run the following procedure to copy the file to all master nodes: 52 | ``` 53 | ansible master -i $CLUSTER_DIR/hosts -e @$CLUSTER_DIR/config.yaml --private-key=$CLUSTER_DIR/ssh_key -m copy -a "src=/tmp/etcd.your-date-and-time.db dest=/tmp/snapshot.db" 54 | ``` 55 | 56 | Following the purge, restore the snapshot on all Master Nodes. 
Assuming you have cloned the git repo, and your current directory is located in `icp-backup/scripts`, run the following command: 57 | ``` 58 | ansible master -i $CLUSTER_DIR/hosts -e @$CLUSTER_DIR/config.yaml --private-key=$CLUSTER_DIR/ssh_key -m script -a "./multimaster-etcd-restore.sh" 59 | ``` 60 | 61 | The command above loads the data into the directory /var/lib/etcd/restored on each of your Master Nodes, with the cluster settings configured. Assuming this command was successful, we now need to move the data to the expected directory by running the following commands: 62 | 63 | ``` 64 | ansible master -i $CLUSTER_DIR/hosts -e @$CLUSTER_DIR/config.yaml --private-key=$CLUSTER_DIR/ssh_key -m shell -a "mv /var/lib/etcd/restored/* /var/lib/etcd/" 65 | 66 | ansible master -i $CLUSTER_DIR/hosts -e @$CLUSTER_DIR/config.yaml --private-key=$CLUSTER_DIR/ssh_key -m shell -a "mv /var/lib/etcd/member/wal /var/lib/etcd-wal/wal" 67 | ``` 68 | 69 | Before we re-enable kubelet and etcd with the newly restored data, we will purge the kubelet pods directory to ensure consistency between the cached kubelet data and the etcd data. We use a simple script to ensure that all docker mounts are unmounted before purging the pods directory. In deployments where we have management nodes, we'll also need to run the following: 70 | 71 | ``` 72 | ansible master,management -i $CLUSTER_DIR/hosts -e @$CLUSTER_DIR/config.yaml --private-key=$CLUSTER_DIR/ssh_key -m script -a "./purge_kubelet_pods.sh" 73 | ``` 74 | Finally, re-enable both **kubelet** and the **etcd** pod. 75 | 76 | With the etcd cluster data restored, we can re-enable kubelet and instruct it to start the etcd cluster. 
Run the following commands: 77 | 78 | ``` 79 | ansible master,management -i $CLUSTER_DIR/hosts -e @$CLUSTER_DIR/config.yaml --private-key=$CLUSTER_DIR/ssh_key -m service -a "name=kubelet state=started" 80 | 81 | ansible master -i $CLUSTER_DIR/hosts -e @$CLUSTER_DIR/config.yaml --private-key=$CLUSTER_DIR/ssh_key -m shell -a "mv /etc/cfc/podbackup/etcd.json /etc/cfc/pods" 82 | ``` 83 | 84 | It will take a few seconds for etcd to come back. We can use ansible to monitor the progress: 85 | 86 | ``` 87 | ansible master -i $CLUSTER_DIR/hosts -e @$CLUSTER_DIR/config.yaml --private-key=$CLUSTER_DIR/ssh_key -m wait_for -a "port=4001 state=started" 88 | ``` 89 | 90 | ## Validate etcd Cluster Health 91 | 92 | To set up the etcdctl tool to query the etcd cluster, run the following command, adjusting the IP address for the current node you're working on: `export endpoint=<etcd node IP address>` 93 | 94 | Then run the following script: `. ./etcd.sh` 95 | 96 | To query the cluster health, run this command: `$etcdctl2 cluster-health` 97 | 98 | You should see a response similar to the following: 99 | 100 | ``` 101 | member 8211f1d0f64f3269 is healthy: got healthy result from https://10.0.0.1:2380 102 | member 91bc3c398fb3c146 is healthy: got healthy result from https://10.0.0.2:2380 103 | member fd422379fda50e48 is healthy: got healthy result from https://10.0.0.3:2380 104 | cluster is healthy 105 | ``` 106 | 107 | #### Start the Remaining ICP Cluster Pods 108 | 109 | Now that etcd is restored to a healthy state, let **kubelet** start the rest of the core kubernetes pods, which in turn will start the workloads managed by kubernetes. 110 | ``` 111 | ansible master -i $CLUSTER_DIR/hosts -e @$CLUSTER_DIR/config.yaml --private-key=$CLUSTER_DIR/ssh_key -m shell -a "mv /etc/cfc/podbackup/*.json /etc/cfc/pods" 112 | ``` 113 | 114 | It will likely take several minutes for all pods to be restarted. 
Monitor the pods in the `kube-system` namespace by running: `kubectl get pods --namespace=kube-system` 115 | 116 | # Validating the Results 117 | 118 | Next let's validate that the new environment has the data restored in etcd. Run the following command to display the ConfigMaps from Kubernetes: `kubectl get configmaps | grep snake` 119 | 120 | If you loaded our sample before starting the exercise you will see the below listing. If you did not, you **should** see whichever ConfigMaps were part of your system upon the time your backup was taken. 121 | 122 | ``` 123 | root@icp-master:~# kubectl get configmaps | grep snake 124 | snake-10 1 48m 125 | snake-11 1 48m 126 | snake-12 1 48m 127 | snake-13 1 48m 128 | snake-14 1 48m 129 | snake-15 1 48m 130 | snake-16 1 48m 131 | snake-17 1 48m 132 | snake-18 1 48m 133 | snake-19 1 48m 134 | snake-20 1 48m 135 | snake-21 1 48m 136 | snake-22 1 48m 137 | snake-23 1 48m 138 | snake-24 1 48m 139 | snake-25 1 48m 140 | snake-26 1 48m 141 | snake-8 1 48m 142 | snake-9 1 48m 143 | ``` 144 | 145 | Congratulations! You restored successfully your etcd! 146 | -------------------------------------------------------------------------------- /docs/etcd_restore_single.md: -------------------------------------------------------------------------------- 1 | ## Restoring etcd For Single 2 | 3 | Follow the instructions here to restore your etcd datastore to one of the Master Nodes. 4 | 5 | > Note these instructions do not yet include the process for restoring etcd for topologies that have externalized the etcd cluster. However, you can derive both the backup and restore processes from the tools and steps followed here. Use the same principles for both backup and recovery. As with other scenarios rehearsing the process is required. We currently have this scenario in our backlog and invite contributors. 
6 | 7 | It is assumed you are using the restore process for one of the following reasons: 8 | 9 | * You have recovered the initial environment as described in the procedure [Backup and restore the entire environment](entire.md) because you no longer have a Master Node with a clean state. 10 | * You are recovering a Master Node for a single Master Node cluster 11 | * You are recovering a Master Node in a multi-Master Node environment that requires you to manually restore the initial state etcd (to accommodate your backup tool strategy / methodology) 12 | 13 | ### Etcd Restore on Single Master ICP Topology 14 | 15 | Before restoring etcd, we need to stop the etcd Pod using the following command: 16 | 17 | ``` 18 | mkdir -p /etc/cfc/podbackup 19 | mv /etc/cfc/pods/etcd.json /etc/cfc/podbackup/ 20 | ``` 21 | 22 | Verify the pod has indeed stopped by running the following command: `docker ps | grep etcd` 23 | 24 | If the pod has successfully been stopped you will see nothing returned. 25 | 26 | Next, we need to purge the current etcd data by running the following command: `rm -rf /var/lib/etcd /var/lib/etcd-wal/wal` 27 | 28 | Using your backup file `/tmp/etcd.your-date-and-time.db` from your cluster's earlier backup, run the following procedure to restore etcd: `./restoreEtcd.sh etcd.your-date-and-time.db` 29 | 30 | You should see the following response: 31 | 32 | ``` 33 | root@eduardo-icp:~/icp-backup/scripts# ./restoreEtcd.sh etcd.your-date-and-time.db 34 | Restore snapshot etcd.your-date-and-time.db 35 | your-date-and-time I | mvcc: restore compact to **your size value here** 36 | your-date-and-time I | etcdserver/membership: added member **the ID for the member** [https://169.61.93.24:2380] to cluster **your cluster id** 37 | ``` 38 | 39 | The command above loads the data to directory `/var/lib/etcd/restored`. 
40 | 41 | Next you must move the data to the expected directory by running the following commands: 42 | ``` 43 | mv /var/lib/etcd/restored/* /var/lib/etcd/ 44 | mv /var/lib/etcd/member/wal /var/lib/etcd-wal/wal 45 | rmdir /var/lib/etcd/restored 46 | ``` 47 | 48 | After successfully performing the previous steps you are ready to once again enable the etcd pod. Do so by running the following command: `mv /etc/cfc/podbackup/etcd.json /etc/cfc/pods/` 49 | 50 | Depending on your environment it will likely take a few seconds (to a few minutes) for etcd to become live. You can see the progress by running the following command: `docker ps | grep` 51 | 52 | 53 | Eventually (it might take a few minutes), you should see a response similar to the following: 54 | ``` 55 | root@icp-master:~# docker ps | grep etcd 56 | 999c8e48c0e3 ibmcom/etcd "etcd --name=etcd0 -…" About a minute ago Up About a minute k8s_etcd_k8s-etcd-10.0.0.1_kube-system_349da84ef01d46f51daacdd97b2991e1_0 57 | 747287ff5b4f ibmcom/pause:3.0 "/pause" About a minute ago Up About a minute k8s_POD_k8s-etcd-10.0.0.1_kube-system_349da84ef01d46f51daacdd97b2991e1_0 58 | ``` 59 | 60 | Next let's validate that the new environment has the data restored in etcd. Run the following command to display the ConfigMaps from Kubernetes: `kubectl get configmaps | grep snake` 61 | 62 | If you loaded our sample before starting the exercise you will see the below listing. If you did not, you **should** see whichever ConfigMaps were part of your system upon the time your backup was taken. 
63 | 64 | ``` 65 | root@icp-master:~# kubectl get configmaps | grep snake 66 | snake-10 1 48m 67 | snake-11 1 48m 68 | snake-12 1 48m 69 | snake-13 1 48m 70 | snake-14 1 48m 71 | snake-15 1 48m 72 | snake-16 1 48m 73 | snake-17 1 48m 74 | snake-18 1 48m 75 | snake-19 1 48m 76 | snake-20 1 48m 77 | snake-21 1 48m 78 | snake-22 1 48m 79 | snake-23 1 48m 80 | snake-24 1 48m 81 | snake-25 1 48m 82 | snake-26 1 48m 83 | snake-8 1 48m 84 | snake-9 1 48m 85 | ``` 86 | 87 | Congratulations! You restored successfully your etcd! 88 | -------------------------------------------------------------------------------- /docs/etcd_workload.md: -------------------------------------------------------------------------------- 1 | # Create some workloads in ICP 2 | If you are performing these steps in a lab or non-production environment it may be useful to give your etcd some personality before running this test. WE can do that by loading some data in the form of ConfigMaps. 3 | 4 | This script creates and deletes config maps continuously: `./createConfigMaps.sh` 5 | 6 | If you leave this script running for a while, you will see many ConfigMaps created: 7 | 8 | ``` 9 | root@icp-master:~/icp-backup/scripts# kubectl get configmaps | grep snake 10 | snake-0 1 9m 11 | snake-1 1 9m 12 | snake-2 1 9m 13 | snake-3 1 9m 14 | snake-4 1 9m 15 | snake-85 1 9m 16 | snake-86 1 9m 17 | snake-87 1 9m 18 | snake-88 1 9m 19 | snake-89 1 9m 20 | snake-90 1 9m 21 | snake-91 1 9m 22 | snake-92 1 9m 23 | snake-93 1 9m 24 | snake-94 1 9m 25 | snake-95 1 9m 26 | snake-96 1 9m 27 | snake-97 1 9m 28 | snake-98 1 9m 29 | snake-99 1 9m 30 | ``` 31 | Ok ... **kill it** and move on to the backup. 
32 | -------------------------------------------------------------------------------- /docs/images/ark/ark_completion.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ibm-cloud-architecture/icp-backup/2056dea4f122e066eb3fcfa69f0f449a2b5b3970/docs/images/ark/ark_completion.png -------------------------------------------------------------------------------- /docs/images/ark/ark_flow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ibm-cloud-architecture/icp-backup/2056dea4f122e066eb3fcfa69f0f449a2b5b3970/docs/images/ark/ark_flow.png -------------------------------------------------------------------------------- /docs/images/ark/ct_quote.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ibm-cloud-architecture/icp-backup/2056dea4f122e066eb3fcfa69f0f449a2b5b3970/docs/images/ark/ct_quote.png -------------------------------------------------------------------------------- /docs/images/ark/icos_create_bucket.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ibm-cloud-architecture/icp-backup/2056dea4f122e066eb3fcfa69f0f449a2b5b3970/docs/images/ark/icos_create_bucket.png -------------------------------------------------------------------------------- /docs/images/ark/icos_service_credentials.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ibm-cloud-architecture/icp-backup/2056dea4f122e066eb3fcfa69f0f449a2b5b3970/docs/images/ark/icos_service_credentials.png -------------------------------------------------------------------------------- /docs/images/ark/icp_client_config.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ibm-cloud-architecture/icp-backup/2056dea4f122e066eb3fcfa69f0f449a2b5b3970/docs/images/ark/icp_client_config.png -------------------------------------------------------------------------------- /docs/images/ark/icp_create_namespace.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ibm-cloud-architecture/icp-backup/2056dea4f122e066eb3fcfa69f0f449a2b5b3970/docs/images/ark/icp_create_namespace.png -------------------------------------------------------------------------------- /docs/images/ark/icp_create_pv.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ibm-cloud-architecture/icp-backup/2056dea4f122e066eb3fcfa69f0f449a2b5b3970/docs/images/ark/icp_create_pv.png -------------------------------------------------------------------------------- /docs/images/ark/icp_create_pvc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ibm-cloud-architecture/icp-backup/2056dea4f122e066eb3fcfa69f0f449a2b5b3970/docs/images/ark/icp_create_pvc.png -------------------------------------------------------------------------------- /docs/mongodb.md: -------------------------------------------------------------------------------- 1 | # Backup and Restore the MongoDB Database in IBM Cloud Private (2.1.0.3 and Newer) 2 | 3 | IBM MongoDB datastore is used by IBM Cloud Private(ICP) to store information for OIDC service, metering service (IBM® Cloud Product Insights), Helm repository server, and Helm API server and more. It runs as a Kubernetes statefulset **icp-mongodb** on the Master Nodes. If you inspect your cluster you will notice the pods in this statefulset named **icp-mongodb-(increment)** that run one per each master and mount storage to local host path. The StatefulSet is exposed as a service as “mongodb”. 
4 | 5 | 6 | ## Topic Overview 7 | 8 | In this topic, we describe how to perform a backup and restore on this MongoDB instance in IBM Cloud Private. You may also use these techniques to take a backup any MongoDB instance running in your cluster. The steps included are as follows: 9 | 10 | * (Optional) Load data into the sample MongoDB 11 | * Perform a MongoDB backup 12 | * (Optional) Simulate data loss 13 | * Restore a MongoDB database 14 | * Perform data Validation 15 | 16 | Before going forward please NOTE: If using an ICP version prior to 3.1.1 `--sslCAFile /data/configdb/tls.crt` should be `--sslCAFile /ca/tls.crt` when using `mongo` `mongodump` or `mongorestore` commands. 17 | ### (Optional) Load data into the sample MongoDB 18 | 19 | Load some data into this database. First run the following command to connect: 20 | 21 | ```kubectl exec -n kube-system -it icp-mongodb-0 -- sh -c 'mongo --host rs0/mongodb:27017 --username $ADMIN_USER --password $ADMIN_PASSWORD --authenticationDatabase admin --ssl --sslCAFile /data/configdb/tls.crt --sslPEMKeyFile /work-dir/mongo.pem'``` 22 | 23 | You will be directed to the MongoDB CLI prompt. Run the following commands to load some data: 24 | ``` 25 | db.myCollection.insertOne({ key1: "value1" }); 26 | db.myCollection.insertOne({ key2: "value2" }); 27 | ``` 28 | 29 | Next, run the following command to retrieve the values: 30 | 31 | `db.myCollection.find()` 32 | 33 | ## Backup MongoDB 34 | MongoDB provides a tool that we will leverage for backup called **mongodump**. 35 | 36 | Backup data can be dumped to a persistent volume or to local filesystem of the master node. 37 | 38 | ### Dump backup onto local filesystem 39 | 40 | Run the following command to dump to the master node's filesystem. This will create a dump of all the databases at /var/lib/icp/mongodb/work-dir/backup/mongodump. 
41 | 42 | ```kubectl -n kube-system exec icp-mongodb-0 -- sh -c 'mkdir -p /work-dir/Backup/mongodump; mongodump --oplog --out /work-dir/Backup/mongodump --host rs0/mongodb:27017 --username $ADMIN_USER --password $ADMIN_PASSWORD --authenticationDatabase admin --ssl --sslCAFile /data/configdb/tls.crt --sslPEMKeyFile /work-dir/mongo.pem' ``` 43 | 44 | Backup data can then be archived with a timestamp and moved elsewhere. 45 | 46 | ### Dump backup onto a Persistent Volume 47 | 48 | Run the following commands to dump to a PV. The *mongodump-pv.yaml*, *mongodump-pvc.yaml*, *icp-mongodb-mongodump-job.yaml*, and *icp-mongodb-mongorestore-job.yaml* files can be found in `icp-backup/resources` of this repository. 49 | 50 | First, we need to create a PV. If you are going this route, you should consult kubernetes doc on how to create the PV you are looking for. https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistent-volumes 51 | 52 | For this example, we already created an NFS directory and added the nfs server ip and directory to the `mongodump-pv.yaml`. We will create our PV with the following command: 53 | 54 | ``` 55 | kubectl apply -f mongodump-pv.yaml 56 | ``` 57 | 58 | We then need to create a Persistent Volume Claim, which our Jobs can use to get access to the PV, by running the following command: 59 | 60 | 61 | ``` 62 | kubectl apply -f mongodump-pvc.yaml 63 | ``` 64 | 65 | Run the following command to dump the MongoDB database: 66 | 67 | ``` 68 | kubectl apply -f icp-mongodb-mongodump-job.yaml 69 | ``` 70 | 71 | This Kubernetes job will dump the MongoDB databases into the persistent volume created above. If this is your ICP cluster backup, make certain this PV is being secured, backed up, and saved. You will need the contents to perform a restore at a future date. 
72 | 73 | ## (Optional) Simulate data loss in MongoDB 74 | For proving out your technique, simulate some data loss in MongoDB by deleting the data inserted from the optional step previously described. Exec into your MongoDB pod. Run the following commands to delete one key: 75 | 76 | ```kubectl exec -n kube-system -it icp-mongodb-0 -- sh -c 'mongo --host rs0/mongodb:27017 --username $ADMIN_USER --password $ADMIN_PASSWORD --authenticationDatabase admin --ssl --sslCAFile /data/configdb/tls.crt --sslPEMKeyFile /work-dir/mongo.pem'``` 77 | 78 | ```db.myCollection.deleteOne ({ key1: "value1" });``` 79 | 80 | If you run: `db.myCollection.find()` you will see there is a single document in the collection. 81 | 82 | ## Restore the MongoDB Database 83 | 84 | ### Restore backup from local filesystem 85 | 86 | In the dump instructions, you dumped the mongoDB database into /var/lib/icp/mongodb/work-dir/backup/mongodump and presumably archived and moved it else where. To restore it, you need to move that archive back into /var/lib/icp/mongodb/work-dir/backup/mongodump, unarchive it and run the mongorestore command. 87 | 88 | Run the following to restore data saved to the master node's filesystem: 89 | 90 | ```kubectl -n kube-system exec icp-mongodb-0 -- sh -c 'mongorestore --host rs0/mongodb:27017 --username $ADMIN_USER --password $ADMIN_PASSWORD --authenticationDatabase admin --ssl --sslCAFile /data/configdb/tls.crt --sslPEMKeyFile /work-dir/mongo.pem /work-dir/Backup/mongodump'``` 91 | 92 | ### Restore backup from a Persistent Volume 93 | 94 | Run the following to restore data saved to a persistent volume. 
95 | 96 | To restore the MongoDB database, run the following command: 97 | ```kubectl apply -f icp-mongodb-mongorestore-job.yaml``` 98 | 99 | ## (Optional) Validate the data has been restored 100 | 101 | From **within** in the MongoDB CLI Pod, run the following commands: 102 | 103 | ```kubectl exec -n kube-system -it icp-mongodb-0 -- sh -c 'mongo --host rs0/mongodb:27017 --username $ADMIN_USER --password $ADMIN_PASSWORD --authenticationDatabase admin --ssl --sslCAFile /data/configdb/tls.crt --sslPEMKeyFile /work-dir/mongo.pem'``` 104 | 105 | `db.myCollection.find()` 106 | 107 | You should see the both key value pairs. 108 | -------------------------------------------------------------------------------- /docs/pvs.md: -------------------------------------------------------------------------------- 1 | # Backup and Restore the Persistent Volumes 2 | 3 | For the backup of Persistent Volumes develop procedures using the tools most suited (and potentially developed for) each storage solution. Include in-host backups for nodes in your solution that host path persistent volumes. If you use these, develop a standard volume on each node that can easily be identified and quarantined from the rest of the node. In the case of vSphereVolume plugin storage, verify with your VMware administrator that these vmdk's are indeed part of their usual backp / restore process (as they may be created dynamically and around typical processes). For GlusterFS, not only should you include backup of infrastructure and storage volumes, take care to ensure you have regular backups of the Heketi database (this should be part of best-practice Heketi / GlusterFS setup). Your enterprise likely has a solution suited for the NFS server, take care not to create a process / environment that is outside of these processes. 
4 | -------------------------------------------------------------------------------- /docs/registry.md: -------------------------------------------------------------------------------- 1 | # Backup and Restore the Docker Registry in IBM Cloud Private 2 | 3 | This topic covers backup and restore of the Docker Registry in IBM Cloud Private. If you do not have any images loaded in your registry, it may be useful to load one for testing purposes: 4 | 5 | ## Add an image in the ICP Docker Registry 6 | 7 | First follow the steps at [Configuring authentication for the Docker CLI](https://www.ibm.com/support/knowledgecenter/SSBS6K_2.1.0/manage_images/configuring_docker_cli.html) to configure authentication. Run the following commands in any machine that has access to the ICP master node and has Docker engine installed. 8 | 9 | Next, pull an nginx image: `docker pull nginx` 10 | 11 | You should see the output resembling the following: 12 | ```text 13 | patro:icp-backup edu$ docker pull nginx 14 | Using default tag: latest 15 | latest: Pulling from library/nginx 16 | 8176e34d5d92: Pull complete 17 | 5b19c1bdd74b: Pull complete 18 | 4e9f6296fa34: Pull complete 19 | Digest: sha256:4771d09578c7c6a65299e110b3ee1c0a2592f5ea2618d23e4ffe7a4cab1ce5de 20 | Status: Downloaded newer image for nginx:latest 21 | ``` 22 | 23 | Log in to your Docker Registry: `docker login mycluster.icp:8500` 24 | 25 | Provide the admin user and password when prompted. 
Tag the image, by running the following command: `docker tag nginx mycluster.icp:8500/default/nginx` 26 | 27 | Finally, push the image to the Docker Registry: `docker push mycluster.icp:8500/default/nginx` 28 | 29 | You will see output similar to the following: 30 | ``` 31 | patro:.docker edu$ docker push mycluster.icp:8500/default/nginx 32 | The push refers to repository [mycluster.icp:8500/default/nginx] 33 | e89b70d28795: Pushed 34 | 832a3ae4ac84: Pushed 35 | 014cf8bfcb2d: Pushed 36 | latest: digest: sha256:600bff7fb36d7992512f8c07abd50aac08db8f17c94e3c83e47d53435a1a6f7c size: 948 37 | ``` 38 | 39 | Finally, open your browser to: `https://$MASTER_ID:8443/console/images` 40 | 41 | You will see the nginx image listed. 42 | 43 | ## Back up the ICP Docker Registry 44 | 45 | Now that there are images loaded into the ICP Docker Registry, lets perform a backup by running the following command from one of the master nodes: 46 | 47 | ``` 48 | cd /var/lib/registry 49 | tar czvf /tmp/icp_dr.tar.gz . 50 | ``` 51 | 52 | Now move the file `/tmp/icp_dr.tar.gz` to a safe location, outside the master node. 53 | 54 | > If this process is automated, which is a good idea, move this file to a location that is included in either your shared storage or in-host backup routine. 55 | 56 | ## Simulating a Loss to the Docker Registry 57 | 58 | In the case that you do not have a actual recovery situation, you can simulate a loss to the Docker Registry. To do so simply delete the files under /var/lib/registry: `rm -rf /var/lib/registry/*` 59 | 60 | If you open your browser to: `https://$MASTER_ID:8443/console/images` You will see an empty response. 
61 | 62 | ### Restore your ICP Docker Registry 63 | 64 | To restore your Docker Registry, bring back to file `/tmp/icp_dr.tar.gz` to directory `/tmp` and run the following commands: 65 | 66 | ``` 67 | cd /var/lib/registry 68 | tar xvzf /tmp/icp_dr.tar.gz 69 | ``` 70 | 71 | Next, run the following command to recycle the image manager Pod: 72 | 73 | ``` 74 | kubectl delete pod image-manager-0 -n kube-system 75 | ``` 76 | 77 | If you re-open the URL `https://$MASTER_ID:8443/console/images`, you should see the images restored. 78 | -------------------------------------------------------------------------------- /docs/some.md: -------------------------------------------------------------------------------- 1 | # Backup and Restore Certain ICP Nodes 2 | 3 | ### Validate the Environment Works with just 2 Master Nodes 4 | 5 | We are looking for specific procedures / recordings / practices to prove out the destruction of a Master Node, evidence of Master Node fail-over, continued operation of the cluster and finally detailed restoration of the failed Master. All of this in multi-Master Node clusters. We would like to see this performed on multiple hypervisors, public clouds and baremetal. 6 | 7 | ### Validate the ICP Components 8 | 9 | This includes showing health of each ICP component and how to recognize health in customer environments. This is a good opportunity to share a dashboard from Kibana and perhaps Grafana. 10 | 11 | ## Note on Proxy Nodes 12 | 13 | It is recommended to backup and changes made to Proxy Node configuration, however we do not recommend backup and restore of the proxy nodes themselves. These nodes can be recreated from command line. 14 | 15 | 16 | ## Destroy a Worker Node 17 | 18 | We don't backup and restore Worker Nodes, but we do keep track and manage backups of persistent storage for stateful workload. We should design a test of a Worker Node going "belly-up" and seeing the PV relaease and follow the workload as it finds another Worker Node. 
We should attempt to identify (programitically) the death of a Worker Node and trigger the deletion and creation of an identidal node keeping in mind taints, hostgroups and the like. 19 | -------------------------------------------------------------------------------- /helm-charts/icp-cloudant-backup/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /helm-charts/icp-cloudant-backup/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | description: ICP cloudant backup utility deployed as a Kubernetes CronJob running on a master node in an ICP cluster. 3 | name: icp-cloudant-backup 4 | version: 0.3.0 5 | keywords: 6 | - ICP 7 | - Cloudant 8 | - Backup 9 | maintainers: # (optional) 10 | - name: "Peter Van Sickel" 11 | email: "pvs@us.ibm.com" 12 | appVersion: 0.3.0 13 | -------------------------------------------------------------------------------- /helm-charts/icp-cloudant-backup/README.md: -------------------------------------------------------------------------------- 1 | # ICP Cloudant Backup Helm Chart 2 | 3 | The Cloudant backup helm chart deploys the ICP Cloudant backup utility to an ICP cluster. 4 | 5 | Two cron jobs get deployed: 6 | - icp-cloudant-backup 7 | - icp-cloudant-backup-cleanup 8 | 9 | # ICP Cloudant Backup 10 | 11 | The ICP Cloudant backup cronjob, takes a backup of the ICP Cloudant databases. 
Certain databases may be excluded from the backup based on the names provided with the `--exclude` parameter as specified using the `.Values.backup.args.exclude` value. 12 | 13 | The backup schedule is set using `.Values.backup.cronjob.schedule` value. 14 | 15 | # ICP Cloudant Backup Cleanup 16 | 17 | The backup cleanup cronjob deletes any directories above the retention count. 18 | 19 | The cleanup schedule is set using the `.Values.cleanup.cronjob.schedule` 20 | -------------------------------------------------------------------------------- /helm-charts/icp-cloudant-backup/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "icp-cloudant-backup.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "icp-cloudant-backup.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 
29 | */}} 30 | {{- define "icp-cloudant-backup.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | -------------------------------------------------------------------------------- /helm-charts/icp-cloudant-backup/templates/backup-cleanup-cronjob.yaml: -------------------------------------------------------------------------------- 1 | # Note: v2alpha1 also appears to be available. 2 | # Good article on Kubernetes cronjob: https://chrisshort.net/kubernetes-cron-jobs/ 3 | apiVersion: batch/v1beta1 4 | kind: CronJob 5 | metadata: 6 | name: icp-cloudant-backup-cleanup 7 | spec: 8 | # Note: Kubernetes uses UTC time for scheduling. 9 | schedule: {{ .Values.cleanup.cronjob.schedule }} 10 | jobTemplate: 11 | spec: 12 | template: 13 | spec: 14 | restartPolicy: OnFailure 15 | containers: 16 | - name: icp-cloudant-backup-cleanup 17 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" 18 | imagePullPolicy: "{{ .Values.image.pullPolicy }}" 19 | command: [ "/backup-cleanup.sh" ] 20 | args: [ "--backup-home", "{{ .Values.common.args.backupHome }}", 21 | "--retain", "{{ .Values.cleanup.args.retain }}" ] 22 | volumeMounts: 23 | - name: data 24 | mountPath: {{ .Values.common.args.backupHome }} 25 | volumes: 26 | - name: data 27 | {{- if .Values.persistence.enabled }} 28 | persistentVolumeClaim: 29 | claimName: icp-cloudant-backup-pvc 30 | {{- else }} 31 | emptyDir: {} 32 | {{- end -}} 33 | -------------------------------------------------------------------------------- /helm-charts/icp-cloudant-backup/templates/backup-storage-pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if and ( .Values.persistence.enabled ) (not .Values.persistence.pvc_exists) }} 2 | # Backup PVC template 3 | # Picked up the start for this template here: 4 | # https://github.com/mdn/helm-charts/blob/master/mysql/templates/pvc.yaml 5 | # 6 | kind: PersistentVolumeClaim 
7 | apiVersion: v1 8 | metadata: 9 | name: icp-cloudant-backup-pvc 10 | annotations: 11 | # Don't delete the PVC via helm delete. 12 | "helm.sh/resource-policy": keep 13 | labels: 14 | app: icp-cloudant-backup 15 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" 16 | release: "{{ .Release.Name }}" 17 | heritage: "{{ .Release.Service }}" 18 | annotations: 19 | {{- if .Values.persistence.storageClass }} 20 | volume.beta.kubernetes.io/storage-class: {{ .Values.persistence.storageClass | quote }} 21 | {{- else }} 22 | volume.alpha.kubernetes.io/storage-class: default 23 | {{- end }} 24 | spec: 25 | accessModes: 26 | - {{ .Values.persistence.accessMode | quote }} 27 | resources: 28 | requests: 29 | storage: {{ .Values.persistence.size | quote }} 30 | {{- end }} 31 | -------------------------------------------------------------------------------- /helm-charts/icp-cloudant-backup/templates/icp-cloudant-backup-cronjob.yaml: -------------------------------------------------------------------------------- 1 | # Note: v2alpha1 also appears to be available. 2 | # Good article on Kubernetes cronjob: https://chrisshort.net/kubernetes-cron-jobs/ 3 | apiVersion: batch/v1beta1 4 | kind: CronJob 5 | metadata: 6 | name: icp-cloudant-backup 7 | spec: 8 | # Note: Kubernetes uses UTC time for scheduling. 
9 | schedule: {{ .Values.backup.cronjob.schedule }} 10 | jobTemplate: 11 | spec: 12 | template: 13 | spec: 14 | restartPolicy: OnFailure 15 | containers: 16 | - name: icp-cloudant-backup 17 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" 18 | imagePullPolicy: "{{ .Values.image.pullPolicy }}" 19 | command: [ "/cloudant-backup.sh" ] 20 | args: [ "--dbhost", "{{ .Values.common.args.dbhost }}", 21 | "--backup-home", "{{ .Values.common.args.backupHome }}", 22 | "--exclude", "{{ .Values.backup.args.exclude }}" ] 23 | volumeMounts: 24 | - name: icp-cloudant-backup 25 | mountPath: {{ .Values.common.args.backupHome }} 26 | volumes: 27 | - name: icp-cloudant-backup 28 | {{- if .Values.persistence.enabled }} 29 | persistentVolumeClaim: 30 | claimName: icp-cloudant-backup-pvc 31 | {{- else }} 32 | emptyDir: {} 33 | {{- end -}} 34 | -------------------------------------------------------------------------------- /helm-charts/icp-cloudant-backup/templates/icp-cloudant-restore-job.yaml: -------------------------------------------------------------------------------- 1 | # Note: v2alpha1 also appears to be available. 
2 | # Restore the ICP Cloudant databases 3 | apiVersion: batch/v1 4 | kind: Job 5 | metadata: 6 | name: icp-cloudant-restore 7 | spec: 8 | template: 9 | spec: 10 | restartPolicy: Never 11 | containers: 12 | - name: icp-cloudant-restore 13 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" 14 | imagePullPolicy: "{{ .Values.image.pullPolicy }}" 15 | command: [ "/cloudant-restore.sh" ] 16 | args: [ "--dbhost", "{{ .Values.common.args.dbhost }}", 17 | "--backup-home", "{{ .Values.common.args.backupHome }}" 18 | ] 19 | volumeMounts: 20 | - name: data 21 | mountPath: {{ .Values.common.args.backupHome }} 22 | volumes: 23 | - name: data 24 | {{- if .Values.persistence.enabled }} 25 | persistentVolumeClaim: 26 | claimName: icp-cloudant-backup-pvc 27 | {{- else }} 28 | emptyDir: {} 29 | {{- end -}} 30 | -------------------------------------------------------------------------------- /helm-charts/icp-cloudant-backup/values.yaml: -------------------------------------------------------------------------------- 1 | # See article on Kubernetes cronjob: https://chrisshort.net/kubernetes-cron-jobs/ 2 | # For cronjob schedule string just search the Internet for the schedule 3 | # you are interested in, e.g., cron job every 12 hours 4 | # NOTE: Kubernetes uses UTC time for scheduling. 5 | 6 | # Args common to backup, cleanup, etc 7 | common: 8 | args: 9 | dbhost: cloudantdb.kube-system 10 | backupHome: /data/backups 11 | 12 | backup: 13 | # cloudant-backup.sh command args 14 | args: 15 | exclude: "metrics metrics_app" 16 | cronjob: 17 | # Every noon, midnight (UTC): "0 */12 * * *" 18 | # Every hour: "0 * * * *" 19 | schedule: "0 * * * *" 20 | 21 | cleanup: 22 | args: 23 | # Use retain to specify the number of backups to retain. 24 | retain: 3 25 | cronjob: 26 | # Every noon, midnight (UTC) 27 | schedule: "0 */12 * * *" 28 | 29 | 30 | ## Persist data to a persistent volume 31 | persistence: 32 | # If the PVC has already been created then set pvc_exists to true.
33 | # TODO: Investigate way to inject detection of the PVC at deployment. 34 | pvc_exists: true 35 | enabled: true 36 | ## If defined, volume.beta.kubernetes.io/storage-class: 37 | ## Default: volume.alpha.kubernetes.io/storage-class: default 38 | ## 39 | storageClass: 40 | accessMode: ReadWriteOnce 41 | size: 5Gi 42 | 43 | replicaCount: 1 44 | 45 | image: 46 | pullPolicy: Always 47 | tag: latest 48 | repository: mycluster.icp:8500/default/ibmcase/icp-cloudant-backup 49 | 50 | # TBD: Run icp-cloudant-backup on one of the master nodes. 51 | nodeSelector: 52 | role: master 53 | 54 | job: 55 | backoffLimit: 3 56 | -------------------------------------------------------------------------------- /images/icp-backup-flow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ibm-cloud-architecture/icp-backup/2056dea4f122e066eb3fcfa69f0f449a2b5b3970/images/icp-backup-flow.png -------------------------------------------------------------------------------- /images/icp-backup-flow.xml: -------------------------------------------------------------------------------- 1 | 
7Vrfk5owEP5reO0QIqiP1dr2Hjq9GR96fUwhAnMhYUI8tX99ExNAiJ4/jqo3Bw+abDabzX7fhgV14DRbf+MoT36wCBPHc6O1A784ngcAcOWXkmy0ZDQItCDmaWSUasE8/YuN0MyLl2mEi4aiYIyING8KQ0YpDkVDhjhnq6bagpHmqjmKsSWYh4jY0l9pJJJyX65bD3zHaZyYpUe+GfiDwueYsyU16zkeXGwvPZyh0pbRLxIUsdWOCM4cOOWMCd3K1lNMVGzLsOl5Xw+MVn5zTMUpEzw94QWRpdn6Ay0E2uo8TB+Nk2JTBmaVpALPcxSq/kqC78BJIjIie0A2CfqDyaSKwZQRxuUQZVTqTwrB2TMuhTI07vaSI4uUkJbyglFhiAGg6e+bikgaUykL5Y6xHJxsV8Zqf2o4JqgoTDtkWRqatvalBFfGfWIigbnA64PRBBVGkvuYZVjwjVQxEwYGVcN6UKK8qjk0MqJkhz2BkSHD2rgyXCMnGwa8/UBCC8i5YHmJojtlWS7jSkXhKKcClCnk9KeUKMiWSntGX1LOaKY2LB2SNJYW6IIjGa9lKJYc95Q4ixIgOM6JSqdrUgwsUvzMMUcC99l9AZQwADdLb99CskpZO8F7VM85s0fHUa1kXcMaHIZV2fDcOUV5kTDRH8Pdouy5V0R5eBjlR8yLtBD6divv2FwVpj20b4F2AK6H7MhC1gIP0+izeiJR0VJhUcF4A3r6smIJLgCP4IUMxUSGmW+eDELbzm/V+eSr7joVT8a8atcjep84sp6jWrDJWLAlD42WCY9APMZip3C1wd1Bz98DXinjmCCRvjSd2IeoWeGRpdtU21+wQ9jihPbdTNp9jGrZGTftVGVAaUdv2LKzpVe16ZMYN+4ZdxbjoM24Qc+4cxhXvpP5gJRTZJubXTIuEhYzisislrbue5dRdGBTdHhLikLYqojbd8pTOQqHRwx1SFLQk/TaJA1ueo76HZF0MG4aGrVc6ZCj9svWj8LRzjjn9wfjmaSzXwy/Z9JpnpW0A3dwMAY2SW9bYP6vg7EqXY+QVFIJbXbUcqVQHHY48FqlLHRf9cvSH72uP3DBa/qyoT2+OMPst+x9hnWZYf6dZVgAjiTGyRnWMgTG18kw4L+zDLN//egzrMsMG95bhnktQvmXZljbkHudDDP13Z0mmOzW/zDR6vXfeODsHw== -------------------------------------------------------------------------------- /resources/cloudant_backup_pvc.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: cloudant-backup 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | resources: 9 | requests: 10 | storage: 3Gi 11 | -------------------------------------------------------------------------------- /resources/icp-cloudant-backup-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: icp-cloudant-backup 5 | spec: 6 | template: 7 | spec: 8 | volumes: 9 | - name: cloudant-backup 10 | persistentVolumeClaim: 11 | claimName: 
cloudant-backup 12 | containers: 13 | - name: icp-cloudant-backup 14 | image: "patrocinio/icp-backup-cloudant-backup:latest" 15 | command: ["/cloudant-backup.sh"] 16 | volumeMounts: 17 | - mountPath: "/backups" 18 | name: cloudant-backup 19 | restartPolicy: Never 20 | backoffLimit: 3 21 | -------------------------------------------------------------------------------- /resources/icp-cloudant-restore-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: icp-cloudant-restore 5 | spec: 6 | template: 7 | spec: 8 | volumes: 9 | - name: cloudant-backup 10 | persistentVolumeClaim: 11 | claimName: cloudant-backup 12 | containers: 13 | - name: icp-cloudant-restore 14 | image: "patrocinio/icp-backup-cloudant-backup:latest" 15 | env: 16 | - name: DBNAME 17 | valueFrom: 18 | configMapKeyRef: 19 | name: cloudant-dbs 20 | key: dbnames 21 | command: ["/cloudant-restore.sh"] 22 | args: ["--dbnames", "$(DBNAME)"] 23 | volumeMounts: 24 | - mountPath: "/backups" 25 | name: cloudant-backup 26 | restartPolicy: Never 27 | backoffLimit: 3 28 | -------------------------------------------------------------------------------- /resources/icp-mariadb-backup-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: icp-mariadb-backup 5 | spec: 6 | template: 7 | spec: 8 | volumes: 9 | - name: mariadb-backup 10 | persistentVolumeClaim: 11 | claimName: mariadb-backup-pvc 12 | containers: 13 | - name: icp-mariadb-backup 14 | image: "mycluster.icp:8500/default/ibmcase/icp-mariadb-backup:latest" 15 | command: ["/mariadb-backup.sh", "--backup-home", "/data/backups"] 16 | volumeMounts: 17 | - mountPath: "/data/backups" 18 | name: mariadb-backup 19 | restartPolicy: Never 20 | backoffLimit: 3 21 | -------------------------------------------------------------------------------- 
/resources/icp-mariadb-restore-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: icp-mariadb-restore 5 | spec: 6 | template: 7 | spec: 8 | volumes: 9 | - name: mariadb-restore 10 | persistentVolumeClaim: 11 | claimName: mariadb-backup-pvc 12 | containers: 13 | - name: icp-mariadb-restore 14 | image: "mycluster.icp:8500/default/ibmcase/icp-mariadb-backup:latest" 15 | command: ["/mariadb-restore.sh", "--backup-home", "/data/backups"] 16 | volumeMounts: 17 | - mountPath: "/data/backups" 18 | name: mariadb-backup 19 | restartPolicy: Never 20 | backoffLimit: 3 21 | -------------------------------------------------------------------------------- /resources/icp-mongodb-mongodump-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: icp-mongodb-backup 5 | namespace: kube-system 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: icp-mongodb-backup 11 | image: ibmcom/icp-mongodb 12 | command: ["bash", "-c", "cat /cred/mongo-certs/tls.crt /cred/mongo-certs/tls.key > /mongo.pem; cat /cred/cluster-ca/tls.crt /cred/cluster-ca/tls.key > /ca.pem; mongodump --oplog --out /dump --host mongodb:$MONGODB_SERVICE_PORT --username $ADMIN_USER --password $ADMIN_PASSWORD --authenticationDatabase admin --ssl --sslCAFile /ca.pem --sslPEMKeyFile /mongo.pem"] 13 | volumeMounts: 14 | - mountPath: "/dump" 15 | name: mongodump 16 | - mountPath: "/cred/mongo-certs" 17 | name: icp-mongodb-client-cert 18 | - mountPath: "/cred/cluster-ca" 19 | name: cluster-ca-cert 20 | env: 21 | - name: ADMIN_USER 22 | valueFrom: 23 | secretKeyRef: 24 | name: icp-mongodb-admin 25 | key: user 26 | - name: ADMIN_PASSWORD 27 | valueFrom: 28 | secretKeyRef: 29 | name: icp-mongodb-admin 30 | key: password 31 | volumes: 32 | - name: mongodump 33 | persistentVolumeClaim: 34 | claimName: my-mongodump 35 | - name: 
icp-mongodb-client-cert 36 | secret: 37 | defaultMode: 0400 38 | secretName: icp-mongodb-client-cert 39 | - name: cluster-ca-cert 40 | secret: 41 | defaultMode: 400 42 | secretName: cluster-ca-cert 43 | restartPolicy: Never 44 | -------------------------------------------------------------------------------- /resources/icp-mongodb-mongorestore-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: icp-mongodb-restore 5 | namespace: kube-system 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: icp-mongodb-restore 11 | image: ibmcom/icp-mongodb 12 | command: ["bash", "-c", "cat /cred/mongo-certs/tls.crt /cred/mongo-certs/tls.key > /mongo.pem; cat /cred/cluster-ca/tls.crt /cred/cluster-ca/tls.key > /ca.pem; mongorestore --oplogReplay --host mongodb:$MONGODB_SERVICE_PORT --username $ADMIN_USER --password $ADMIN_PASSWORD --authenticationDatabase admin --ssl --sslCAFile /ca.pem --sslPEMKeyFile /mongo.pem /dump"] 13 | volumeMounts: 14 | - mountPath: "/dump" 15 | name: mongodump 16 | - mountPath: "/cred/mongo-certs" 17 | name: icp-mongodb-client-cert 18 | - mountPath: "/cred/cluster-ca" 19 | name: cluster-ca-cert 20 | env: 21 | - name: ADMIN_USER 22 | valueFrom: 23 | secretKeyRef: 24 | name: icp-mongodb-admin 25 | key: user 26 | - name: ADMIN_PASSWORD 27 | valueFrom: 28 | secretKeyRef: 29 | name: icp-mongodb-admin 30 | key: password 31 | volumes: 32 | - name: mongodump 33 | persistentVolumeClaim: 34 | claimName: my-mongodump 35 | - name: icp-mongodb-client-cert 36 | secret: 37 | defaultMode: 0400 38 | secretName: icp-mongodb-client-cert 39 | - name: cluster-ca-cert 40 | secret: 41 | defaultMode: 400 42 | secretName: cluster-ca-cert 43 | restartPolicy: Never 44 | -------------------------------------------------------------------------------- /resources/mariadb-backup-pvc.yaml: -------------------------------------------------------------------------------- 1 | 
kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: mariadb-backup-pvc 5 | labels: 6 | app: icp-mariadb-backup 7 | annotations: 8 | volume.alpha.kubernetes.io/storage-class: default 9 | spec: 10 | accessModes: 11 | - ReadWriteOnce 12 | resources: 13 | requests: 14 | storage: 5Gi 15 | -------------------------------------------------------------------------------- /resources/mongodump-pv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: my-mongodump 5 | namespace: kube-system 6 | spec: 7 | capacity: 8 | storage: 4Gi 9 | volumeMode: Filesystem 10 | accessModes: 11 | - ReadWriteOnce 12 | persistentVolumeReclaimPolicy: Retain 13 | nfs: 14 | path: 15 | server: 16 | -------------------------------------------------------------------------------- /resources/mongodump-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: my-mongodump 5 | namespace: kube-system 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | resources: 10 | requests: 11 | storage: 4Gi 12 | -------------------------------------------------------------------------------- /scripts/backupCloudant.sh: -------------------------------------------------------------------------------- 1 | JOB=icp-cloudant-backup 2 | 3 | echo Deleting job 4 | kubectl delete job $JOB 5 | 6 | echo Creating job 7 | kubectl create -f ../resources/icp-cloudant-backup-job.yaml 8 | 9 | kubectl describe job $JOB -------------------------------------------------------------------------------- /scripts/backupEtcd.sh: -------------------------------------------------------------------------------- 1 | BASE_FILE_NAME=/data/etcd 2 | 3 | . 
./etcd.sh 4 | 5 | CURRENT_TIME=$(date "+%Y.%m.%d-%H.%M.%S") 6 | echo "Current Time : $CURRENT_TIME" 7 | 8 | FILE_NAME="$BASE_FILE_NAME.$CURRENT_TIME.db" 9 | echo "Back up to the following file: " "$FILE_NAME" 10 | 11 | $etcdctl3 snapshot save $FILE_NAME -------------------------------------------------------------------------------- /scripts/buildComponent.sh: -------------------------------------------------------------------------------- 1 | COMPONENT=$1 2 | VERSION=$2 3 | 4 | echo Building component $COMPONENT at version $VERSION 5 | cd ../src/$COMPONENT 6 | 7 | IMAGE=patrocinio/icp-backup-$COMPONENT:$VERSION 8 | docker build -f Dockerfile-$VERSION --build-arg version=$VERSION -t $IMAGE . 9 | -------------------------------------------------------------------------------- /scripts/buildPush.sh: -------------------------------------------------------------------------------- 1 | latest=$(ls ../src/$1 | sort -n | tail -1) 2 | echo Version $latest 3 | ./buildComponent.sh $1 $latest 4 | ./pushComponent.sh $1 $latest 5 | -------------------------------------------------------------------------------- /scripts/cloudant/cloudant-db-node-port.yaml: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "Service", 4 | "metadata": { 5 | "name": "cloudantdb-ext", 6 | "namespace": "kube-system", 7 | "resourceVersion": "1240", 8 | "labels": { 9 | "app": "icp-ds" 10 | } 11 | }, 12 | "spec": { 13 | "ports": [ 14 | { 15 | "name": "p4369", 16 | "protocol": "TCP", 17 | "port": 4369, 18 | "targetPort": 4369 19 | }, 20 | { 21 | "name": "p5984", 22 | "protocol": "TCP", 23 | "port": 5984, 24 | "targetPort": 5984 25 | }, 26 | { 27 | "name": "p6984", 28 | "protocol": "TCP", 29 | "port": 6984, 30 | "targetPort": 6984 31 | }, 32 | { 33 | "name": "p9000", 34 | "protocol": "TCP", 35 | "port": 9000, 36 | "targetPort": 9000 37 | } 38 | ], 39 | "selector": { 40 | "app": "icp-ds" 41 | }, 42 | "type": "NodePort", 43 | 
"sessionAffinity": "None" 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /scripts/cloudant/externalize-cloudantdb-service.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Licensed Material - Property of IBM 4 | # 5724-I63, 5724-H88, (C) Copyright IBM Corp. 2018 - All Rights Reserved. 5 | # US Government Users Restricted Rights - Use, duplication or disclosure 6 | # restricted by GSA ADP Schedule Contract with IBM Corp. 7 | # 8 | # DISCLAIMER: 9 | # The following source code is sample code created by IBM Corporation. 10 | # This sample code is provided to you solely for the purpose of assisting you 11 | # in the use of the product. The code is provided 'AS IS', without warranty or 12 | # condition of any kind. IBM shall not be liable for any damages arising out of 13 | # your use of the sample code, even if IBM has been advised of the possibility 14 | # of such damages. 15 | # 16 | # DESCRIPTION: 17 | # Use the cloudant-db-node-port.yaml from the icp-backup git repo 18 | # to externalize the ICP cloudantdb service. 19 | # 20 | # Pre-reqs: 21 | # 1. Clone 22 | # 2. kubectl is needed to apply the yaml to externalize the service. 23 | # 24 | # Assumptions: 25 | # 1. User has a current kube context configured. (See icp-client-config.sh) 26 | # 2. The default place to run this script from the icp-backup/scripts directory. 27 | # 28 | # 29 | ################################################################################ 30 | function usage { 31 | echo "" 32 | echo "Usage: externalize-cloudantdb-service.sh [options]" 33 | echo " --yaml-path - (optional) Path to yaml file that externalizes the cloudantdb service." 34 | echo " Defaults to cloudant-db-node-port.yaml in the current directory." 
35 | echo "" 36 | echo " --help|-h - emit this usage information" 37 | echo "" 38 | echo " - and -- are accepted as keyword argument indicators" 39 | echo "" 40 | echo "Sample invocations:" 41 | echo " ./externalize-cloudantdb-service.sh" 42 | echo " ./externalize-cloudantdb-service.sh --yaml-path ../../resources/cloudant-db-node-port.yaml" 43 | echo "" 44 | } 45 | 46 | # The info() function is used to emit log messages. 47 | # It is assumed that SCRIPT is set in the caller. 48 | function info { 49 | local lineno=$1; shift 50 | local ts=$(date +[%Y/%m/%d-%T]) 51 | echo "$ts $SCRIPT($lineno) $*" 52 | } 53 | 54 | ############ "Main" starts here 55 | SCRIPT=${0##*/} 56 | 57 | info $LINENO "BEGIN $SCRIPT" 58 | 59 | yaml_path="" 60 | 61 | # process the input args 62 | # For keyword-value arguments the arg gets the keyword and 63 | # the case statement assigns the value to a script variable. 64 | # If any "switch" args are added to the command line args, 65 | # then it wouldn't need a shift after processing the switch 66 | # keyword. The script variable for a switch argument would 67 | # be initialized to "false" or the empty string and if the 68 | # switch is provided on the command line it would be assigned 69 | # "true". 70 | # 71 | while (( $# > 0 )); do 72 | arg=$1 73 | case $arg in 74 | -h|--help ) usage; exit 0 75 | ;; 76 | 77 | -yaml-path|--yaml-path ) yaml_path=$2; shift 78 | ;; 79 | 80 | * ) usage; 81 | info $LINENO "ERROR: Unknown option: $arg in command line." 82 | exit 1 83 | ;; 84 | esac 85 | # shift to next key-value pair 86 | shift 87 | done 88 | 89 | if [ -z "$yaml_path" ]; then 90 | yaml_path=cloudant-db-node-port.yaml 91 | fi 92 | 93 | if [ ! -f "$yaml_path" ]; then 94 | info $LINENO "ERROR: $yaml_path does not exist." 
95 | exit 1 96 | fi 97 | 98 | exists=$(kubectl get svc --namespace=kube-system | grep cloudantdb-ext) 99 | if [ -z "$exists" ]; then 100 | kubectl --namespace=kube-system apply -f "$yaml_path" 101 | else 102 | info $LINENO "The cloudantdb-ext service is already defined:" 103 | info $LINENO "$exists" 104 | fi 105 | 106 | info $LINENO "END $SCRIPT" 107 | -------------------------------------------------------------------------------- /scripts/configureHelmCLI.sh: -------------------------------------------------------------------------------- 1 | MASTER_IP=$1 2 | 3 | echo Configuring Helm CLI for ICP on $MASTER_IP 4 | cloudctl login -a https://$MASTER_IP:8443 --skip-ssl-validation 5 | -------------------------------------------------------------------------------- /scripts/createCloudantPVC.sh: -------------------------------------------------------------------------------- 1 | echo Creating Cloudant PVC... 2 | 3 | kubectl delete pvc cloudant-backup 4 | 5 | kubectl create -f ../resources/cloudant_backup_pvc.yaml 6 | -------------------------------------------------------------------------------- /scripts/createConfigMaps.sh: -------------------------------------------------------------------------------- 1 | COUNTER=0 2 | RANGE=100 3 | SNAKE_SIZE=20 4 | 5 | while true 6 | do 7 | 8 | let DELETE=($RANGE+$COUNTER-$SNAKE_SIZE)%100 9 | echo "Deleting key snake-$DELETE" 10 | kubectl delete configmap snake-$DELETE 11 | 12 | CONFIG_MAP=snake-$COUNTER 13 | KEY=k-$COUNTER 14 | VALUE=$(date) 15 | echo "Adding key: $KEY, value: $VALUE" 16 | kubectl create configmap $CONFIG_MAP --from-literal="$KEY=$VALUE" 17 | 18 | let COUNTER=(COUNTER+1)%RANGE 19 | 20 | count=$(kubectl get cm | grep snake- | wc -l) 21 | 22 | echo "Number of config maps: $count" 23 | 24 | sleep 1 25 | done -------------------------------------------------------------------------------- /scripts/createVolumes.sh: -------------------------------------------------------------------------------- 1 | 
FILE_SERVER=fsf-wdc0701b-fz.adn.networklayer.com 2 | FILE_PATH=/IBM02SV625675_7/data01 3 | TEMP_FILE=/tmp/volume.yaml 4 | 5 | # args: 6 | # 1: file 7 | # 2: begin 8 | # 3: end 9 | function create_pv () { 10 | for i in $(eval echo "{$2..$3}") 11 | do 12 | echo Creating PV $i 13 | kubectl delete pv d-${i} 14 | sed s/FILE_SERVER/$FILE_SERVER/g < volume_config/$1.yaml | \ 15 | sed s:FILE_PATH:$FILE_PATH:g | \ 16 | sed s/FILE_SYSTEM/d-${i}/g > $TEMP_FILE 17 | 18 | cat $TEMP_FILE 19 | kubectl create -f /tmp/volume.yaml 20 | done 21 | } 22 | 23 | create_pv 'rwo' 1 20 24 | create_pv 'rwx' 21 40 25 | create_pv 'rwx-large' 41 50 26 | create_pv 'rwo-large' 51 60 27 | 28 | -------------------------------------------------------------------------------- /scripts/etcd.sh: -------------------------------------------------------------------------------- 1 | export org=ibmcom 2 | export repo=etcd 3 | export tag=3.2.24 4 | #export endpoint=10.0.0.1 5 | 6 | export etcdctl2="docker run --entrypoint=etcdctl -v /etc/cfc/conf/etcd:/certs -v /var/lib/etcd:/var/lib/etcd -v /tmp:/data $org/$repo:$tag --cert-file=/certs/client.pem --key-file=/certs/client-key.pem --ca-file=/certs/ca.pem --endpoints https://${endpoint}:4001" 7 | export etcdctl3="docker run --entrypoint=etcdctl -e ETCDCTL_API=3 -v /tmp:/data -v /etc/cfc/conf/etcd:/certs -v /var/lib/etcd:/var/lib/etcd $org/$repo:$tag --cert /certs/client.pem --key /certs/client-key.pem --cacert /certs/ca.pem --endpoints https://${endpoint}:4001" 8 | -------------------------------------------------------------------------------- /scripts/mariadb/ansible/archive_mariadb_on_masters.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Description: 3 | # Create an archive of the /var/lib/mysql directory 4 | # on the master nodes. 5 | # 6 | # Stop docker on the master nodes. 7 | # Create the archive of the mariadb working directory 8 | # Start docker on the master nodes. 
9 | # 10 | # The archive is put in /tmp/mariadb 11 | # It is assumed there is sufficient space in /tmp to hold the mariadb archive. 12 | # The archive is typically less that 5 MB. 13 | # 14 | # INPUTS: 15 | # vars: 16 | # master_nodes - a string that defines the master nodes 17 | # master_nodes can be the name of a group in the inventory. 18 | # master_nodes can also be a regular expression that matches 19 | # the master node names (and only them) in the inventory. 20 | # 21 | # You can define the vars on the ansible-playbook command line using --extra-vars. 22 | # Or define vars in your hosts inventory or any of the other ways to define 23 | # Ansible variables. 24 | # The --inventory option can be used to provide a path to an inventory file 25 | # on the ansible-playbook command line. 26 | # 27 | # The tasks where root is needed use the "become" option. 28 | # 29 | # Sample invocation: 30 | # ansible-playbook archive_mariadb_on_master_nodes.yml --extra-vars "master_nodes=master* 31 | # 32 | # 33 | 34 | - hosts: "{{ master_nodes }}" 35 | 36 | tasks: 37 | - name: Stop docker 38 | service: name=docker state=stopped 39 | become: True 40 | 41 | - name: Create mariadb-backup directory in tmp 42 | file: 43 | path: /tmp/mariadb/backups 44 | state: directory 45 | 46 | - name: archive mariadb database working directory 47 | archive: 48 | path: /var/lib/mysql 49 | dest: /tmp/mariadb/backups/mariadb-backup.tgz 50 | 51 | - name: Start docker 52 | service: name=docker state=started 53 | become: True 54 | 55 | 56 | ... 
57 | -------------------------------------------------------------------------------- /scripts/multimaster-etcd-restore.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | data_dir="/var/lib/etcd" 3 | restore_dir="/var/lib/etcd/restored" 4 | 5 | 6 | ## Get etcd docker image details 7 | etcd_image=$(jq -r '.spec.containers[].image' /etc/cfc/podbackup/etcd.json) 8 | volume_mounts="-v /tmp:/data -v /etc/cfc/conf/etcd:/certs -v /var/lib/etcd:/var/lib/etcd" 9 | self=$(jq -r '.spec.containers[].command[] | select(contains("advertise-client-urls="))' /etc/cfc/podbackup/etcd.json | cut -d= -f2) 10 | etcdctl3="docker run --entrypoint=etcdctl -e ETCDCTL_API=3 ${volume_mounts} ${etcd_image} --cert /certs/client.pem --key /certs/client-key.pem --cacert /certs/ca.pem --endpoints ${self}" 11 | etcdctl2="docker run --entrypoint=etcdctl ${volume_mounts} ${etcd_image} --cert /certs/client.pem --key /certs/client-key.pem --cacert /certs/ca.pem --endpoints ${self}" 12 | 13 | ## Get etcd cluster settings 14 | node_name=$(jq -r '.spec.containers[].command[] | select(contains("name="))' /etc/cfc/podbackup/etcd.json) 15 | initial_advertise_peer_urls=$(jq -r '.spec.containers[].command[] | select(contains("initial-advertise-peer-urls="))' /etc/cfc/podbackup/etcd.json) 16 | initial_cluster=$(jq -r '.spec.containers[].command[] | select(contains("initial-cluster="))' /etc/cfc/podbackup/etcd.json) 17 | initial_cluster_token=$(jq -r '.spec.containers[].command[] | select(contains("initial-cluster-token="))' /etc/cfc/podbackup/etcd.json) 18 | 19 | 20 | ## Run the restore on the node 21 | $etcdctl3 snapshot restore /data/snapshot.db \ 22 | --data-dir=$restore_dir \ 23 | $node_name \ 24 | $initial_advertise_peer_urls \ 25 | $initial_cluster_token \ 26 | $initial_cluster 27 | 28 | if [[ "$?" 
== "0" ]] 29 | then 30 | echo "Restore successful" 31 | else 32 | echo "Restore failed" 33 | fi 34 | -------------------------------------------------------------------------------- /scripts/purge_kubelet_pods.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mount | grep kubelet | awk '{ system("umount "$3)}' 4 | rm -rf /var/lib/kubelet/pods 5 | -------------------------------------------------------------------------------- /scripts/pushComponent.sh: -------------------------------------------------------------------------------- 1 | COMPONENT=$1 2 | VERSION=$2 3 | 4 | IMAGE=patrocinio/icp-backup-$COMPONENT:$VERSION 5 | 6 | echo Pushing component $COMPONENT as latest version 7 | LATEST=patrocinio/icp-backup-$COMPONENT:latest 8 | docker tag $IMAGE $LATEST 9 | docker push $LATEST 10 | 11 | echo Pushing component $COMPONENT as version $VERSION 12 | docker tag $IMAGE $IMAGE 13 | docker push $IMAGE 14 | -------------------------------------------------------------------------------- /scripts/restoreCloudant.sh: -------------------------------------------------------------------------------- 1 | JOB=icp-cloudant-restore 2 | CONFIGMAP=cloudant-dbs 3 | 4 | DBS=$1 5 | 6 | echo Deleting job 7 | kubectl delete job $JOB 8 | 9 | echo Deleting config map 10 | kubectl delete configmap $CONFIGMAP 11 | 12 | echo Creating config map 13 | export DBNAME=$DBS 14 | kubectl create configmap $CONFIGMAP --from-literal=dbnames=$DBS 15 | 16 | echo Creating job 17 | kubectl create -f ../resources/icp-cloudant-restore-job.yaml 18 | 19 | kubectl describe job $JOB -------------------------------------------------------------------------------- /scripts/restoreEtcd.sh: -------------------------------------------------------------------------------- 1 | echo Restore snapshot $1 2 | 3 | . 
./etcd.sh 4 | $etcdctl3 snapshot restore /data/$1 \ 5 | --name=etcd0 --data-dir=/var/lib/etcd/restored \ 6 | --initial-advertise-peer-urls=https://${endpoint}:2380 \ 7 | --initial-cluster-token=etcd-cluster-1 \ 8 | --initial-cluster=etcd0=https://${endpoint}:2380 -------------------------------------------------------------------------------- /scripts/switchNamespace.sh: -------------------------------------------------------------------------------- 1 | CONTEXT=mycluster.icp-context 2 | USER=admin 3 | NAMESPACE=$1 4 | 5 | kubectl config set-context $CONTEXT --user=$USER --namespace=$NAMESPACE 6 | 7 | echo Namespace set to $NAMESPACE 8 | 9 | -------------------------------------------------------------------------------- /scripts/volume_config/large.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolume 2 | apiVersion: v1 3 | metadata: 4 | name: FILE_SYSTEM 5 | spec: 6 | capacity: 7 | storage: 20Gi 8 | accessModes: 9 | - ReadWriteMany 10 | persistentVolumeReclaimPolicy: Recycle 11 | nfs: 12 | path: "FILE_PATH/FILE_SYSTEM" 13 | server: FILE_SERVER -------------------------------------------------------------------------------- /scripts/volume_config/rwo-large.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolume 2 | apiVersion: v1 3 | metadata: 4 | name: FILE_SYSTEM 5 | spec: 6 | capacity: 7 | storage: 20Gi 8 | accessModes: 9 | - ReadWriteOnce 10 | persistentVolumeReclaimPolicy: Recycle 11 | nfs: 12 | path: "FILE_PATH/FILE_SYSTEM" 13 | server: FILE_SERVER -------------------------------------------------------------------------------- /scripts/volume_config/rwo.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolume 2 | apiVersion: v1 3 | metadata: 4 | name: FILE_SYSTEM 5 | spec: 6 | capacity: 7 | storage: 1Gi 8 | accessModes: 9 | - ReadWriteOnce 10 | persistentVolumeReclaimPolicy: Recycle 11 
| nfs: 12 | path: "FILE_PATH/FILE_SYSTEM" 13 | server: FILE_SERVER -------------------------------------------------------------------------------- /scripts/volume_config/rwx-large.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolume 2 | apiVersion: v1 3 | metadata: 4 | name: FILE_SYSTEM 5 | spec: 6 | capacity: 7 | storage: 20Gi 8 | accessModes: 9 | - ReadWriteMany 10 | persistentVolumeReclaimPolicy: Recycle 11 | nfs: 12 | path: "FILE_PATH/FILE_SYSTEM" 13 | server: FILE_SERVER -------------------------------------------------------------------------------- /scripts/volume_config/rwx.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolume 2 | apiVersion: v1 3 | metadata: 4 | name: FILE_SYSTEM 5 | spec: 6 | capacity: 7 | storage: 1Gi 8 | accessModes: 9 | - ReadWriteMany 10 | persistentVolumeReclaimPolicy: Recycle 11 | nfs: 12 | path: "FILE_PATH/FILE_SYSTEM" 13 | server: FILE_SERVER -------------------------------------------------------------------------------- /src/cloudant-backup/1.0/01_install-kubectl.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Install kubctl 4 | 5 | curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl 6 | 7 | 8 | chmod +x ./kubectl 9 | sudo mv ./kubectl /usr/local/bin/kubectl 10 | -------------------------------------------------------------------------------- /src/cloudant-backup/1.0/02_install-node9x.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Install node.js 4 | # Ubuntu specific 5 | # See: https://nodejs.org/en/download/package-manager/#debian-and-ubuntu-based-linux-distributions 6 | # couchbackup needs at least node v6+ 7 | # Need to run as root or use sudo 8 | 9 | curl -sL 
https://deb.nodesource.com/setup_9.x | sudo -E bash - 10 | sudo apt-get install -y nodejs 11 | 12 | 13 | -------------------------------------------------------------------------------- /src/cloudant-backup/1.0/03_install-npm-latest.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Install NPM 4 | # Ubuntu specific 5 | # Need to be root or use sudo 6 | # 7 | # See: https://docs.npmjs.com/getting-started/installing-node 8 | # 9 | # It is assumed nodejs has already been install and that npm came with it, 10 | # The following updates npm to the latest version. 11 | 12 | sudo npm install npm@latest -g 13 | 14 | 15 | -------------------------------------------------------------------------------- /src/cloudant-backup/1.0/04_install-cloudant-utils.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Install couchbackup (and restore) utility 4 | # Install couchdb-cli (coucher) command line utility 5 | # 6 | # Assumes npm is already installed (with nodejs). 7 | # 8 | # For details on couchbackup: 9 | # See https://www.npmjs.com/package/@cloudant/couchbackup 10 | # 11 | # Minimum required nodejs 6.13.0 12 | # Minimum required CloudantDB 2.0.0 13 | # 14 | # For detaions on couchdb-cli: 15 | # See https://www.npmjs.com/package/couchdb-cli 16 | # 17 | 18 | 19 | sudo npm install -g @cloudant/couchbackup 20 | 21 | sudo npm install -g couchdb-cli 22 | -------------------------------------------------------------------------------- /src/cloudant-backup/1.0/05_install-jq.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Install jq. 4 | # 5 | # jq is used by several other scripts to parse JSON output from kubernetes objects. 
6 | # 7 | 8 | sudo apt install -y jq 9 | 10 | -------------------------------------------------------------------------------- /src/cloudant-backup/1.0/cloudant-backup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Licensed Material - Property of IBM 3 | # 5724-I63, 5724-H88, (C) Copyright IBM Corp. 2018 - All Rights Reserved. 4 | # US Government Users Restricted Rights - Use, duplication or disclosure 5 | # restricted by GSA ADP Schedule Contract with IBM Corp. 6 | # 7 | # DISCLAIMER: 8 | # The following source code is sample code created by IBM Corporation. 9 | # This sample code is provided to you solely for the purpose of assisting you 10 | # in the use of the product. The code is provided 'AS IS', without warranty or 11 | # condition of any kind. IBM shall not be liable for any damages arising out of 12 | # your use of the sample code, even if IBM has been advised of the possibility 13 | # of such damages. 14 | # 15 | # DESCRIPTION: 16 | # Extract backups for all of the ICP Cloudant databases. 17 | # Write the backups to a timestamped directory in a given backups home directory. 18 | # 19 | # INPUTS: 20 | # 1. Path to backup directories home. (optional) 21 | # Each backup gets its own directory with a timestamp. 22 | # The timestamped backup directory for this backup will be created 23 | # in the given backup directories home. 24 | # The backup directories home defaults to "backups" in the current 25 | # working directory. 26 | # 27 | # 2. Host name (FQDN) or IP address of the Cloudant DB server. (optional) 28 | # Defaults to localhost. This needs to be one of the ICP master nodes 29 | # where the Cloudant database service is running. 30 | # 31 | # 3. Database names of databases to back up. (optional) 32 | # Defaults to all databases defined in the Cloudant instance. 33 | # 34 | # Assumptions: 35 | # 1. The user has a current kubernetes context for the admin user. 36 | # 37 | # 2. 
User has write permission for the backups directory home. 38 | # 39 | # 3. If a Cloudant DB server host name is not provided it is assumed 40 | # this script is being run on the Cloudant DB server host as 41 | # localhost is used in the Cloudant DB URL. 42 | # 43 | function usage { 44 | echo "" 45 | echo "Usage: cloudant-backup.sh [options]" 46 | echo " --dbhost - (optional) Host name or IP address of the Cloudant DB service provider" 47 | echo " For example, one of the ICP master nodes." 48 | echo " Defaults to cloudant." 49 | echo "" 50 | echo " --backup-home - (optional) Full path to a backups home directory." 51 | echo " Defaults to directory /backup." 52 | echo "" 53 | echo " --dbnames - (optional) Space separated list of database names to back up." 54 | echo " The dbnames list needs to be quoted." 55 | echo " Defaults to all databases defined in the Cloudant instance." 56 | echo "" 57 | echo " --help|-h - emit this usage information" 58 | echo "" 59 | echo " - and -- are accepted as keyword argument indicators" 60 | echo "" 61 | echo "Sample invocations:" 62 | echo " ./cloudant-backup.sh" 63 | echo " ./cloudant-backup.sh --dbhost master01.xxx.yyy --backup-home /backups" 64 | echo "" 65 | echo " User is assumed to have write permission on backup home directory." 66 | echo " User is assumed to have a current kubernetes context with admin credentials." 67 | echo "" 68 | } 69 | 70 | 71 | # import helper functions 72 | . ./helperFunctions.sh 73 | 74 | # MAIN 75 | 76 | backupHome="" 77 | dbhost="" 78 | dbnames="" 79 | 80 | # process the input args 81 | # For keyword-value arguments the arg gets the keyword and 82 | # the case statement assigns the value to a script variable. 83 | # If any "switch" args are added to the command line args, 84 | # then it wouldn't need a shift after processing the switch 85 | # keyword. 
The script variable for a switch argument would 86 | # be initialized to "false" or the empty string and if the 87 | # switch is provided on the command line it would be assigned 88 | # "true". 89 | # 90 | while (( $# > 0 )); do 91 | arg=$1 92 | case $arg in 93 | -h|--help ) usage; exit 0 94 | ;; 95 | 96 | -backup-home|--backup-home ) backupHome=$2; shift 97 | ;; 98 | 99 | -dbhost|--dbhost) dbhost=$2; shift 100 | ;; 101 | 102 | -dbnames|--dbnames) dbnames=$2; shift 103 | ;; 104 | 105 | * ) usage; 106 | info $LINENO "ERROR: Unknown option: $arg in command line." 107 | exit 1 108 | ;; 109 | esac 110 | # shift to next key-value pair 111 | shift 112 | done 113 | 114 | 115 | if [ -z "$backupHome" ]; then 116 | backupHome="/backup" 117 | fi 118 | info $LINENO "Backup directory will be created in: $backupHome" 119 | 120 | if [ -z "$dbhost" ]; then 121 | dbhost=cloudantdb 122 | fi 123 | info $LINENO "Cloudant DB host: $dbhost" 124 | 125 | 126 | port=$(getCloudantNodePort) 127 | password=$(getCloudantPassword) 128 | 129 | if [ -z "$port" ]; then 130 | info $LINENO "ERROR: port must be defined. Check getCloudantNodePort helper function." 131 | exit 1 132 | fi 133 | 134 | if [ -z "$password" ]; then 135 | info $LINENO "ERROR: password must not be empty. Check getCloudantPassword helper function." 136 | exit 2 137 | fi 138 | 139 | info $LINENO "Cloudant NodePort: $port" 140 | 141 | # NOTE: a redundant direct "curl $cloudantURL/_all_dbs" was removed here; its 142 | # result was immediately overwritten by getCloudantDatabaseNames, which builds 143 | # the Cloudant URL itself (see helperFunctions.sh), so the extra call only 144 | # cost a needless network round trip. 145 | allDBs=$(getCloudantDatabaseNames $dbhost) 146 | 147 | if [ -z "$allDBs" ]; then 148 | info $LINENO "ERROR: Cloudant database name list must not be empty. Check getCloudantDatabaseNames helper function." 
149 | exit 3 150 | fi 151 | 152 | if [ -z "$dbnames" ]; then 153 | dbnames="$allDBs" 154 | else 155 | # make sure all user provided dbnames are valid 156 | ERROR="" 157 | for name in $dbnames; do 158 | isvalid=$(echo "$allDBs" | grep $name) 159 | if [ -z "$isvalid" ]; then 160 | info $LINENO "ERROR: The name: \"$name\" is not a valid ICP Cloudant database name." 161 | ERROR="true" 162 | fi 163 | done 164 | if [ -n "$ERROR" ]; then 165 | info $LINENO "Valid ICP Cloudant database names: $allDBs" 166 | exit 6 167 | fi 168 | fi 169 | 170 | info $LINENO "Databases to be backed up: $dbnames" 171 | 172 | # backup timestamp 173 | ts=$(date +%Y-%m-%d-%H-%M-%S) 174 | backupDir="${backupHome}/icp-cloudant-backup-$ts" 175 | 176 | mkdir -p $backupDir 177 | if [ "$?" != "0" ]; then 178 | info $LINENO "ERROR: Failed to create: $backupDir" 179 | exit 4 180 | fi 181 | 182 | info $LINENO "Backups will be written to: $backupDir" 183 | 184 | exportCloudantDatabaseNames $dbhost "$backupDir" 185 | exportDBnames "$dbnames" "$backupDir" 186 | 187 | for dbname in $dbnames; do 188 | couchbackup --url "http://admin:$password@$dbhost:$port" --log "$backupDir/$dbname-backup.log" --db $dbname > "$backupDir/$dbname-backup.json" 189 | done 190 | 191 | 192 | -------------------------------------------------------------------------------- /src/cloudant-backup/1.0/create-database.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Licensed Material - Property of IBM 4 | # 5724-I63, 5724-H88, (C) Copyright IBM Corp. 2018 - All Rights Reserved. 5 | # US Government Users Restricted Rights - Use, duplication or disclosure 6 | # restricted by GSA ADP Schedule Contract with IBM Corp. 7 | # 8 | # DISCLAIMER: 9 | # The following source code is sample code created by IBM Corporation. 10 | # This sample code is provided to you solely for the purpose of assisting you 11 | # in the use of the product. 
The code is provided 'AS IS', without warranty or 12 | # condition of any kind. IBM shall not be liable for any damages arising out of 13 | # your use of the sample code, even if IBM has been advised of the possibility 14 | # of such damages. 15 | # 16 | # DESCRIPTION: 17 | # Create one or more ICP Cloudant databases. 18 | # 19 | # Pre-reqs: 20 | # kubectl is needed to interact with the ICP cluster. 21 | # jq is needed to do JSON parsing. 22 | # coucher-cli is used to create databases. 23 | # 24 | # INPUTS: 25 | # 1. Host name (FQDN) or IP address of the Cloudant DB server. (optional) 26 | # Defaults to localhost. This needs to be one of the ICP master nodes 27 | # where the Cloudant database service is running. 28 | # 29 | # 2. One or more names of the databases to be created. 30 | # If more than one name is provided it is in the form of a quoted string 31 | # with the names separated by spaces. 32 | # 33 | # Assumptions: 34 | # 1. The user has a current kubernetes context for the admin user. 35 | # 36 | # 2. If a Cloudant DB server host name is not provided it is assumed 37 | # this script is being run on the Cloudant DB server host as 38 | # localhost is used in the Cloudant DB URL. 39 | # 40 | 41 | function usage { 42 | echo "" 43 | echo "Usage: create-database.sh [options]" 44 | echo " --dbhost - (optional) Host name or IP address of the Cloudant DB service provider" 45 | echo " For example, one of the ICP master nodes." 46 | echo " Defaults to localhost." 47 | echo "" 48 | echo " --dbnames - (required) One or more names of the databases to be created." 49 | echo " If more than one name is provided it must be a quoted string of" 50 | echo " space separated names." 
51 | echo "" 52 | echo " --help|-h - emit this usage information" 53 | echo "" 54 | echo " - and -- are accepted as keyword argument indicators" 55 | echo "" 56 | echo "Sample invocations:" 57 | echo " ./create-database.sh --dbhost master01.xxx.yyy --dbnames \"platform-db security-data\"" 58 | echo "" 59 | } 60 | 61 | # import helper functions 62 | . ./helperFunctions.sh 63 | 64 | # MAIN 65 | 66 | dbhost="" 67 | dbnames="" 68 | 69 | # process the input args 70 | # For keyword-value arguments the arg gets the keyword and 71 | # the case statement assigns the value to a script variable. 72 | # If any "switch" args are added to the command line args, 73 | # then it wouldn't need a shift after processing the switch 74 | # keyword. The script variable for a switch argument would 75 | # be initialized to "false" or the empty string and if the 76 | # switch is provided on the command line it would be assigned 77 | # "true". 78 | # 79 | while (( $# > 0 )); do 80 | arg=$1 81 | case $arg in 82 | -h|--help ) usage; exit 83 | ;; 84 | 85 | -dbhost|--dbhost) dbhost=$2; shift 86 | ;; 87 | 88 | -dbnames|--dbnames) dbnames=$2; shift 89 | ;; 90 | 91 | * ) usage; 92 | info $LINENO "ERROR: Unknown option: $arg in command line." 93 | exit 1 94 | ;; 95 | esac 96 | # shift to next key-value pair 97 | shift 98 | done 99 | 100 | if [ -z "$dbhost" ]; then 101 | dbhost=localhost 102 | fi 103 | info $LINENO "Cloudant DB host: $dbhost" 104 | 105 | 106 | if [ -z "$dbnames" ]; then 107 | info $LINENO "ERROR: A list of database names (--dbnames) is required." 
108 | exit 2 109 | fi 110 | 111 | currentDBs=$(getCloudantDatabaseNames $dbhost) 112 | 113 | for name in $dbnames; do 114 | dbexists=$(echo "$currentDBs" | grep $name) 115 | if [ -z "$dbexists" ]; then 116 | info $LINENO "Creating database: $name on Cloudant instance host: $dbhost" 117 | createDatabase $dbhost $name 118 | else 119 | info $LINENO "Database: $name already exists on Cloudant instance host: $dbhost" 120 | fi 121 | done 122 | 123 | 124 | -------------------------------------------------------------------------------- /src/cloudant-backup/1.0/delete-database.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Licensed Material - Property of IBM 4 | # 5724-I63, 5724-H88, (C) Copyright IBM Corp. 2018 - All Rights Reserved. 5 | # US Government Users Restricted Rights - Use, duplication or disclosure 6 | # restricted by GSA ADP Schedule Contract with IBM Corp. 7 | # 8 | # DISCLAIMER: 9 | # The following source code is sample code created by IBM Corporation. 10 | # This sample code is provided to you solely for the purpose of assisting you 11 | # in the use of the product. The code is provided 'AS IS', without warranty or 12 | # condition of any kind. IBM shall not be liable for any damages arising out of 13 | # your use of the sample code, even if IBM has been advised of the possibility 14 | # of such damages. 15 | # 16 | # DESCRIPTION: 17 | # Delete one or more ICP Cloudant databases. 18 | # 19 | # Pre-reqs: 20 | # kubectl is needed to interact with the ICP cluster. 21 | # jq is needed to do JSON parsing. 22 | # coucher-cli is used to delete databases. 23 | # 24 | # INPUTS: 25 | # 1. Host name (FQDN) or IP address of the Cloudant DB server. (optional) 26 | # Defaults to localhost. This needs to be one of the ICP master nodes 27 | # where the Cloudant database service is running. 28 | # 29 | # 2. One or more names of the databases to be deleted. 
30 | # If more than one name is provided it is in the form of a quoted string 31 | # with the names separated by spaces. 32 | # 33 | # Assumptions: 34 | # 1. The user has a current kubernetes context for the admin user. 35 | # 36 | # 2. If a Cloudant DB server host name is not provided it is assumed 37 | # this script is being run on the Cloudant DB server host as 38 | # localhost is used in the Cloudant DB URL. 39 | # 40 | 41 | function usage { 42 | echo "" 43 | echo "Usage: delete-database.sh [options]" 44 | echo " --dbhost - (optional) Host name or IP address of the Cloudant DB service provider" 45 | echo " For example, one of the ICP master nodes." 46 | echo " Defaults to localhost." 47 | echo "" 48 | echo " --dbnames - (required) One or more names of the databases to be created." 49 | echo " If more than one name is provided it must be a quoted string of" 50 | echo " space separated names." 51 | echo "" 52 | echo " --help|-h - emit this usage information" 53 | echo "" 54 | echo " - and -- are accepted as keyword argument indicators" 55 | echo "" 56 | echo "Sample invocations:" 57 | echo " ./delete-database.sh --dbhost master01.xxx.yyy --dbnames \"platform-db security-data\"" 58 | echo "" 59 | } 60 | 61 | # import helper functions 62 | . ./helperFunctions.sh 63 | 64 | # MAIN 65 | 66 | dbhost="" 67 | dbnames="" 68 | 69 | # process the input args 70 | # For keyword-value arguments the arg gets the keyword and 71 | # the case statement assigns the value to a script variable. 72 | # If any "switch" args are added to the command line args, 73 | # then it wouldn't need a shift after processing the switch 74 | # keyword. The script variable for a switch argument would 75 | # be initialized to "false" or the empty string and if the 76 | # switch is provided on the command line it would be assigned 77 | # "true". 
78 | # 79 | while (( $# > 0 )); do 80 | arg=$1 81 | case $arg in 82 | -h|--help ) usage; exit 83 | ;; 84 | 85 | -dbhost|--dbhost) dbhost=$2; shift 86 | ;; 87 | 88 | -dbnames|--dbnames) dbnames=$2; shift 89 | ;; 90 | 91 | * ) usage; 92 | info $LINENO "ERROR: Unknown option: $arg in command line." 93 | exit 1 94 | ;; 95 | esac 96 | # shift to next key-value pair 97 | shift 98 | done 99 | 100 | if [ -z "$dbhost" ]; then 101 | dbhost=localhost 102 | fi 103 | info $LINENO "Cloudant DB host: $dbhost" 104 | 105 | 106 | if [ -z "$dbnames" ]; then 107 | info $LINENO "ERROR: A list of database names (--dbnames) is required." 108 | exit 2 109 | fi 110 | 111 | currentDBs=$(getCloudantDatabaseNames $dbhost) 112 | 113 | for name in $dbnames; do 114 | dbexists=$(echo "$currentDBs" | grep $name) 115 | if [ -n "$dbexists" ]; then 116 | info $LINENO "Deleting database: $name on Cloudant instance host: $dbhost" 117 | deleteDatabase $dbhost $name 118 | else 119 | info $LINENO "Database: $name does not exist on Cloudant instance host: $dbhost" 120 | fi 121 | done 122 | 123 | 124 | -------------------------------------------------------------------------------- /src/cloudant-backup/1.0/externalize-cloudantdb-service.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Licensed Material - Property of IBM 4 | # 5724-I63, 5724-H88, (C) Copyright IBM Corp. 2018 - All Rights Reserved. 5 | # US Government Users Restricted Rights - Use, duplication or disclosure 6 | # restricted by GSA ADP Schedule Contract with IBM Corp. 7 | # 8 | # DISCLAIMER: 9 | # The following source code is sample code created by IBM Corporation. 10 | # This sample code is provided to you solely for the purpose of assisting you 11 | # in the use of the product. The code is provided 'AS IS', without warranty or 12 | # condition of any kind. 
IBM shall not be liable for any damages arising out of 13 | # your use of the sample code, even if IBM has been advised of the possibility 14 | # of such damages. 15 | # 16 | # DESCRIPTION: 17 | # Use the cloudant-db-node-port.yaml from the icp-backup git repo 18 | # to externalize the ICP cloudantdb service. 19 | # 20 | # Pre-reqs: 21 | # 1. Clone 22 | # 2. kubectl is needed to apply the yaml to externalize the service. 23 | # 24 | # Assumptions: 25 | # 1. User has a current kube context configured. (See icp-client-config.sh) 26 | # 2. The default place to run this script from the icp-backup/scripts directory. 27 | # 28 | # 29 | ################################################################################ 30 | function usage { 31 | echo "" 32 | echo "Usage: externalize-cloudantdb-service.sh [options]" 33 | echo " --yaml-path - (optional) Path to yaml file that externalizes the cloudantdb service." 34 | echo " Defaults to cloudant-db-node-port.yaml in the current directory." 35 | echo "" 36 | echo " --help|-h - emit this usage information" 37 | echo "" 38 | echo " - and -- are accepted as keyword argument indicators" 39 | echo "" 40 | echo "Sample invocations:" 41 | echo " ./externalize-cloudantdb-service.sh" 42 | echo " ./externalize-cloudantdb-service.sh --yaml-path ../resources/cloudant-db-node-port.yaml" 43 | echo "" 44 | } 45 | 46 | # The info() function is used to emit log messages. 47 | # It is assumed that SCRIPT is set in the caller. 48 | function info { 49 | local lineno=$1; shift 50 | local ts=$(date +[%Y/%m/%d-%T]) 51 | echo "$ts $SCRIPT($lineno) $*" 52 | } 53 | 54 | ############ "Main" starts here 55 | SCRIPT=${0##*/} 56 | 57 | info $LINENO "BEGIN $SCRIPT" 58 | 59 | yaml_path="" 60 | 61 | # process the input args 62 | # For keyword-value arguments the arg gets the keyword and 63 | # the case statement assigns the value to a script variable. 
64 | # If any "switch" args are added to the command line args, 65 | # then it wouldn't need a shift after processing the switch 66 | # keyword. The script variable for a switch argument would 67 | # be initialized to "false" or the empty string and if the 68 | # switch is provided on the command line it would be assigned 69 | # "true". 70 | # 71 | while (( $# > 0 )); do 72 | arg=$1 73 | case $arg in 74 | -h|--help ) usage; exit 0 75 | ;; 76 | 77 | -yaml-path|--yaml-path ) yaml_path=$2; shift 78 | ;; 79 | 80 | * ) usage; 81 | info $LINENO "ERROR: Unknown option: $arg in command line." 82 | exit 1 83 | ;; 84 | esac 85 | # shift to next key-value pair 86 | shift 87 | done 88 | 89 | if [ -z "$yaml_path" ]; then 90 | yaml_path=cloudant-db-node-port.yaml 91 | fi 92 | 93 | if [ ! -f "$yaml_path" ]; then 94 | info $LINENO "ERROR: $yaml_path does not exist." 95 | exit 1 96 | fi 97 | 98 | exists=$(kubectl get svc --namespace=kube-system | grep cloudantdb-ext) 99 | if [ -z "$exists" ]; then 100 | kubectl --namespace=kube-system apply -f "$yaml_path" 101 | else 102 | info $LINENO "The cloudantdb-ext service is already defined:" 103 | info $LINENO "$exists" 104 | fi 105 | 106 | info $LINENO "END $SCRIPT" 107 | -------------------------------------------------------------------------------- /src/cloudant-backup/1.0/get-database-names.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Licensed Material - Property of IBM 4 | # 5724-I63, 5724-H88, (C) Copyright IBM Corp. 2018 - All Rights Reserved. 5 | # US Government Users Restricted Rights - Use, duplication or disclosure 6 | # restricted by GSA ADP Schedule Contract with IBM Corp. 7 | # 8 | # DISCLAIMER: 9 | # The following source code is sample code created by IBM Corporation. 10 | # This sample code is provided to you solely for the purpose of assisting you 11 | # in the use of the product. 
The code is provided 'AS IS', without warranty or 12 | # condition of any kind. IBM shall not be liable for any damages arising out of 13 | # your use of the sample code, even if IBM has been advised of the possibility 14 | # of such damages. 15 | # 16 | # DESCRIPTION: 17 | # Get a list of all the ICP Cloudant databases defined and write it to stdout. 18 | # It is handy to be able to quickly see the list of databases names for testing. 19 | # 20 | # Pre-reqs: 21 | # kubectl is needed to interact with the ICP cluster. 22 | # jq is needed to do JSON parsing. 23 | # 24 | # 25 | # INPUTS: 26 | # 1. Host name (FQDN) or IP address of the Cloudant DB server. (optional) 27 | # Defaults to localhost. This needs to be one of the ICP master nodes 28 | # where the Cloudant database service is running. 29 | # 30 | # Assumptions: 31 | # 1. The user has a current kubernetes context for the admin user. 32 | # 33 | # 2. If a Cloudant DB server host name is not provided it is assumed 34 | # this script is being run on the Cloudant DB server host as 35 | # localhost is used in the Cloudant DB URL. 36 | # 37 | 38 | function usage { 39 | echo "" 40 | echo "Usage: get-database-names.sh [options]" 41 | echo " --dbhost - (optional) Host name or IP address of the Cloudant DB service provider" 42 | echo " For example, one of the ICP master nodes." 43 | echo " Defaults to localhost." 44 | echo "" 45 | echo " --help|-h - emit this usage information" 46 | echo "" 47 | echo " - and -- are accepted as keyword argument indicators" 48 | echo "" 49 | echo "Sample invocations:" 50 | echo " ./get-database-names.sh --dbhost master01.xxx.yyy" 51 | echo "" 52 | } 53 | 54 | # import helper functions 55 | . ./helperFunctions.sh 56 | 57 | # MAIN 58 | 59 | dbhost="" 60 | 61 | # process the input args 62 | # For keyword-value arguments the arg gets the keyword and 63 | # the case statement assigns the value to a script variable. 
64 | # If any "switch" args are added to the command line args, 65 | # then it wouldn't need a shift after processing the switch 66 | # keyword. The script variable for a switch argument would 67 | # be initialized to "false" or the empty string and if the 68 | # switch is provided on the command line it would be assigned 69 | # "true". 70 | # 71 | while (( $# > 0 )); do 72 | arg=$1 73 | case $arg in 74 | -h|--help ) usage; exit 75 | ;; 76 | 77 | -dbhost|--dbhost) dbhost=$2; shift 78 | ;; 79 | 80 | * ) usage; info $LINENO "ERROR: Unknown option: $arg in command line." 81 | exit 1 82 | ;; 83 | esac 84 | # shift to next key-value pair 85 | shift 86 | done 87 | 88 | if [ -z "$dbhost" ]; then 89 | dbhost=localhost 90 | fi 91 | info $LINENO "Cloudant DB host: $dbhost" 92 | 93 | allDBs=$(getCloudantDatabaseNames $dbhost) 94 | 95 | if [ -z "$allDBs" ]; then 96 | info $LINENO "No databases are defined in the Cloudant instance hosted by: $dbhost" 97 | else 98 | info $LINENO "ICP Cloudant database names:" 99 | echo "\"$allDBs\"" 100 | fi 101 | 102 | 103 | -------------------------------------------------------------------------------- /src/cloudant-backup/1.0/helperFunctions.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Licensed Material - Property of IBM 3 | # 5724-I63, 5724-H88, (C) Copyright IBM Corp. 2018 - All Rights Reserved. 4 | # US Government Users Restricted Rights - Use, duplication or disclosure 5 | # restricted by GSA ADP Schedule Contract with IBM Corp. 6 | # 7 | # DISCLAIMER: 8 | # The following source code is sample code created by IBM Corporation. 9 | # This sample code is provided to you solely for the purpose of assisting you 10 | # in the use of the product. The code is provided 'AS IS', without warranty or 11 | # condition of any kind. 
# IBM shall not be liable for any damages arising out of
# your use of the sample code, even if IBM has been advised of the possibility
# of such damages.

# DESCRIPTION:
#   Functions to assist with cloudant backup and restore.
#   Sourced by other scripts that need to use these functions.

# Emit a timestamped log message to stdout.
# $1 is the caller's line number (usually $LINENO); remaining args are the message.
# NOTE(review): relies on $SCRIPT being set by the sourcing script -- confirm callers define it.
function info {
  local lineno=$1; shift
  ts=$(date +[%Y/%m/%d-%T])
  echo "$ts $SCRIPT($lineno) $*"
}

# Get the cloudant password from the cloudant-credentials kube secret in the
# kube-system namespace and echo the base64-decoded value to stdout.
getCloudantPasswordUsingSecret () {
  ### Get the cloudant password from kube secret
  raw_secret=$(kubectl get secret cloudant-credentials --namespace=kube-system -o json | jq '.["metadata"]["annotations"]["kubectl.kubernetes.io/last-applied-configuration"]')

  # Remove trailing double quote and what looks like a newline but is actually \n (3 characters to remove).
  raw_secret=${raw_secret%???}

  # Remove leading double quote.
  raw_secret=${raw_secret#\"}

  # Remove all back slash characters.
  secret=$(echo $raw_secret | tr -d '\\')

  # Parse out the part of the secret with the data we are interested in.
  cloudant_password=$(echo $secret | jq '.["data"]["cloudant_password"]')

  # Strip leading and trailing double quotes
  cloudant_password=${cloudant_password#\"}
  cloudant_password=${cloudant_password%\"}

  echo $cloudant_password | base64 -d
}

# Echo the Cloudant admin password.
# NOTE(review): hard-coded placeholder value; getCloudantPasswordUsingSecret
# above is the secret-based lookup -- confirm which is intended for production.
getCloudantPassword () {
  echo "orange"
}

# Echo the Cloudant service port.
# NOTE(review): the NodePort lookup via kubectl is commented out and the
# standard CouchDB port 5984 is returned instead -- confirm this is intended.
getCloudantNodePort () {
  # local port=$(kubectl --namespace=kube-system get svc cloudantdb-ext -o json | jq '.["spec"]["ports"][1]["nodePort"]')
  # echo $port
  echo 5984
}

# Construct the cloudant URL and echo it back to the caller.
# $1 is the Cloudant DB host name or IP address; defaults to cloudant.
getCloudantURL () {
  local dbhost=$1

  if [ -z "$dbhost" ]; then
    dbhost=cloudant
  fi

  local password=$(getCloudantPassword)
  local port=$(getCloudantNodePort)

  echo "http://admin:$password@$dbhost:$port"
}

# The _all_dbs REST API returns a JSON list:
#   [ "_users", "helm_repos", "metrics", "metrics_app", "platform-db", "security-data", "stats", "tgz_files_icp" ]
# The actual output from jq has newlines after each item in the list.
# Also note the leading and trailing white space character of the string inside
# the brackets which needs to be trimmed out.

# Echo a space separated list of all database names in the Cloudant instance.
# $1 is Cloudant DB host name or IP address.
#    localhost is valid if running script on Cloudant DB host.
getCloudantDatabaseNames () {
  local cloudantURL=$(getCloudantURL $1)
  local allDBs=$(curl --silent $cloudantURL/_all_dbs | jq '.')

  # Use tr to remove the newlines, double quotes, left and right square brackets and commas.
  # The awk idiom trims leading and trailing white space.
  allDBs=$(echo "$allDBs" | tr -d '[\n",]' | awk '{$1=$1};1' )

  echo "$allDBs"
}

# Record all database names of the Cloudant instance as an
# "export ALL_DBS=..." line in dbnames.sh in the given directory.
# $1 is the Cloudant DB host name or IP address
#    localhost is valid if running script on Cloudant DB host.
# $2 is the path to directory where database names are to be exported;
#    defaults to the current working directory.
exportCloudantDatabaseNames () {
  local dbhost=$1
  local destDir=$2

  if [ -z "$destDir" ]; then
    destDir="$PWD"
  fi

  local allDBs=$(getCloudantDatabaseNames $dbhost)
  local dest="$destDir/dbnames.sh"

  if [ -f "$dest" ]; then
    # dbnames.sh already exists
    exported=$(grep ALL_DBS "$dest")
    if [ -z "$exported" ]; then
      # ALL_DBS not written in dbnames.sh, append it
      echo "export ALL_DBS=\"$allDBs\"" >> "$dest"
    fi
  else
    # Create dbnames.sh and write ALL_DBS to it
    echo "export ALL_DBS=\"$allDBs\"" > "$dest"
    chmod +x "$dest"
  fi
}

# Export the given dbnames to dbnames.sh in the given directory.
# INPUTS:
#   1. Quoted string space separated list of database names.
#   2. Destination directory path.  If not provided, current working
#      directory is used.
exportDBnames () {
  local dbnames=$1
  local destDir=$2

  if [ -z "$destDir" ]; then
    destDir=$PWD
  fi

  local dest="$destDir/dbnames.sh"

  if [ -f "$dest" ]; then
    # dbnames.sh already exists.
    # BUG FIX: grep -q emits no output, so the duplicate check below was always
    # empty and BACKED_UP_DBNAMES got appended on every call.  Use plain grep,
    # consistent with exportCloudantDatabaseNames above.
    exported=$(grep BACKED_UP_DBNAMES "$dest")
    if [ -z "$exported" ]; then
      # BACKED_UP_DBNAMES not written in dbnames.sh, append it
      echo "export BACKED_UP_DBNAMES=\"$dbnames\"" >> "$dest"
    fi
  else
    # Create dbnames.sh and write BACKED_UP_DBNAMES to it
    echo "export BACKED_UP_DBNAMES=\"$dbnames\"" > "$dest"
    chmod +x "$dest"
  fi
}

# Echo the full path of the file holding the backup for the given database name.
# $1 is the backup directory path
# $2 is the Cloudant database name
makeBackupFilePath () {
  local fileName="$2-backup.json"
  echo "$1/$fileName"
}

# Create a Cloudant database.
# $1 is the host name of the Cloudant DB instance
#    localhost is valid if the script is run on the instance host.
# $2 is the database name
# Both parameters are required.
createDatabase () {
  local dbhost=$1
  local dbname=$2

  local cloudantURL=$(getCloudantURL $dbhost)

  coucher database -c $cloudantURL -a create -d $dbname
}

# Delete a Cloudant database.
# $1 is the host name of the Cloudant DB instance
#    localhost is valid if the script is run on the instance host.
# $2 is the database name
# Both parameters are required.
deleteDatabase () {
  local dbhost=$1
  local dbname=$2

  local cloudantURL=$(getCloudantURL $dbhost)

  coucher database -c $cloudantURL -a delete -d $dbname
}

# -------------------------------------------------------------------------------- /src/cloudant-backup/1.0/icp-client-config.sh: --------------------------------------------------------------------------------
#!/bin/bash
#
## Licensed Material - Property of IBM
# 5724-I63, 5724-H88, (C) Copyright IBM Corp. 2018 - All Rights Reserved.
# US Government Users Restricted Rights - Use, duplication or disclosure
# restricted by GSA ADP Schedule Contract with IBM Corp.
#
# DISCLAIMER:
# The following source code is sample code created by IBM Corporation.
# This sample code is provided to you solely for the purpose of assisting you
# in the use of the product. The code is provided 'AS IS', without warranty or
# condition of any kind.
# IBM shall not be liable for any damages arising out of
# your use of the sample code, even if IBM has been advised of the possibility
# of such damages.
#
# DESCRIPTION:
#   Login and context for kubectl.  Only valid for 12 hours.
#   Edit with new cut-and-paste from ICP Console "configure client"
#

### Replace this section
# There should be 5 kubectl lines when the configuration commands are pasted in.
# NOTE(review): the token below is a sample that expires; replace it with a
# fresh cut-and-paste before each use.

kubectl config set-cluster mycluster.icp --server=https://10.0.0.10:8001 --insecure-skip-tls-verify=true
kubectl config set-context mycluster.icp-context --cluster=mycluster.icp
kubectl config set-credentials admin --token=eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJhdF9oYXNoIjoiZTN1OG0ybG9meG1ncmo4MWs4OWUiLCJyZWFsbU5hbWUiOiJjdXN0b21SZWFsbSIsInVuaXF1ZVNlY3VyaXR5TmFtZSI6ImFkbWluIiwiaXNzIjoiaHR0cHM6Ly9teWNsdXN0ZXIuaWNwOjk0NDMvb2lkYy9lbmRwb2ludC9PUCIsImF1ZCI6IjYzZmRmYmE4MTZlYzNlYWZiYzZlODQ5NjU0MGM1ZDI2IiwiZXhwIjoxNTIwOTA3MTI2LCJpYXQiOjE1MjA5MDcxMjYsInN1YiI6ImFkbWluIiwidGVhbVJvbGVNYXBwaW5ncyI6W119.ELl90UevuxZCjY74WKLKrFZiRRzJC8dHeYD3vipJqwox-crWONfSCdGzUmmlavGBcXS6HnqfDcFjqNCnmp07Rzz0Ns1exhMOjIxGYQ0cUxkmUHxhbxOu-tPoOW2RvsCLm5Boh0DkhSTvoi48G0r15elRHpEQ_L-MmpRl_CRPN_5b6Yif4PWfKW5EDPGbXwdS8BWgnM2Ueb2CnHwY72lmdsrf_YCQGyzHtGmb41IErphs6X4tDdLXRPjX4fCqFKPl9BiPZevEfwqBffk_EEbZ6A55865pFF4WEls0J2MYy3sJ6qHJdPI9PgIwtZ7mMyIXyd8H6JQ6WUveP8of4GeZGg
kubectl config set-context mycluster.icp-context --user=admin --namespace=default
kubectl config use-context mycluster.icp-context

### End of section to replace

# Change context to use the kube-system namespace as preferred
kubectl config set-context mycluster.icp-context --user admin --namespace=kube-system
# -------------------------------------------------------------------------------- /src/cloudant-backup/1.0/set-namespace-kube-system.sh: --------------------------------------------------------------------------------
#!/bin/bash

#
# Licensed Material - Property of IBM
# 5724-I63, 5724-H88, (C) Copyright IBM Corp. 2018 - All Rights Reserved.
# US Government Users Restricted Rights - Use, duplication or disclosure
# restricted by GSA ADP Schedule Contract with IBM Corp.
#
# DISCLAIMER:
# The following source code is sample code created by IBM Corporation.
# This sample code is provided to you solely for the purpose of assisting you
# in the use of the product. The code is provided 'AS IS', without warranty or
# condition of any kind. IBM shall not be liable for any damages arising out of
# your use of the sample code, even if IBM has been advised of the possibility
# of such damages.
#
# DESCRIPTION:
#   Switch the preferred namespace to kube-system
#
# Assumptions:
#   1. A kube context has been established.
#   2. Logged in as admin.
#   3. ICP cluster name is default, mycluster
#
# "kubectl config view", can be used to confirm namespace change.
#

kubectl config set-context mycluster.icp-context --user admin --namespace=kube-system

# -------------------------------------------------------------------------------- /src/cloudant-backup/1.1/01_install-kubectl.sh: --------------------------------------------------------------------------------
#!/bin/bash

# Install kubectl (latest stable release for linux/amd64).

curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl

chmod +x ./kubectl
sudo mv ./kubectl /usr/local/bin/kubectl
# -------------------------------------------------------------------------------- /src/cloudant-backup/1.1/02_install-node9x.sh: --------------------------------------------------------------------------------
#!/bin/bash

# Install node.js
# Ubuntu specific
# See: https://nodejs.org/en/download/package-manager/#debian-and-ubuntu-based-linux-distributions
# couchbackup needs at least node v6+
# Need to run as root or use sudo

curl -sL https://deb.nodesource.com/setup_9.x | sudo -E bash -
sudo apt-get install -y nodejs

# -------------------------------------------------------------------------------- /src/cloudant-backup/1.1/03_install-npm-latest.sh: --------------------------------------------------------------------------------
#!/bin/bash

# Install NPM
# Ubuntu specific
# Need to be root or use sudo
#
# See: https://docs.npmjs.com/getting-started/installing-node
#
# It is assumed nodejs has already been installed and that npm came with it.
# The following updates npm to the latest version.

sudo npm install npm@latest -g

# -------------------------------------------------------------------------------- /src/cloudant-backup/1.1/04_install-cloudant-utils.sh: --------------------------------------------------------------------------------
#!/bin/bash

# Install couchbackup (and restore) utility
# Install couchdb-cli (coucher) command line utility
#
# Assumes npm is already installed (with nodejs).
#
# For details on couchbackup:
# See https://www.npmjs.com/package/@cloudant/couchbackup
#
# Minimum required nodejs 6.13.0
# Minimum required CloudantDB 2.0.0
#
# For details on couchdb-cli:
# See https://www.npmjs.com/package/couchdb-cli
#

sudo npm install -g @cloudant/couchbackup

sudo npm install -g couchdb-cli
# -------------------------------------------------------------------------------- /src/cloudant-backup/1.1/05_install-jq.sh: --------------------------------------------------------------------------------
#!/bin/bash
#
# Install jq.
#
# jq is used by several other scripts to parse JSON output from kubernetes objects.
#

sudo apt install -y jq

# -------------------------------------------------------------------------------- /src/cloudant-backup/1.1/cloudant-backup.sh: --------------------------------------------------------------------------------
#!/bin/bash
# Licensed Material - Property of IBM
# 5724-I63, 5724-H88, (C) Copyright IBM Corp. 2018 - All Rights Reserved.
# US Government Users Restricted Rights - Use, duplication or disclosure
# restricted by GSA ADP Schedule Contract with IBM Corp.
#
# DISCLAIMER:
# The following source code is sample code created by IBM Corporation.
# This sample code is provided to you solely for the purpose of assisting you
# in the use of the product.
# The code is provided 'AS IS', without warranty or
# condition of any kind. IBM shall not be liable for any damages arising out of
# your use of the sample code, even if IBM has been advised of the possibility
# of such damages.
#
# DESCRIPTION:
#   Extract backups for all of the ICP Cloudant databases.
#   Write the backups to a timestamped directory in a given backups home directory.
#
# INPUTS:
#   1. Path to backup directories home. (optional)
#      Each backup gets its own directory with a timestamp.
#      The timestamped backup directory for this backup will be created
#      in the given backup directories home.
#      The backup directories home defaults to "backups" in the current
#      working directory.
#
#   2. Host name (FQDN) or IP address of the Cloudant DB server. (optional)
#      Defaults to localhost.  This needs to be one of the ICP master nodes
#      where the Cloudant database service is running.
#
#   3. Database names of databases to back up. (optional)
#      Defaults to all databases defined in the Cloudant instance.
#
# Assumptions:
#   1. The user has a current kubernetes context for the admin user.
#
#   2. User has write permission for the backups directory home.
#
#   3. If a Cloudant DB server host name is not provided it is assumed
#      this script is being run on the Cloudant DB server host as
#      localhost is used in the Cloudant DB URL.
#

# Emit usage information for this script.
function usage {
  echo ""
  echo "Usage: cloudant-backup.sh [options]"
  echo "   --dbhost       - (optional) Host name or IP address of the Cloudant DB service provider"
  echo "                    For example, one of the ICP master nodes."
  echo "                    Defaults to cloudantdb."
  echo ""
  echo "   --backup-home  - (optional) Full path to a backups home directory."
  echo "                    Defaults to directory /backup."
  echo ""
  echo "   --dbnames      - (optional) Space separated list of database names to back up."
  echo "                    The dbnames list needs to be quoted."
  echo "                    Defaults to all databases defined in the Cloudant instance."
  echo ""
  echo "   --help|-h      - emit this usage information"
  echo ""
  echo " - and -- are accepted as keyword argument indicators"
  echo ""
  echo "Sample invocations:"
  echo "  ./cloudant-backup.sh"
  echo "  ./cloudant-backup.sh --dbhost master01.xxx.yyy --backup-home /backups"
  echo ""
  echo " User is assumed to have write permission on backup home directory."
  echo " User is assumed to have a current kubernetes context with admin credentials."
  echo ""
}


# import helper functions
. ./helperFunctions.sh

# MAIN

backupHome=""
dbhost=""
dbnames=""

# process the input args
# For keyword-value arguments the arg gets the keyword and
# the case statement assigns the value to a script variable.
# If any "switch" args are added to the command line args,
# then it wouldn't need a shift after processing the switch
# keyword.  The script variable for a switch argument would
# be initialized to "false" or the empty string and if the
# switch is provided on the command line it would be assigned
# "true".
#
while (( $# > 0 )); do
  arg=$1
  case $arg in
    -h|--help ) usage; exit 0
                ;;

    -backup-home|--backup-home ) backupHome=$2; shift
                ;;

    -dbhost|--dbhost) dbhost=$2; shift
                ;;

    -dbnames|--dbnames) dbnames=$2; shift
                ;;

    * ) usage;
        info $LINENO "ERROR: Unknown option: $arg in command line."
        exit 1
        ;;
  esac
  # shift to next key-value pair
  shift
done


if [ -z "$backupHome" ]; then
  backupHome="/backup"
fi
info $LINENO "Backup directory will be created in: $backupHome"

if [ -z "$dbhost" ]; then
  dbhost=cloudantdb
fi
info $LINENO "Cloudant DB host: $dbhost"


port=$(getCloudantNodePort)
password=$(getCloudantPassword)

if [ -z "$port" ]; then
  info $LINENO "ERROR: port must be defined. Check getCloudantNodePort helper function."
  exit 1
fi

if [ -z "$password" ]; then
  info $LINENO "ERROR: password must not be empty. Check getCloudantPassword helper function."
  exit 2
fi

info $LINENO "Cloudant NodePort: $port"

# NOTE: removed leftover debug scaffolding that fetched _all_dbs three times
# with raw curl; getCloudantDatabaseNames below is the single source of truth.
allDBs=$(getCloudantDatabaseNames $dbhost)

if [ -z "$allDBs" ]; then
  info $LINENO "ERROR: Cloudant database name list must not be empty. Check getCloudantDatabaseNames helper function."
  exit 3
fi

if [ -z "$dbnames" ]; then
  dbnames="$allDBs"
else
  # make sure all user provided dbnames are valid
  # BUG FIX: a substring grep would accept e.g. "users" because "_users"
  # contains it; compare each candidate for an exact match instead.
  ERROR=""
  for name in $dbnames; do
    isvalid=""
    for db in $allDBs; do
      if [ "$db" = "$name" ]; then
        isvalid=$db
        break
      fi
    done
    if [ -z "$isvalid" ]; then
      info $LINENO "ERROR: The name: \"$name\" is not a valid ICP Cloudant database name."
      ERROR="true"
    fi
  done
  if [ -n "$ERROR" ]; then
    info $LINENO "Valid ICP Cloudant database names: $allDBs"
    exit 6
  fi
fi

info $LINENO "Databases to be backed up: $dbnames"

# backup timestamp
ts=$(date +%Y-%m-%d-%H-%M-%S)
backupDir="${backupHome}/icp-cloudant-backup-$ts"

if ! mkdir -p "$backupDir"; then
  info $LINENO "ERROR: Failed to create: $backupDir"
  exit 4
fi

info $LINENO "Backups will be written to: $backupDir"

exportCloudantDatabaseNames $dbhost "$backupDir"
exportDBnames "$dbnames" "$backupDir"

for dbname in $dbnames; do
  # Log (rather than silently ignore) any per-database backup failure.
  if ! couchbackup --url "http://admin:$password@$dbhost:$port" --log "$backupDir/$dbname-backup.log" --db $dbname > "$backupDir/$dbname-backup.json"; then
    info $LINENO "ERROR: couchbackup failed for database: $dbname"
  fi
done

# -------------------------------------------------------------------------------- /src/cloudant-backup/1.1/create-database.sh: --------------------------------------------------------------------------------
#!/bin/bash
#
# Licensed Material - Property of IBM
# 5724-I63, 5724-H88, (C) Copyright IBM Corp. 2018 - All Rights Reserved.
# US Government Users Restricted Rights - Use, duplication or disclosure
# restricted by GSA ADP Schedule Contract with IBM Corp.
#
# DISCLAIMER:
# The following source code is sample code created by IBM Corporation.
# This sample code is provided to you solely for the purpose of assisting you
# in the use of the product.
# The code is provided 'AS IS', without warranty or
# condition of any kind. IBM shall not be liable for any damages arising out of
# your use of the sample code, even if IBM has been advised of the possibility
# of such damages.
#
# DESCRIPTION:
#   Create one or more ICP Cloudant databases.
#
# Pre-reqs:
#   kubectl is needed to interact with the ICP cluster.
#   jq is needed to do JSON parsing.
#   coucher-cli is used to create databases.
#
# INPUTS:
#   1. Host name (FQDN) or IP address of the Cloudant DB server. (optional)
#      Defaults to localhost.  This needs to be one of the ICP master nodes
#      where the Cloudant database service is running.
#
#   2. One or more names of the databases to be created.
#      If more than one name is provided it is in the form of a quoted string
#      with the names separated by spaces.
#
# Assumptions:
#   1. The user has a current kubernetes context for the admin user.
#
#   2. If a Cloudant DB server host name is not provided it is assumed
#      this script is being run on the Cloudant DB server host as
#      localhost is used in the Cloudant DB URL.
#

# Emit usage information for this script.
function usage {
  echo ""
  echo "Usage: create-database.sh [options]"
  echo "   --dbhost   - (optional) Host name or IP address of the Cloudant DB service provider"
  echo "                For example, one of the ICP master nodes."
  echo "                Defaults to localhost."
  echo ""
  echo "   --dbnames  - (required) One or more names of the databases to be created."
  echo "                If more than one name is provided it must be a quoted string of"
  echo "                space separated names."
  echo ""
  echo "   --help|-h  - emit this usage information"
  echo ""
  echo " - and -- are accepted as keyword argument indicators"
  echo ""
  echo "Sample invocations:"
  echo "  ./create-database.sh --dbhost master01.xxx.yyy --dbnames \"platform-db security-data\""
  echo ""
}

# import helper functions
. ./helperFunctions.sh

# MAIN

dbhost=""
dbnames=""

# process the input args
# For keyword-value arguments the arg gets the keyword and
# the case statement assigns the value to a script variable.
# If any "switch" args are added to the command line args,
# then it wouldn't need a shift after processing the switch
# keyword.  The script variable for a switch argument would
# be initialized to "false" or the empty string and if the
# switch is provided on the command line it would be assigned
# "true".
#
while (( $# > 0 )); do
  arg=$1
  case $arg in
    # Exit explicitly with success status after emitting usage.
    -h|--help ) usage; exit 0
                ;;

    -dbhost|--dbhost) dbhost=$2; shift
                ;;

    -dbnames|--dbnames) dbnames=$2; shift
                ;;

    * ) usage;
        info $LINENO "ERROR: Unknown option: $arg in command line."
        exit 1
        ;;
  esac
  # shift to next key-value pair
  shift
done

if [ -z "$dbhost" ]; then
  dbhost=localhost
fi
info $LINENO "Cloudant DB host: $dbhost"


if [ -z "$dbnames" ]; then
  info $LINENO "ERROR: A list of database names (--dbnames) is required."
  exit 2
fi

currentDBs=$(getCloudantDatabaseNames $dbhost)

for name in $dbnames; do
  # BUG FIX: a substring grep reported e.g. "users" as existing because
  # "_users" contains it; compare each existing name for an exact match.
  dbexists=""
  for existing in $currentDBs; do
    if [ "$existing" = "$name" ]; then
      dbexists=$existing
      break
    fi
  done
  if [ -z "$dbexists" ]; then
    info $LINENO "Creating database: $name on Cloudant instance host: $dbhost"
    createDatabase $dbhost $name
  else
    info $LINENO "Database: $name already exists on Cloudant instance host: $dbhost"
  fi
done

# -------------------------------------------------------------------------------- /src/cloudant-backup/1.1/delete-database.sh: --------------------------------------------------------------------------------
#!/bin/bash
#
# Licensed Material - Property of IBM
# 5724-I63, 5724-H88, (C) Copyright IBM Corp. 2018 - All Rights Reserved.
# US Government Users Restricted Rights - Use, duplication or disclosure
# restricted by GSA ADP Schedule Contract with IBM Corp.
#
# DISCLAIMER:
# The following source code is sample code created by IBM Corporation.
# This sample code is provided to you solely for the purpose of assisting you
# in the use of the product. The code is provided 'AS IS', without warranty or
# condition of any kind. IBM shall not be liable for any damages arising out of
# your use of the sample code, even if IBM has been advised of the possibility
# of such damages.
#
# DESCRIPTION:
#   Delete one or more ICP Cloudant databases.
#
# Pre-reqs:
#   kubectl is needed to interact with the ICP cluster.
#   jq is needed to do JSON parsing.
#   coucher-cli is used to delete databases.
#
# INPUTS:
#   1. Host name (FQDN) or IP address of the Cloudant DB server. (optional)
#      Defaults to localhost.  This needs to be one of the ICP master nodes
#      where the Cloudant database service is running.
#
#   2. One or more names of the databases to be deleted.
#      If more than one name is provided it is in the form of a quoted string
#      with the names separated by spaces.
#
# Assumptions:
#   1. The user has a current kubernetes context for the admin user.
#
#   2. If a Cloudant DB server host name is not provided it is assumed
#      this script is being run on the Cloudant DB server host as
#      localhost is used in the Cloudant DB URL.
#

# Emit usage information for this script.
function usage {
  echo ""
  echo "Usage: delete-database.sh [options]"
  echo "   --dbhost   - (optional) Host name or IP address of the Cloudant DB service provider"
  echo "                For example, one of the ICP master nodes."
  echo "                Defaults to localhost."
  echo ""
  # BUG FIX: this usage text said "to be created" (copied from
  # create-database.sh); this script deletes databases.
  echo "   --dbnames  - (required) One or more names of the databases to be deleted."
  echo "                If more than one name is provided it must be a quoted string of"
  echo "                space separated names."
  echo ""
  echo "   --help|-h  - emit this usage information"
  echo ""
  echo " - and -- are accepted as keyword argument indicators"
  echo ""
  echo "Sample invocations:"
  echo "  ./delete-database.sh --dbhost master01.xxx.yyy --dbnames \"platform-db security-data\""
  echo ""
}

# import helper functions
. ./helperFunctions.sh

# MAIN

dbhost=""
dbnames=""

# process the input args
# For keyword-value arguments the arg gets the keyword and
# the case statement assigns the value to a script variable.
# If any "switch" args are added to the command line args,
# then it wouldn't need a shift after processing the switch
# keyword.  The script variable for a switch argument would
# be initialized to "false" or the empty string and if the
# switch is provided on the command line it would be assigned
# "true".
#
while (( $# > 0 )); do
  arg=$1
  case $arg in
    # Exit explicitly with success status after emitting usage.
    -h|--help ) usage; exit 0
                ;;

    -dbhost|--dbhost) dbhost=$2; shift
                ;;

    -dbnames|--dbnames) dbnames=$2; shift
                ;;

    * ) usage;
        info $LINENO "ERROR: Unknown option: $arg in command line."
        exit 1
        ;;
  esac
  # shift to next key-value pair
  shift
done

if [ -z "$dbhost" ]; then
  dbhost=localhost
fi
info $LINENO "Cloudant DB host: $dbhost"


if [ -z "$dbnames" ]; then
  info $LINENO "ERROR: A list of database names (--dbnames) is required."
  exit 2
fi

currentDBs=$(getCloudantDatabaseNames $dbhost)

for name in $dbnames; do
  # BUG FIX: a substring grep reported e.g. "users" as existing because
  # "_users" contains it; compare each existing name for an exact match so
  # we never delete the wrong database by accident.
  dbexists=""
  for existing in $currentDBs; do
    if [ "$existing" = "$name" ]; then
      dbexists=$existing
      break
    fi
  done
  if [ -n "$dbexists" ]; then
    info $LINENO "Deleting database: $name on Cloudant instance host: $dbhost"
    deleteDatabase $dbhost $name
  else
    info $LINENO "Database: $name does not exist on Cloudant instance host: $dbhost"
  fi
done

# -------------------------------------------------------------------------------- /src/cloudant-backup/1.1/externalize-cloudantdb-service.sh: --------------------------------------------------------------------------------
#!/bin/bash
#
# Licensed Material - Property of IBM
# 5724-I63, 5724-H88, (C) Copyright IBM Corp. 2018 - All Rights Reserved.
# US Government Users Restricted Rights - Use, duplication or disclosure
# restricted by GSA ADP Schedule Contract with IBM Corp.
#
# DISCLAIMER:
# The following source code is sample code created by IBM Corporation.
# This sample code is provided to you solely for the purpose of assisting you
# in the use of the product. The code is provided 'AS IS', without warranty or
# condition of any kind.
# IBM shall not be liable for any damages arising out of
# your use of the sample code, even if IBM has been advised of the possibility
# of such damages.
#
# DESCRIPTION:
#   Use the CloudantDBNodePort.yaml from the icp-backup git repo
#   to externalize the ICP cloudantdb service.
#
# Assumptions:
#   1. The icp-backup git repo was cloned in the current directory
#   2. kubectl has been installed.
#   3. User has a current kube context configured. (See ICPClientConfig.sh)
#
# NOTE(review): the repo tree shows scripts/cloudant/cloudant-db-node-port.yaml;
# confirm the path/name CloudantDBNodePort.yaml below matches the cloned repo version.
if [ ! -f ./icp-backup/scripts/CloudantDBNodePort.yaml ]; then
  echo "ERROR: ./icp-backup/scripts/CloudantDBNodePort.yaml does not exist."
  echo "Clone the icp-backup git repo before running this script."
  exit 1
fi

# Only create the NodePort service if it is not already defined.
exists=$(kubectl get svc --namespace=kube-system | grep cloudantdb-ext)
if [ -z "$exists" ]; then
  kubectl --namespace=kube-system apply -f ./icp-backup/scripts/CloudantDBNodePort.yaml
else
  echo "The cloudantdb-ext service is already defined:"
  echo "$exists"
fi

# -------------------------------------------------------------------------------- /src/cloudant-backup/1.1/get-database-names.sh: --------------------------------------------------------------------------------
#!/bin/bash
#
# Licensed Material - Property of IBM
# 5724-I63, 5724-H88, (C) Copyright IBM Corp. 2018 - All Rights Reserved.
# US Government Users Restricted Rights - Use, duplication or disclosure
# restricted by GSA ADP Schedule Contract with IBM Corp.
#
# DISCLAIMER:
# The following source code is sample code created by IBM Corporation.
# This sample code is provided to you solely for the purpose of assisting you
# in the use of the product. The code is provided 'AS IS', without warranty or
# condition of any kind. IBM shall not be liable for any damages arising out of
# your use of the sample code, even if IBM has been advised of the possibility
# of such damages.
#
# DESCRIPTION:
#   Get a list of all the ICP Cloudant databases defined and write it to stdout.
#   It is handy to be able to quickly see the list of databases names for testing.
#
# Pre-reqs:
#   kubectl is needed to interact with the ICP cluster.
#   jq is needed to do JSON parsing.
#
#
# INPUTS:
#   1. Host name (FQDN) or IP address of the Cloudant DB server. (optional)
#      Defaults to localhost.  This needs to be one of the ICP master nodes
#      where the Cloudant database service is running.
#
# Assumptions:
#   1. The user has a current kubernetes context for the admin user.
#
#   2. If a Cloudant DB server host name is not provided it is assumed
#      this script is being run on the Cloudant DB server host as
#      localhost is used in the Cloudant DB URL.
#

# Emit usage information for this script.
function usage {
  echo ""
  echo "Usage: get-database-names.sh [options]"
  echo "   --dbhost   - (optional) Host name or IP address of the Cloudant DB service provider"
  echo "                For example, one of the ICP master nodes."
  echo "                Defaults to localhost."
  echo ""
  echo "   --help|-h  - emit this usage information"
  echo ""
  echo " - and -- are accepted as keyword argument indicators"
  echo ""
  echo "Sample invocations:"
  echo "  ./get-database-names.sh --dbhost master01.xxx.yyy"
  echo ""
}

# import helper functions
. ./helperFunctions.sh

# MAIN

dbhost=""

# process the input args
# For keyword-value arguments the arg gets the keyword and
# the case statement assigns the value to a script variable.
# If any "switch" args are added to the command line args,
# then it wouldn't need a shift after processing the switch
# keyword.  The script variable for a switch argument would
# be initialized to "false" or the empty string and if the
# switch is provided on the command line it would be assigned
# "true".
#
while (( $# > 0 )); do
  arg=$1
  case $arg in
    # Exit explicitly with success status after emitting usage.
    -h|--help ) usage; exit 0
                ;;

    -dbhost|--dbhost) dbhost=$2; shift
                ;;

    * ) usage; info $LINENO "ERROR: Unknown option: $arg in command line."
        exit 1
        ;;
  esac
  # shift to next key-value pair
  shift
done

if [ -z "$dbhost" ]; then
  dbhost=localhost
fi
info $LINENO "Cloudant DB host: $dbhost"

allDBs=$(getCloudantDatabaseNames $dbhost)

if [ -z "$allDBs" ]; then
  info $LINENO "No databases are defined in the Cloudant instance hosted by: $dbhost"
else
  info $LINENO "ICP Cloudant database names:"
  echo "\"$allDBs\""
fi

# -------------------------------------------------------------------------------- /src/cloudant-backup/1.1/helperFunctions.sh: --------------------------------------------------------------------------------
#!/bin/bash
# Licensed Material - Property of IBM
# 5724-I63, 5724-H88, (C) Copyright IBM Corp. 2018 - All Rights Reserved.
# US Government Users Restricted Rights - Use, duplication or disclosure
# restricted by GSA ADP Schedule Contract with IBM Corp.
#
# DISCLAIMER:
# The following source code is sample code created by IBM Corporation.
# This sample code is provided to you solely for the purpose of assisting you
# in the use of the product. The code is provided 'AS IS', without warranty or
# condition of any kind. IBM shall not be liable for any damages arising out of
# your use of the sample code, even if IBM has been advised of the possibility
# of such damages.

# DESCRIPTION:
#   Functions to assist with cloudant backup and restore.
17 | # Sourced by other scripts that need to use these functions. 18 | 19 | function info { 20 | local lineno=$1; shift 21 | ts=$(date +[%Y/%m/%d-%T]) 22 | echo "$ts $SCRIPT($lineno) $*" 23 | } 24 | 25 | getCloudantPasswordUsingSecret () { 26 | ### Get the cloudant password from kube secret 27 | raw_secret=$(kubectl get secret cloudant-credentials --namespace=kube-system -o json | jq '.["metadata"]["annotations"]["kubectl.kubernetes.io/last-applied-configuration"]') 28 | 29 | # Remove trailing double quote and what looks like a newline but is actually \n (3 characters to remove). 30 | raw_secret=${raw_secret%???} 31 | 32 | # Remove leading double quote. 33 | raw_secret=${raw_secret#\"} 34 | 35 | # Remove all back slash characters. 36 | secret=$(echo $raw_secret | tr -d '\\') 37 | 38 | # Parse out the part of the secret with the data we are interested in. 39 | cloudant_password=$(echo $secret | jq '.["data"]["cloudant_password"]') 40 | 41 | # Strip leading and trailing double quotes 42 | cloudant_password=${cloudant_password#\"} 43 | cloudant_password=${cloudant_password%\"} 44 | 45 | echo $cloudant_password | base64 -d 46 | } 47 | 48 | getCloudantPassword () { 49 | echo "orange" 50 | } 51 | 52 | 53 | getCloudantNodePort () { 54 | 55 | # local port=$(kubectl --namespace=kube-system get svc cloudantdb-ext -o json | jq '.["spec"]["ports"][1]["nodePort"]') 56 | 57 | # echo $port 58 | echo 5984 59 | } 60 | 61 | 62 | getCloudantURL () { 63 | # Construct the cloudant URL echo it back to caller. 64 | # $1 is Cloudant DB host name or IP address. 
65 | # defaults to cloudantdb 66 | 67 | local dbhost=$1 68 | 69 | if [ -z "$dbhost" ]; then 70 | dbhost=cloudantdb 71 | fi 72 | 73 | local password=$(getCloudantPassword) 74 | local port=$(getCloudantNodePort) 75 | 76 | echo "http://admin:$password@$dbhost:$port" 77 | 78 | } 79 | 80 | # The _all_dbs REST API returns a JSON list: 81 | # [ "_users", "helm_repos", "metrics", "metrics_app", "platform-db", "security-data", "stats", "tgz_files_icp" ] 82 | # The actual output from jq has newlines after each item in the list. 83 | # Also note the leading and trailing white space character of the string inside the brackets which needs to 84 | # be trimmed out. 85 | 86 | getCloudantDatabaseNames () { 87 | # $1 is Cloudant DB host name or IP address. 88 | # localhost is valid if running script on Cloudant DB host. 89 | 90 | local cloudantURL=$(getCloudantURL $1) 91 | local allDBs=$(curl --silent $cloudantURL/_all_dbs | jq '.') 92 | 93 | # Use tr to remove the newlines, double quotes, left and right square bracket and commas. 94 | # The awk idiom trims leading and trailing white space. 95 | allDBs=$(echo "$allDBs" | tr -d '[\n",]' | awk '{$1=$1};1' ) 96 | 97 | echo "$allDBs" 98 | } 99 | 100 | 101 | exportCloudantDatabaseNames () { 102 | # $1 is the Cloudant DB host name or IP address 103 | # localhost is valid if running script on Cloudant DB host. 
104 | # $2 is the path to directory where database names are to be exported 105 | local dbhost=$1 106 | local destDir=$2 107 | 108 | if [ -z "$destDir" ]; then 109 | destDir="$PWD" 110 | fi 111 | 112 | local allDBs=$(getCloudantDatabaseNames $dbhost) 113 | local dest="$destDir/dbnames.sh" 114 | 115 | if [ -f "$dest" ]; then 116 | # dbnames.sh already exists 117 | exported=$(grep ALL_DBS "$dest") 118 | if [ -z "$exported" ]; then 119 | # ALL_DBS not written in dbnames.sh, append it 120 | echo "export ALL_DBS=\"$allDBs\"" >> "$dest" 121 | fi 122 | else 123 | # Create dbnames.sh and write ALL_DBS to it 124 | echo "export ALL_DBS=\"$allDBs\"" > "$dest" 125 | chmod +x "$dest" 126 | fi 127 | } 128 | 129 | 130 | exportDBnames () { 131 | # Export the given dbnames to dbnames.sh in the given directory. 132 | # INPUTS: 133 | # 1. Quoted string space separated list of database names. 134 | # 2. Destination directory path. If not provided, current working 135 | # directory is used. 136 | 137 | local dbnames=$1 138 | local destDir=$2 139 | 140 | if [ -z "$destDir" ]; then 141 | destDir=$PWD 142 | fi 143 | 144 | local dest="$destDir/dbnames.sh" 145 | 146 | if [ -f "$dest" ]; then 147 | # dbnames.sh already exists 148 | # NOTE: do not use grep -q here; -q suppresses output so $exported would
# always be empty and the duplicate-guard below would always append.
# Capturing the matched line (as exportCloudantDatabaseNames does above)
# makes the -z test meaningful. exported=$(grep BACKED_UP_DBNAMES "$dest") 149 | if [ -z "$exported" ]; then 150 | # BACKED_UP_DBNAMES not written in dbnames.sh, append it 151 | echo "export BACKED_UP_DBNAMES=\"$dbnames\"" >> "$dest" 152 | fi 153 | else 154 | # Create dbnames.sh and write BACKED_UP_DBNAMES to it 155 | echo "export BACKED_UP_DBNAMES=\"$dbnames\"" > "$dest" 156 | chmod +x "$dest" 157 | fi 158 | } 159 | 160 | 161 | makeBackupFilePath () { 162 | # Return the full path of the file name with the backup for the given database name.
163 | # $1 is the backup directory path 164 | # $2 is the Cloudant database name 165 | local fileName="$2-backup.json" 166 | echo "$1/$fileName" 167 | } 168 | 169 | createDatabase () { 170 | # Create a Cloudant database 171 | # $1 is the host name of the Cloudant DB instance 172 | # localhost is valid if the script is run on the instance host. 173 | # $2 is the database name 174 | # 175 | # Both parameters are required. 176 | 177 | local dbhost=$1 178 | local dbname=$2 179 | 180 | local cloudantURL=$(getCloudantURL $dbhost) 181 | 182 | coucher database -c $cloudantURL -a create -d $dbname 183 | 184 | } 185 | 186 | 187 | deleteDatabase () { 188 | # Delete a Cloudant database 189 | # $1 is the host name of the Cloudant DB instance 190 | # localhost is valid if the script is run on the instance host. 191 | # $2 is the database name 192 | # 193 | # Both parameters are required. 194 | 195 | local dbhost=$1 196 | local dbname=$2 197 | 198 | local cloudantURL=$(getCloudantURL $dbhost) 199 | 200 | coucher database -c $cloudantURL -a delete -d $dbname 201 | 202 | } 203 | 204 | -------------------------------------------------------------------------------- /src/cloudant-backup/1.1/icp-client-config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | ## Licensed Material - Property of IBM 4 | # 5724-I63, 5724-H88, (C) Copyright IBM Corp. 2018 - All Rights Reserved. 5 | # US Government Users Restricted Rights - Use, duplication or disclosure 6 | # restricted by GSA ADP Schedule Contract with IBM Corp. 7 | # 8 | # DISCLAIMER: 9 | # The following source code is sample code created by IBM Corporation. 10 | # This sample code is provided to you solely for the purpose of assisting you 11 | # in the use of the product. The code is provided 'AS IS', without warranty or 12 | # condition of any kind. 
IBM shall not be liable for any damages arising out of 13 | # your use of the sample code, even if IBM has been advised of the possibility 14 | # of such damages. 15 | # 16 | # DESCRIPTION: 17 | # Login and context for kubectl. Only valid for 12 hours. 18 | # Edit with new cut-and-paste from ICP Console "configure client" 19 | # 20 | 21 | ### Replace this section 22 | # There should be 5 kubectl lines when the configuration commands are pasted in. 23 | 24 | kubectl config set-cluster mycluster.icp --server=https://10.0.0.10:8001 --insecure-skip-tls-verify=true 25 | kubectl config set-context mycluster.icp-context --cluster=mycluster.icp 26 | kubectl config set-credentials admin --token=eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJhdF9oYXNoIjoiZTN1OG0ybG9meG1ncmo4MWs4OWUiLCJyZWFsbU5hbWUiOiJjdXN0b21SZWFsbSIsInVuaXF1ZVNlY3VyaXR5TmFtZSI6ImFkbWluIiwiaXNzIjoiaHR0cHM6Ly9teWNsdXN0ZXIuaWNwOjk0NDMvb2lkYy9lbmRwb2ludC9PUCIsImF1ZCI6IjYzZmRmYmE4MTZlYzNlYWZiYzZlODQ5NjU0MGM1ZDI2IiwiZXhwIjoxNTIwOTA3MTI2LCJpYXQiOjE1MjA5MDcxMjYsInN1YiI6ImFkbWluIiwidGVhbVJvbGVNYXBwaW5ncyI6W119.ELl90UevuxZCjY74WKLKrFZiRRzJC8dHeYD3vipJqwox-crWONfSCdGzUmmlavGBcXS6HnqfDcFjqNCnmp07Rzz0Ns1exhMOjIxGYQ0cUxkmUHxhbxOu-tPoOW2RvsCLm5Boh0DkhSTvoi48G0r15elRHpEQ_L-MmpRl_CRPN_5b6Yif4PWfKW5EDPGbXwdS8BWgnM2Ueb2CnHwY72lmdsrf_YCQGyzHtGmb41IErphs6X4tDdLXRPjX4fCqFKPl9BiPZevEfwqBffk_EEbZ6A55865pFF4WEls0J2MYy3sJ6qHJdPI9PgIwtZ7mMyIXyd8H6JQ6WUveP8of4GeZGg 27 | kubectl config set-context mycluster.icp-context --user=admin --namespace=default 28 | kubectl config use-context mycluster.icp-context 29 | 30 | ### End of section to replace 31 | 32 | # Change context to use the kube-system namespace as preferred 33 | kubectl config set-context mycluster.icp-context --user admin --namespace=kube-system 34 | -------------------------------------------------------------------------------- /src/cloudant-backup/1.1/set-namespace-kube-system.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 
# 3 | # Licensed Material - Property of IBM 4 | # 5724-I63, 5724-H88, (C) Copyright IBM Corp. 2018 - All Rights Reserved. 5 | # US Government Users Restricted Rights - Use, duplication or disclosure 6 | # restricted by GSA ADP Schedule Contract with IBM Corp. 7 | # 8 | # DISCLAIMER: 9 | # The following source code is sample code created by IBM Corporation. 10 | # This sample code is provided to you solely for the purpose of assisting you 11 | # in the use of the product. The code is provided 'AS IS', without warranty or 12 | # condition of any kind. IBM shall not be liable for any damages arising out of 13 | # your use of the sample code, even if IBM has been advised of the possibility 14 | # of such damages. 15 | # 16 | # DESCRIPTION: 17 | # Switch the preferred namespace to kube-system 18 | # 19 | # Assumptions: 20 | # 1. A kube context has been established. 21 | # 2. Logged in as admin. 22 | # 3. ICP cluster name is default, mycluster 23 | # 24 | # "kubectl config view", can be used to confirm namespace change. 25 | # 26 | 27 | kubectl config set-context mycluster.icp-context --user admin --namespace=kube-system 28 | 29 | -------------------------------------------------------------------------------- /src/cloudant-backup/2.0/backup-cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Licensed Material - Property of IBM 3 | # 5724-I63, 5724-H88, (C) Copyright IBM Corp. 2018 - All Rights Reserved. 4 | # US Government Users Restricted Rights - Use, duplication or disclosure 5 | # restricted by GSA ADP Schedule Contract with IBM Corp. 6 | # 7 | # DISCLAIMER: 8 | # The following source code is sample code created by IBM Corporation. 9 | # This sample code is provided to you solely for the purpose of assisting you 10 | # in the use of the product. The code is provided 'AS IS', without warranty or 11 | # condition of any kind. 
IBM shall not be liable for any damages arising out of 12 | # your use of the sample code, even if IBM has been advised of the possibility 13 | # of such damages. 14 | # 15 | # 16 | # DESCRIPTION: 17 | # Count the number of backup directories in the backup home directory. 18 | # Compare the actual count with the retention limit and delete the oldest 19 | # directories until the count is at the retention limit. 20 | # 21 | # INPUTS: 22 | # 1. Path to backup directories home. (optional) 23 | # The backup directories home defaults to "backups" in the current 24 | # working directory. 25 | # 26 | # ASSUMPTIONS: 27 | # 1. It is assumed that all directories in the backup home directory 28 | # that start with "icp-cloudant-backup" are backup directories. 29 | # 30 | # 2. User has write permission for the backups directory home. 31 | # 32 | 33 | function usage { 34 | echo "" 35 | echo "Usage: backup-cleanup.sh [options]" 36 | echo " --backup-home - (optional) Full path to a backups home directory." 37 | echo " Defaults to backups in current working directory." 38 | echo "" 39 | echo " --retain - (optional) Number of backups to retain." 40 | echo " Defaults to 5" 41 | echo "" 42 | echo " --help|-h - emit this usage information" 43 | echo "" 44 | echo "Sample invocations:" 45 | echo " ./backup-cleanup.sh" 46 | echo " ./backup-cleanup.sh --backup-home /data/backups" 47 | echo "" 48 | echo " User is assumed to have write permission on backup home directory." 49 | echo "" 50 | } 51 | 52 | # The info() function is used to emit log messages. 53 | # It is assumed that SCRIPT is set in the caller. 54 | function info { 55 | local lineno=$1; shift 56 | local ts=$(date +[%Y/%m/%d-%T]) 57 | echo "$ts $SCRIPT($lineno) $*" 58 | } 59 | 60 | # member() returns 0 if the first argument is a member of the second argument. 61 | # $1 is the string that represents the item of interest 62 | # $2 is the string that represents a list of items separated by space characters. 
63 | # If item is in list the status 0 is returned otherwise status 1 is returned. 64 | # NOTE: When using member() in a condition do not use [ ] or [[ ]] expressions. 65 | # Example: if $(member "A" "a B C d A"); then 66 | # echo "A is a member" 67 | # else 68 | # echo "A is not a member" 69 | # fi 70 | # 71 | function member() { 72 | local item=$1 73 | local list=$2 74 | 75 | rc=1 76 | for x in $list; do 77 | if [ "$x" == "$item" ]; then 78 | rc=0 79 | break 80 | fi 81 | done 82 | 83 | return $rc 84 | } 85 | 86 | 87 | ############ "Main" starts here 88 | SCRIPT=${0##*/} 89 | 90 | info $LINENO "BEGIN $SCRIPT" 91 | 92 | backupHome="" 93 | 94 | # process the input args 95 | # For keyword-value arguments the arg gets the keyword and 96 | # the case statement assigns the value to a script variable. 97 | # If any "switch" args are added to the command line args, 98 | # then it wouldn't need a shift after processing the switch 99 | # keyword. The script variable for a switch argument would 100 | # be initialized to "false" or the empty string and if the 101 | # switch is provided on the command line it would be assigned 102 | # "true". 103 | # 104 | while (( $# > 0 )); do 105 | arg=$1 106 | case $arg in 107 | -h|--help ) usage; exit 0 108 | ;; 109 | 110 | -backup-home|--backup-home ) backupHome=$2; shift 111 | ;; 112 | 113 | -retain|--retain ) retainCount=$2; shift 114 | ;; 115 | 116 | * ) usage; 117 | info $LINENO "ERROR: Unknown option: $arg in command line." 118 | exit 1 119 | ;; 120 | esac 121 | # shift to next key-value pair 122 | shift 123 | done 124 | 125 | if [ -z "$backupHome" ]; then 126 | backupHome="${PWD}/backups" 127 | fi 128 | info $LINENO "Backup home directory is: $backupHome" 129 | 130 | if [ -z "$retainCount" ]; then 131 | retainCount=5 132 | fi 133 | info $LINENO "Retaining $retainCount backup directories." 
134 | 135 | allBackupDirs=$( ls "${backupHome}" | grep icp-cloudant-backup ) 136 | keepBackupDirs=$( ls -rt "${backupHome}" | grep icp-cloudant-backup | tail -${retainCount} ) 137 | 138 | for backupDir in $keepBackupDirs; do 139 | info $LINENO "Keeping backup directory: ${backupHome}/$backupDir" 140 | done 141 | 142 | for backupDir in $allBackupDirs; do 143 | if ! $(member "$backupDir" "$keepBackupDirs"); then 144 | rm -rf "${backupHome}/${backupDir}" 145 | info $LINENO "Removed backup directory: ${backupHome}/$backupDir" 146 | fi 147 | done 148 | 149 | info $LINENO "END $SCRIPT" 150 | -------------------------------------------------------------------------------- /src/cloudant-backup/2.0/create-database.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Licensed Material - Property of IBM 4 | # 5724-I63, 5724-H88, (C) Copyright IBM Corp. 2018 - All Rights Reserved. 5 | # US Government Users Restricted Rights - Use, duplication or disclosure 6 | # restricted by GSA ADP Schedule Contract with IBM Corp. 7 | # 8 | # DISCLAIMER: 9 | # The following source code is sample code created by IBM Corporation. 10 | # This sample code is provided to you solely for the purpose of assisting you 11 | # in the use of the product. The code is provided 'AS IS', without warranty or 12 | # condition of any kind. IBM shall not be liable for any damages arising out of 13 | # your use of the sample code, even if IBM has been advised of the possibility 14 | # of such damages. 15 | # 16 | # DESCRIPTION: 17 | # Create one or more ICP Cloudant databases. 18 | # 19 | # Pre-reqs: 20 | # kubectl is needed to interact with the ICP cluster. 21 | # jq is needed to do JSON parsing. 22 | # coucher-cli is used to create databases. 23 | # 24 | # INPUTS: 25 | # 1. Kubernetes service host name, host name (FQDN) or IP address of the 26 | # Cloudant DB server. (optional) Defaults to cloudantdb.kube-system. 
27 | # If running outside a container, this needs to be one of the ICP master 28 | # nodes where the Cloudant database service is running. 29 | # 30 | # 2. One or more names of the databases to be created. 31 | # If more than one name is provided it is in the form of a quoted string 32 | # with the names separated by spaces. 33 | # 34 | # Assumptions: 35 | # 1. If running in a container in a pod, a kubernetes config context is 36 | # auto-magically created and kubectl commands "just work." 37 | # If running outside of a kube pod, it is assumed the user has a current 38 | # kubernetes context for the admin user. 39 | # 40 | # 2. If a Cloudant DB server host name is not provided it is assumed 41 | # this script is being run in the context of a Kubernetes pod and the 42 | # cloudantdb.kube-system host is used. If this script is running at 43 | # a host command line on a master node, then localhost needs to be 44 | # provided for the --dbhost argument value. 45 | # 46 | 47 | function usage { 48 | echo "" 49 | echo "Usage: create-database.sh [options]" 50 | echo " --dbhost - (optional) Host name or IP address of the Cloudant DB service provider" 51 | echo " For example, one of the ICP master nodes." 52 | echo " Defaults to cloudantdb.kube-system." 53 | echo "" 54 | echo " --dbnames - (required) One or more names of the databases to be created." 55 | echo " If more than one name is provided it must be a quoted string of" 56 | echo " space separated names." 57 | echo "" 58 | echo " --help|-h - emit this usage information" 59 | echo "" 60 | echo " - and -- are accepted as keyword argument indicators" 61 | echo "" 62 | echo "Sample invocations:" 63 | echo " ./create-database.sh --dbhost master01.xxx.yyy --dbnames \"platform-db security-data\"" 64 | echo "" 65 | } 66 | 67 | # import helper functions 68 | . 
./helper-functions.sh 69 | 70 | ############ "Main" starts here 71 | SCRIPT=${0##*/} 72 | 73 | info $LINENO "BEGIN $SCRIPT" 74 | 75 | dbhost="" 76 | dbnames="" 77 | 78 | # process the input args 79 | # For keyword-value arguments the arg gets the keyword and 80 | # the case statement assigns the value to a script variable. 81 | # If any "switch" args are added to the command line args, 82 | # then it wouldn't need a shift after processing the switch 83 | # keyword. The script variable for a switch argument would 84 | # be initialized to "false" or the empty string and if the 85 | # switch is provided on the command line it would be assigned 86 | # "true". 87 | # 88 | while (( $# > 0 )); do 89 | arg=$1 90 | case $arg in 91 | -h|--help ) usage; exit 92 | ;; 93 | 94 | -dbhost|--dbhost) dbhost=$2; shift 95 | ;; 96 | 97 | -dbnames|--dbnames) dbnames=$2; shift 98 | ;; 99 | 100 | * ) usage; 101 | info $LINENO "ERROR: Unknown option: $arg in command line." 102 | exit 1 103 | ;; 104 | esac 105 | # shift to next key-value pair 106 | shift 107 | done 108 | 109 | if [ -z "$dbhost" ]; then 110 | dbhost=cloudantdb.kube-system 111 | fi 112 | info $LINENO "Cloudant DB host: $dbhost" 113 | 114 | 115 | if [ -z "$dbnames" ]; then 116 | info $LINENO "ERROR: A list of database names (--dbnames) is required." 
117 | exit 2 118 | fi 119 | 120 | currentDBs=$(getCloudantDatabaseNames $dbhost) 121 | 122 | for name in $dbnames; do 123 | dbexists=$(echo "$currentDBs" | grep $name) 124 | if [ -z "$dbexists" ]; then 125 | info $LINENO "Creating database: $name on Cloudant instance host: $dbhost" 126 | createDatabase $dbhost $name 127 | else 128 | info $LINENO "Database: $name already exists on Cloudant instance host: $dbhost" 129 | fi 130 | done 131 | 132 | info $LINENO "END $SCRIPT" 133 | -------------------------------------------------------------------------------- /src/cloudant-backup/2.0/delete-database.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Licensed Material - Property of IBM 4 | # 5724-I63, 5724-H88, (C) Copyright IBM Corp. 2018 - All Rights Reserved. 5 | # US Government Users Restricted Rights - Use, duplication or disclosure 6 | # restricted by GSA ADP Schedule Contract with IBM Corp. 7 | # 8 | # DISCLAIMER: 9 | # The following source code is sample code created by IBM Corporation. 10 | # This sample code is provided to you solely for the purpose of assisting you 11 | # in the use of the product. The code is provided 'AS IS', without warranty or 12 | # condition of any kind. IBM shall not be liable for any damages arising out of 13 | # your use of the sample code, even if IBM has been advised of the possibility 14 | # of such damages. 15 | # 16 | # DESCRIPTION: 17 | # Delete one or more ICP Cloudant databases. 18 | # 19 | # Pre-reqs: 20 | # kubectl is needed to interact with the ICP cluster. 21 | # jq is needed to do JSON parsing. 22 | # coucher-cli is used to delete databases. 23 | # 24 | # INPUTS: 25 | # 1. Kubernetes service host name, host name (FQDN) or IP address of the 26 | # Cloudant DB server. (optional) Defaults to cloudantdb.kube-system. 27 | # If running outside a container, this needs to be one of the ICP master 28 | # nodes where the Cloudant database service is running. 
29 | # 30 | # 2. One or more names of the databases to be deleted. 31 | # If more than one name is provided it is in the form of a quoted string 32 | # with the names separated by spaces. 33 | # 34 | # Assumptions: 35 | # 1. The user has a current kubernetes context for the admin user. 36 | # 37 | # 2. If a Cloudant DB server host name is not provided it is assumed 38 | # this script is being run on the Cloudant DB server host as 39 | # localhost is used in the Cloudant DB URL. 40 | # 41 | 42 | function usage { 43 | echo "" 44 | echo "Usage: delete-database.sh [options]" 45 | echo " --dbhost - (optional) Host name or IP address of the Cloudant DB service provider" 46 | echo " For example, one of the ICP master nodes." 47 | echo " Defaults to cloudantdb.kube-system." 48 | echo "" 49 | echo " --dbnames - (required) One or more names of the databases to be created." 50 | echo " If more than one name is provided it must be a quoted string of" 51 | echo " space separated names." 52 | echo "" 53 | echo " --help|-h - emit this usage information" 54 | echo "" 55 | echo " - and -- are accepted as keyword argument indicators" 56 | echo "" 57 | echo "Sample invocations:" 58 | echo " ./delete-database.sh --dbhost master01.xxx.yyy --dbnames \"platform-db security-data\"" 59 | echo "" 60 | } 61 | 62 | # import helper functions 63 | . ./helper-functions.sh 64 | 65 | ############ "Main" starts here 66 | SCRIPT=${0##*/} 67 | 68 | info $LINENO "BEGIN $SCRIPT" 69 | 70 | dbhost="" 71 | dbnames="" 72 | 73 | # process the input args 74 | # For keyword-value arguments the arg gets the keyword and 75 | # the case statement assigns the value to a script variable. 76 | # If any "switch" args are added to the command line args, 77 | # then it wouldn't need a shift after processing the switch 78 | # keyword. 
The script variable for a switch argument would 79 | # be initialized to "false" or the empty string and if the 80 | # switch is provided on the command line it would be assigned 81 | # "true". 82 | # 83 | while (( $# > 0 )); do 84 | arg=$1 85 | case $arg in 86 | -h|--help ) usage; exit 87 | ;; 88 | 89 | -dbhost|--dbhost) dbhost=$2; shift 90 | ;; 91 | 92 | -dbnames|--dbnames) dbnames=$2; shift 93 | ;; 94 | 95 | * ) usage; 96 | info $LINENO "ERROR: Unknown option: $arg in command line." 97 | exit 1 98 | ;; 99 | esac 100 | # shift to next key-value pair 101 | shift 102 | done 103 | 104 | if [ -z "$dbhost" ]; then 105 | dbhost=cloudantdb.kube-system 106 | fi 107 | info $LINENO "Cloudant DB host: $dbhost" 108 | 109 | 110 | if [ -z "$dbnames" ]; then 111 | info $LINENO "ERROR: A list of database names (--dbnames) is required." 112 | exit 2 113 | fi 114 | 115 | currentDBs=$(getCloudantDatabaseNames $dbhost) 116 | 117 | for name in $dbnames; do 118 | dbexists=$(echo "$currentDBs" | grep $name) 119 | if [ -n "$dbexists" ]; then 120 | info $LINENO "Deleting database: $name on Cloudant instance host: $dbhost" 121 | deleteDatabase $dbhost $name 122 | else 123 | info $LINENO "Database: $name does not exist on Cloudant instance host: $dbhost" 124 | fi 125 | done 126 | -------------------------------------------------------------------------------- /src/cloudant-backup/2.0/get-database-names.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Licensed Material - Property of IBM 4 | # 5724-I63, 5724-H88, (C) Copyright IBM Corp. 2018 - All Rights Reserved. 5 | # US Government Users Restricted Rights - Use, duplication or disclosure 6 | # restricted by GSA ADP Schedule Contract with IBM Corp. 7 | # 8 | # DISCLAIMER: 9 | # The following source code is sample code created by IBM Corporation. 10 | # This sample code is provided to you solely for the purpose of assisting you 11 | # in the use of the product. 
The code is provided 'AS IS', without warranty or 12 | # condition of any kind. IBM shall not be liable for any damages arising out of 13 | # your use of the sample code, even if IBM has been advised of the possibility 14 | # of such damages. 15 | # 16 | # DESCRIPTION: 17 | # Get a list of all the ICP Cloudant databases defined and write it to stdout. 18 | # It is handy to be able to quickly see the list of databases names for testing. 19 | # 20 | # Pre-reqs: 21 | # 1. bash is needed for various scripting conventions 22 | # Experiments with Ash in Alpine showed that bash is needed. 23 | # 2. kubectl is needed to interact with the ICP cluster. 24 | # 3. jq is needed to do JSON parsing. 25 | # 26 | # 27 | # INPUTS: 28 | # 1. Host name (FQDN) or IP address of the Cloudant DB server. (optional) 29 | # Defaults to localhost. This needs to be one of the ICP master nodes 30 | # where the Cloudant database service is running. 31 | # 32 | # Assumptions: 33 | # 1. The user has a current kubernetes context for the admin user. 34 | # 35 | # 2. If a Cloudant DB server host name is not provided it is assumed 36 | # this script is being run on the Cloudant DB server host as 37 | # localhost is used in the Cloudant DB URL. 38 | # 39 | 40 | function usage { 41 | echo "" 42 | echo "Usage: get-database-names.sh [options]" 43 | echo " --dbhost - (optional) Host name or IP address of the Cloudant DB service provider" 44 | echo " For example, one of the ICP master nodes." 45 | echo " Defaults to cloudantdb." 46 | echo "" 47 | echo " --help|-h - emit this usage information" 48 | echo "" 49 | echo " - and -- are accepted as keyword argument indicators" 50 | echo "" 51 | echo "Sample invocations:" 52 | echo " ./get-database-names.sh" 53 | echo " ./get-database-names.sh --dbhost master01.xxx.yyy" 54 | echo "" 55 | } 56 | 57 | # import helper functions 58 | . 
./helper-functions.sh 59 | 60 | # MAIN 61 | SCRIPT=${0##*/} 62 | 63 | info $LINENO "BEGIN $SCRIPT" 64 | 65 | dbhost="" 66 | 67 | # process the input args 68 | # For keyword-value arguments the arg gets the keyword and 69 | # the case statement assigns the value to a script variable. 70 | # If any "switch" args are added to the command line args, 71 | # then it wouldn't need a shift after processing the switch 72 | # keyword. The script variable for a switch argument would 73 | # be initialized to "false" or the empty string and if the 74 | # switch is provided on the command line it would be assigned 75 | # "true". 76 | # 77 | while (( $# > 0 )); do 78 | arg=$1 79 | case $arg in 80 | -h|--help ) usage; exit 81 | ;; 82 | 83 | -dbhost|--dbhost) dbhost=$2; shift 84 | ;; 85 | 86 | * ) usage; info $LINENO "ERROR: Unknown option: $arg in command line." 87 | exit 1 88 | ;; 89 | esac 90 | # shift to next key-value pair 91 | shift 92 | done 93 | 94 | if [ -z "$dbhost" ]; then 95 | dbhost=cloudantdb.kube-system 96 | fi 97 | info $LINENO "Cloudant DB host: $dbhost" 98 | 99 | allDBs=$(getCloudantDatabaseNames $dbhost) 100 | 101 | if [ -z "$allDBs" ]; then 102 | info $LINENO "No databases are defined in the Cloudant instance hosted by: $dbhost" 103 | info $LINENO "END $SCRIPT" 104 | else 105 | info $LINENO "END $SCRIPT" 106 | info $LINENO "ICP Cloudant database names:" 107 | echo "\"$allDBs\"" 108 | fi 109 | -------------------------------------------------------------------------------- /src/cloudant-backup/Dockerfile-1.0: -------------------------------------------------------------------------------- 1 | FROM alpine 2 | ARG version 3 | 4 | # No time for careful testing now, bash is needed for its parameter expansion capabilities. 5 | # Perhaps ash supports same parameter expansion capabilites. 6 | # Curl is needed for interaction with Cloudant REST API. 7 | # Nodejs is needed from Cloudant backup/restore utilities. 
8 | # Install bash, curl, nodejs, npm, jq (npm is a nodejs pre-req) 9 | # 10 | RUN apk add --update bash curl nodejs jq 11 | RUN npm install -g @cloudant/couchbackup 12 | RUN npm install -g coucher 13 | 14 | # Install kubectl 15 | RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl \ 16 | && chmod +x ./kubectl \ 17 | && mv ./kubectl /usr/local/bin/kubectl 18 | 19 | COPY $version/*.sh / 20 | 21 | CMD /cloudant-backup.sh 22 | -------------------------------------------------------------------------------- /src/cloudant-backup/Dockerfile-2.0: -------------------------------------------------------------------------------- 1 | # Sample invocation: 2 | # From the directory of the Dockerfile-2.0 3 | # docker build -f Dockerfile-2.0 --build-arg version=2.0 -t ibmcase/icp-cloudant-backup . 4 | # 5 | FROM alpine 6 | ARG version 7 | 8 | # bash is needed for the various scripting idioms used in the shell scripts 9 | # Experiments showed that the Alpine default shell (Almquist shell) is not sufficient. 10 | # Curl is needed for interaction with Cloudant REST API. 11 | # Nodejs is needed for Cloudant backup/restore utilities. 12 | # The jq utility is used to parse the JSON that is returned from cloudantdb REST calls. 13 | # couchbackup has the couchbackup and couchrestore utilities. 14 | # couchdb-cli is used for creating and deleting databases. 
15 | # 16 | RUN apk add --update bash curl nodejs jq 17 | RUN npm install -g @cloudant/couchbackup 18 | RUN npm install -g couchdb-cli 19 | 20 | # Kubectl is needed to get information from cloudantdb service and from cloudant-credentials secret 21 | RUN curl --silent -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl \ 22 | && chmod +x ./kubectl \ 23 | && mv ./kubectl /usr/local/bin/kubectl 24 | 25 | # If the +x bit is on in the source then it will be retained in the image. 26 | COPY $version/*.sh / 27 | 28 | CMD [ "/cloudant-backup.sh", "--dbhost", "cloudantdb.kube-system", \ 29 | "--backup-home", "/data/backups", \ 30 | "--exclude", "metrics metrics_app" ] 31 | -------------------------------------------------------------------------------- /src/cloudant-backup/doc/ICP_cloudant_backup_and_restore.md: -------------------------------------------------------------------------------- 1 | # ICP Cloudant Backup and Restore Scripts 2 | 3 | The backup and restore scripts can be considered in two groups: 4 | 1. Installation and configuration scripts 5 | 2. Backup and restore utility scripts 6 | 7 | ## Installation and configuration scripts 8 | - The installation and configuration scripts must be executed before using any of the backup and restore scripts. 9 | - *NOTE:* The scripts are Ubuntu specific. (*TODO* - Create equivalent scripts for RHEL.) 10 | - The installation and configuration scripts have a number in the first part of the script name that indicates the order in which they are expected to be executed. 11 | 12 | The following table describes briefly the purpose of each installation and configuration script. 
13 | 14 | | **Script Name** | **Comments** | 15 | |------------------------------|------------------------------------------------------| 16 | |`01_install-kubectl.sh` |Install kubectl which is needed to interact with the ICP kubernetes cluster | 17 | |`02_install-node9x.sh` |Node is the implementation language of the Cloudant backup and restore utilities used by the scripts. | 18 | |`03_install-npm-latest.sh` |NPM is the node package manager and is needed to install various node packages. | 19 | |`04_install-cloudant-utils.sh` |Installs couchbackup (and restore) as well as couchdb-cli | 20 | |`05_install-jq.sh` |jq is used to parse JSON when interacting with the kubernetes cluster | 21 | |`icp-client-config.sh` |Used to configure a current kubernetes context with admin access to the cluster.
Sets the preferred namespace to kube-system.
*NOTE:* Before running `icp-client-config.sh`, you need to edit it and paste in the `kubectl` client configuration commands that configure the context.
The client configuration commands are available from the ICP console in the user icon drop-down menu.
Depending on the scenario, it may be convenient to cut-and-paste the client configuration kubectl commands directly into a shell window on the backup and restore staging server.
`icp-client-config.sh` also sets the preferred namespace to `kube-system`.| 22 | |`set-namespace-kube-system.sh` |Used to set the preferred namespace to kube-system.
The scripts that use kubectl commands all include a `--namespace=kube-system` option, so it is not necessary to use this script. However, it is convenient for the user if kubectl commands need to be run from the shell prompt for any reason.
The only namespace of interest in the context of Cloudant backup and restore is kube-system.| 23 | 24 | ## Backup and restore utility scripts 25 | - The backup and restore utility scripts have pre-reqs that the following software is installed on the machine running the scripts: 26 | - kubectl 27 | - node (The provided script installs Node 9.x. Couchbackup requires at least Node 6.3.) 28 | - npm 29 | - jq 30 | - couchbackup 31 | - couchdb-cli 32 | 33 | - The backup and restore utilities can be run on a host remote to the ICP master nodes as long as the Cloudant database server host name or IP address is provided using the `--dbhost` argument to the scripts. 34 | - All backup and restore utility scripts have usage information available with the `--help` or `-h` option. 35 | - All scripts assume the user has a current kubernetes context with "cluster administrator" access to the cluster. 36 | - Backups are created in a backup directory with the name `icp-cloudant-backup-`, where `` has the form: `YYYY-mm-dd-HH-MM-SS`. (The shell command: `date +%Y-%m-%d-%H-%M-%S` is used to create the timestamp.) 37 | - The file names in the backup directory have the form: `-backup.json`. 38 | - A "backup home" directory is where the time-stamped backup directory is created. When a backup is invoked the user provides a path to the backup home directory using the `--backup-home` parameter to the `cloudant-backup.sh` script. The backup home is a directory of directories. 39 | - When a backup is taken, the backup directory gets an executable file created in it named `dbnames.sh`. The `dbnames.sh` file is sourced by the `cloudant-restore.sh` script to get a list of the database names for which a backup is stored in the directory. The backed up database names are the value of the `BACKED_UP_DBNAMES` environment variable exported in the `dbnames.sh` file. 40 | 41 | The following table describes briefly the role of each backup and restore utility script. 
42 | 43 | | **Script Name** | **Comments** | 44 | |-------------------------------------|------------------------------------------------------| 45 | |`helperFunctions.sh` |This script is sourced by the other scripts for its collection of "helper" functions.
This script is not intended to be run directly.| 46 | |`get-database-names.sh` |Get a list of ICP Cloudant database names.
The list is returned as a quoted string with the names separated by a space character. | 47 | |`cloudant-backup.sh` |Backup the ICP Cloudant databases. The default behavior is to backup all databases to a time-stamped directory in `./backups`.
The `--backup-home` parameter can be used to provide a path to the backup home.
The `--dbnames` script option can be used to provide a list of 1 or more database names to be backed up. | 48 | |`cloudant-restore.sh` |Restore ICP Cloudant databases. The path to a backup directory is a required input using the `--backup-dir` parameter. | 49 | |`create-database.sh` |Create 1 or more databases with the given names in the ICP Cloudant database instance.
The database names are provided using the `--dbnames` parameter. | 50 | |`delete-database.sh` |Delete 1 or more databases with the given names in the ICP Cloudant database instance.
The database names are provided using the `--dbnames` parameter. | 51 | -------------------------------------------------------------------------------- /src/cloudant-backup/doc/ICP_cloudant_backup_and_restore.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ibm-cloud-architecture/icp-backup/2056dea4f122e066eb3fcfa69f0f449a2b5b3970/src/cloudant-backup/doc/ICP_cloudant_backup_and_restore.pdf -------------------------------------------------------------------------------- /src/cloudant-ppa/build-ppa-archive.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Licensed Material - Property of IBM 4 | # 5724-I63, 5724-H88, (C) Copyright IBM Corp. 2018 - All Rights Reserved. 5 | # US Government Users Restricted Rights - Use, duplication or disclosure 6 | # restricted by GSA ADP Schedule Contract with IBM Corp. 7 | # 8 | # DISCLAIMER: 9 | # The following source code is sample code created by IBM Corporation. 10 | # This sample code is provided to you solely for the purpose of assisting you 11 | # in the use of the product. The code is provided 'AS IS', without warranty or 12 | # condition of any kind. IBM shall not be liable for any damages arising out of 13 | # your use of the sample code, even if IBM has been advised of the possibility 14 | # of such damages. 15 | # 16 | # Description 17 | # Build a helm and image PPA. 18 | # 19 | # ASSUMPTIONS: 20 | # 1. It is assumed that the helm chart has been archived and gzipped into a file with 21 | # the name of the chart, e.g. icp-cloudant-backup with a suffix of -chart-VERSION.tgz 22 | # where VERSION is Semver 2.0 version number, e.g., 0.1.0. (Semver seems to be the 23 | # proper version numbering scheme to use here.) 24 | # 25 | # 2. 
Likewise the image file with a docker image is assumed to be named with the name 26 | # of the chart, e.g., icp-cloudant-backup with a suffix of -image-VERSION.tgz 27 | # The following command can be used to get an image in the locak registry into 28 | # a tgz file: 29 | # docker save ibmcase/icp-cloudant-backup | gzip > icp-cloudant-backup-image-0.1.0.tgz 30 | # 31 | function usage { 32 | echo "" 33 | echo "Usage: build-ppa-archive.sh [options]" 34 | echo " --chart-name - (required) The name of the chart." 35 | echo " --chart-version - (required) The version number of the chart file." 36 | echo " --chart-home - (required) The path to the home directory for charts." 37 | echo "" 38 | echo " --image-name - (required) The name of the image." 39 | echo " --image-version - (required) The version number of the image file." 40 | echo " --image-tag - (optional) The tag of the image. Defaults to latest." 41 | echo " --image-home - (required) The path to the home directory for images." 42 | echo "" 43 | echo " --help|-h - emit this usage information" 44 | echo "" 45 | echo "Sample invocations (all on one line):" 46 | echo "In the sample below the chart and image .tgz files are in the user's home directory." 47 | echo " ./build-ppa-archive.sh --chart-name icp-cloudant-backup " 48 | echo " --chart-version 0.1.0 --chart-home ~/ " 49 | echo " --image-name icp-cloudant-backup " 50 | echo " --image-version 0.1.0 --image-home ~/" 51 | echo "" 52 | } 53 | 54 | function info { 55 | local lineno=$1; shift 56 | local ts=$(date +[%Y/%m/%d-%T]) 57 | echo "$ts $SCRIPT($lineno) $*" 58 | } 59 | 60 | ############ "Main" starts here 61 | SCRIPT=${0##*/} 62 | 63 | image_name="" 64 | image_version="" 65 | image_tag="" 66 | image_home="" 67 | chart_name="" 68 | chart_version="" 69 | chart_home="" 70 | 71 | # process the input args 72 | # For keyword-value arguments the arg gets the keyword and 73 | # the case statement assigns the value to a script variable. 
74 | # If any "switch" args are added to the command line args, 75 | # then it wouldn't need a shift after processing the switch 76 | # keyword. The script variable for a switch argument would 77 | # be initialized to "false" or the empty string and if the 78 | # switch is provided on the command line it would be assigned 79 | # "true". 80 | # 81 | while (( $# > 0 )); do 82 | arg=$1 83 | case $arg in 84 | -h|--help ) usage; exit 85 | ;; 86 | 87 | -image-name|--image-name) image_name=$2; shift 88 | ;; 89 | 90 | -image-version|--image-version) image_version=$2; shift 91 | ;; 92 | 93 | -tag|--tag|-image-tag|--image-tag) image_tag=$2; shift 94 | ;; 95 | 96 | -image-home|--image-home) image_home=$2; shift 97 | ;; 98 | 99 | -chart-name|--chart-name) chart_name=$2; shift 100 | ;; 101 | 102 | -version|--version|-chart-version|--chart-version) chart_version=$2; shift 103 | ;; 104 | 105 | -chart-home|--chart-home) chart_home=$2; shift 106 | ;; 107 | 108 | * ) usage; 109 | info $LINENO "ERROR: Unknown option: $arg in command line." 110 | exit 1 111 | ;; 112 | esac 113 | # shift to next key-value pair 114 | shift 115 | done 116 | 117 | 118 | info $LINENO "BEGIN Build PPA archive." 119 | 120 | if [ -z "$image_tag" ]; then 121 | image_tag=latest 122 | fi 123 | 124 | mkdir ppa_archive 125 | mkdir -p ppa_archive/images 126 | mkdir -p ppa_archive/charts 127 | 128 | if [ -f ${chart_home}/${chart_name}-chart-${chart_version}.tgz ]; then 129 | echo "Copying chart from ${chart_home}/${chart_name}-chart-${chart_version}.tgz to charts/ ..." 130 | cp ${chart_home}/${chart_name}-chart-${chart_version}.tgz ppa_archive/charts/ 131 | else 132 | info $LINENO "ERROR: ${chart_home}/${chart_name}-chart-${chart_version}.tgz file does not exist." 133 | exit 2 134 | fi 135 | 136 | if [ -f ${image_home}/${image_name}-image-${image_version}.tgz ]; then 137 | echo "Copying image from ${image_home}/${image_name}-image-${image_version}.tgz to images/ ..." 
138 | cp ${image_home}/${image_name}-image-${image_version}.tgz ppa_archive/images/ 139 | else 140 | info $LINENO "ERROR: ${image_home}/${image_name}-image-${image_version}.tgz file does not exist." 141 | exit 3 142 | fi 143 | 144 | 145 | info $LINENO "Updating manifest.json with chart version, image version and tag ..." 146 | sed -e 's/__CHART-VERSION__/'${chart_version}'/' \ 147 | -e 's/__IMAGE-TAG__/'${image_tag}'/' \ 148 | -e 's/__IMAGE-VERSION__/'${image_version}'/' manifest.json.tmpl > ppa_archive/manifest.json 149 | 150 | info $LINENO "Updating manifest.yaml with chart version and image tag ..." 151 | sed -e 's/__CHART-VERSION__/'${chart_version}'/' \ 152 | -e 's/__IMAGE-TAG__/'${image_tag}'/' manifest.yaml.tmpl > ppa_archive/manifest.yaml 153 | 154 | 155 | echo "Building ${chart_name}-ppa-${chart_version}.tgz ..." 156 | tar -C ./ppa_archive -czvf ${chart_name}-ppa-${chart_version}.tgz images charts manifest.json manifest.yaml 157 | 158 | rm -rf ./ppa_archive 159 | 160 | info $LINENO "END build PPA archive." 
161 | -------------------------------------------------------------------------------- /src/cloudant-ppa/manifest.json.tmpl: -------------------------------------------------------------------------------- 1 | { 2 | "manifest-revision": "1.0", 3 | "charts": [ 4 | { 5 | "archive": "charts/icp-cloudant-backup-chart-__CHART-VERSION__.tgz" 6 | } 7 | ], 8 | "images": [ 9 | { 10 | "image": "ibmcase/icp-cloudant-backup", 11 | "tag": "__IMAGE-TAG__", 12 | "archive": "images/icp-cloudant-backup-image-__IMAGE-VERSION__.tgz" 13 | } 14 | ] 15 | } 16 | -------------------------------------------------------------------------------- /src/cloudant-ppa/manifest.yaml.tmpl: -------------------------------------------------------------------------------- 1 | revision: "1.0" 2 | output-filename: "" 3 | charts: 4 | - archive: charts/icp-cloudant-backup-chart-__CHART-VERSION__.tgz 5 | 6 | images: 7 | - image: ibmcase/icp-cloudant-backup:__IMAGE-TAG__ 8 | references: 9 | - repository: ibmcase/icp-cloudant-backup:__IMAGE-TAG__ 10 | -------------------------------------------------------------------------------- /src/mariadb-backup/1.0/backup-cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Licensed Material - Property of IBM 3 | # 5724-I63, 5724-H88, (C) Copyright IBM Corp. 2018 - All Rights Reserved. 4 | # US Government Users Restricted Rights - Use, duplication or disclosure 5 | # restricted by GSA ADP Schedule Contract with IBM Corp. 6 | # 7 | # DISCLAIMER: 8 | # The following source code is sample code created by IBM Corporation. 9 | # This sample code is provided to you solely for the purpose of assisting you 10 | # in the use of the product. The code is provided 'AS IS', without warranty or 11 | # condition of any kind. IBM shall not be liable for any damages arising out of 12 | # your use of the sample code, even if IBM has been advised of the possibility 13 | # of such damages.
14 | # 15 | # 16 | # DESCRIPTION: 17 | # Count the number of backup directories in the backup home directory. 18 | # Compare the actual count with the retention limit and delete the oldest 19 | # directories until the count is at the retention limit. 20 | # 21 | # INPUTS: 22 | # 1. Path to backup directories home. (optional) 23 | # The backup directories home defaults to "backups" in the current 24 | # working directory. 25 | # 26 | # ASSUMPTIONS: 27 | # 1. It is assumed that all directories in the backup home directory 28 | # that start with "icp-cloudant-backup" are backup directories. 29 | # 30 | # 2. User has write permission for the backups directory home. 31 | # 32 | 33 | function usage { 34 | echo "" 35 | echo "Usage: backup-cleanup.sh [options]" 36 | echo " --backup-home - (optional) Full path to a backups home directory." 37 | echo " Defaults to backups in current working directory." 38 | echo "" 39 | echo " --retain - (optional) Number of backups to retain." 40 | echo " Defaults to 5" 41 | echo "" 42 | echo " --help|-h - emit this usage information" 43 | echo "" 44 | echo "Sample invocations:" 45 | echo " ./backup-cleanup.sh" 46 | echo " ./backup-cleanup.sh --backup-home /data/backups" 47 | echo "" 48 | echo " User is assumed to have write permission on backup home directory." 49 | echo "" 50 | } 51 | 52 | # The info() function is used to emit log messages. 53 | # It is assumed that SCRIPT is set in the caller. 54 | function info { 55 | local lineno=$1; shift 56 | local ts=$(date +[%Y/%m/%d-%T]) 57 | echo "$ts $SCRIPT($lineno) $*" 58 | } 59 | 60 | # member() returns 0 if the first argument is a member of the second argument. 61 | # $1 is the string that represents the item of interest 62 | # $2 is the string that represents a list of items separated by space characters. 63 | # If item is in list the status 0 is returned otherwise status 1 is returned. 64 | # NOTE: When using member() in a condition do not use [ ] or [[ ]] expressions. 
65 | # Example: if $(member "A" "a B C d A"); then 66 | # echo "A is a member" 67 | # else 68 | # echo "A is not a member" 69 | # fi 70 | # 71 | function member() { 72 | local item=$1 73 | local list=$2 74 | 75 | rc=1 76 | for x in $list; do 77 | if [ "$x" == "$item" ]; then 78 | rc=0 79 | break 80 | fi 81 | done 82 | 83 | return $rc 84 | } 85 | 86 | 87 | ############ "Main" starts here 88 | SCRIPT=${0##*/} 89 | 90 | info $LINENO "BEGIN $SCRIPT" 91 | 92 | backupHome="" 93 | 94 | # process the input args 95 | # For keyword-value arguments the arg gets the keyword and 96 | # the case statement assigns the value to a script variable. 97 | # If any "switch" args are added to the command line args, 98 | # then it wouldn't need a shift after processing the switch 99 | # keyword. The script variable for a switch argument would 100 | # be initialized to "false" or the empty string and if the 101 | # switch is provided on the command line it would be assigned 102 | # "true". 103 | # 104 | while (( $# > 0 )); do 105 | arg=$1 106 | case $arg in 107 | -h|--help ) usage; exit 0 108 | ;; 109 | 110 | -backup-home|--backup-home ) backupHome=$2; shift 111 | ;; 112 | 113 | -retain|--retain ) retainCount=$2; shift 114 | ;; 115 | 116 | * ) usage; 117 | info $LINENO "ERROR: Unknown option: $arg in command line." 118 | exit 1 119 | ;; 120 | esac 121 | # shift to next key-value pair 122 | shift 123 | done 124 | 125 | if [ -z "$backupHome" ]; then 126 | backupHome="${PWD}/backups" 127 | fi 128 | info $LINENO "Backup home directory is: $backupHome" 129 | 130 | if [ -z "$retainCount" ]; then 131 | retainCount=5 132 | fi 133 | info $LINENO "Retaining $retainCount backup directories." 
134 | 135 | allBackupDirs=$( ls "${backupHome}" | grep icp-cloudant-backup ) 136 | keepBackupDirs=$( ls -rt "${backupHome}" | grep icp-cloudant-backup | tail -${retainCount} ) 137 | 138 | for backupDir in $keepBackupDirs; do 139 | info $LINENO "Keeping backup directory: ${backupHome}/$backupDir" 140 | done 141 | 142 | for backupDir in $allBackupDirs; do 143 | if ! $(member "$backupDir" "$keepBackupDirs"); then 144 | rm -rf "${backupHome}/${backupDir}" 145 | info $LINENO "Removed backup directory: ${backupHome}/$backupDir" 146 | fi 147 | done 148 | 149 | info $LINENO "END $SCRIPT" 150 | -------------------------------------------------------------------------------- /src/mariadb-backup/1.0/get-database-names.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Licensed Material - Property of IBM 4 | # 5724-I63, 5724-H88, (C) Copyright IBM Corp. 2018 - All Rights Reserved. 5 | # US Government Users Restricted Rights - Use, duplication or disclosure 6 | # restricted by GSA ADP Schedule Contract with IBM Corp. 7 | # 8 | # DISCLAIMER: 9 | # The following source code is sample code created by IBM Corporation. 10 | # This sample code is provided to you solely for the purpose of assisting you 11 | # in the use of the product. The code is provided 'AS IS', without warranty or 12 | # condition of any kind. IBM shall not be liable for any damages arising out of 13 | # your use of the sample code, even if IBM has been advised of the possibility 14 | # of such damages. 15 | # 16 | # DESCRIPTION: 17 | # Get a list of all the ICP MariaDB databases defined and write it to stdout. 18 | # It is handy to be able to quickly see the list of databases names for testing. 19 | # 20 | # Pre-reqs: 21 | # 1. bash is needed for various scripting conventions 22 | # Experiments with Ash in Alpine showed that bash is needed. 23 | # 2. kubectl is needed to interact with the ICP cluster. 24 | # 3. jq is needed to do JSON parsing. 
25 | # 26 | # 27 | # INPUTS: 28 | # 1. Kubernetes service host name or host name (FQDN) or IP address of the 29 | # MariaDB server. (optional) 30 | # Defaults to mariadb.kube-system. 31 | # If running outside of a container, then a machine host name needs to be 32 | # passed in, even localhost. 33 | # 34 | # Assumptions: 35 | # 1. The user has a current kubernetes context for the admin user. 36 | # 37 | # 2. If a MariaDB service host name is not provided it is assumed 38 | # this script is being run in a container and mariadb.kube-system 39 | # is used as the service host name. 40 | # 41 | 42 | function usage { 43 | echo "" 44 | echo "Usage: get-database-names.sh [options]" 45 | echo " --dbhost - (optional) Host name or IP address of the MariaDB service provider" 46 | echo " Defaults to mariadb.kube-system" 47 | echo "" 48 | echo " --help|-h - emit this usage information" 49 | echo "" 50 | echo " - and -- are accepted as keyword argument indicators" 51 | echo "" 52 | echo "Sample invocations:" 53 | echo " ./get-database-names.sh" 54 | echo " ./get-database-names.sh --dbhost master01.xxx.yyy" 55 | echo "" 56 | } 57 | 58 | # import helper functions 59 | . ./helper-functions.sh 60 | 61 | # MAIN 62 | SCRIPT=${0##*/} 63 | 64 | info $LINENO "BEGIN $SCRIPT" 65 | 66 | dbhost="" 67 | 68 | # process the input args 69 | # For keyword-value arguments the arg gets the keyword and 70 | # the case statement assigns the value to a script variable. 71 | # If any "switch" args are added to the command line args, 72 | # then it wouldn't need a shift after processing the switch 73 | # keyword. The script variable for a switch argument would 74 | # be initialized to "false" or the empty string and if the 75 | # switch is provided on the command line it would be assigned 76 | # "true". 
77 | # 78 | while (( $# > 0 )); do 79 | arg=$1 80 | case $arg in 81 | -h|--help ) usage; exit 82 | ;; 83 | 84 | -dbhost|--dbhost) dbhost=$2; shift 85 | ;; 86 | 87 | * ) usage; info $LINENO "ERROR: Unknown option: $arg in command line." 88 | exit 1 89 | ;; 90 | esac 91 | # shift to next key-value pair 92 | shift 93 | done 94 | 95 | if [ -z "$dbhost" ]; then 96 | dbhost=mariadb.kube-system 97 | fi 98 | info $LINENO "MariaDB host: $dbhost" 99 | 100 | allDBs=$(getDatabaseNames $dbhost) 101 | 102 | if [ -z "$allDBs" ]; then 103 | info $LINENO "No databases are defined in the MariaDB instance hosted by: $dbhost" 104 | info $LINENO "END $SCRIPT" 105 | else 106 | info $LINENO "END $SCRIPT" 107 | info $LINENO "ICP MariaDB database names:" 108 | echo "\"$allDBs\"" 109 | fi 110 | -------------------------------------------------------------------------------- /src/mariadb-backup/1.0/helper-functions.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Licensed Material - Property of IBM 3 | # 5724-I63, 5724-H88, (C) Copyright IBM Corp. 2018 - All Rights Reserved. 4 | # US Government Users Restricted Rights - Use, duplication or disclosure 5 | # restricted by GSA ADP Schedule Contract with IBM Corp. 6 | # 7 | # DISCLAIMER: 8 | # The following source code is sample code created by IBM Corporation. 9 | # This sample code is provided to you solely for the purpose of assisting you 10 | # in the use of the product. The code is provided 'AS IS', without warranty or 11 | # condition of any kind. IBM shall not be liable for any damages arising out of 12 | # your use of the sample code, even if IBM has been advised of the possibility 13 | # of such damages. 14 | 15 | # DESCRIPTION: 16 | # Functions to assist with mariadb backup and restore. 17 | # Sourced by other scripts that need to use these functions. 18 | # 19 | # Pre-reqs: 20 | # 1. 
bash is needed for various scripting conventions 21 | # Experiments with Ash in Alpine showed that bash is needed. 22 | # 2. kubectl is needed to interact with the ICP cluster. 23 | # 3. jq is needed to do JSON parsing. 24 | # 25 | # ASSUMPTIONS: 26 | # 1. If running externally (outside a container) kubectl login context has 27 | # been established. 28 | # 2. If running in a container a kubectl "just works" without a login. 29 | # 30 | 31 | # The info() function is used to emit log messages. 32 | # It is assumed that SCRIPT is set in the caller. 33 | function info { 34 | local lineno=$1; shift 35 | local ts=$(date +[%Y/%m/%d-%T]) 36 | echo "$ts $SCRIPT($lineno) $*" 37 | } 38 | 39 | 40 | # member() returns 0 if the first argument is a member of the second argument. 41 | # $1 is the string that represents the item of interest 42 | # $2 is the string that represents a list of items separated by space characters. 43 | # If item is in list the status 0 is returned otherwise status 1 is returned. 44 | # NOTE: When using member() in a condition do not use [ ] or [[ ]] expressions. 45 | # Example: if $(member "A" "a B C d A"); then 46 | # echo "A is a member" 47 | # else 48 | # echo "A is not a member" 49 | # fi 50 | # if ! $(member "A" "a B C d A"); when you need not member(). 
51 | # 52 | # It is likely the list parameter is going to be a shell variable, 53 | # in which cast it must be double quoted on the invocation: 54 | # $(member $item "${some_list}") 55 | # 56 | function member() { 57 | local item=$1 58 | local list=$2 59 | 60 | rc=1 61 | for x in $list; do 62 | if [ "$x" == "$item" ]; then 63 | rc=0 64 | break 65 | fi 66 | done 67 | 68 | return $rc 69 | } 70 | 71 | 72 | getMariaDBPassword () { 73 | ### Get the mariadb password from kube secret 74 | local mariadb_password=$(kubectl get secret platform-mariadb-credentials --namespace=kube-system -o json | jq '.["data"]["OAUTH2DB_PASSWORD"]') 75 | 76 | # Strip leading and trailing double quotes 77 | mariadb_password=${mariadb_password#\"} 78 | mariadb_password=${mariadb_password%\"} 79 | 80 | echo $mariadb_password | base64 -d 81 | } 82 | 83 | 84 | getMariaDBUser () { 85 | ### Get the mariadb user from kube secret 86 | local mariadb_user=$(kubectl get secret platform-mariadb-credentials --namespace=kube-system -o json | jq '.["data"]["OAUTH2DB_USER"]') 87 | 88 | # Strip leading and trailing double quotes 89 | mariadb_user=${mariadb_user#\"} 90 | mariadb_user=${mariadb_user%\"} 91 | 92 | echo $mariadb_user | base64 -d 93 | } 94 | 95 | 96 | getMariaDBPort () { 97 | local port=$(kubectl --namespace kube-system get service/mariadb -o json| jq '.["spec"]["ports"][0]["port"]') 98 | 99 | echo $port 100 | } 101 | 102 | 103 | getDatabaseNames () { 104 | # $1 is MariaDB host name or IP address. 105 | # localhost is valid if running script on MariaDB host. 106 | # mariadb.kube-system is valid when running in a container. 107 | # If the container is running in the kube-system namespace, then mariadb is sufficient. 
108 | local user=$(getMariaDBUser) 109 | local password=$(getMariaDBPassword) 110 | local host=$1 111 | local port=$(getMariaDBPort) 112 | 113 | if [ -z "$host" ]; then 114 | host=mariadb.kube-system 115 | fi 116 | 117 | local allDBs=$(mysql --host=$host --port=$port --user=$user --password=$password -e 'show databases') 118 | 119 | # Replace newlines with space character and remove double quotes 120 | allDBs=$(echo "$allDBs" | tr '\n' ' ' | tr -d '"') 121 | 122 | # Remove the word Database. 123 | allDBs="${allDBs//Database/}" 124 | 125 | # Remove leading and trailing spaces. 126 | allDBs=$(echo "$allDBs" | awk '{$1=$1};1') 127 | 128 | echo "$allDBs" 129 | } 130 | 131 | 132 | exportAllDBNames () { 133 | # Export the names of all databases in the MariaDB instance. 134 | # $1 is the MariaDB host name or IP address 135 | # localhost is valid if running script on MariaDB host. 136 | # $2 is the path to directory where databases names are to be exported 137 | local dbhost=$1 138 | local destDir=$2 139 | 140 | if [ -z "$destDir" ]; then 141 | destDir="$PWD" 142 | fi 143 | 144 | local allDBs=$(getDatabaseNames $dbhost) 145 | local dest="$destDir/dbnames.sh" 146 | 147 | if [ -f "$dest" ]; then 148 | # dbnames.sh already exists 149 | exported=$(grep ALL_DBS "$dest") 150 | if [ -z "$exported" ]; then 151 | # ALL_DBS not written in dbnames.sh, append it 152 | echo "export ALL_DBS=\"$allDBs\"" >> "$dest" 153 | fi 154 | else 155 | # Create dbnames.sh and write ALL_DBS to it 156 | echo "export ALL_DBS=\"$allDBs\"" > "$dest" 157 | chmod +x "$dest" 158 | fi 159 | } 160 | 161 | 162 | exportDBnames () { 163 | # Export the given dbnames to dbnames.sh in the given directory. 164 | # INPUTS: 165 | # 1. Quoted string space separated list of database names. 166 | # 2. Destination directory path. If not provided, current working 167 | # directory is used. 
168 | 169 | local dbnames=$1 170 | local destDir=$2 171 | 172 | if [ -z "$destDir" ]; then 173 | destDir=$PWD 174 | fi 175 | 176 | local dest="$destDir/dbnames.sh" 177 | 178 | if [ -f "$dest" ]; then 179 | # dbnames.sh already exists 180 | # BUGFIX: grep -q suppresses all output, so the command substitution was always empty and the export line was appended again on every run; grep without -q (as in exportAllDBNames) captures the match. exported=$(grep BACKED_UP_DBNAMES "$dest") 181 | if [ -z "$exported" ]; then 182 | # BACKED_UP_DBNAMES not written in dbnames.sh, append it 183 | echo "export BACKED_UP_DBNAMES=\"$dbnames\"" >> "$dest" 184 | fi 185 | else 186 | # Create dbnames.sh and write BACKED_UP_DBNAMES to it 187 | echo "export BACKED_UP_DBNAMES=\"$dbnames\"" > "$dest" 188 | chmod +x "$dest" 189 | fi 190 | } 191 | 192 | 193 | makeBackupFilePath () { 194 | # Return the full path of the file name with the backup for the given database name. 195 | # $1 is the backup directory path 196 | # $2 is the MariaDB database name 197 | local backupDir=${1%/} 198 | local fileName="$2-backup.sql" 199 | echo "${backupDir}/${fileName}" 200 | } 201 | 202 | createDatabase () { 203 | # Create a Cloudant database 204 | # $1 is the host name of the Cloudant DB instance 205 | # localhost is valid if the script is run on the instance host. 206 | # $2 is the database name 207 | # 208 | # Both parameters are required. # NOTE(review): getCloudantURL and coucher are not defined in this MariaDB helper file; this function appears to be a Cloudant leftover — confirm before use. 209 | 210 | local dbhost=$1 211 | local dbname=$2 212 | 213 | local cloudantURL=$(getCloudantURL $dbhost) 214 | 215 | coucher database -c $cloudantURL -a create -d $dbname 216 | 217 | } 218 | 219 | 220 | deleteDatabase () { 221 | # Delete a Cloudant database 222 | # $1 is the host name of the Cloudant DB instance 223 | # localhost is valid if the script is run on the instance host. 224 | # $2 is the database name 225 | # 226 | # Both parameters are required.
227 | 228 | local dbhost=$1 229 | local dbname=$2 230 | 231 | local cloudantURL=$(getCloudantURL $dbhost) 232 | 233 | coucher database -c $cloudantURL -a delete -d $dbname 234 | 235 | } 236 | -------------------------------------------------------------------------------- /src/mariadb-backup/Dockerfile-1.0: -------------------------------------------------------------------------------- 1 | # Sample invocation: 2 | # From the directory of the Dockerfile-#.# 3 | # docker build -f Dockerfile-1.0 --build-arg version=1.0 -t ibmcase/icp-mariadb-backup . 4 | # 5 | FROM alpine 6 | ARG version 7 | 8 | # bash is needed for the various scripting idioms used in the shell scripts 9 | # Experiments showed that the Alpine default shell (Almquist shell) is not sufficient. 10 | # curl is needed to install kubectl 11 | # The jq utility is used to parse the JSON that is returned from kubectl commands. 12 | # 13 | # 14 | RUN apk add --update bash curl jq mysql-client 15 | 16 | # Kubectl is needed to get information from mariadb service and from platform-mariadb-credentials secret 17 | RUN curl --silent -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl \ 18 | && chmod +x ./kubectl \ 19 | && mv ./kubectl /usr/local/bin/kubectl 20 | 21 | # If the +x bit is on in the source then it will be retained in the image. 22 | COPY $version/*.sh / 23 | 24 | CMD [ "/get-database-names.sh", "--dbhost", "mariadb.kube-system" \ 25 | ] 26 | --------------------------------------------------------------------------------